diff --git a/CVE-2023-24329-blank-URL-bypass.patch b/CVE-2023-24329-blank-URL-bypass.patch
index dad85a7..aadfe6a 100644
--- a/CVE-2023-24329-blank-URL-bypass.patch
+++ b/CVE-2023-24329-blank-URL-bypass.patch
@@ -1,22 +1,30 @@
 ---
- Lib/test/test_urlparse.py | 20 ++++++++++
- Lib/urlparse.py | 2 -
- Misc/NEWS.d/next/Library/2022-11-12-15-45-51.gh-issue-99418.FxfAXS.rs | 2 +
- 3 files changed, 23 insertions(+), 1 deletion(-)
+ Lib/test/test_urlparse.py | 21 ++++++++++
+ Lib/urlparse.py | 9 +++-
+ Misc/NEWS.d/next/Library/2022-11-12-15-45-51.gh-issue-99418.FxfAXS.rs | 2
+ 3 files changed, 30 insertions(+), 2 deletions(-)
 
---- a/Lib/test/test_urlparse.py
-+++ b/Lib/test/test_urlparse.py
-@@ -592,6 +592,26 @@ class UrlParseTestCase(unittest.TestCase
+Index: Python-2.7.18/Lib/test/test_urlparse.py
+===================================================================
+--- Python-2.7.18.orig/Lib/test/test_urlparse.py
++++ Python-2.7.18/Lib/test/test_urlparse.py
+@@ -1,4 +1,5 @@
+ from test import test_support
++from urlparse import isascii
+ import sys
+ import unicodedata
+ import unittest
+@@ -592,6 +593,26 @@ class UrlParseTestCase(unittest.TestCase
          self.assertEqual(p.netloc, "www.example.net:foo")
          self.assertRaises(ValueError, lambda: p.port)
  
 +    def do_attributes_bad_scheme(self, bytes, parse, scheme):
 +        url = scheme + "://www.example.net"
 +        if bytes:
-+            if url.isascii():
++            if isascii(url):
 +                url = url.encode("ascii")
 +            else:
-+                continue
++                return
 +        p = parse(url)
 +        if bytes:
 +            self.assertEqual(p.scheme, b"")
@@ -27,25 +35,50 @@
 +        """Check handling of invalid schemes."""
 +        for bytes in (False, True):
 +            for parse in (urlparse.urlsplit, urlparse.urlparse):
-+                for scheme in (".", "+", "-", "0", "http&", "६http"):
++                for scheme in (".", "+", "-", "0", "http&"):
 +                    self.do_attributes_bad_scheme(bytes, parse, scheme)
 +
      def test_attributes_without_netloc(self):
          # This example is straight from RFC 3261.  It looks like it
          # should allow the username, hostname, and port to be filled
---- a/Lib/urlparse.py
-+++ b/Lib/urlparse.py
-@@ -211,7 +211,7 @@ def urlsplit(url, scheme='', allow_fragm
+Index: Python-2.7.18/Lib/urlparse.py
+===================================================================
+--- Python-2.7.18.orig/Lib/urlparse.py
++++ Python-2.7.18/Lib/urlparse.py
+@@ -31,7 +31,8 @@ test_urlparse.py provides a good indicat
+ import re
+ 
+ __all__ = ["urlparse", "urlunparse", "urljoin", "urldefrag",
+-           "urlsplit", "urlunsplit", "parse_qs", "parse_qsl"]
++           "urlsplit", "urlunsplit", "parse_qs", "parse_qsl",
++           "isascii"]
+ 
+ # A classification of schemes ('' means apply by default)
+ uses_relative = ['ftp', 'http', 'gopher', 'nntp', 'imap',
+@@ -68,6 +69,10 @@ _UNSAFE_URL_BYTES_TO_REMOVE = ['\t', '\r
+ MAX_CACHE_SIZE = 20
+ _parse_cache = {}
+ 
++# Py3k shim
++def isascii(word):
++    return all([ord(c) < 128 for c in word])
++
+ def clear_cache():
+     """Clear the parse cache."""
+     _parse_cache.clear()
+@@ -211,7 +216,7 @@ def urlsplit(url, scheme='', allow_fragm
      clear_cache()
      netloc = query = fragment = ''
      i = url.find(':')
 -    if i > 0:
-+    if i > 0 and url[0].isascii() and url[0].isalpha():
++    if i > 0 and isascii(url[0]) and url[0].isalpha():
          if url[:i] == 'http': # optimize the common case
              scheme = url[:i].lower()
              url = url[i+1:]
+Index: Python-2.7.18/Misc/NEWS.d/next/Library/2022-11-12-15-45-51.gh-issue-99418.FxfAXS.rs
+===================================================================
 --- /dev/null
-+++ b/Misc/NEWS.d/next/Library/2022-11-12-15-45-51.gh-issue-99418.FxfAXS.rs
++++ Python-2.7.18/Misc/NEWS.d/next/Library/2022-11-12-15-45-51.gh-issue-99418.FxfAXS.rs
 @@ -0,0 +1,2 @@
 +Fix bug in :func:`urllib.parse.urlparse` that causes URL schemes that begin
 +with a digit, a plus sign, or a minus sign to be parsed incorrectly.
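
Below is a minimal sketch, not part of the patch itself, showing the behaviour the urlsplit() change above aims for: a ':' only introduces a scheme when the URL starts with an ASCII letter, so URLs whose first character is a digit, '+', or '-' fall through with an empty scheme instead of having a bogus scheme split off. The demo file name and sample URLs are illustrative assumptions; it is meant to run against the patched Python-2.7.18 urlparse module.

# demo_scheme_check.py -- illustrative only, assumes the patched Python 2.7 urlparse
from urlparse import urlsplit

for url in ("http://example.com/", "0http://example.com/", "+ssh://example.com/"):
    parts = urlsplit(url)
    # With the patch applied only the first URL yields scheme='http'; the other
    # two keep scheme='' and the whole string is treated as the path, whereas
    # the unpatched parser would have accepted '0http' and '+ssh' as schemes.
    print("%s -> scheme=%r netloc=%r path=%r" % (url, parts.scheme, parts.netloc, parts.path))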