- Update to 3.11.14:

- Security
    - gh-139700: Check consistency of the zip64 end of central
      directory record. Support records with “zip64 extensible data”
      if there are no bytes prepended to the ZIP file.
    - gh-139400: xml.parsers.expat: Make sure that parent Expat
      parsers are only garbage-collected once they are no longer
      referenced by subparsers created by
      ExternalEntityParserCreate(). Patch by Sebastian Pipping.
    - gh-135661: Fix parsing start and end tags in
      html.parser.HTMLParser according to the HTML5 standard.
      * Whitespaces no longer accepted between </ and the tag name. E.g.
        </ script> does not end the script section.
      * Vertical tabulation (\v) and non-ASCII whitespaces no longer
        recognized as whitespaces. The only whitespaces are \t\n\r\f and
        space.
      * Null character (U+0000) no longer ends the tag name.
      * Attributes and slashes after the tag name in end tags are now
        ignored, instead of terminating after the first > in quoted
        attribute value. E.g. </script/foo=">"/>.
      * Multiple slashes and whitespaces between the last attribute and
        closing > are now ignored in both start and end tags. E.g. <a
        foo=bar/ //>.
      * Multiple = between attribute name and value are no longer
        collapsed. E.g. <a foo==bar> produces attribute “foo” with value
        “=bar”.
    - gh-135661: Fix CDATA section parsing in html.parser.HTMLParser
      according to the HTML5 standard: ] ]> and ]] > no longer end the
      CDATA section. Add private method _set_support_cdata() which can
      be used to specify how to parse <![CDATA[ -- as a CDATA section in
      foreign content (SVG or MathML) or as a bogus comment in the
      HTML namespace.

OBS-URL: https://build.opensuse.org/package/show/devel:languages:python:Factory/python311?expand=0&rev=199
Commit fb669c4584 (parent 8c7f831926), committed by Git OBS Bridge on 2025-10-16 16:27:30 +00:00.
8 changed files with 65 additions and 414 deletions.
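The gh-135661 entries above are easiest to appreciate with a quick interactive check. Below is a minimal, hypothetical sketch; it is not part of this package or of the upstream patches, the EventCollector and show helpers are invented for illustration, and it assumes an interpreter that already carries the 3.11.14 fixes.

from html.parser import HTMLParser


class EventCollector(HTMLParser):
    """Record parser events so the handling of malformed markup is visible."""

    def __init__(self):
        super().__init__()
        self.events = []

    def handle_starttag(self, tag, attrs):
        self.events.append(("starttag", tag, attrs))

    def handle_endtag(self, tag):
        self.events.append(("endtag", tag))

    def handle_comment(self, data):
        self.events.append(("comment", data))

    def handle_data(self, data):
        self.events.append(("data", data))


def show(source):
    parser = EventCollector()
    parser.feed(source)
    parser.close()
    print(f"{source!r} -> {parser.events}")


# Whitespace between "</" and the tag name is no longer accepted,
# so "</ script>" does not terminate the <script> element.
show('<script>x</ script></script>')
# Attributes and slashes after the tag name of an end tag are ignored
# instead of ending the tag at the first ">" inside a quoted value.
show('</script/foo=">"/>')
# Multiple "=" between attribute name and value are no longer collapsed:
# the attribute "foo" gets the value "=bar".
show('<a foo==bar>')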

Deleted (upstreamed): CVE-2025-6069-quad-complex-HTMLParser.patch
@@ -1,190 +0,0 @@
From 9043edabc7e2f0dd655146e0a4571e2a0b2906af Mon Sep 17 00:00:00 2001
From: Serhiy Storchaka <storchaka@gmail.com>
Date: Fri, 13 Jun 2025 19:57:48 +0300
Subject: [PATCH] gh-135462: Fix quadratic complexity in processing special
input in HTMLParser (GH-135464)
End-of-file errors are now handled according to the HTML5 specs --
comments and declarations are automatically closed, tags are ignored.
(cherry picked from commit 6eb6c5dbfb528bd07d77b60fd71fd05d81d45c41)
Co-authored-by: Serhiy Storchaka <storchaka@gmail.com>
---
Lib/html/parser.py | 41 +++++---
Lib/test/test_htmlparser.py | 51 +++++++---
Misc/NEWS.d/next/Security/2025-06-13-15-55-22.gh-issue-135462.KBeJpc.rst | 4
3 files changed, 74 insertions(+), 22 deletions(-)
create mode 100644 Misc/NEWS.d/next/Security/2025-06-13-15-55-22.gh-issue-135462.KBeJpc.rst
Index: Python-3.11.13/Lib/html/parser.py
===================================================================
--- Python-3.11.13.orig/Lib/html/parser.py 2025-07-02 18:12:07.084569398 +0200
+++ Python-3.11.13/Lib/html/parser.py 2025-07-02 18:12:12.582519793 +0200
@@ -25,6 +25,7 @@
charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]')
starttagopen = re.compile('<[a-zA-Z]')
+endtagopen = re.compile('</[a-zA-Z]')
piclose = re.compile('>')
commentclose = re.compile(r'--\s*>')
# Note:
@@ -176,7 +177,7 @@
                     k = self.parse_pi(i)
                 elif startswith("<!", i):
                     k = self.parse_html_declaration(i)
-                elif (i + 1) < n:
+                elif (i + 1) < n or end:
                     self.handle_data("<")
                     k = i + 1
                 else:
@@ -184,17 +185,35 @@
                 if k < 0:
                     if not end:
                         break
-                    k = rawdata.find('>', i + 1)
-                    if k < 0:
-                        k = rawdata.find('<', i + 1)
-                        if k < 0:
-                            k = i + 1
+                    if starttagopen.match(rawdata, i): # < + letter
+                        pass
+                    elif startswith("</", i):
+                        if i + 2 == n:
+                            self.handle_data("</")
+                        elif endtagopen.match(rawdata, i): # </ + letter
+                            pass
+                        else:
+                            # bogus comment
+                            self.handle_comment(rawdata[i+2:])
+                    elif startswith("<!--", i):
+                        j = n
+                        for suffix in ("--!", "--", "-"):
+                            if rawdata.endswith(suffix, i+4):
+                                j -= len(suffix)
+                                break
+                        self.handle_comment(rawdata[i+4:j])
+                    elif startswith("<![CDATA[", i):
+                        self.unknown_decl(rawdata[i+3:])
+                    elif rawdata[i:i+9].lower() == '<!doctype':
+                        self.handle_decl(rawdata[i+2:])
+                    elif startswith("<!", i):
+                        # bogus comment
+                        self.handle_comment(rawdata[i+2:])
+                    elif startswith("<?", i):
+                        self.handle_pi(rawdata[i+2:])
                     else:
-                        k += 1
-                    if self.convert_charrefs and not self.cdata_elem:
-                        self.handle_data(unescape(rawdata[i:k]))
-                    else:
-                        self.handle_data(rawdata[i:k])
+                        raise AssertionError("we should not get here!")
+                    k = n
                 i = self.updatepos(i, k)
             elif startswith("&#", i):
                 match = charref.match(rawdata, i)
Index: Python-3.11.13/Lib/test/test_htmlparser.py
===================================================================
--- Python-3.11.13.orig/Lib/test/test_htmlparser.py 2025-07-02 18:12:08.523658593 +0200
+++ Python-3.11.13/Lib/test/test_htmlparser.py 2025-07-02 18:13:32.674943007 +0200
@@ -4,6 +4,8 @@
import pprint
import unittest
+from test import support
+
class EventCollector(html.parser.HTMLParser):
@@ -391,28 +393,34 @@
             ('data', '<'),
             ('starttag', 'bc<', [('a', None)]),
             ('endtag', 'html'),
-            ('data', '\n<img src="URL>'),
-            ('comment', '/img'),
-            ('endtag', 'html<')])
+            ('data', '\n')])
 
     def test_starttag_junk_chars(self):
+        self._run_check("<", [('data', '<')])
+        self._run_check("<>", [('data', '<>')])
+        self._run_check("< >", [('data', '< >')])
+        self._run_check("< ", [('data', '< ')])
         self._run_check("</>", [])
+        self._run_check("<$>", [('data', '<$>')])
         self._run_check("</$>", [('comment', '$')])
         self._run_check("</", [('data', '</')])
-        self._run_check("</a", [('data', '</a')])
+        self._run_check("</a", [])
+        self._run_check("</ a>", [('endtag', 'a')])
+        self._run_check("</ a", [('comment', ' a')])
         self._run_check("<a<a>", [('starttag', 'a<a', [])])
         self._run_check("</a<a>", [('endtag', 'a<a')])
-        self._run_check("<!", [('data', '<!')])
-        self._run_check("<a", [('data', '<a')])
-        self._run_check("<a foo='bar'", [('data', "<a foo='bar'")])
-        self._run_check("<a foo='bar", [('data', "<a foo='bar")])
-        self._run_check("<a foo='>'", [('data', "<a foo='>'")])
-        self._run_check("<a foo='>", [('data', "<a foo='>")])
+        self._run_check("<!", [('comment', '')])
+        self._run_check("<a", [])
+        self._run_check("<a foo='bar'", [])
+        self._run_check("<a foo='bar", [])
+        self._run_check("<a foo='>'", [])
+        self._run_check("<a foo='>", [])
         self._run_check("<a$>", [('starttag', 'a$', [])])
         self._run_check("<a$b>", [('starttag', 'a$b', [])])
         self._run_check("<a$b/>", [('startendtag', 'a$b', [])])
         self._run_check("<a$b >", [('starttag', 'a$b', [])])
         self._run_check("<a$b />", [('startendtag', 'a$b', [])])
+        self._run_check("</a$b>", [('endtag', 'a$b')])
 
     def test_slashes_in_starttag(self):
         self._run_check('<a foo="var"/>', [('startendtag', 'a', [('foo', 'var')])])
@@ -549,8 +557,9 @@
             ('comment', ' -- close enough --'),
             ('comment', ''),
             ('comment', '<-- this was an empty comment'),
-            ('comment', '!! another bogus comment !!!'),
+            ('comment', '!! another bogus comment !!!')
             ]
+
         self._run_check(html, expected)
 
     def test_broken_condcoms(self):
@@ -598,6 +607,26 @@
             ('endtag', 'a'), ('data', ' bar & baz')]
         )
 
+    @support.requires_resource('cpu')
+    def test_eof_no_quadratic_complexity(self):
+        # Each of these examples used to take about an hour.
+        # Now they take a fraction of a second.
+        def check(source):
+            parser = html.parser.HTMLParser()
+            parser.feed(source)
+            parser.close()
+        n = 120_000
+        check("<a " * n)
+        check("<a a=" * n)
+        check("</a " * 14 * n)
+        check("</a a=" * 11 * n)
+        check("<!--" * 4 * n)
+        check("<!" * 60 * n)
+        check("<?" * 19 * n)
+        check("</$" * 15 * n)
+        check("<![CDATA[" * 9 * n)
+        check("<!doctype" * 35 * n)
+
 
class AttributesTestCase(TestCaseBase):
Index: Python-3.11.13/Misc/NEWS.d/next/Security/2025-06-13-15-55-22.gh-issue-135462.KBeJpc.rst
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ Python-3.11.13/Misc/NEWS.d/next/Security/2025-06-13-15-55-22.gh-issue-135462.KBeJpc.rst 2025-07-02 18:12:12.583386736 +0200
@@ -0,0 +1,4 @@
+Fix quadratic complexity in processing specially crafted input in
+:class:`html.parser.HTMLParser`. End-of-file errors are now handled according
+to the HTML5 specs -- comments and declarations are automatically closed,
+tags are ignored.
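The patch above is the upstream fix for gh-135462, shipped in this package as CVE-2025-6069-quad-complex-HTMLParser.patch. A rough, hypothetical way to observe the effect outside the test suite is to time HTMLParser on the same kind of truncated markup that the new test_eof_no_quadratic_complexity feeds; the repetition count below is arbitrary and deliberately smaller than the 120_000 used upstream.

import time
from html.parser import HTMLParser


def parse_and_close(source):
    # On a fixed interpreter the EOF handling closes comments and
    # declarations and ignores dangling tags in linear time.
    parser = HTMLParser()
    parser.feed(source)
    parser.close()


for payload in ("<!--" * 20_000, "<a " * 20_000, "</a " * 20_000):
    start = time.perf_counter()
    parse_and_close(payload)
    elapsed = time.perf_counter() - start
    print(f"{payload[:8]!r}... repeated 20000 times -> {elapsed:.3f}s")

On a patched interpreter each run should finish in a fraction of a second; per the comment in the upstream test, the same inputs at full size used to take about an hour.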

Deleted (upstreamed): CVE-2025-8194-tarfile-no-neg-offsets.patch
@@ -1,212 +0,0 @@
From cb3519590c62f9b1abf7f31b92ec37d4b725ce15 Mon Sep 17 00:00:00 2001
From: Alexander Urieles <aeurielesn@users.noreply.github.com>
Date: Mon, 28 Jul 2025 17:37:26 +0200
Subject: [PATCH] gh-130577: tarfile now validates archives to ensure member
offsets are non-negative (GH-137027) (cherry picked from commit
7040aa54f14676938970e10c5f74ea93cd56aa38)
Co-authored-by: Alexander Urieles <aeurielesn@users.noreply.github.com>
Co-authored-by: Gregory P. Smith <greg@krypto.org>
---
Lib/tarfile.py | 3
Lib/test/test_tarfile.py | 156 ++++++++++
Misc/NEWS.d/next/Library/2025-07-23-00-35-29.gh-issue-130577.c7EITy.rst | 3
3 files changed, 162 insertions(+)
create mode 100644 Misc/NEWS.d/next/Library/2025-07-23-00-35-29.gh-issue-130577.c7EITy.rst
Index: Python-3.11.13/Lib/tarfile.py
===================================================================
--- Python-3.11.13.orig/Lib/tarfile.py 2025-08-01 22:21:29.158050900 +0200
+++ Python-3.11.13/Lib/tarfile.py 2025-08-01 22:21:33.121079687 +0200
@@ -1613,6 +1613,9 @@
"""Round up a byte count by BLOCKSIZE and return it,
e.g. _block(834) => 1024.
"""
+ # Only non-negative offsets are allowed
+ if count < 0:
+ raise InvalidHeaderError("invalid offset")
blocks, remainder = divmod(count, BLOCKSIZE)
if remainder:
blocks += 1
Index: Python-3.11.13/Lib/test/test_tarfile.py
===================================================================
--- Python-3.11.13.orig/Lib/test/test_tarfile.py 2025-08-01 22:21:30.644301786 +0200
+++ Python-3.11.13/Lib/test/test_tarfile.py 2025-08-01 22:21:33.121718600 +0200
@@ -50,6 +50,7 @@
xzname = os.path.join(TEMPDIR, "testtar.tar.xz")
tmpname = os.path.join(TEMPDIR, "tmp.tar")
dotlessname = os.path.join(TEMPDIR, "testtar")
+SPACE = b" "
sha256_regtype = (
"e09e4bc8b3c9d9177e77256353b36c159f5f040531bbd4b024a8f9b9196c71ce"
@@ -4386,6 +4387,161 @@
ar.extractall(self.testdir, filter='fully_trusted')
+class OffsetValidationTests(unittest.TestCase):
+ tarname = tmpname
+ invalid_posix_header = (
+ # name: 100 bytes
+ tarfile.NUL * tarfile.LENGTH_NAME
+ # mode, space, null terminator: 8 bytes
+ + b"000755" + SPACE + tarfile.NUL
+ # uid, space, null terminator: 8 bytes
+ + b"000001" + SPACE + tarfile.NUL
+ # gid, space, null terminator: 8 bytes
+ + b"000001" + SPACE + tarfile.NUL
+ # size, space: 12 bytes
+ + b"\xff" * 11 + SPACE
+ # mtime, space: 12 bytes
+ + tarfile.NUL * 11 + SPACE
+ # chksum: 8 bytes
+ + b"0011407" + tarfile.NUL
+ # type: 1 byte
+ + tarfile.REGTYPE
+ # linkname: 100 bytes
+ + tarfile.NUL * tarfile.LENGTH_LINK
+ # magic: 6 bytes, version: 2 bytes
+ + tarfile.POSIX_MAGIC
+ # uname: 32 bytes
+ + tarfile.NUL * 32
+ # gname: 32 bytes
+ + tarfile.NUL * 32
+ # devmajor, space, null terminator: 8 bytes
+ + tarfile.NUL * 6 + SPACE + tarfile.NUL
+ # devminor, space, null terminator: 8 bytes
+ + tarfile.NUL * 6 + SPACE + tarfile.NUL
+ # prefix: 155 bytes
+ + tarfile.NUL * tarfile.LENGTH_PREFIX
+ # padding: 12 bytes
+ + tarfile.NUL * 12
+ )
+ invalid_gnu_header = (
+ # name: 100 bytes
+ tarfile.NUL * tarfile.LENGTH_NAME
+ # mode, null terminator: 8 bytes
+ + b"0000755" + tarfile.NUL
+ # uid, null terminator: 8 bytes
+ + b"0000001" + tarfile.NUL
+ # gid, space, null terminator: 8 bytes
+ + b"0000001" + tarfile.NUL
+ # size, space: 12 bytes
+ + b"\xff" * 11 + SPACE
+ # mtime, space: 12 bytes
+ + tarfile.NUL * 11 + SPACE
+ # chksum: 8 bytes
+ + b"0011327" + tarfile.NUL
+ # type: 1 byte
+ + tarfile.REGTYPE
+ # linkname: 100 bytes
+ + tarfile.NUL * tarfile.LENGTH_LINK
+ # magic: 8 bytes
+ + tarfile.GNU_MAGIC
+ # uname: 32 bytes
+ + tarfile.NUL * 32
+ # gname: 32 bytes
+ + tarfile.NUL * 32
+ # devmajor, null terminator: 8 bytes
+ + tarfile.NUL * 8
+ # devminor, null terminator: 8 bytes
+ + tarfile.NUL * 8
+ # padding: 167 bytes
+ + tarfile.NUL * 167
+ )
+ invalid_v7_header = (
+ # name: 100 bytes
+ tarfile.NUL * tarfile.LENGTH_NAME
+ # mode, space, null terminator: 8 bytes
+ + b"000755" + SPACE + tarfile.NUL
+ # uid, space, null terminator: 8 bytes
+ + b"000001" + SPACE + tarfile.NUL
+ # gid, space, null terminator: 8 bytes
+ + b"000001" + SPACE + tarfile.NUL
+ # size, space: 12 bytes
+ + b"\xff" * 11 + SPACE
+ # mtime, space: 12 bytes
+ + tarfile.NUL * 11 + SPACE
+ # chksum: 8 bytes
+ + b"0010070" + tarfile.NUL
+ # type: 1 byte
+ + tarfile.REGTYPE
+ # linkname: 100 bytes
+ + tarfile.NUL * tarfile.LENGTH_LINK
+ # padding: 255 bytes
+ + tarfile.NUL * 255
+ )
+ valid_gnu_header = tarfile.TarInfo("filename").tobuf(tarfile.GNU_FORMAT)
+ data_block = b"\xff" * tarfile.BLOCKSIZE
+
+ def _write_buffer(self, buffer):
+ with open(self.tarname, "wb") as f:
+ f.write(buffer)
+
+ def _get_members(self, ignore_zeros=None):
+ with open(self.tarname, "rb") as f:
+ with tarfile.open(
+ mode="r", fileobj=f, ignore_zeros=ignore_zeros
+ ) as tar:
+ return tar.getmembers()
+
+ def _assert_raises_read_error_exception(self):
+ with self.assertRaisesRegex(
+ tarfile.ReadError, "file could not be opened successfully"
+ ):
+ self._get_members()
+
+ def test_invalid_offset_header_validations(self):
+ for tar_format, invalid_header in (
+ ("posix", self.invalid_posix_header),
+ ("gnu", self.invalid_gnu_header),
+ ("v7", self.invalid_v7_header),
+ ):
+ with self.subTest(format=tar_format):
+ self._write_buffer(invalid_header)
+ self._assert_raises_read_error_exception()
+
+ def test_early_stop_at_invalid_offset_header(self):
+ buffer = self.valid_gnu_header + self.invalid_gnu_header + self.valid_gnu_header
+ self._write_buffer(buffer)
+ members = self._get_members()
+ self.assertEqual(len(members), 1)
+ self.assertEqual(members[0].name, "filename")
+ self.assertEqual(members[0].offset, 0)
+
+ def test_ignore_invalid_archive(self):
+ # 3 invalid headers with their respective data
+ buffer = (self.invalid_gnu_header + self.data_block) * 3
+ self._write_buffer(buffer)
+ members = self._get_members(ignore_zeros=True)
+ self.assertEqual(len(members), 0)
+
+ def test_ignore_invalid_offset_headers(self):
+ for first_block, second_block, expected_offset in (
+ (
+ (self.valid_gnu_header),
+ (self.invalid_gnu_header + self.data_block),
+ 0,
+ ),
+ (
+ (self.invalid_gnu_header + self.data_block),
+ (self.valid_gnu_header),
+ 1024,
+ ),
+ ):
+ self._write_buffer(first_block + second_block)
+ members = self._get_members(ignore_zeros=True)
+ self.assertEqual(len(members), 1)
+ self.assertEqual(members[0].name, "filename")
+ self.assertEqual(members[0].offset, expected_offset)
+
+
def setUpModule():
os_helper.unlink(TEMPDIR)
os.makedirs(TEMPDIR)
Index: Python-3.11.13/Misc/NEWS.d/next/Library/2025-07-23-00-35-29.gh-issue-130577.c7EITy.rst
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ Python-3.11.13/Misc/NEWS.d/next/Library/2025-07-23-00-35-29.gh-issue-130577.c7EITy.rst 2025-08-01 22:21:33.122108946 +0200
@@ -0,0 +1,3 @@
+:mod:`tarfile` now validates archives to ensure member offsets are
+non-negative. (Contributed by Alexander Enrique Urieles Nieto in
+:gh:`130577`.)
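The patch above (upstream gh-130577, shipped here as CVE-2025-8194-tarfile-no-neg-offsets.patch) makes the private TarFile._block() helper reject negative byte counts, so a crafted size field can no longer move the next-header offset backwards. A minimal sketch of that rounding logic, with block_roundup as a hypothetical stand-in for the private method shown in the hunk:

import tarfile


def block_roundup(count):
    # Round a byte count up to a multiple of tarfile.BLOCKSIZE (512 bytes),
    # mirroring the patched TarFile._block(): negative counts are rejected
    # instead of yielding a negative offset for the next header.
    if count < 0:
        raise tarfile.InvalidHeaderError("invalid offset")
    blocks, remainder = divmod(count, tarfile.BLOCKSIZE)
    if remainder:
        blocks += 1
    return blocks * tarfile.BLOCKSIZE


print(block_roundup(834))   # 1024, the docstring example from the hunk
print(block_roundup(1024))  # already block-aligned, stays 1024
try:
    block_roundup(-1)
except tarfile.InvalidHeaderError as exc:
    print("rejected:", exc)

With the guard in place, archives like the ones built in the new OffsetValidationTests fail to open with tarfile.ReadError instead of sending the reader to a negative offset.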

Deleted: Python-3.11.13.tar.xz (stored with Git LFS; binary file not shown)
Deleted: sigstore signature bundle for the old tarball
@@ -1 +0,0 @@
[single-line JSON removed: a sigstore bundle (application/vnd.dev.sigstore.bundle.v0.3+json) containing a base64-encoded certificate, a rekor.sigstore.dev transparency-log entry, and the message signature]

Added: Python-3.11.14.tar.xz (new file; 3-line Git LFS pointer shown below)

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8d3ed8ec5c88c1c95f5e558612a725450d2452813ddad5e58fdb1a53b1209b78
size 20326860

File diff suppressed because one or more lines are too long

Modified: package changelog (.changes)
@@ -1,3 +1,62 @@
-------------------------------------------------------------------
Wed Oct 15 08:52:35 UTC 2025 - Daniel Garcia <daniel.garcia@suse.com>
- Update to 3.11.14:
- Security
- gh-139700: Check consistency of the zip64 end of central
directory record. Support records with “zip64 extensible data”
if there are no bytes prepended to the ZIP file.
- gh-139400: xml.parsers.expat: Make sure that parent Expat
parsers are only garbage-collected once they are no longer
referenced by subparsers created by
ExternalEntityParserCreate(). Patch by Sebastian Pipping.
- gh-135661: Fix parsing start and end tags in
html.parser.HTMLParser according to the HTML5 standard.
* Whitespaces no longer accepted between </ and the tag name. E.g.
</ script> does not end the script section.
* Vertical tabulation (\v) and non-ASCII whitespaces no longer
recognized as whitespaces. The only whitespaces are \t\n\r\f and
space.
* Null character (U+0000) no longer ends the tag name.
* Attributes and slashes after the tag name in end tags are now
ignored, instead of terminating after the first > in quoted
attribute value. E.g. </script/foo=">"/>.
* Multiple slashes and whitespaces between the last attribute and
closing > are now ignored in both start and end tags. E.g. <a
foo=bar/ //>.
* Multiple = between attribute name and value are no longer
collapsed. E.g. <a foo==bar> produces attribute “foo” with value
“=bar”.
- gh-135661: Fix CDATA section parsing in html.parser.HTMLParser
according to the HTML5 standard: ] ]> and ]] > no longer end the
CDATA section. Add private method _set_support_cdata() which can
be used to specify how to parse <![CDATA[ -- as a CDATA section in
foreign content (SVG or MathML) or as a bogus comment in the
HTML namespace.
- gh-102555: Fix comment parsing in html.parser.HTMLParser
according to the HTML5 standard. --!> now ends the comment. -- >
no longer ends the comment. Support abnormally ended empty
comments <--> and <--->.
- gh-135462: Fix quadratic complexity in processing specially
crafted input in html.parser.HTMLParser. End-of-file errors are
now handled according to the HTML5 specs -- comments and
declarations are automatically closed, tags are ignored.
- gh-118350: Fix support of escapable raw text mode (elements
“textarea” and “title”) in html.parser.HTMLParser.
- gh-86155: html.parser.HTMLParser.close() no longer loses data
when the <script> tag is not closed. Patch by Waylan Limberg.
- Library
- gh-139312: Upgrade bundled libexpat to 2.7.3
- gh-138998: Update bundled libexpat to 2.7.2
- gh-130577: tarfile now validates archives to ensure member
offsets are non-negative. (Contributed by Alexander Enrique
Urieles Nieto in gh-130577.)
- gh-135374: Update the bundled copy of setuptools to 79.0.1.
- Drop upstreamed patches:
- CVE-2025-8194-tarfile-no-neg-offsets.patch
- CVE-2025-6069-quad-complex-HTMLParser.patch
-------------------------------------------------------------------
Mon Sep 29 06:52:07 UTC 2025 - Daniel Garcia <daniel.garcia@suse.com>
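The gh-102555 entry in the new changelog above changes when a comment terminates. A small, hypothetical check (again assuming a 3.11.14 interpreter; the CommentCollector class is invented for illustration):

from html.parser import HTMLParser


class CommentCollector(HTMLParser):
    # Collect only comment events to make the termination rules visible.
    def __init__(self):
        super().__init__()
        self.comments = []

    def handle_comment(self, data):
        self.comments.append(data)


# Per the entry: "--!>" now ends a comment, while "-- >" no longer does.
for source in ('<!--abruptly closed--!><p>x</p>',
               '<!--still one comment -- > spanning this text-->'):
    parser = CommentCollector()
    parser.feed(source)
    parser.close()
    print(f"{source!r} -> {parser.comments}")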

Modified: package spec file
@@ -107,7 +107,7 @@
 # _md5.cpython-38m-x86_64-linux-gnu.so
 %define dynlib() %{sitedir}/lib-dynload/%{1}.cpython-%{abi_tag}-%{archname}-%{_os}%{?_gnu}%{?armsuffix}.so
 Name: %{python_pkg_name}%{psuffix}
-Version: 3.11.13
+Version: 3.11.14
 Release: 0
 Summary: Python 3 Interpreter
 License: Python-2.0
@@ -186,14 +186,8 @@ Patch19: bso1227999-reproducible-builds.patch
 Patch22: gh120226-fix-sendfile-test-kernel-610.patch
 # PATCH-FIX-UPSTREAM Add platform triplets for 64-bit LoongArch gh#python/cpython#30939 glaubitz@suse.com
 Patch24: add-loongarch64-support.patch
-# PATCH-FIX-UPSTREAM CVE-2025-6069-quad-complex-HTMLParser.patch bsc#1244705 mcepl@suse.com
-# avoid quadratic complexity when processing malformed inputs with HTMLParser
-Patch25: CVE-2025-6069-quad-complex-HTMLParser.patch
-# PATCH-FIX-UPSTREAM CVE-2025-8194-tarfile-no-neg-offsets.patch bsc#1247249 mcepl@suse.com
-# tarfile now validates archives to ensure member offsets are non-negative
-Patch26: CVE-2025-8194-tarfile-no-neg-offsets.patch
 # PATCH-FIX-OPENSUSE gh139257-Support-docutils-0.22.patch gh#python/cpython#139257 daniel.garcia@suse.com
-Patch27: gh139257-Support-docutils-0.22.patch
+Patch25: gh139257-Support-docutils-0.22.patch
 BuildRequires: autoconf-archive
 BuildRequires: automake
 BuildRequires: crypto-policies-scripts