forked from pool/python-Twisted
* 24.7.0.rc2 fixed an unreleased regression caused by PR 12109. (#12279) * twisted.web.util.redirectTo now HTML-escapes the provided URL in the fallback response body it returns (GHSA-cf56-g6w6-pqq2, CVE-2024-41810). (#9839) * The HTTP 1.0 and 1.1 server provided by twisted.web could process pipelined HTTP requests out-of-order, possibly resulting in information disclosure (CVE-2024-41671/GHSA-c8m8-j448-xjx7) (#12248) * twisted.protocols.ftp now supports the IPv6 extensions defined in RFC 2428. (#9645) * twisted.internet.defer.inlineCallbacks can now yield a coroutine. (#9972) * twisted.python._shellcomp.ZshArgumentsGenerator was updated for Python 3.13. (#12065) * twisted.web.wsgi request environment now contains the peer port number as `REMOTE_PORT`. (#12096) * twisted.internet.defer.Deferred.callback() and twisted.internet.defer.Deferred.addCallbacks() no longer use `assert` to check the type of the arguments. You should now use type checking to validate your code. These changes were done to reduce the CPU usage. (#12122) * Added two new methods, twisted.logger.Logger.failuresHandled and twisted.logger.Logger.failureHandler, which allow for more concise and convenient handling of exceptions when dispatching out to application code. The former can arbitrarily customize failure handling at the call site, and the latter can be used for performance-sensitive cases where no additional information needs to be logged. (#12188) * twisted.internet.defer.Deferred.addCallback now runs about 10% faster. (#12223) * twisted.internet.defer.Deferred error handling is now faster, taking 40% less time to run. (#12227) * twisted.internet.ssl.Certificate.__repr__ can now handle certificates without a common name (CN) in the certificate itself or the signing CA.
(#5851) * Type annotations have been added to twisted.conch.interfaces.IKnownHostEntry and its implementations, twisted.conch.client.knownhosts.PlainHost and twisted.conch.client.knownhosts.HashedHost, correcting a variety of type confusion issues throughout the conch client code. (#9713) * twisted.python.failure.Failure once again utilizes the custom pickling logic it used to in the past. (#12112) * twisted.conch.client.knownhosts.KnownHostsFile.verifyHostKey no longer logs OBS-URL: https://build.opensuse.org/package/show/devel:languages:python/python-Twisted?expand=0&rev=154
243 lines
9.1 KiB
Diff
243 lines
9.1 KiB
Diff
Index: twisted-24.3.0/src/twisted/web/http.py
|
|
===================================================================
|
|
--- twisted-24.3.0.orig/src/twisted/web/http.py
|
|
+++ twisted-24.3.0/src/twisted/web/http.py
|
|
@@ -1973,16 +1973,21 @@ class _ChunkedTransferDecoder:
|
|
@returns: C{False}, as there is either insufficient data to continue,
|
|
or no data remains.
|
|
"""
|
|
- if (
|
|
- self._receivedTrailerHeadersSize + len(self._buffer)
|
|
- > self._maxTrailerHeadersSize
|
|
- ):
|
|
- raise _MalformedChunkedDataError("Trailer headers data is too long.")
|
|
-
|
|
eolIndex = self._buffer.find(b"\r\n", self._start)
|
|
|
|
if eolIndex == -1:
|
|
# Still no end of network line marker found.
|
|
+ #
|
|
+ # Check if we've run up against the trailer size limit: if the next
|
|
+ # read contains the terminating CRLF then we'll have this many bytes
|
|
+ # of trailers (including the CRLFs).
|
|
+ minTrailerSize = (
|
|
+ self._receivedTrailerHeadersSize
|
|
+ + len(self._buffer)
|
|
+ + (1 if self._buffer.endswith(b"\r") else 2)
|
|
+ )
|
|
+ if minTrailerSize > self._maxTrailerHeadersSize:
|
|
+ raise _MalformedChunkedDataError("Trailer headers data is too long.")
|
|
# Continue processing more data.
|
|
return False
|
|
|
|
@@ -1992,6 +1997,8 @@ class _ChunkedTransferDecoder:
|
|
del self._buffer[0 : eolIndex + 2]
|
|
self._start = 0
|
|
self._receivedTrailerHeadersSize += eolIndex + 2
|
|
+ if self._receivedTrailerHeadersSize > self._maxTrailerHeadersSize:
|
|
+ raise _MalformedChunkedDataError("Trailer headers data is too long.")
|
|
return True
|
|
|
|
# eolIndex in this part of code is equal to 0
|
|
@@ -2315,8 +2322,8 @@ class HTTPChannel(basic.LineReceiver, po
|
|
self.__header = line
|
|
|
|
def _finishRequestBody(self, data):
|
|
- self.allContentReceived()
|
|
self._dataBuffer.append(data)
|
|
+ self.allContentReceived()
|
|
|
|
def _maybeChooseTransferDecoder(self, header, data):
|
|
"""
|
|
Index: twisted-24.3.0/src/twisted/web/newsfragments/12248.bugfix
|
|
===================================================================
|
|
--- /dev/null
|
|
+++ twisted-24.3.0/src/twisted/web/newsfragments/12248.bugfix
|
|
@@ -0,0 +1 @@
|
|
+The HTTP 1.0 and 1.1 server provided by twisted.web could process pipelined HTTP requests out-of-order, possibly resulting in information disclosure (CVE-2024-41671/GHSA-c8m8-j448-xjx7)
|
|
Index: twisted-24.3.0/src/twisted/web/test/test_http.py
|
|
===================================================================
|
|
--- twisted-24.3.0.orig/src/twisted/web/test/test_http.py
|
|
+++ twisted-24.3.0/src/twisted/web/test/test_http.py
|
|
@@ -135,7 +135,7 @@ class DummyHTTPHandler(http.Request):
|
|
data = self.content.read()
|
|
length = self.getHeader(b"content-length")
|
|
if length is None:
|
|
- length = networkString(str(length))
|
|
+ length = str(length).encode()
|
|
request = b"'''\n" + length + b"\n" + data + b"'''\n"
|
|
self.setResponseCode(200)
|
|
self.setHeader(b"Request", self.uri)
|
|
@@ -563,17 +563,23 @@ class HTTP0_9Tests(HTTP1_0Tests):
|
|
|
|
class PipeliningBodyTests(unittest.TestCase, ResponseTestMixin):
|
|
"""
|
|
- Tests that multiple pipelined requests with bodies are correctly buffered.
|
|
+ Pipelined requests get buffered and executed in the order received,
|
|
+ not processed in parallel.
|
|
"""
|
|
|
|
requests = (
|
|
b"POST / HTTP/1.1\r\n"
|
|
b"Content-Length: 10\r\n"
|
|
b"\r\n"
|
|
- b"0123456789POST / HTTP/1.1\r\n"
|
|
- b"Content-Length: 10\r\n"
|
|
- b"\r\n"
|
|
b"0123456789"
|
|
+ # Chunk encoded request.
|
|
+ b"POST / HTTP/1.1\r\n"
|
|
+ b"Transfer-Encoding: chunked\r\n"
|
|
+ b"\r\n"
|
|
+ b"a\r\n"
|
|
+ b"0123456789\r\n"
|
|
+ b"0\r\n"
|
|
+ b"\r\n"
|
|
)
|
|
|
|
expectedResponses = [
|
|
@@ -590,14 +596,16 @@ class PipeliningBodyTests(unittest.TestC
|
|
b"Request: /",
|
|
b"Command: POST",
|
|
b"Version: HTTP/1.1",
|
|
- b"Content-Length: 21",
|
|
- b"'''\n10\n0123456789'''\n",
|
|
+ b"Content-Length: 23",
|
|
+ b"'''\nNone\n0123456789'''\n",
|
|
),
|
|
]
|
|
|
|
- def test_noPipelining(self):
|
|
+ def test_stepwiseTinyTube(self):
|
|
"""
|
|
- Test that pipelined requests get buffered, not processed in parallel.
|
|
+ Imitate a slow connection that delivers one byte at a time.
|
|
+ The request handler (L{DelayedHTTPHandler}) is puppeted to
|
|
+ step through the handling of each request.
|
|
"""
|
|
b = StringTransport()
|
|
a = http.HTTPChannel()
|
|
@@ -606,10 +614,9 @@ class PipeliningBodyTests(unittest.TestC
|
|
# one byte at a time, to stress it.
|
|
for byte in iterbytes(self.requests):
|
|
a.dataReceived(byte)
|
|
- value = b.value()
|
|
|
|
# So far only one request should have been dispatched.
|
|
- self.assertEqual(value, b"")
|
|
+ self.assertEqual(b.value(), b"")
|
|
self.assertEqual(1, len(a.requests))
|
|
|
|
# Now, process each request one at a time.
|
|
@@ -618,8 +625,95 @@ class PipeliningBodyTests(unittest.TestC
|
|
request = a.requests[0].original
|
|
request.delayedProcess()
|
|
|
|
- value = b.value()
|
|
- self.assertResponseEquals(value, self.expectedResponses)
|
|
+ self.assertResponseEquals(b.value(), self.expectedResponses)
|
|
+
|
|
+ def test_stepwiseDumpTruck(self):
|
|
+ """
|
|
+ Imitate a fast connection where several pipelined
|
|
+ requests arrive in a single read. The request handler
|
|
+ (L{DelayedHTTPHandler}) is puppeted to step through the
|
|
+ handling of each request.
|
|
+ """
|
|
+ b = StringTransport()
|
|
+ a = http.HTTPChannel()
|
|
+ a.requestFactory = DelayedHTTPHandlerProxy
|
|
+ a.makeConnection(b)
|
|
+
|
|
+ a.dataReceived(self.requests)
|
|
+
|
|
+ # So far only one request should have been dispatched.
|
|
+ self.assertEqual(b.value(), b"")
|
|
+ self.assertEqual(1, len(a.requests))
|
|
+
|
|
+ # Now, process each request one at a time.
|
|
+ while a.requests:
|
|
+ self.assertEqual(1, len(a.requests))
|
|
+ request = a.requests[0].original
|
|
+ request.delayedProcess()
|
|
+
|
|
+ self.assertResponseEquals(b.value(), self.expectedResponses)
|
|
+
|
|
+ def test_immediateTinyTube(self):
|
|
+ """
|
|
+ Imitate a slow connection that delivers one byte at a time.
|
|
+
|
|
+ (L{DummyHTTPHandler}) immediately responds, but no more
|
|
+ than one
|
|
+ """
|
|
+ b = StringTransport()
|
|
+ a = http.HTTPChannel()
|
|
+ a.requestFactory = DummyHTTPHandlerProxy # "sync"
|
|
+ a.makeConnection(b)
|
|
+
|
|
+ # one byte at a time, to stress it.
|
|
+ for byte in iterbytes(self.requests):
|
|
+ a.dataReceived(byte)
|
|
+ # There is never more than one request dispatched at a time:
|
|
+ self.assertLessEqual(len(a.requests), 1)
|
|
+
|
|
+ self.assertResponseEquals(b.value(), self.expectedResponses)
|
|
+
|
|
+ def test_immediateDumpTruck(self):
|
|
+ """
|
|
+ Imitate a fast connection where several pipelined
|
|
+ requests arrive in a single read. The request handler
|
|
+ (L{DummyHTTPHandler}) immediately responds.
|
|
+
|
|
+ This doesn't check the at-most-one pending request
|
|
+ invariant but exercises otherwise uncovered code paths.
|
|
+ See GHSA-c8m8-j448-xjx7.
|
|
+ """
|
|
+ b = StringTransport()
|
|
+ a = http.HTTPChannel()
|
|
+ a.requestFactory = DummyHTTPHandlerProxy
|
|
+ a.makeConnection(b)
|
|
+
|
|
+ # All bytes at once to ensure there's stuff to buffer.
|
|
+ a.dataReceived(self.requests)
|
|
+
|
|
+ self.assertResponseEquals(b.value(), self.expectedResponses)
|
|
+
|
|
+ def test_immediateABiggerTruck(self):
|
|
+ """
|
|
+ Imitate a fast connection where a so many pipelined
|
|
+ requests arrive in a single read that backpressure is indicated.
|
|
+ The request handler (L{DummyHTTPHandler}) immediately responds.
|
|
+
|
|
+ This doesn't check the at-most-one pending request
|
|
+ invariant but exercises otherwise uncovered code paths.
|
|
+ See GHSA-c8m8-j448-xjx7.
|
|
+
|
|
+ @see: L{http.HTTPChannel._optimisticEagerReadSize}
|
|
+ """
|
|
+ b = StringTransport()
|
|
+ a = http.HTTPChannel()
|
|
+ a.requestFactory = DummyHTTPHandlerProxy
|
|
+ a.makeConnection(b)
|
|
+
|
|
+ overLimitCount = a._optimisticEagerReadSize // len(self.requests) * 10
|
|
+ a.dataReceived(self.requests * overLimitCount)
|
|
+
|
|
+ self.assertResponseEquals(b.value(), self.expectedResponses * overLimitCount)
|
|
|
|
def test_pipeliningReadLimit(self):
|
|
"""
|
|
@@ -1522,7 +1616,11 @@ class ChunkedTransferEncodingTests(unitt
|
|
lambda b: None, # pragma: nocov
|
|
)
|
|
p._maxTrailerHeadersSize = 10
|
|
- p.dataReceived(b"3\r\nabc\r\n0\r\n0123456789")
|
|
+ # 9 bytes are received so far, in 2 packets.
|
|
+ # For now, all is ok.
|
|
+ p.dataReceived(b"3\r\nabc\r\n0\r\n01234567")
|
|
+ p.dataReceived(b"\r")
|
|
+ # Once the 10th byte is received, the processing fails.
|
|
self.assertRaises(
|
|
http._MalformedChunkedDataError,
|
|
p.dataReceived,
|