Sync from SUSE:SLFO:Main python-waitress revision 4938941a88c43ccc513f0ad9bf2849a4

2024-12-13 12:36:37 +01:00
parent 0b9f0f7949
commit 241df4eb88
6 changed files with 53 additions and 745 deletions


@@ -1,334 +0,0 @@
From 6943dcf556610ece2ff3cddb39e59a05ef110661 Mon Sep 17 00:00:00 2001
From: Delta Regeer <bertjw@regeer.org>
Date: Sat, 26 Oct 2024 22:10:36 -0600
Subject: [PATCH 1/4] Make DummySock() look more like an actual socket
This forces DummySock() to look like a properly connected socket where
there is a buffer that is read from by the remote, and a buffer that is
written to by the remote.
The local side does the opposite; this way, data written by the local
side can be read by the remote without operating on the same buffer (a
distilled sketch of this pairing follows the diff below).
---
tests/test_channel.py | 57 +++++++++++++++++++++++++++++++++----------
1 file changed, 44 insertions(+), 13 deletions(-)
diff --git a/tests/test_channel.py b/tests/test_channel.py
index 8467ae7a..7d677e91 100644
--- a/tests/test_channel.py
+++ b/tests/test_channel.py
@@ -18,7 +18,7 @@ def _makeOneWithMap(self, adj=None):
map = {}
inst = self._makeOne(sock, "127.0.0.1", adj, map=map)
inst.outbuf_lock = DummyLock()
- return inst, sock, map
+ return inst, sock.local(), map
def test_ctor(self):
inst, _, map = self._makeOneWithMap()
@@ -218,7 +218,7 @@ def test_write_soon_nonempty_byte(self):
def send(_):
return 0
- sock.send = send
+ sock.remote.send = send
wrote = inst.write_soon(b"a")
self.assertEqual(wrote, 1)
@@ -236,7 +236,7 @@ def test_write_soon_filewrapper(self):
def send(_):
return 0
- sock.send = send
+ sock.remote.send = send
outbufs = inst.outbufs
wrote = inst.write_soon(wrapper)
@@ -270,7 +270,7 @@ def test_write_soon_rotates_outbuf_on_overflow(self):
def send(_):
return 0
- sock.send = send
+ sock.remote.send = send
inst.adj.outbuf_high_watermark = 3
inst.current_outbuf_count = 4
@@ -286,7 +286,7 @@ def test_write_soon_waits_on_backpressure(self):
def send(_):
return 0
- sock.send = send
+ sock.remote.send = send
inst.adj.outbuf_high_watermark = 3
inst.total_outbufs_len = 4
@@ -315,7 +315,7 @@ def send(_):
inst.connected = False
raise Exception()
- sock.send = send
+ sock.remote.send = send
inst.adj.outbuf_high_watermark = 3
inst.total_outbufs_len = 4
@@ -345,7 +345,7 @@ def send(_):
inst.connected = False
raise Exception()
- sock.send = send
+ sock.remote.send = send
wrote = inst.write_soon(b"xyz")
self.assertEqual(wrote, 3)
@@ -376,7 +376,7 @@ def test_handle_write_no_notify_after_flush(self):
inst.total_outbufs_len = len(inst.outbufs[0])
inst.adj.send_bytes = 1
inst.adj.outbuf_high_watermark = 2
- sock.send = lambda x, do_close=True: False
+ sock.remote.send = lambda x, do_close=True: False
inst.will_close = False
inst.last_activity = 0
result = inst.handle_write()
@@ -400,7 +400,7 @@ def test__flush_some_full_outbuf_socket_returns_nonzero(self):
def test__flush_some_full_outbuf_socket_returns_zero(self):
inst, sock, map = self._makeOneWithMap()
- sock.send = lambda x: False
+ sock.remote.send = lambda x: False
inst.outbufs[0].append(b"abc")
inst.total_outbufs_len = sum(len(x) for x in inst.outbufs)
result = inst._flush_some()
@@ -907,7 +907,8 @@ class DummySock:
closed = False
def __init__(self):
- self.sent = b""
+ self.local_sent = b""
+ self.remote_sent = b""
def setblocking(self, *arg):
self.blocking = True
@@ -925,14 +926,44 @@ def close(self):
self.closed = True
def send(self, data):
- self.sent += data
+ self.remote_sent += data
return len(data)
def recv(self, buffer_size):
- result = self.sent[:buffer_size]
- self.sent = self.sent[buffer_size:]
+ result = self.local_sent[:buffer_size]
+ self.local_sent = self.local_sent[buffer_size:]
return result
+ def local(self):
+ outer = self
+
+ class LocalDummySock:
+ def send(self, data):
+ outer.local_sent += data
+ return len(data)
+
+ def recv(self, buffer_size):
+ result = outer.remote_sent[:buffer_size]
+ outer.remote_sent = outer.remote_sent[buffer_size:]
+ return result
+
+ def close(self):
+ outer.closed = True
+
+ @property
+ def sent(self):
+ return outer.remote_sent
+
+ @property
+ def closed(self):
+ return outer.closed
+
+ @property
+ def remote(self):
+ return outer
+
+ return LocalDummySock()
+
class DummyLock:
notified = False
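
To make the pairing above concrete, here is a hypothetical, self-contained sketch of the same idea (not the exact test double used in waitress's test suite): each side writes into its own buffer and reads from the other side's buffer.

# Hypothetical minimal sketch of a paired in-memory socket for tests.
class PairedDummySock:
    def __init__(self):
        self.local_sent = b""   # bytes written by the local side
        self.remote_sent = b""  # bytes written by the remote side

    def send(self, data):       # the remote side writes
        self.remote_sent += data
        return len(data)

    def recv(self, size):       # the remote side reads what local wrote
        result = self.local_sent[:size]
        self.local_sent = self.local_sent[size:]
        return result

    def local(self):
        outer = self

        class LocalView:
            remote = outer       # back-reference, as in the patch above

            def send(self, data):    # the local side writes
                outer.local_sent += data
                return len(data)

            def recv(self, size):    # the local side reads what remote wrote
                result = outer.remote_sent[:size]
                outer.remote_sent = outer.remote_sent[size:]
                return result

        return LocalView()

# Data sent by one side is only visible to the other side.
remote = PairedDummySock()
local = remote.local()
local.send(b"ping")
assert remote.recv(4) == b"ping"
remote.send(b"pong")
assert local.recv(4) == b"pong"
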
From 7e7f11e61d358ab1cb853fcadf2b46b1f00f5993 Mon Sep 17 00:00:00 2001
From: Delta Regeer <bertjw@regeer.org>
Date: Sat, 26 Oct 2024 22:12:14 -0600
Subject: [PATCH 2/4] Add a new test to validate the lookahead race condition
---
tests/test_channel.py | 55 ++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 54 insertions(+), 1 deletion(-)
diff --git a/tests/test_channel.py b/tests/test_channel.py
index 7d677e91..d798091d 100644
--- a/tests/test_channel.py
+++ b/tests/test_channel.py
@@ -805,11 +805,12 @@ def app_check_disconnect(self, environ, start_response):
)
return [body]
- def _make_app_with_lookahead(self):
+ def _make_app_with_lookahead(self, recv_bytes=8192):
"""
Setup a channel with lookahead and store it and the socket in self
"""
adj = DummyAdjustments()
+ adj.recv_bytes = recv_bytes
adj.channel_request_lookahead = 5
channel, sock, map = self._makeOneWithMap(adj=adj)
channel.server.application = self.app_check_disconnect
@@ -901,6 +902,58 @@ def test_lookahead_continue(self):
self.assertEqual(data.split("\r\n")[-1], "finished")
self.assertEqual(self.request_body, b"x")
+ def test_lookahead_bad_request_drop_extra_data(self):
+ """
+ Send two requests, the first one being bad, split on the recv_bytes
+ limit, then emulate a race that could happen whereby we read data from
+ the socket while the service thread is cleaning up due to an error
+ processing the request.
+ """
+
+ invalid_request = [
+ "GET / HTTP/1.1",
+ "Host: localhost:8080",
+ "Content-length: -1",
+ "",
+ ]
+
+ invalid_request_len = len("".join([x + "\r\n" for x in invalid_request]))
+
+ second_request = [
+ "POST / HTTP/1.1",
+ "Host: localhost:8080",
+ "Content-Length: 1",
+ "",
+ "x",
+ ]
+
+ full_request = invalid_request + second_request
+
+ self._make_app_with_lookahead(recv_bytes=invalid_request_len)
+ self._send(*full_request)
+ self.channel.handle_read()
+ self.assertEqual(len(self.channel.requests), 1)
+ self.channel.server.tasks[0].service()
+ self.assertTrue(self.channel.close_when_flushed)
+ # Read all of the next request
+ self.channel.handle_read()
+ self.channel.handle_read()
+ # Validate that there is no more data to be read
+ self.assertEqual(self.sock.remote.local_sent, b"")
+ # Validate that we dropped the data from the second read, and did not
+ # create a new request
+ self.assertEqual(len(self.channel.requests), 0)
+ data = self.sock.recv(256).decode("ascii")
+ self.assertFalse(self.channel.readable())
+ self.assertTrue(self.channel.writable())
+
+ # Handle the write, which will close the socket
+ self.channel.handle_write()
+ self.assertTrue(self.sock.closed)
+
+ data = self.sock.recv(256)
+ self.assertEqual(len(data), 0)
+
class DummySock:
blocking = False
From f4ba1c260cf17156b582c6252496213ddc96b591 Mon Sep 17 00:00:00 2001
From: Delta Regeer <bertjw@regeer.org>
Date: Sat, 26 Oct 2024 22:13:08 -0600
Subject: [PATCH 3/4] Fix a race condition on recv_bytes boundary when request
is invalid
A remote client may send a request that is exactly recv_bytes long,
followed by a secondary request using HTTP pipelining.
When request lookahead is disabled (default) we won't read any more
requests, and when the first request fails due to a parsing error, we
simply close the connection.
However, when request lookahead is enabled, it is possible to receive and
process the first request and start sending the error message back to the
client while we read the next request and queue it. This allows the
secondary request to be serviced by the worker thread even though the
connection should be closed.
The fix here checks whether we should not have read the data in the first
place (because the connection is going to be torn down) while we hold the
`requests_lock`, which means the service thread can't be in the middle of
flipping the `close_when_flushed` flag (a distilled sketch of the guard
follows this patch's diff).
---
src/waitress/channel.py | 11 ++++++++++-
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/src/waitress/channel.py b/src/waitress/channel.py
index 3860ed51..f4d96776 100644
--- a/src/waitress/channel.py
+++ b/src/waitress/channel.py
@@ -140,7 +140,7 @@ def readable(self):
# 1. We're not already about to close the connection.
# 2. We're not waiting to flush remaining data before closing the
# connection
- # 3. There are not too many tasks already queued
+ # 3. There are not too many tasks already queued (if lookahead is enabled)
# 4. There's no data in the output buffer that needs to be sent
# before we potentially create a new task.
@@ -196,6 +196,15 @@ def received(self, data):
return False
with self.requests_lock:
+ # Don't bother processing any more data if this connection is about
+ # to close. This may happen if readable() returned True, on the
+ # main thread before the service thread set the close_when_flushed
+ # flag, and we read data but our service thread is attempting to
+ # shut down the connection due to an error. We want to make sure we
+ # do this while holding the requests_lock so that we can't race.
+ if self.will_close or self.close_when_flushed:
+ return False
+
while data:
if self.request is None:
self.request = self.parser_class(self.adj)
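
Distilled, the guard added above re-checks the shutdown flags once requests_lock is held, so data read in the race window is dropped instead of becoming a new request. A hypothetical outline, with everything except the guard elided:

import threading

class ChannelOutline:
    # Hypothetical outline; only the pieces relevant to the guard are shown.
    def __init__(self):
        self.requests_lock = threading.Lock()
        self.will_close = False
        self.close_when_flushed = False

    def received(self, data):
        if not data:
            return False

        with self.requests_lock:
            # The service thread may have decided to tear the connection
            # down (will_close / close_when_flushed) after readable()
            # returned True on the main thread but before this data was
            # processed; drop it rather than queueing a new request on a
            # dying connection.
            if self.will_close or self.close_when_flushed:
                return False

            # ... normal request parsing and queueing would happen here ...
            return True
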
From 810a435f9e9e293bd3446a5ce2df86f59c4e7b1b Mon Sep 17 00:00:00 2001
From: Delta Regeer <bertjw@regeer.org>
Date: Sat, 26 Oct 2024 22:22:32 -0600
Subject: [PATCH 4/4] Add documentation for channel_request_lookahead
---
docs/arguments.rst | 14 ++++++++++++++
1 file changed, 14 insertions(+)
diff --git a/docs/arguments.rst b/docs/arguments.rst
index 0b6ca458..b8a856aa 100644
--- a/docs/arguments.rst
+++ b/docs/arguments.rst
@@ -314,3 +314,17 @@ url_prefix
be stripped of the prefix.
Default: ``''``
+
+channel_request_lookahead
+ Sets the number of requests we can continue to read from the socket while
+ we are processing current requests. The default value won't allow any
+ lookahead; increase it above ``0`` to enable it.
+
+ When enabled this inserts a callable ``waitress.client_disconnected`` into
+ the environment that allows the task to check if the client disconnected
+ while waiting for the response at strategic points in the execution and to
+ cancel the operation.
+
+ Default: ``0``
+
+ .. versionadded:: 2.0.0
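
For context, a hypothetical sketch of enabling the option and honouring the callable it injects; the ``channel_request_lookahead`` setting and the ``waitress.client_disconnected`` environ key come from the documentation above, while the application itself is invented for illustration.

from waitress import serve

def app(environ, start_response):
    # Only present when channel_request_lookahead > 0.
    client_disconnected = environ.get("waitress.client_disconnected")

    chunks = []
    for piece in (b"part 1\n", b"part 2\n", b"part 3\n"):  # stand-in for real work
        if client_disconnected is not None and client_disconnected():
            # The client went away; stop working and bail out early.
            start_response("500 Internal Server Error",
                           [("Content-Type", "text/plain")])
            return [b"client disconnected\n"]
        chunks.append(piece)

    start_response("200 OK", [("Content-Type", "text/plain")])
    return chunks

if __name__ == "__main__":
    # Allow up to 5 additional requests to be read while one is serviced.
    serve(app, listen="127.0.0.1:8080", channel_request_lookahead=5)
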


@@ -1,384 +0,0 @@
From 03cc640fe7106902899f82115c26e37002bca7f1 Mon Sep 17 00:00:00 2001
From: Delta Regeer <bertjw@regeer.org>
Date: Sun, 3 Mar 2024 16:15:51 -0700
Subject: [PATCH 1/6] HTTPChannel is always created from accept, explicitly set
self.connected to True
---
src/waitress/channel.py | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/src/waitress/channel.py b/src/waitress/channel.py
index eb59dd3f..ea019d3f 100644
--- a/src/waitress/channel.py
+++ b/src/waitress/channel.py
@@ -67,8 +67,7 @@ def __init__(self, server, sock, addr, adj, map=None):
self.outbuf_lock = threading.Condition()
wasyncore.dispatcher.__init__(self, sock, map=map)
-
- # Don't let wasyncore.dispatcher throttle self.addr on us.
+ self.connected = True
self.addr = addr
self.requests = []
From 840aebce1c4c1bfd9036f402c1f5d5a4d2f4a1c2 Mon Sep 17 00:00:00 2001
From: Delta Regeer <bertjw@regeer.org>
Date: Sun, 3 Mar 2024 16:16:48 -0700
Subject: [PATCH 2/6] Assume socket is not connected when passed to
wasyncore.dispatcher
No longer call getpeername() on the remote socket either, as it is not
necessary for any of the places where waitress uses self.addr in a
subclass of the dispatcher.
This removes a race condition when setting up an HTTPChannel: we have
accepted the socket and already know the remote address, yet calling
getpeername() again could have the unintended side effect of setting
self.connected to False, because the remote has already shut down part of
the socket.
This issue was uncovered in #418, where the server would go into a hard
loop because self.connected was used in various parts of the code base (a
simplified sketch of the accept path follows this patch's diff).
---
src/waitress/wasyncore.py | 16 ----------------
1 file changed, 16 deletions(-)
diff --git a/src/waitress/wasyncore.py b/src/waitress/wasyncore.py
index 86155578..97943085 100644
--- a/src/waitress/wasyncore.py
+++ b/src/waitress/wasyncore.py
@@ -297,22 +297,6 @@ def __init__(self, sock=None, map=None):
# get a socket from a blocking source.
sock.setblocking(0)
self.set_socket(sock, map)
- self.connected = True
- # The constructor no longer requires that the socket
- # passed be connected.
- try:
- self.addr = sock.getpeername()
- except OSError as err:
- if err.args[0] in (ENOTCONN, EINVAL):
- # To handle the case where we got an unconnected
- # socket.
- self.connected = False
- else:
- # The socket is broken in some unknown way, alert
- # the user and remove it from the map (to prevent
- # polling of broken sockets).
- self.del_channel(map)
- raise
else:
self.socket = None
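
Taken together with the HTTPChannel change in patch 1/6, the peer address now comes straight from accept(), so no later getpeername() call is needed. A hypothetical, much-simplified sketch of that accept path (not waitress's actual server code):

import socket

# Hypothetical accept loop: addr is returned by accept() itself, so the
# channel can simply record it and assume the socket is connected, with
# no getpeername() call that could race with a peer that has already
# shut down its side of the connection.
listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listener.bind(("127.0.0.1", 8080))
listener.listen()

conn, addr = listener.accept()
conn.setblocking(False)
channel = {"sock": conn, "addr": addr, "connected": True}  # stand-in for HTTPChannel
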
From 86c680df4e4bdd40c78dec771cddcee059e802c4 Mon Sep 17 00:00:00 2001
From: Delta Regeer <bertjw@regeer.org>
Date: Sun, 3 Mar 2024 16:23:33 -0700
Subject: [PATCH 3/6] Remove test for getpeername()
---
tests/test_wasyncore.py | 11 -----------
1 file changed, 11 deletions(-)
diff --git a/tests/test_wasyncore.py b/tests/test_wasyncore.py
index 55c40191..a8a480a8 100644
--- a/tests/test_wasyncore.py
+++ b/tests/test_wasyncore.py
@@ -1454,17 +1454,6 @@ def _makeOne(self, sock=None, map=None):
return dispatcher(sock=sock, map=map)
- def test_unexpected_getpeername_exc(self):
- sock = dummysocket()
-
- def getpeername():
- raise OSError(errno.EBADF)
-
- map = {}
- sock.getpeername = getpeername
- self.assertRaises(socket.error, self._makeOne, sock=sock, map=map)
- self.assertEqual(map, {})
-
def test___repr__accepting(self):
sock = dummysocket()
map = {}
From 8cba302b1ac08c2874ae179b2af2445e89311bac Mon Sep 17 00:00:00 2001
From: Delta Regeer <bertjw@regeer.org>
Date: Sun, 3 Mar 2024 16:26:22 -0700
Subject: [PATCH 4/6] Don't exit handle_write early -- even if socket is not
connected
Calling handle_close() multiple times does not hurt anything, and is
safe.
---
src/waitress/channel.py | 6 ------
1 file changed, 6 deletions(-)
diff --git a/src/waitress/channel.py b/src/waitress/channel.py
index ea019d3f..3860ed51 100644
--- a/src/waitress/channel.py
+++ b/src/waitress/channel.py
@@ -91,13 +91,7 @@ def handle_write(self):
# Precondition: there's data in the out buffer to be sent, or
# there's a pending will_close request
- if not self.connected:
- # we dont want to close the channel twice
-
- return
-
# try to flush any pending output
-
if not self.requests:
# 1. There are no running tasks, so we don't need to try to lock
# the outbuf before sending
From 63678e652d912e67621580123c603e37c319d8c4 Mon Sep 17 00:00:00 2001
From: Delta Regeer <bertjw@regeer.org>
Date: Sun, 3 Mar 2024 16:35:39 -0700
Subject: [PATCH 5/6] Remove code not used by waitress from vendored asyncore
---
src/waitress/wasyncore.py | 45 ------------------
tests/test_wasyncore.py | 96 ++++++++-------------------------------
2 files changed, 18 insertions(+), 123 deletions(-)
diff --git a/src/waitress/wasyncore.py b/src/waitress/wasyncore.py
index 97943085..49829f98 100644
--- a/src/waitress/wasyncore.py
+++ b/src/waitress/wasyncore.py
@@ -378,23 +378,6 @@ def bind(self, addr):
self.addr = addr
return self.socket.bind(addr)
- def connect(self, address):
- self.connected = False
- self.connecting = True
- err = self.socket.connect_ex(address)
- if (
- err in (EINPROGRESS, EALREADY, EWOULDBLOCK)
- or err == EINVAL
- and os.name == "nt"
- ): # pragma: no cover
- self.addr = address
- return
- if err in (0, EISCONN):
- self.addr = address
- self.handle_connect_event()
- else:
- raise OSError(err, errorcode[err])
-
def accept(self):
# XXX can return either an address pair or None
try:
@@ -556,34 +539,6 @@ def handle_close(self):
self.close()
-# ---------------------------------------------------------------------------
-# adds simple buffered output capability, useful for simple clients.
-# [for more sophisticated usage use asynchat.async_chat]
-# ---------------------------------------------------------------------------
-
-
-class dispatcher_with_send(dispatcher):
- def __init__(self, sock=None, map=None):
- dispatcher.__init__(self, sock, map)
- self.out_buffer = b""
-
- def initiate_send(self):
- num_sent = 0
- num_sent = dispatcher.send(self, self.out_buffer[:65536])
- self.out_buffer = self.out_buffer[num_sent:]
-
- handle_write = initiate_send
-
- def writable(self):
- return (not self.connected) or len(self.out_buffer)
-
- def send(self, data):
- if self.debug: # pragma: no cover
- self.log_info("sending %s" % repr(data))
- self.out_buffer = self.out_buffer + data
- self.initiate_send()
-
-
def close_all(map=None, ignore_all=False):
if map is None: # pragma: no cover
map = socket_map
diff --git a/tests/test_wasyncore.py b/tests/test_wasyncore.py
index a8a480a8..20f68f6e 100644
--- a/tests/test_wasyncore.py
+++ b/tests/test_wasyncore.py
@@ -1,6 +1,7 @@
import _thread as thread
import contextlib
import errno
+from errno import EALREADY, EINPROGRESS, EINVAL, EISCONN, EWOULDBLOCK, errorcode
import functools
import gc
from io import BytesIO
@@ -641,62 +642,6 @@ def test_strerror(self):
self.assertTrue(err != "")
-class dispatcherwithsend_noread(asyncore.dispatcher_with_send): # pragma: no cover
- def readable(self):
- return False
-
- def handle_connect(self):
- pass
-
-
-class DispatcherWithSendTests(unittest.TestCase):
- def setUp(self):
- pass
-
- def tearDown(self):
- asyncore.close_all()
-
- @reap_threads
- def test_send(self):
- evt = threading.Event()
- sock = socket.socket()
- sock.settimeout(3)
- port = bind_port(sock)
-
- cap = BytesIO()
- args = (evt, cap, sock)
- t = threading.Thread(target=capture_server, args=args)
- t.start()
- try:
- # wait a little longer for the server to initialize (it sometimes
- # refuses connections on slow machines without this wait)
- time.sleep(0.2)
-
- data = b"Suppose there isn't a 16-ton weight?"
- d = dispatcherwithsend_noread()
- d.create_socket()
- d.connect((HOST, port))
-
- # give time for socket to connect
- time.sleep(0.1)
-
- d.send(data)
- d.send(data)
- d.send(b"\n")
-
- n = 1000
-
- while d.out_buffer and n > 0: # pragma: no cover
- asyncore.poll()
- n -= 1
-
- evt.wait()
-
- self.assertEqual(cap.getvalue(), data * 2)
- finally:
- join_thread(t, timeout=TIMEOUT)
-
-
@unittest.skipUnless(
hasattr(asyncore, "file_wrapper"), "asyncore.file_wrapper required"
)
@@ -839,6 +784,23 @@ def __init__(self, family, address):
self.create_socket(family)
self.connect(address)
+ def connect(self, address):
+ self.connected = False
+ self.connecting = True
+ err = self.socket.connect_ex(address)
+ if (
+ err in (EINPROGRESS, EALREADY, EWOULDBLOCK)
+ or err == EINVAL
+ and os.name == "nt"
+ ): # pragma: no cover
+ self.addr = address
+ return
+ if err in (0, EISCONN):
+ self.addr = address
+ self.handle_connect_event()
+ else:
+ raise OSError(err, errorcode[err])
+
def handle_connect(self):
pass
@@ -1489,13 +1451,6 @@ def setsockopt(*arg, **kw):
inst.set_reuse_addr()
self.assertTrue(sock.errored)
- def test_connect_raise_socket_error(self):
- sock = dummysocket()
- map = {}
- sock.connect_ex = lambda *arg: 1
- inst = self._makeOne(sock=sock, map=map)
- self.assertRaises(socket.error, inst.connect, 0)
-
def test_accept_raise_TypeError(self):
sock = dummysocket()
map = {}
@@ -1664,21 +1619,6 @@ def test_handle_accepted(self):
self.assertTrue(sock.closed)
-class Test_dispatcher_with_send(unittest.TestCase):
- def _makeOne(self, sock=None, map=None):
- from waitress.wasyncore import dispatcher_with_send
-
- return dispatcher_with_send(sock=sock, map=map)
-
- def test_writable(self):
- sock = dummysocket()
- map = {}
- inst = self._makeOne(sock=sock, map=map)
- inst.out_buffer = b"123"
- inst.connected = True
- self.assertTrue(inst.writable())
-
-
class Test_close_all(unittest.TestCase):
def _callFUT(self, map=None, ignore_all=False):
from waitress.wasyncore import close_all
From 9d99c89ae4aa8449313eea210a5ec9f3994a87b2 Mon Sep 17 00:00:00 2001
From: Delta Regeer <bertjw@regeer.org>
Date: Sun, 3 Mar 2024 16:37:12 -0700
Subject: [PATCH 6/6] When closing the socket, set it to None
This avoids calling close() twice on the same socket if self.close() or
self.handle_close() is called multiple times (a distilled sketch of this
idempotent-close pattern follows the diff below).
---
src/waitress/wasyncore.py | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/src/waitress/wasyncore.py b/src/waitress/wasyncore.py
index 49829f98..f42ee37c 100644
--- a/src/waitress/wasyncore.py
+++ b/src/waitress/wasyncore.py
@@ -436,6 +436,8 @@ def close(self):
if why.args[0] not in (ENOTCONN, EBADF):
raise
+ self.socket = None
+
# log and log_info may be overridden to provide more sophisticated
# logging and warning methods. In general, log is for 'hit' logging
# and 'log_info' is for informational, warning and error logging.
@@ -486,7 +488,11 @@ def handle_expt_event(self):
# handle_expt_event() is called if there might be an error on the
# socket, or if there is OOB data
# check for the error condition first
- err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
+ err = (
+ self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
+ if self.socket is not None
+ else 1
+ )
if err != 0:
# we can get here when select.select() says that there is an
# exceptional condition on the socket
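
The underlying idea, reduced to a hypothetical stand-alone sketch (this is not the wasyncore code itself): once the socket reference is dropped, any later close attempt sees None and does nothing.

from errno import EBADF, ENOTCONN

class ClosableDispatcher:
    def __init__(self, sock):
        self.socket = sock

    def close(self):
        # Take the reference and clear it in one step, so a second call
        # finds None and returns instead of closing an already-closed fd.
        sock, self.socket = self.socket, None
        if sock is None:
            return
        try:
            sock.close()
        except OSError as why:
            if why.args[0] not in (ENOTCONN, EBADF):
                raise
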


@@ -1,9 +1,37 @@
-------------------------------------------------------------------
Mon Nov 25 10:50:13 UTC 2024 - Nico Krapp <nico.krapp@suse.com>
Wed Oct 30 06:49:46 UTC 2024 - Daniel Garcia <daniel.garcia@suse.com>
- Add security patches:
* CVE-2024-49768.patch (bsc#1232556)
* CVE-2024-49769.patch (bsc#1232554)
- Update to 3.0.1 (bsc#1232554, bsc#1232556, CVE-2024-49769, CVE-2024-49768):
* Fix a bug that would lead to Waitress busy looping on select()
on a half-open socket due to a race condition that existed when
creating a new HTTPChannel. See
https://github.com/Pylons/waitress/pull/435,
https://github.com/Pylons/waitress/issues/418 and
https://github.com/Pylons/waitress/security/advisories/GHSA-3f84-rpwh-47g6
* No longer strip the header values before passing them to the
WSGI environ. See https://github.com/Pylons/waitress/pull/434
and https://github.com/Pylons/waitress/issues/432
* Fix a race condition in Waitress when
`channel_request_lookahead` is enabled that could lead to HTTP
request smuggling.
* See https://github.com/Pylons/waitress/security/advisories/GHSA-9298-4cf8-g4wj
-------------------------------------------------------------------
Sun Jun 30 07:59:06 UTC 2024 - Dirk Müller <dmueller@suse.com>
- update to 3.0.0:
* Fixed testing of vendored asyncore code to not rely on
particular naming for errno's.
* HTTP Request methods and versions are now validated to meet
the HTTP standards thereby dropping invalid requests on the floor.
* No longer close the connection when sending a HEAD request
response.
* Always attempt to send the Connection: close response header
when we are going to close the connection to let the remote
know in more instances.
* Document that trusted_proxy may be set to a wildcard value to
trust all proxies.
* clear_untrusted_proxy_headers is set to True by default.
-------------------------------------------------------------------
Mon Dec 4 15:20:28 UTC 2023 - Ana Guerrero <ana.guerrero@suse.com>
@@ -83,7 +111,7 @@ Thu Mar 17 17:42:42 UTC 2022 - Dirk Müller <dmueller@suse.com>
previously get parsed as 10 and accepted. This stops potential HTTP
desync/HTTP request smuggling. Thanks to Zhang Zeyu for reporting this issue.
See
https://github.com/Pylons/waitress/security/advisories/GHSA-4f7p-27jc-3c36
https://github.com/Pylons/waitress/security/advisories/GHSA-4f7p-27jc-3c36
-------------------------------------------------------------------
Fri Aug 27 12:27:31 UTC 2021 - Stefan Schubert <schubi@suse.de>
@@ -164,9 +192,9 @@ Mon May 18 07:25:32 UTC 2020 - Petr Gajdos <pgajdos@suse.com>
Thu Feb 6 17:29:20 UTC 2020 - Marketa Calabkova <mcalabkova@suse.com>
- update to 1.4.3
* Waitress did not properly validate that the HTTP headers it received
were properly formed, thereby potentially allowing a front-end server
to treat a request different from Waitress. This could lead to HTTP
* Waitress did not properly validate that the HTTP headers it received
were properly formed, thereby potentially allowing a front-end server
to treat a request different from Waitress. This could lead to HTTP
request smuggling/splitting.
- drop patch local-intersphinx-inventories.patch
* it was commented out, anyway
@@ -193,7 +221,7 @@ Fri Dec 20 18:28:24 UTC 2019 - Dirk Mueller <dmueller@suse.com>
Thu Aug 29 13:35:14 UTC 2019 - Marketa Calabkova <mcalabkova@suse.com>
- update to 1.3.1
* Waitress wont accidentally throw away part of the path if it
* Waitress wont accidentally throw away part of the path if it
starts with a double slash
-------------------------------------------------------------------
@@ -419,10 +447,10 @@ Tue Aug 13 10:15:30 UTC 2013 - dmueller@suse.com
- update to 0.8.6:
- Do alternate type of checking for UNIX socket support, instead of checking
for platform == windows.
- Functional tests now use multiprocessing module instead of subprocess module,
speeding up test suite and making concurrent execution more reliable.
- Runner now appends the current working directory to ``sys.path`` to support
running WSGI applications from a directory (i.e., not installed in a
virtualenv).
@@ -458,5 +486,5 @@ Mon Apr 29 14:14:25 UTC 2013 - speilicke@suse.com
-------------------------------------------------------------------
Mon Apr 29 13:06:10 UTC 2013 - dmueller@suse.com
- Initial package (0.8.3)
- Initial package (0.8.3)


@@ -1,7 +1,7 @@
#
# spec file
# spec file for package python-waitress
#
# Copyright (c) 2023 SUSE LLC
# Copyright (c) 2024 SUSE LLC
#
# All modifications and additions to the file contributed by third parties
# remain the property of their copyright owners, unless otherwise agreed
@@ -31,7 +31,7 @@
%endif
%{?sle15_python_module_pythons}
Name: python-waitress%{psuffix}
Version: 2.1.2
Version: 3.0.1
Release: 0
Summary: Waitress WSGI server
License: ZPL-2.1
@@ -42,11 +42,9 @@ Source: https://files.pythonhosted.org/packages/source/w/waitress/waitre
# https://docs.python.org/3/objects.inv -> python3.inv
Source1: python3.inv
Source2: fetch-intersphinx-inventories.sh
# PATCH-FIX-UPSTREAM CVE-2024-49768.patch bsc#1232556
Patch1: CVE-2024-49768.patch
# PATCH-FIX-UPSTREAM CVE-2024-49769.patch bsc#1232554
Patch2: CVE-2024-49769.patch
BuildRequires: %{python_module pip}
BuildRequires: %{python_module setuptools}
BuildRequires: %{python_module wheel}
BuildRequires: fdupes
BuildRequires: python-rpm-macros >= 20210929
BuildArch: noarch
@@ -57,7 +55,7 @@ BuildRequires: alts
Requires: alts
%else
Requires(post): update-alternatives
Requires(postun):update-alternatives
Requires(postun): update-alternatives
%endif
%else
# Documentation requirements
@@ -87,14 +85,14 @@ For more information, see the "docs" directory of the Waitress package or
http://docs.pylonsproject.org/projects/waitress/en/latest/ .
%prep
%autosetup -p1 -n waitress-%{version}
%setup -q -n waitress-%{version}
sed -i '/addopts/d' setup.cfg
%build
%python_build
%pyproject_wheel
%install
%python_install
%pyproject_install
%python_clone -a %{buildroot}%{_bindir}/waitress-serve
%python_expand %fdupes %{buildroot}%{$python_sitelib}
@@ -117,7 +115,7 @@ sed -i '/addopts/d' setup.cfg
%doc COPYRIGHT.txt README.rst
%python_alternative %{_bindir}/waitress-serve
%{python_sitelib}/waitress
%{python_sitelib}/waitress-%{version}*-info
%{python_sitelib}/waitress-%{version}.dist-info
%else

waitress-2.1.2.tar.gz (Stored with Git LFS)

Binary file not shown.

waitress-3.0.1.tar.gz (Stored with Git LFS) Normal file

Binary file not shown.