Sync from SUSE:ALP:Source:Standard:1.0 python-waitress revision 4594967786586a873a115f3001eb89d0

Adrian Schröter 2025-01-09 13:57:52 +01:00
parent 93e871e584
commit 710769fc0a
4 changed files with 730 additions and 1 deletion

CVE-2024-49768.patch (new file)

@@ -0,0 +1,334 @@
From 6943dcf556610ece2ff3cddb39e59a05ef110661 Mon Sep 17 00:00:00 2001
From: Delta Regeer <bertjw@regeer.org>
Date: Sat, 26 Oct 2024 22:10:36 -0600
Subject: [PATCH 1/4] Make DummySock() look more like an actual socket
This forces DummySock() to look like a properly connected socket, with
one buffer that the remote reads from and another that the remote writes
to. The local side does the opposite; this way, data written by the local
side can be read by the remote without both ends operating on the same
buffer.
---
tests/test_channel.py | 57 +++++++++++++++++++++++++++++++++----------
1 file changed, 44 insertions(+), 13 deletions(-)
diff --git a/tests/test_channel.py b/tests/test_channel.py
index 8467ae7a..7d677e91 100644
--- a/tests/test_channel.py
+++ b/tests/test_channel.py
@@ -18,7 +18,7 @@ def _makeOneWithMap(self, adj=None):
map = {}
inst = self._makeOne(sock, "127.0.0.1", adj, map=map)
inst.outbuf_lock = DummyLock()
- return inst, sock, map
+ return inst, sock.local(), map
def test_ctor(self):
inst, _, map = self._makeOneWithMap()
@@ -218,7 +218,7 @@ def test_write_soon_nonempty_byte(self):
def send(_):
return 0
- sock.send = send
+ sock.remote.send = send
wrote = inst.write_soon(b"a")
self.assertEqual(wrote, 1)
@@ -236,7 +236,7 @@ def test_write_soon_filewrapper(self):
def send(_):
return 0
- sock.send = send
+ sock.remote.send = send
outbufs = inst.outbufs
wrote = inst.write_soon(wrapper)
@@ -270,7 +270,7 @@ def test_write_soon_rotates_outbuf_on_overflow(self):
def send(_):
return 0
- sock.send = send
+ sock.remote.send = send
inst.adj.outbuf_high_watermark = 3
inst.current_outbuf_count = 4
@@ -286,7 +286,7 @@ def test_write_soon_waits_on_backpressure(self):
def send(_):
return 0
- sock.send = send
+ sock.remote.send = send
inst.adj.outbuf_high_watermark = 3
inst.total_outbufs_len = 4
@@ -315,7 +315,7 @@ def send(_):
inst.connected = False
raise Exception()
- sock.send = send
+ sock.remote.send = send
inst.adj.outbuf_high_watermark = 3
inst.total_outbufs_len = 4
@@ -345,7 +345,7 @@ def send(_):
inst.connected = False
raise Exception()
- sock.send = send
+ sock.remote.send = send
wrote = inst.write_soon(b"xyz")
self.assertEqual(wrote, 3)
@@ -376,7 +376,7 @@ def test_handle_write_no_notify_after_flush(self):
inst.total_outbufs_len = len(inst.outbufs[0])
inst.adj.send_bytes = 1
inst.adj.outbuf_high_watermark = 2
- sock.send = lambda x, do_close=True: False
+ sock.remote.send = lambda x, do_close=True: False
inst.will_close = False
inst.last_activity = 0
result = inst.handle_write()
@@ -400,7 +400,7 @@ def test__flush_some_full_outbuf_socket_returns_nonzero(self):
def test__flush_some_full_outbuf_socket_returns_zero(self):
inst, sock, map = self._makeOneWithMap()
- sock.send = lambda x: False
+ sock.remote.send = lambda x: False
inst.outbufs[0].append(b"abc")
inst.total_outbufs_len = sum(len(x) for x in inst.outbufs)
result = inst._flush_some()
@@ -907,7 +907,8 @@ class DummySock:
closed = False
def __init__(self):
- self.sent = b""
+ self.local_sent = b""
+ self.remote_sent = b""
def setblocking(self, *arg):
self.blocking = True
@@ -925,14 +926,44 @@ def close(self):
self.closed = True
def send(self, data):
- self.sent += data
+ self.remote_sent += data
return len(data)
def recv(self, buffer_size):
- result = self.sent[:buffer_size]
- self.sent = self.sent[buffer_size:]
+ result = self.local_sent[:buffer_size]
+ self.local_sent = self.local_sent[buffer_size:]
return result
+ def local(self):
+ outer = self
+
+ class LocalDummySock:
+ def send(self, data):
+ outer.local_sent += data
+ return len(data)
+
+ def recv(self, buffer_size):
+ result = outer.remote_sent[:buffer_size]
+ outer.remote_sent = outer.remote_sent[buffer_size:]
+ return result
+
+ def close(self):
+ outer.closed = True
+
+ @property
+ def sent(self):
+ return outer.remote_sent
+
+ @property
+ def closed(self):
+ return outer.closed
+
+ @property
+ def remote(self):
+ return outer
+
+ return LocalDummySock()
+
class DummyLock:
notified = False
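
To make the buffer-pair idea in the patch above concrete, here is a minimal, self-contained sketch of the same pattern (the names below are illustrative and are not the test suite's actual API). Each end appends to its own buffer and reads from the other end's buffer, so the two sides never operate on the same bytes:

    class FakeSocketPair:
        """Two in-memory socket ends backed by separate byte buffers."""

        def __init__(self):
            self.to_remote = b""  # written by the local end, read by the remote end
            self.to_local = b""   # written by the remote end, read by the local end

        def local_send(self, data):
            self.to_remote += data
            return len(data)

        def local_recv(self, size):
            data, self.to_local = self.to_local[:size], self.to_local[size:]
            return data

        def remote_send(self, data):
            self.to_local += data
            return len(data)

        def remote_recv(self, size):
            data, self.to_remote = self.to_remote[:size], self.to_remote[size:]
            return data

    pair = FakeSocketPair()
    pair.local_send(b"GET / HTTP/1.1\r\n")
    assert pair.remote_recv(1024) == b"GET / HTTP/1.1\r\n"
    assert pair.local_recv(1024) == b""  # the remote has not written anything yet
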
From 7e7f11e61d358ab1cb853fcadf2b46b1f00f5993 Mon Sep 17 00:00:00 2001
From: Delta Regeer <bertjw@regeer.org>
Date: Sat, 26 Oct 2024 22:12:14 -0600
Subject: [PATCH 2/4] Add a new test to validate the lookahead race condition
---
tests/test_channel.py | 55 ++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 54 insertions(+), 1 deletion(-)
diff --git a/tests/test_channel.py b/tests/test_channel.py
index 7d677e91..d798091d 100644
--- a/tests/test_channel.py
+++ b/tests/test_channel.py
@@ -805,11 +805,12 @@ def app_check_disconnect(self, environ, start_response):
)
return [body]
- def _make_app_with_lookahead(self):
+ def _make_app_with_lookahead(self, recv_bytes=8192):
"""
Setup a channel with lookahead and store it and the socket in self
"""
adj = DummyAdjustments()
+ adj.recv_bytes = recv_bytes
adj.channel_request_lookahead = 5
channel, sock, map = self._makeOneWithMap(adj=adj)
channel.server.application = self.app_check_disconnect
@@ -901,6 +902,58 @@ def test_lookahead_continue(self):
self.assertEqual(data.split("\r\n")[-1], "finished")
self.assertEqual(self.request_body, b"x")
+ def test_lookahead_bad_request_drop_extra_data(self):
+ """
+ Send two requests, the first one being bad, split on the recv_bytes
+ limit, then emulate a race that could happen whereby we read data from
+ the socket while the service thread is cleaning up due to an error
+ processing the request.
+ """
+
+ invalid_request = [
+ "GET / HTTP/1.1",
+ "Host: localhost:8080",
+ "Content-length: -1",
+ "",
+ ]
+
+ invalid_request_len = len("".join([x + "\r\n" for x in invalid_request]))
+
+ second_request = [
+ "POST / HTTP/1.1",
+ "Host: localhost:8080",
+ "Content-Length: 1",
+ "",
+ "x",
+ ]
+
+ full_request = invalid_request + second_request
+
+ self._make_app_with_lookahead(recv_bytes=invalid_request_len)
+ self._send(*full_request)
+ self.channel.handle_read()
+ self.assertEqual(len(self.channel.requests), 1)
+ self.channel.server.tasks[0].service()
+ self.assertTrue(self.channel.close_when_flushed)
+ # Read all of the next request
+ self.channel.handle_read()
+ self.channel.handle_read()
+ # Validate that there is no more data to be read
+ self.assertEqual(self.sock.remote.local_sent, b"")
+ # Validate that we dropped the data from the second read, and did not
+ # create a new request
+ self.assertEqual(len(self.channel.requests), 0)
+ data = self.sock.recv(256).decode("ascii")
+ self.assertFalse(self.channel.readable())
+ self.assertTrue(self.channel.writable())
+
+ # Handle the write, which will close the socket
+ self.channel.handle_write()
+ self.assertTrue(self.sock.closed)
+
+ data = self.sock.recv(256)
+ self.assertEqual(len(data), 0)
+
class DummySock:
blocking = False
From f4ba1c260cf17156b582c6252496213ddc96b591 Mon Sep 17 00:00:00 2001
From: Delta Regeer <bertjw@regeer.org>
Date: Sat, 26 Oct 2024 22:13:08 -0600
Subject: [PATCH 3/4] Fix a race condition on recv_bytes boundary when request
is invalid
A remote client may send a request that is exactly recv_bytes long,
followed by a secondary request using HTTP pipelining.
When request lookahead is disabled (default) we won't read any more
requests, and when the first request fails due to a parsing error, we
simply close the connection.
However, when request lookahead is enabled, it is possible to receive and
process the first request and start sending the error message back to the
client while we read the next request and queue it. This allows the
second request to be serviced by the worker thread even though the
connection should be closed.
The fix here checks whether we should not have read the data in the first
place (because the connection is going to be torn down) while we hold the
`requests_lock`, which means the service thread can't be in the middle of
flipping the `close_when_flushed` flag.
---
src/waitress/channel.py | 11 ++++++++++-
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/src/waitress/channel.py b/src/waitress/channel.py
index 3860ed51..f4d96776 100644
--- a/src/waitress/channel.py
+++ b/src/waitress/channel.py
@@ -140,7 +140,7 @@ def readable(self):
# 1. We're not already about to close the connection.
# 2. We're not waiting to flush remaining data before closing the
# connection
- # 3. There are not too many tasks already queued
+ # 3. There are not too many tasks already queued (if lookahead is enabled)
# 4. There's no data in the output buffer that needs to be sent
# before we potentially create a new task.
@@ -196,6 +196,15 @@ def received(self, data):
return False
with self.requests_lock:
+ # Don't bother processing anymore data if this connection is about
+ # to close. This may happen if readable() returned True, on the
+ # main thread before the service thread set the close_when_flushed
+ # flag, and we read data but our service thread is attempting to
+ # shut down the connection due to an error. We want to make sure we
+ # do this while holding the request_lock so that we can't race
+ if self.will_close or self.close_when_flushed:
+ return False
+
while data:
if self.request is None:
self.request = self.parser_class(self.adj)
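
The guard added above is easiest to see in isolation. The following is a simplified, illustrative model (not waitress's actual HTTPChannel class): data that arrives after the service thread has flagged the connection for closing is dropped while holding the same lock, so the two threads cannot race.

    import threading

    class ChannelSketch:
        """Illustrative stand-in for the channel's received() guard."""

        def __init__(self):
            self.requests_lock = threading.Lock()
            self.will_close = False
            self.close_when_flushed = False
            self.requests = []

        def received(self, data):
            if not data:
                return False
            with self.requests_lock:
                # The service thread flips close_when_flushed under this same
                # lock, so once we hold it the decision to tear the connection
                # down is either already visible here or not yet made.
                if self.will_close or self.close_when_flushed:
                    return False
                self.requests.append(data)  # stand-in for request parsing
            return True
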
From 810a435f9e9e293bd3446a5ce2df86f59c4e7b1b Mon Sep 17 00:00:00 2001
From: Delta Regeer <bertjw@regeer.org>
Date: Sat, 26 Oct 2024 22:22:32 -0600
Subject: [PATCH 4/4] Add documentation for channel_request_lookahead
---
docs/arguments.rst | 14 ++++++++++++++
1 file changed, 14 insertions(+)
diff --git a/docs/arguments.rst b/docs/arguments.rst
index 0b6ca458..b8a856aa 100644
--- a/docs/arguments.rst
+++ b/docs/arguments.rst
@@ -314,3 +314,17 @@ url_prefix
be stripped of the prefix.
Default: ``''``
+
+channel_request_lookahead
+ Sets the amount of requests we can continue to read from the socket, while
+ we are processing current requests. The default value won't allow any
+ lookahead, increase it above ``0`` to enable.
+
+ When enabled this inserts a callable ``waitress.client_disconnected`` into
+ the environment that allows the task to check if the client disconnected
+ while waiting for the response at strategic points in the execution and to
+ cancel the operation.
+
+ Default: ``0``
+
+ .. versionadded:: 2.0.0
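
The option documented above can be exercised roughly as follows. This is a usage sketch only (the application body, host, and port are illustrative); it relies on the `channel_request_lookahead` argument and the `waitress.client_disconnected` environ key described in the text:

    from waitress import serve

    def app(environ, start_response):
        # Present only when channel_request_lookahead > 0.
        client_disconnected = environ.get("waitress.client_disconnected")

        # ... do long-running work, checking the callable at strategic points ...
        if client_disconnected is not None and client_disconnected():
            start_response("500 Internal Server Error",
                           [("Content-Type", "text/plain")])
            return [b"client went away"]

        start_response("200 OK", [("Content-Type", "text/plain")])
        return [b"hello"]

    if __name__ == "__main__":
        serve(app, host="127.0.0.1", port=8080, channel_request_lookahead=5)
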

CVE-2024-49769.patch (new file)

@@ -0,0 +1,384 @@
From 03cc640fe7106902899f82115c26e37002bca7f1 Mon Sep 17 00:00:00 2001
From: Delta Regeer <bertjw@regeer.org>
Date: Sun, 3 Mar 2024 16:15:51 -0700
Subject: [PATCH 1/6] HTTPChannel is always created from accept, explicitly set
self.connected to True
---
src/waitress/channel.py | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/src/waitress/channel.py b/src/waitress/channel.py
index eb59dd3f..ea019d3f 100644
--- a/src/waitress/channel.py
+++ b/src/waitress/channel.py
@@ -67,8 +67,7 @@ def __init__(self, server, sock, addr, adj, map=None):
self.outbuf_lock = threading.Condition()
wasyncore.dispatcher.__init__(self, sock, map=map)
-
- # Don't let wasyncore.dispatcher throttle self.addr on us.
+ self.connected = True
self.addr = addr
self.requests = []
From 840aebce1c4c1bfd9036f402c1f5d5a4d2f4a1c2 Mon Sep 17 00:00:00 2001
From: Delta Regeer <bertjw@regeer.org>
Date: Sun, 3 Mar 2024 16:16:48 -0700
Subject: [PATCH 2/6] Assume socket is not connected when passed to
wasyncore.dispatcher
No longer call getpeername() on the remote socket either, as it is not
necessary for any of the places where a subclass of the dispatcher needs
self.addr.
This removes a race condition when setting up an HTTPChannel: we accept
the socket and already know the remote address, yet previously called
getpeername() again, which could have the unintended side effect of
setting self.connected to False because the remote had already shut down
part of the socket.
This issue was uncovered in #418, where the server would go into a hard
loop because self.connected was used in various parts of the code base.
---
src/waitress/wasyncore.py | 16 ----------------
1 file changed, 16 deletions(-)
diff --git a/src/waitress/wasyncore.py b/src/waitress/wasyncore.py
index 86155578..97943085 100644
--- a/src/waitress/wasyncore.py
+++ b/src/waitress/wasyncore.py
@@ -297,22 +297,6 @@ def __init__(self, sock=None, map=None):
# get a socket from a blocking source.
sock.setblocking(0)
self.set_socket(sock, map)
- self.connected = True
- # The constructor no longer requires that the socket
- # passed be connected.
- try:
- self.addr = sock.getpeername()
- except OSError as err:
- if err.args[0] in (ENOTCONN, EINVAL):
- # To handle the case where we got an unconnected
- # socket.
- self.connected = False
- else:
- # The socket is broken in some unknown way, alert
- # the user and remove it from the map (to prevent
- # polling of broken sockets).
- self.del_channel(map)
- raise
else:
self.socket = None
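
The pattern described above reduces to trusting the address that accept() already returned instead of querying the socket again. A minimal illustrative sketch (not waitress's actual classes):

    class DispatcherSketch:
        """Illustrative: take the peer address from the accept() caller."""

        def __init__(self, sock, addr):
            sock.setblocking(False)
            self.socket = sock
            self.addr = addr        # supplied by whoever called accept()
            self.connected = True   # an accepted socket is connected by definition
            # No getpeername() call, so a peer that already shut down its side
            # cannot flip self.connected to False during setup.
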
From 86c680df4e4bdd40c78dec771cddcee059e802c4 Mon Sep 17 00:00:00 2001
From: Delta Regeer <bertjw@regeer.org>
Date: Sun, 3 Mar 2024 16:23:33 -0700
Subject: [PATCH 3/6] Remove test for getpeername()
---
tests/test_wasyncore.py | 11 -----------
1 file changed, 11 deletions(-)
diff --git a/tests/test_wasyncore.py b/tests/test_wasyncore.py
index 55c40191..a8a480a8 100644
--- a/tests/test_wasyncore.py
+++ b/tests/test_wasyncore.py
@@ -1454,17 +1454,6 @@ def _makeOne(self, sock=None, map=None):
return dispatcher(sock=sock, map=map)
- def test_unexpected_getpeername_exc(self):
- sock = dummysocket()
-
- def getpeername():
- raise OSError(errno.EBADF)
-
- map = {}
- sock.getpeername = getpeername
- self.assertRaises(socket.error, self._makeOne, sock=sock, map=map)
- self.assertEqual(map, {})
-
def test___repr__accepting(self):
sock = dummysocket()
map = {}
From 8cba302b1ac08c2874ae179b2af2445e89311bac Mon Sep 17 00:00:00 2001
From: Delta Regeer <bertjw@regeer.org>
Date: Sun, 3 Mar 2024 16:26:22 -0700
Subject: [PATCH 4/6] Don't exit handle_write early -- even if socket is not
connected
Calling handle_close() multiple times does not hurt anything, and is
safe.
---
src/waitress/channel.py | 6 ------
1 file changed, 6 deletions(-)
diff --git a/src/waitress/channel.py b/src/waitress/channel.py
index ea019d3f..3860ed51 100644
--- a/src/waitress/channel.py
+++ b/src/waitress/channel.py
@@ -91,13 +91,7 @@ def handle_write(self):
# Precondition: there's data in the out buffer to be sent, or
# there's a pending will_close request
- if not self.connected:
- # we dont want to close the channel twice
-
- return
-
# try to flush any pending output
-
if not self.requests:
# 1. There are no running tasks, so we don't need to try to lock
# the outbuf before sending
From 63678e652d912e67621580123c603e37c319d8c4 Mon Sep 17 00:00:00 2001
From: Delta Regeer <bertjw@regeer.org>
Date: Sun, 3 Mar 2024 16:35:39 -0700
Subject: [PATCH 5/6] Remove code not used by waitress from vendored asyncore
---
src/waitress/wasyncore.py | 45 ------------------
tests/test_wasyncore.py | 96 ++++++++-------------------------------
2 files changed, 18 insertions(+), 123 deletions(-)
diff --git a/src/waitress/wasyncore.py b/src/waitress/wasyncore.py
index 97943085..49829f98 100644
--- a/src/waitress/wasyncore.py
+++ b/src/waitress/wasyncore.py
@@ -378,23 +378,6 @@ def bind(self, addr):
self.addr = addr
return self.socket.bind(addr)
- def connect(self, address):
- self.connected = False
- self.connecting = True
- err = self.socket.connect_ex(address)
- if (
- err in (EINPROGRESS, EALREADY, EWOULDBLOCK)
- or err == EINVAL
- and os.name == "nt"
- ): # pragma: no cover
- self.addr = address
- return
- if err in (0, EISCONN):
- self.addr = address
- self.handle_connect_event()
- else:
- raise OSError(err, errorcode[err])
-
def accept(self):
# XXX can return either an address pair or None
try:
@@ -556,34 +539,6 @@ def handle_close(self):
self.close()
-# ---------------------------------------------------------------------------
-# adds simple buffered output capability, useful for simple clients.
-# [for more sophisticated usage use asynchat.async_chat]
-# ---------------------------------------------------------------------------
-
-
-class dispatcher_with_send(dispatcher):
- def __init__(self, sock=None, map=None):
- dispatcher.__init__(self, sock, map)
- self.out_buffer = b""
-
- def initiate_send(self):
- num_sent = 0
- num_sent = dispatcher.send(self, self.out_buffer[:65536])
- self.out_buffer = self.out_buffer[num_sent:]
-
- handle_write = initiate_send
-
- def writable(self):
- return (not self.connected) or len(self.out_buffer)
-
- def send(self, data):
- if self.debug: # pragma: no cover
- self.log_info("sending %s" % repr(data))
- self.out_buffer = self.out_buffer + data
- self.initiate_send()
-
-
def close_all(map=None, ignore_all=False):
if map is None: # pragma: no cover
map = socket_map
diff --git a/tests/test_wasyncore.py b/tests/test_wasyncore.py
index a8a480a8..20f68f6e 100644
--- a/tests/test_wasyncore.py
+++ b/tests/test_wasyncore.py
@@ -1,6 +1,7 @@
import _thread as thread
import contextlib
import errno
+from errno import EALREADY, EINPROGRESS, EINVAL, EISCONN, EWOULDBLOCK, errorcode
import functools
import gc
from io import BytesIO
@@ -641,62 +642,6 @@ def test_strerror(self):
self.assertTrue(err != "")
-class dispatcherwithsend_noread(asyncore.dispatcher_with_send): # pragma: no cover
- def readable(self):
- return False
-
- def handle_connect(self):
- pass
-
-
-class DispatcherWithSendTests(unittest.TestCase):
- def setUp(self):
- pass
-
- def tearDown(self):
- asyncore.close_all()
-
- @reap_threads
- def test_send(self):
- evt = threading.Event()
- sock = socket.socket()
- sock.settimeout(3)
- port = bind_port(sock)
-
- cap = BytesIO()
- args = (evt, cap, sock)
- t = threading.Thread(target=capture_server, args=args)
- t.start()
- try:
- # wait a little longer for the server to initialize (it sometimes
- # refuses connections on slow machines without this wait)
- time.sleep(0.2)
-
- data = b"Suppose there isn't a 16-ton weight?"
- d = dispatcherwithsend_noread()
- d.create_socket()
- d.connect((HOST, port))
-
- # give time for socket to connect
- time.sleep(0.1)
-
- d.send(data)
- d.send(data)
- d.send(b"\n")
-
- n = 1000
-
- while d.out_buffer and n > 0: # pragma: no cover
- asyncore.poll()
- n -= 1
-
- evt.wait()
-
- self.assertEqual(cap.getvalue(), data * 2)
- finally:
- join_thread(t, timeout=TIMEOUT)
-
-
@unittest.skipUnless(
hasattr(asyncore, "file_wrapper"), "asyncore.file_wrapper required"
)
@@ -839,6 +784,23 @@ def __init__(self, family, address):
self.create_socket(family)
self.connect(address)
+ def connect(self, address):
+ self.connected = False
+ self.connecting = True
+ err = self.socket.connect_ex(address)
+ if (
+ err in (EINPROGRESS, EALREADY, EWOULDBLOCK)
+ or err == EINVAL
+ and os.name == "nt"
+ ): # pragma: no cover
+ self.addr = address
+ return
+ if err in (0, EISCONN):
+ self.addr = address
+ self.handle_connect_event()
+ else:
+ raise OSError(err, errorcode[err])
+
def handle_connect(self):
pass
@@ -1489,13 +1451,6 @@ def setsockopt(*arg, **kw):
inst.set_reuse_addr()
self.assertTrue(sock.errored)
- def test_connect_raise_socket_error(self):
- sock = dummysocket()
- map = {}
- sock.connect_ex = lambda *arg: 1
- inst = self._makeOne(sock=sock, map=map)
- self.assertRaises(socket.error, inst.connect, 0)
-
def test_accept_raise_TypeError(self):
sock = dummysocket()
map = {}
@@ -1664,21 +1619,6 @@ def test_handle_accepted(self):
self.assertTrue(sock.closed)
-class Test_dispatcher_with_send(unittest.TestCase):
- def _makeOne(self, sock=None, map=None):
- from waitress.wasyncore import dispatcher_with_send
-
- return dispatcher_with_send(sock=sock, map=map)
-
- def test_writable(self):
- sock = dummysocket()
- map = {}
- inst = self._makeOne(sock=sock, map=map)
- inst.out_buffer = b"123"
- inst.connected = True
- self.assertTrue(inst.writable())
-
-
class Test_close_all(unittest.TestCase):
def _callFUT(self, map=None, ignore_all=False):
from waitress.wasyncore import close_all
From 9d99c89ae4aa8449313eea210a5ec9f3994a87b2 Mon Sep 17 00:00:00 2001
From: Delta Regeer <bertjw@regeer.org>
Date: Sun, 3 Mar 2024 16:37:12 -0700
Subject: [PATCH 6/6] When closing the socket, set it to None
This avoids calling close() twice on the same socket if self.close() or
self.handle_close() is called multiple times.
---
src/waitress/wasyncore.py | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/src/waitress/wasyncore.py b/src/waitress/wasyncore.py
index 49829f98..f42ee37c 100644
--- a/src/waitress/wasyncore.py
+++ b/src/waitress/wasyncore.py
@@ -436,6 +436,8 @@ def close(self):
if why.args[0] not in (ENOTCONN, EBADF):
raise
+ self.socket = None
+
# log and log_info may be overridden to provide more sophisticated
# logging and warning methods. In general, log is for 'hit' logging
# and 'log_info' is for informational, warning and error logging.
@@ -486,7 +488,11 @@ def handle_expt_event(self):
# handle_expt_event() is called if there might be an error on the
# socket, or if there is OOB data
# check for the error condition first
- err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
+ err = (
+ self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
+ if self.socket is not None
+ else 1
+ )
if err != 0:
# we can get here when select.select() says that there is an
# exceptional condition on the socket
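
Both changes above amount to making teardown idempotent: close() clears the socket reference, and later callers (including handle_expt_event()) tolerate it being gone. A minimal illustrative sketch of the same idea (not waitress's actual dispatcher):

    class CloserSketch:
        """Illustrative: make close() safe to call more than once."""

        def __init__(self, sock):
            self.socket = sock

        def close(self):
            sock, self.socket = self.socket, None
            if sock is None:
                return  # already closed on an earlier call
            try:
                sock.close()
            except OSError:
                pass
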

python-waitress.changes

@@ -1,3 +1,10 @@
-------------------------------------------------------------------
Fri Nov 8 09:02:29 UTC 2024 - Nico Krapp <nico.krapp@suse.com>
- Add security patches:
* CVE-2024-49768.patch (bsc#1232556)
* CVE-2024-49769.patch (bsc#1232554)
-------------------------------------------------------------------
Mon Dec 4 15:20:28 UTC 2023 - Ana Guerrero <ana.guerrero@suse.com>

python-waitress.spec

@@ -42,6 +42,10 @@ Source: https://files.pythonhosted.org/packages/source/w/waitress/waitre
# https://docs.python.org/3/objects.inv -> python3.inv
Source1: python3.inv
Source2: fetch-intersphinx-inventories.sh
# PATCH-FIX-UPSTREAM CVE-2024-49768.patch bsc#1232556
Patch1: CVE-2024-49768.patch
# PATCH-FIX-UPSTREAM CVE-2024-49769.patch bsc#1232554
Patch2: CVE-2024-49769.patch
BuildRequires: %{python_module setuptools}
BuildRequires: fdupes
BuildRequires: python-rpm-macros >= 20210929
@@ -83,7 +87,7 @@ For more information, see the "docs" directory of the Waitress package or
http://docs.pylonsproject.org/projects/waitress/en/latest/ .
%prep
%setup -q -n waitress-%{version}
%autosetup -p1 -n waitress-%{version}
sed -i '/addopts/d' setup.cfg
%build