forked from pool/python-distributed
Accepting request 986476 from devel:languages:python:numeric
OBS-URL: https://build.opensuse.org/request/show/986476
OBS-URL: https://build.opensuse.org/package/show/openSUSE:Factory/python-distributed?expand=0&rev=56
@@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:615df296e593bc636ed584c6b13ce2f05f29af8aac74d398993da2e81fd164b7
size 1615328

distributed-2022.6.1-gh.tar.gz (Normal file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cb597fa786c73f406a4f1f7be1d2df5c303bce7e2cfa4d5ce50c961e91803dff
size 1682181

distributed-ignore-offline.patch (Normal file, 13 lines)
@@ -0,0 +1,13 @@
Index: distributed-2022.6.1/setup.cfg
===================================================================
--- distributed-2022.6.1.orig/setup.cfg
+++ distributed-2022.6.1/setup.cfg
@@ -60,7 +60,7 @@ filterwarnings =
ignore:unclosed file <_io.TextIOWrapper.*:ResourceWarning
ignore:unclosed transport <_SelectorSocketTransport.*:ResourceWarning
ignore:unclosed transport <asyncio\.sslproto\..*:ResourceWarning
- ignore:Couldn't detect a suitable IP address for reaching '2001.4860.4860..8888', defaulting to hostname. \[Errno 65\] No route to host:RuntimeWarning
+ ignore:Couldn't detect a suitable IP address.*:RuntimeWarning
ignore:Dashboard and Scheduler are using the same server on port.*:RuntimeWarning
ignore:coroutine 'BaseTCPConnector.connect' was never awaited:RuntimeWarning
ignore:coroutine 'Client\._start' was never awaited:RuntimeWarning
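The relaxed filterwarnings entry above only has to match the start of the warning message, so it keeps working no matter which probe address or errno shows up when the build host has no network. A minimal sketch of that behaviour (the sample messages are invented for illustration; warning filters match their message regex from the start of the string, like re.match):

    import re

    pattern = r"Couldn't detect a suitable IP address.*"
    messages = [
        "Couldn't detect a suitable IP address for reaching '2001:4860:4860::8888', "
        "defaulting to hostname. [Errno 65] No route to host",
        "Couldn't detect a suitable IP address for reaching '8.8.8.8', "
        "defaulting to hostname. [Errno 101] Network is unreachable",
    ]
    # Both offline variants match the broadened pattern, while the old, very
    # specific regex only covered the first wording.
    assert all(re.match(pattern, m) for m in messages)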
@@ -1,375 +0,0 @@
From 9c6a4c905c75c5e64ca460ea17bb2bdf0f2782fa Mon Sep 17 00:00:00 2001
From: James Bourbeau <jrbourbeau@gmail.com>
Date: Thu, 3 Feb 2022 12:58:32 -0600
Subject: [PATCH 01/12] Add Python 3.10 build to CI

---
.github/workflows/tests.yaml | 2 +-
continuous_integration/environment-3.10.yaml | 56 ++++++++++++++++++++
2 files changed, 57 insertions(+), 1 deletion(-)
create mode 100644 continuous_integration/environment-3.10.yaml

Index: distributed-2022.03.0/.github/workflows/tests.yaml
===================================================================
--- distributed-2022.03.0.orig/.github/workflows/tests.yaml
+++ distributed-2022.03.0/.github/workflows/tests.yaml
@@ -23,7 +23,7 @@ jobs:
fail-fast: false
matrix:
os: [ubuntu-latest, windows-latest, macos-latest]
- python-version: ["3.8", "3.9"]
+ python-version: ["3.8", "3.9", "3.10"]
# Cherry-pick test modules to split the overall runtime roughly in half
partition: [ci1, not ci1]
include:
@@ -65,12 +65,6 @@ jobs:
shell: bash -l {0}
run: conda config --show

- - name: Install stacktrace
- shell: bash -l {0}
- # stacktrace for Python 3.8 has not been released at the moment of writing
- if: ${{ matrix.os == 'ubuntu-latest' && matrix.python-version < '3.8' }}
- run: mamba install -c conda-forge -c defaults -c numba libunwind stacktrace
-
- name: Hack around https://github.com/ipython/ipython/issues/12197
# This upstream issue causes an interpreter crash when running
# distributed/protocol/tests/test_serialize.py::test_profile_nested_sizeof
Index: distributed-2022.03.0/continuous_integration/environment-3.10.yaml
===================================================================
--- /dev/null
+++ distributed-2022.03.0/continuous_integration/environment-3.10.yaml
@@ -0,0 +1,56 @@
+name: dask-distributed
+channels:
+ - conda-forge
+ - defaults
+dependencies:
+ - python=3.10
+ - packaging
+ - pip
+ - asyncssh
+ - bokeh
+ - click
+ - cloudpickle
+ - coverage<6.3 # https://github.com/nedbat/coveragepy/issues/1310
+ - dask # overridden by git tip below
+ - filesystem-spec # overridden by git tip below
+ - h5py
+ - ipykernel
+ - ipywidgets
+ - jinja2
+ - joblib # overridden by git tip below
+ - jupyter_client
+ - lz4 # Only tested here
+ - msgpack-python
+ - netcdf4
+ - paramiko
+ - pre-commit
+ - prometheus_client
+ - psutil
+ - pynvml # Only tested here
+ - pytest
+ - pytest-cov
+ - pytest-faulthandler
+ - pytest-repeat
+ - pytest-rerunfailures
+ - pytest-timeout
+ - python-blosc # Only tested here
+ - python-snappy # Only tested here
+ - requests
+ - s3fs # overridden by git tip below
+ - scikit-learn
+ - scipy
+ - sortedcollections
+ - tblib
+ - toolz
+ - tornado=6
+ - zict # overridden by git tip below
+ - zstandard
+ - pip:
+ - git+https://github.com/dask/dask
+ - git+https://github.com/dask/s3fs
+ - git+https://github.com/dask/zict
+ # FIXME https://github.com/dask/distributed/issues/5345
+ # - git+https://github.com/intake/filesystem_spec
+ - git+https://github.com/joblib/joblib
+ - keras
+ - pytest-asyncio<0.14.0 # `pytest-asyncio<0.14.0` isn't available on conda-forge for Python 3.10
Index: distributed-2022.03.0/distributed/tests/test_client.py
===================================================================
--- distributed-2022.03.0.orig/distributed/tests/test_client.py
+++ distributed-2022.03.0/distributed/tests/test_client.py
@@ -6461,6 +6461,10 @@ async def test_performance_report(c, s,
assert "cdn.bokeh.org" in data


+@pytest.mark.skipif(
+ sys.version_info >= (3, 10),
+ reason="On Py3.10+ semaphore._loop is not bound until .acquire() blocks",
+)
@gen_cluster(nthreads=[])
async def test_client_gather_semaphore_loop(s):
async with Client(s.address, asynchronous=True) as c:
@@ -6471,9 +6475,16 @@ async def test_client_gather_semaphore_l
async def test_as_completed_condition_loop(c, s, a, b):
seq = c.map(inc, range(5))
ac = as_completed(seq)
+ # consume the ac so that the ac.condition is bound to the loop on py3.10+
+ async for _ in ac:
+ pass
assert ac.condition._loop == c.loop.asyncio_loop


+@pytest.mark.skipif(
+ sys.version_info >= (3, 10),
+ reason="On Py3.10+ semaphore._loop is not bound until .acquire() blocks",
+)
def test_client_connectionpool_semaphore_loop(s, a, b):
with Client(s["address"]) as c:
assert c.rpc.semaphore._loop is c.loop.asyncio_loop
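The two skips added above point at a CPython behaviour change: since Python 3.10, asyncio synchronization primitives bind their event loop lazily, so the private semaphore._loop attribute the tests inspect stays unset until an acquire() actually has to wait. A rough, hedged illustration of that behaviour (it reads a private attribute purely to mirror what the tests check, and the detail may vary between CPython versions):

    import asyncio
    import sys

    async def main() -> None:
        sem = asyncio.Semaphore(1)
        async with sem:
            pass  # acquire() never blocked, so the running loop was never needed
        if sys.version_info >= (3, 10):
            # Lazily bound: still the class-level default of None at this point.
            assert sem._loop is None

    asyncio.run(main())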
Index: distributed-2022.03.0/distributed/node.py
===================================================================
--- distributed-2022.03.0.orig/distributed/node.py
+++ distributed-2022.03.0/distributed/node.py
@@ -131,12 +131,9 @@ class ServerNode(Server):
import ssl

ssl_options = ssl.create_default_context(
- cafile=tls_ca_file, purpose=ssl.Purpose.SERVER_AUTH
+ cafile=tls_ca_file, purpose=ssl.Purpose.CLIENT_AUTH
)
ssl_options.load_cert_chain(tls_cert, keyfile=tls_key)
- # We don't care about auth here, just encryption
- ssl_options.check_hostname = False
- ssl_options.verify_mode = ssl.CERT_NONE

self.http_server = HTTPServer(self.http_application, ssl_options=ssl_options)

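For context on the hunk above: ssl.Purpose.CLIENT_AUTH is the purpose flag for building a server-side context (one that authenticates itself to connecting clients), while SERVER_AUTH produces a client-side context that verifies servers. A hedged sketch of the server-side setup the patched code moves to, with placeholder file names:

    import ssl

    # Server-side TLS context: CLIENT_AUTH means "this context talks to clients".
    ctx = ssl.create_default_context(
        purpose=ssl.Purpose.CLIENT_AUTH,
        cafile="tls-ca-cert.pem",  # placeholder CA bundle path
    )
    # The HTTP server presents its own certificate and key to clients.
    ctx.load_cert_chain("tls-cert.pem", keyfile="tls-key.pem")  # placeholder paths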
Index: distributed-2022.03.0/distributed/profile.py
===================================================================
--- distributed-2022.03.0.orig/distributed/profile.py
+++ distributed-2022.03.0/distributed/profile.py
@@ -27,6 +27,7 @@ We represent this tree as a nested dicti
from __future__ import annotations

import bisect
+import dis
import linecache
import sys
import threading
@@ -59,21 +60,41 @@ def identifier(frame):
)


+# work around some frames lacking an f_lineo eg: https://bugs.python.org/issue47085
+def _f_lineno(frame):
+ f_lineno = frame.f_lineno
+ if f_lineno is not None:
+ return f_lineno
+
+ f_lasti = frame.f_lasti
+ code = frame.f_code
+ prev_line = code.co_firstlineno
+
+ for start, next_line in dis.findlinestarts(code):
+ if f_lasti < start:
+ return prev_line
+ prev_line = next_line
+
+ return prev_line
+
+
def repr_frame(frame):
"""Render a frame as a line for inclusion into a text traceback"""
co = frame.f_code
- text = f' File "{co.co_filename}", line {frame.f_lineno}, in {co.co_name}'
- line = linecache.getline(co.co_filename, frame.f_lineno, frame.f_globals).lstrip()
+ f_lineno = _f_lineno(frame)
+ text = f' File "{co.co_filename}", line {f_lineno}, in {co.co_name}'
+ line = linecache.getline(co.co_filename, f_lineno, frame.f_globals).lstrip()
return text + "\n\t" + line


def info_frame(frame):
co = frame.f_code
- line = linecache.getline(co.co_filename, frame.f_lineno, frame.f_globals).lstrip()
+ f_lineno = _f_lineno(frame)
+ line = linecache.getline(co.co_filename, f_lineno, frame.f_globals).lstrip()
return {
"filename": co.co_filename,
"name": co.co_name,
- "line_number": frame.f_lineno,
+ "line_number": f_lineno,
"line": line,
}

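The fallback above relies on dis.findlinestarts, which yields (bytecode offset, line number) pairs for a code object; taking the last pair whose offset is not beyond f_lasti recovers a line number even when frame.f_lineno is None. A small standalone sketch of that mapping (the toy function is made up):

    import dis

    def example():
        for i in range(1):
            if i >= 0:
                pass

    # (bytecode offset, source line) pairs for the toy function.
    starts = list(dis.findlinestarts(example.__code__))

    def line_for_offset(code, f_lasti):
        # Same idea as the fallback: last line start at or before the offset.
        prev_line = code.co_firstlineno
        for start, line in dis.findlinestarts(code):
            if f_lasti < start:
                return prev_line
            prev_line = line
        return prev_line

    print(starts, line_for_offset(example.__code__, 0))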
Index: distributed-2022.03.0/distributed/tests/test_profile.py
===================================================================
--- distributed-2022.03.0.orig/distributed/tests/test_profile.py
+++ distributed-2022.03.0/distributed/tests/test_profile.py
@@ -1,5 +1,9 @@
+from __future__ import annotations
+
+import dataclasses
import sys
import threading
+from collections.abc import Iterator, Sequence
from time import sleep

import pytest
@@ -11,6 +15,7 @@ from distributed.profile import (
call_stack,
create,
identifier,
+ info_frame,
ll_get_stack,
llprocess,
merge,
@@ -200,3 +205,102 @@ def test_watch():
while threading.active_count() > start_threads:
assert time() < start + 2
sleep(0.01)
+
+
+@dataclasses.dataclass(frozen=True)
+class FakeCode:
+ co_filename: str
+ co_name: str
+ co_firstlineno: int
+ co_lnotab: bytes
+ co_lines_seq: Sequence[tuple[int, int, int | None]]
+ co_code: bytes
+
+ def co_lines(self) -> Iterator[tuple[int, int, int | None]]:
+ yield from self.co_lines_seq
+
+
+FAKE_CODE = FakeCode(
+ co_filename="<stdin>",
+ co_name="example",
+ co_firstlineno=1,
+ # https://github.com/python/cpython/blob/b68431fadb3150134ac6ccbf501cdfeaf4c75678/Objects/lnotab_notes.txt#L84
+ # generated from:
+ # def example():
+ # for i in range(1):
+ # if i >= 0:
+ # pass
+ # example.__code__.co_lnotab
+ co_lnotab=b"\x00\x01\x0c\x01\x08\x01\x04\xfe",
+ # generated with list(example.__code__.co_lines())
+ co_lines_seq=[
+ (0, 12, 2),
+ (12, 20, 3),
+ (20, 22, 4),
+ (22, 24, None),
+ (24, 28, 2),
+ ],
+ # used in dis.findlinestarts as bytecode_len = len(code.co_code)
+ # https://github.com/python/cpython/blob/6f345d363308e3e6ecf0ad518ea0fcc30afde2a8/Lib/dis.py#L457
+ co_code=bytes(28),
+)
+
+
+@dataclasses.dataclass(frozen=True)
+class FakeFrame:
+ f_lasti: int
+ f_code: FakeCode
+ f_lineno: int | None = None
+ f_back: FakeFrame | None = None
+ f_globals: dict[str, object] = dataclasses.field(default_factory=dict)
+
+
+@pytest.mark.parametrize(
+ "f_lasti,f_lineno",
+ [
+ (-1, 1),
+ (0, 2),
+ (1, 2),
+ (11, 2),
+ (12, 3),
+ (21, 4),
+ (22, 4),
+ (23, 4),
+ (24, 2),
+ (25, 2),
+ (26, 2),
+ (27, 2),
+ (100, 2),
+ ],
+)
+def test_info_frame_f_lineno(f_lasti: int, f_lineno: int) -> None:
+ assert info_frame(FakeFrame(f_lasti=f_lasti, f_code=FAKE_CODE)) == {
+ "filename": "<stdin>",
+ "name": "example",
+ "line_number": f_lineno,
+ "line": "",
+ }
+
+
+@pytest.mark.parametrize(
+ "f_lasti,f_lineno",
+ [
+ (-1, 1),
+ (0, 2),
+ (1, 2),
+ (11, 2),
+ (12, 3),
+ (21, 4),
+ (22, 4),
+ (23, 4),
+ (24, 2),
+ (25, 2),
+ (26, 2),
+ (27, 2),
+ (100, 2),
+ ],
+)
+def test_call_stack_f_lineno(f_lasti: int, f_lineno: int) -> None:
+ assert call_stack(FakeFrame(f_lasti=f_lasti, f_code=FAKE_CODE)) == [
+ f' File "<stdin>", line {f_lineno}, in example\n\t'
+ ]
Index: distributed-2022.03.0/distributed/utils_test.py
===================================================================
--- distributed-2022.03.0.orig/distributed/utils_test.py
+++ distributed-2022.03.0/distributed/utils_test.py
@@ -706,13 +706,16 @@ def cluster(
except KeyError:
rpc_kwargs = {}

- with rpc(saddr, **rpc_kwargs) as s:
- while True:
- nthreads = loop.run_sync(s.ncores)
- if len(nthreads) == nworkers:
- break
- if time() - start > 5:
- raise Exception("Timeout on cluster creation")
+ async def wait_for_workers():
+ async with rpc(saddr, **rpc_kwargs) as s:
+ while True:
+ nthreads = await s.ncores()
+ if len(nthreads) == nworkers:
+ break
+ if time() - start > 5:
+ raise Exception("Timeout on cluster creation")
+
+ loop.run_sync(wait_for_workers)

# avoid sending processes down to function
yield {"address": saddr}, [
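The rewritten helper above moves the scheduler polling into a coroutine and hands it to Tornado's loop.run_sync(), which runs an async callable to completion on the test loop instead of driving individual awaitables. A hedged, self-contained sketch of that pattern, assuming Tornado is installed (probe() is an invented stand-in for the rpc/ncores calls):

    import asyncio
    from tornado.ioloop import IOLoop

    async def probe() -> int:
        # Stand-in for "ask the scheduler how many workers have connected".
        await asyncio.sleep(0.01)
        return 4

    loop = IOLoop.current()
    # run_sync() takes a no-argument callable; if it returns an awaitable,
    # the loop runs it to completion and hands back the result.
    nworkers = loop.run_sync(probe)
    assert nworkers == 4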
Index: distributed-2022.03.0/setup.py
===================================================================
--- distributed-2022.03.0.orig/setup.py
+++ distributed-2022.03.0/setup.py
@@ -98,8 +98,11 @@ setup(
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: 3.10",
"Topic :: Scientific/Engineering",
"Topic :: System :: Distributed Computing",
],
@@ -1,3 +1,256 @@
-------------------------------------------------------------------
Fri Jun 24 20:21:16 UTC 2022 - Ben Greiner <code@bnavigator.de>

- Update to 2022.6.1
  * Highlights
    - This release includes the Worker State Machine refactor. The
      expectation should be that the worker state is its own
      synchronous subclass. Pulling all the state out into its own
      class allows us to write targeted unit tests without invoking
      any concurrent or asynchronous code.
  * Enhancements
    - Make worker state machine methods private (GH#6564)
      crusaderky
    - Yank state machine out of Worker class (GH#6566) crusaderky
    - Track worker_state_machine.TaskState instances (GH#6525)
      Hendrik Makait
    - Trivial tweaks to the Worker State Machine (GH#6586)
      crusaderky
    - Replace loop.call_later and loop.add_callback with background
      tasks added to Server. (GH#6603) Thomas Grainger
    - Support for neater WorkerState tests (GH#6609) crusaderky
    - Limit TCP writes with Tornado to 2GB (GH#6557) hhuuggoo
    - Enable no_implicit_optional for scheduler (GH#6622) Thomas
      Grainger
  * Bug Fixes
    - Partial revert of compute-task message format (GH#6626)
      Florian Jetter
    - Restore log message about received signals in CLI (GH#6618)
      Florian Jetter
    - Handle empty memoryviews of bytearrays when (de)serializing
      (GH#6576) Benjamin Zaitlen
    - Ensure steal requests from same-IP but distinct workers are
      rejected (GH#6585) Florian Jetter
    - Fix tls_(min|max)_ version having no effect on openssl 1.1.0g
      or lower (GH#6562) Thomas Grainger
    - Fix idle_timeout and unxfail test (GH#6563) Matthew Rocklin
    - Fix crashing debug statement in _purge_state (GH#6589)
      crusaderky
    - Abort connections on CancelledError (GH#6574) Thomas Grainger
    - Fix Active Memory Manager ignores nbytes thresholds (GH#6583)
      crusaderky
  * Deprecations
    - Deprecate WorkerState accessors (GH#6579)
      crusaderky
- Release 2022.6.0
  * Enhancements
    - Make disk access in system monitor configurable (GH#6537)
      Matthew Rocklin
    - Log and except errors on preload start (GH#6553) Matthew
      Rocklin
    - Fix Scheduler.restart logic (GH#6504) Gabe Joseph
    - Don’t heartbeat while Worker is closing (GH#6543) Gabe Joseph
    - No longer retry LocalCluster on errno.EADDRINUSE (GH#6369)
      Thomas Grainger
    - Don’t invoke log_event from state machine (GH#6512)
      crusaderky
    - Add config option to disable profiling and disable it in many
      tests per default (GH#6490) Hendrik Makait
    - Encapsulate Worker.batched_stream.send() (GH#6475) crusaderky
  * Bug Fixes
    - refresh-who-has can break the worker state machine (GH#6529)
      crusaderky
    - Restart worker if it’s unrecognized by scheduler (GH#6505)
      Gabe Joseph
    - Fix import error when distributed.rmm.pool-size is set
      (GH#6482) KoyamaSohei
  * Deprecations
    - Restore signature compatibility for dask-gateway (GH#6561)
      Tom Augspurger
    - Deprecate the io_loop and loop kwarg to Server, Worker, and
      Nanny (GH#6473) Thomas Grainger
    - Deprecate the loop kwarg to Scheduler (GH#6443) Thomas
      Grainger
- Release 2022.05.2
  * Enhancements
    - Add a lock to distributed.profile for better concurrency
      control (GH#6421) Hendrik Makait
    - Send SIGKILL after SIGTERM when passing 95% memory (GH#6419)
      crusaderky
  * Bug Fixes
    - Log rather than raise exceptions in preload.teardown()
      (GH#6458) Matthew Rocklin
    - Handle failing plugin.close() calls during scheduler shutdown
      (GH#6450) Matthew Rocklin
    - Fix slicing bug in ensure_memoryview (GH#6449) jakirkham
    - Generalize UCX errors on connect() and correct pytest
      fixtures (GH#6434) Peter Andreas Entschev
    - Run cluster widget periodic callbacks on the correct event
      loop (GH#6444) Thomas Grainger
- Release 2022.05.1
  * New Features
    - Add HTTP API to scheduler (GH#6270) Matthew Murray
    - Shuffle Service with Scheduler Logic (GH#6007) Matthew
      Rocklin
  * Enhancements
    - Follow-up on removing report and safe from Worker.close
      (GH#6423) Gabe Joseph
    - Server close faster (GH#6415) Florian Jetter
    - Disable HTTP API by default (GH#6420) Jacob Tomlinson
    - Remove report and safe from Worker.close (GH#6363) Florian
      Jetter
    - Allow deserialized plugins in register_scheduler_plugin
      (GH#6401) Matthew Rocklin
    - WorkerState are different for different addresses (GH#6398)
      Florian Jetter
    - Do not filter tasks before gathering data (GH#6371)
      crusaderky
    - Remove worker reconnect (GH#6361) Gabe Joseph
    - Add SchedulerPlugin.log_event handler (GH#6381) Matthew
      Rocklin
    - Ensure occupancy tracking works as expected for long running
      tasks (GH#6351) Florian Jetter
    - stimulus_id for all Instructions (GH#6347) crusaderky
    - Refactor missing-data command (GH#6332) crusaderky
    - Add idempotent to register_scheduler_plugin client (GH#6328)
      Alex Ford
    - Add option to specify a scheduler address for workers to use
      (GH#5944) Enric Tejedor
  * Bug Fixes
    - Remove stray breakpoint (GH#6417) Thomas Grainger
    - Fix API JSON MIME type (GH#6397) Jacob Tomlinson
    - Remove wrong assert in handle compute (GH#6370) Florian
      Jetter
    - Ensure multiple clients can cancel their key without
      interference (GH#6016) Florian Jetter
    - Fix Nanny shutdown assertion (GH#6357) Gabe Joseph
    - Fix fail_hard for sync functions (GH#6269) Gabe Joseph
    - Prevent infinite transition loops; more aggressive
      validate_state() (GH#6318) crusaderky
    - Ensure cleanup of many GBs of spilled data on terminate
      (GH#6280) crusaderky
    - Fix WORKER_ANY_RUNNING regression (GH#6297) Florian Jetter
    - Race conditions from fetch to compute while AMM requests
      replica (GH#6248) Florian Jetter
    - Ensure resumed tasks are not accidentally forgotten (GH#6217)
      Florian Jetter
    - Do not allow closing workers to be awaited again (GH#5910)
      Florian Jetter
  * Deprecations
    - Move wait_for_signals to private module and deprecate
      distributed.cli.utils (GH#6367) Hendrik Makait
- Release 2022.05.0
  * Highlights
    - This is a bugfix release for this issue.
  * Enhancements
    - Handle writeable in buffer_callback (GH#6238) jakirkham
    - Use .data with NumPy array allocation (GH#6242) jakirkham
  * Bug Fixes
    - Close executor in event loop if interpreter is closing
      (GH#6256) Matthew Rocklin
- Release 2022.04.2
  * Enhancements
    - Unblock event loop while waiting for ThreadpoolExecutor to
      shut down (GH#6091) Florian Jetter
    - RetireWorker policy is done if removed (GH#6234) Gabe Joseph
    - Pause to disable dependency gathering (GH#6195) crusaderky
    - Add EOFError to nanny multiprocessing.queue except list
      (GH#6213) Matthew Rocklin
    - Re-interpret error in lost worker scenario (GH#6193) Matthew
      Rocklin
    - Add Stimulus IDs to Scheduler (GH#6161) Florian Jetter
    - Set a five minute TTL for Dask workers (GH#6200) Matthew
      Rocklin
    - Add distributed.metrics.monotonic (GH#6181) crusaderky
    - Send worker validation errors to scheduler and err on test
      completion (GH#6192) Matthew Rocklin
    - Redesign worker exponential backoff on busy-gather (GH#6173)
      crusaderky
    - Log all invalid worker transitions to scheduler (GH#6134)
      Matthew Rocklin
    - Make Graph dashboard plot have invisible axes (GH#6149)
      Matthew Rocklin
    - Remove Nanny auto_restart state (GH#6138) Matthew Rocklin
  * Bug Fixes
    - Ensure scheduler events do not hold on to TaskState objects
      (GH#6226) Florian Jetter
    - Allow pausing and choke event loop while spilling (GH#6189)
      crusaderky
    - Do not use UUID in stealing (GH#6179) Florian Jetter
    - Handle int worker names in info page (GH#6158) Brett Naul
    - Fix psutil dio counters none (GH#6093) ungarj
    - Join Nanny watch thread (GH#6146) Matthew Rocklin
    - Improve logging when closing workers (GH#6129) Matthew
      Rocklin
    - Avoid stack overflow in profiling (GH#6141) Matthew Rocklin
    - Clean up SSHCluster if failure to start (GH#6130) Matthew
      Rocklin
  * Deprecations
    - Deprecate rpc synchronous context manager (GH#6171) Thomas
      Grainger
- Release 2022.04.1
  * New Features
    - Add KillWorker Plugin (GH#6126) Matthew Rocklin
  * Enhancements
    - Sort worker list in info pages (GH#6135) Matthew Rocklin
    - Add back Worker.transition_fetch_missing (GH#6112) Matthew
      Rocklin
    - Log state machine events (GH#6092) crusaderky
    - Migrate ensure_executing transitions to new WorkerState event
      mechanism - part 1 (GH#6003) crusaderky
    - Migrate ensure_executing transitions to new WorkerState event
      mechanism - part 2 (GH#6062) crusaderky
    - Annotate worker transitions to error (GH#6012) crusaderky
  * Bug Fixes
    - Avoid transitioning from memory/released to missing in worker
      (GH#6123) Matthew Rocklin
    - Don’t try to reconnect client on interpreter shutdown
      (GH#6120) Matthew Rocklin
    - Wrap UCX init warnings in importable functions (GH#6121)
      Charles Blackmon-Luca
    - Cancel asyncio tasks on worker close (GH#6098) crusaderky
    - Avoid port collisions when defining port ranges (GH#6054)
      crusaderky
- Release 2022.04.0
  * This is the first release with support for Python 3.10
  * New Features
    - Add Python 3.10 support (GH#5952) Thomas Grainger
    - New cluster dump utilities (GH#5920) Simon Perkins
    - New ClusterDump SchedulerPlugin for dumping cluster state on
      close (GH#5983) Simon Perkins
    - Track Event Loop intervals in dashboard plot (GH#5964)
      Matthew Rocklin
    - ToPickle - Unpickle on the Scheduler (GH#5728) Mads R. B.
      Kristensen
  * Enhancements
    - Retry on transient error codes in preload (GH#5982) Matthew
      Rocklin
    - More idiomatic mypy configuration (GH#6022) crusaderky
    - Name extensions and enable extension heartbeats (GH#5957)
      Matthew Rocklin
    - Better error message on misspelled executor annotation
      (GH#6009) crusaderky
    - Clarify that SchedulerPlugin must be subclassed (GH#6008)
      crusaderky
    - Remove duplication from stealing (GH#5787) Duncan McGregor
    - Remove cache in iscoroutinefunction to avoid holding on to
      refs (GH#5985) Florian Jetter
    - Add title to individual plots (GH#5967) Matthew Rocklin
    - Specify average in timeseries titles (GH#5974) Matthew
      Rocklin
  * Bug Fixes
    - Do not catch CancelledError in CommPool (GH#6005) Florian
      Jetter
  * Deprecations
    - Remove distributed._ipython_utils and dependents (GH#6036)
      Thomas Grainger
    - Remove support for PyPy (GH#6029) James Bourbeau
    - Drop runtime dependency to setuptools (GH#6017) crusaderky
    - Remove heartbeats from events (GH#5989) Matthew Rocklin
- Cythonization has been abandoned upstream: Back to noarch.
- Add distributed-ignore-offline.patch
- Drop distributed-pr5952-py310.patch

-------------------------------------------------------------------
Fri Mar 25 19:18:11 UTC 2022 - Ben Greiner <code@bnavigator.de>

@@ -40,34 +40,22 @@
%bcond_with test
%endif

%ifarch %{ix86} %{arm}
# cython optimizations not supported on 32-bit: https://github.com/dask/dask/issues/7489
%bcond_with cythonize
%else
%bcond_without cythonize
%endif
%if %{with cythonize}
%define cythonize --with-cython
%endif

# use this to run tests with xdist in parallel, unfortunately fails server side
%bcond_with paralleltests

%{?!python_module:%define python_module() python3-%{**}}
%define skip_python2 1
# ===> Note: python-dask MUST be updated in sync with python-distributed! <===
%define ghversiontag 2022.03.0
Name: python-distributed%{psuffix}
# ===> Note: python-dask MUST be updated in sync with python-distributed! <===
Version: 2022.3.0
Version: 2022.6.1
Release: 0
Summary: Library for distributed computing with Python
License: BSD-3-Clause
URL: https://distributed.dask.org
Source: https://github.com/dask/distributed/archive/refs/tags/%{ghversiontag}.tar.gz#/distributed-%{ghversiontag}-gh.tar.gz
Source: https://github.com/dask/distributed/archive/refs/tags/%{version}.tar.gz#/distributed-%{version}-gh.tar.gz
Source99: python-distributed-rpmlintrc
# PATCH-FIX-UPSTREAM distributed-pr5952-py310.patch -- gh#dask/distributed#5952
Patch1: distributed-pr5952-py310.patch
# PATCH-FIX-OPENSUSE distributed-ignore-off.patch -- ignore that we can't probe addresses on obs, code@bnavigator.de
Patch1: distributed-ignore-offline.patch
# PATCH-FIX-OPENSUSE distributed-ignore-thread-leaks.patch -- ignore leaking threads on obs, code@bnavigator.de
Patch2: distributed-ignore-thread-leaks.patch
BuildRequires: %{python_module base >= 3.8}
@@ -79,49 +67,36 @@ Requires: python-certifi
Requires: python-click >= 6.6
Requires: python-cloudpickle >= 1.5.0
Requires: python-dask = %{version}
Requires: python-msgpack
Requires: python-locket >= 1.0.0
Requires: python-msgpack >= 0.6.0
Requires: python-packaging >= 20.0
Requires: python-psutil >= 5.0
Requires: python-sortedcontainers
Requires: python-tblib
Requires: python-toolz >= 0.8.2
Requires: python-tornado >= 6.0.3
Requires: python-urllib3
Requires: python-zict >= 0.1.3
Requires(post): update-alternatives
Requires(postun):update-alternatives
%if %{with cythonize}
BuildRequires: %{python_module Cython}
# the cythonized scheduler needs Cython also as runtime dep for some checks
Requires: python-Cython
%endif
%if %{with test}
BuildRequires: %{python_module PyYAML}
BuildRequires: %{python_module bokeh}
BuildRequires: %{python_module certifi}
BuildRequires: %{python_module click >= 6.6}
BuildRequires: %{python_module cloudpickle >= 1.5.0}
BuildRequires: %{python_module dask-all = %{version}}
BuildRequires: %{python_module distributed = %{version}}
BuildRequires: %{python_module ipykernel}
BuildRequires: %{python_module ipython}
BuildRequires: %{python_module jupyter_client}
BuildRequires: %{python_module msgpack}
BuildRequires: %{python_module psutil}
BuildRequires: %{python_module pytest-asyncio >= 0.17.2}
BuildRequires: %{python_module pytest-rerunfailures}
BuildRequires: %{python_module pytest-timeout}
BuildRequires: %{python_module pytest}
BuildRequires: %{python_module requests}
BuildRequires: %{python_module sortedcontainers}
BuildRequires: %{python_module sparse}
BuildRequires: %{python_module tblib}
BuildRequires: %{python_module toolz >= 0.8.2}
BuildRequires: %{python_module tornado >= 6.0.3}
BuildRequires: %{python_module zict >= 0.1.3}
%if %{with paralleltests}
BuildRequires: %{python_module pytest-xdist}
%endif
%endif
BuildArch: noarch
%python_subpackages

%description
@@ -130,45 +105,61 @@ extends both the concurrent.futures and dask APIs to moderate sized
clusters.

%prep
%autosetup -p1 -n distributed-%{ghversiontag}
%autosetup -p1 -n distributed-%{version}

sed -i -e '/addopts/ {s/--durations=20//; s/--color=yes//}' \
-e 's/timeout_method = thread/timeout_method = signal/' setup.cfg
sed -e '/--durations=20/d' \
-e '/--color=yes/d' \
-e 's/timeout_method = thread/timeout_method = signal/' \
-i setup.cfg

%build
%if ! %{with test}
%python_build %{?cythonize}
%python_build
%endif

%install
%if ! %{with test}
%python_install %{?cythonize}
%python_install
%python_clone -a %{buildroot}%{_bindir}/dask-ssh
%python_clone -a %{buildroot}%{_bindir}/dask-scheduler
%python_clone -a %{buildroot}%{_bindir}/dask-worker
%python_expand %fdupes %{buildroot}%{$python_sitearch}
%python_expand %fdupes %{buildroot}%{$python_sitelib}
%endif

%if %{with test}
%check
# test local src dir, not installed path: looks for test certificates and not installed test modules
export PYTHONPATH=":x"
# disable profiling completely -- https://github.com/dask/distributed/pull/6490
sed '/enable profiling/ {s/enabled: True/enabled: False/}' -i distributed/distributed.yaml
# make sure the change was successful, this is only for the tests, we didn't patch any installed source
grep 'enabled: False .*enable profiling' distributed/distributed.yaml

# we obviously don't test a git repo
donttest="test_git_revision"
# logger error
donttest+=" or test_version_warning_in_cluster"
# invalid task state
donttest+=" or test_fail_to_pickle_target_2"

# Some tests randomly fail server-side -- too slow for obs (?)
# see also https://github.com/dask/distributed/issues/5818
donttest+=" or (test_asyncprocess and test_exit_callback)"
donttest+=" or (test_asyncprocess and (test_exit_callback or test_simple))"
donttest+=" or (test_client and test_repr)"
donttest+=" or (test_client and test_profile_server)"
donttest+=" or (test_metrics and test_wall_clock)"
donttest+=" or (test_priorities and test_compute)"
donttest+=" or (test_resources and test_prefer_constrained)"
donttest+=" or (test_steal and test_steal_twice)"
donttest+=" or (test_worker and test_gather_dep_one_worker_always_busy)"
donttest+=" or (test_variable and test_variable_in_task)"
donttest+=" or (test_worker and test_worker_reconnects_mid_compute)"
# server-side fail due to the non-network warning in a subprocess where the patched filter does not apply
donttest+=" or (test_client and test_quiet_close_process)"

# Exception messages not caught -- https://github.com/dask/distributed/issues/5460#issuecomment-1079432890
python310_donttest+=" or test_exception_text"
python310_donttest+=" or test_worker_bad_args"
python310_donttest+=" or test_run_spec_deserialize_fail"

if [[ $(getconf LONG_BIT) -eq 32 ]]; then
# OverflowError -- https://github.com/dask/distributed/issues/5252
@@ -183,11 +174,20 @@ fi
notparallel="rebalance or memory or upload"
notparallel+=" or test_open_close_many_workers"
notparallel+=" or test_recreate_error_array"
notparallel+=" or (test_preload and test_web_preload_worker)"
%pytest_arch distributed/tests -m "not avoid_ci" -n auto -k "not ($notparallel or $donttest ${$python_donttest})"
%pytest_arch distributed/tests -m "not avoid_ci" -k "($notparallel) and (not ($donttest ${$python_donttest}))"
notparallel+=" or (test_preload and test_web_preload)"
# Recursion error, https://github.com/dask/distributed/issues/6406
notparallel+=" or test_stack_overflow"
#
notparallel+=" or test_dashboard_host"
notparallel+=" or test_close_properly"
notparallel+=" or test_popen_timeout"
notparallel+=" or test_plugin_internal_exception"
notparallel+=" or test_runspec_regression_sync"

%pytest distributed/tests -m "not avoid_ci" -n auto -k "not ($notparallel or $donttest ${$python_donttest})"
%pytest distributed/tests -m "not avoid_ci" -k "($notparallel) and not ($donttest ${$python_donttest})"
%else
%pytest_arch distributed/tests -m "not avoid_ci" -k "not ($donttest ${$python_donttest})" --reruns 3 --reruns-delay 3
%pytest distributed/tests -m "not avoid_ci" -k "not ($donttest ${$python_donttest})" --reruns 3 --reruns-delay 3
%endif
%endif

@@ -204,8 +204,8 @@ notparallel+=" or (test_preload and test_web_preload_worker)"
%python_alternative %{_bindir}/dask-ssh
%python_alternative %{_bindir}/dask-scheduler
%python_alternative %{_bindir}/dask-worker
%{python_sitearch}/distributed
%{python_sitearch}/distributed-%{version}*-info
%{python_sitelib}/distributed
%{python_sitelib}/distributed-%{version}*-info

%endif