forked from pool/python-eventlet
Accepting request 828006 from devel:languages:python
OBS-URL: https://build.opensuse.org/request/show/828006
OBS-URL: https://build.opensuse.org/package/show/openSUSE:Factory/python-eventlet?expand=0&rev=35
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:4c8ab42c51bff55204fef43cff32616558bedbc7538d876bb6a96ce820c7f9ed
-size 397545
eventlet-0.26.1.tar.gz (new file, 3 lines)
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4f4a43366b4cbd4a3f2f231816e5c3dae8ab316df9b7da11f0525e2800559f33
+size 398200
newdnspython.patch (new file, 42 lines)
@@ -0,0 +1,42 @@
--- a/eventlet/support/greendns.py
+++ b/eventlet/support/greendns.py
@@ -313,7 +313,7 @@ class ResolverProxy(object):
self.clear()

def clear(self):
- self._resolver = dns.resolver.Resolver(filename=self._filename)
+ self._resolver = dns.resolver.Resolver(filename=self._filename, configure=False)
self._resolver.cache = dns.resolver.LRUCache()

def query(self, qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN,
--- a/tests/greendns_test.py
+++ b/tests/greendns_test.py
@@ -885,7 +885,7 @@ class TinyDNSTests(tests.LimitedTestCase
# https://github.com/eventlet/eventlet/issues/499
# None means we don't want the server to find the IP
with tests.dns_tcp_server(None) as dnsaddr:
- resolver = Resolver()
+ resolver = Resolver(configure=False)
resolver.nameservers = [dnsaddr[0]]
resolver.nameserver_ports[dnsaddr[0]] = dnsaddr[1]

@@ -896,7 +896,7 @@ class TinyDNSTests(tests.LimitedTestCase
# https://github.com/eventlet/eventlet/issues/499
expected_ip = "192.168.1.1"
with tests.dns_tcp_server(expected_ip) as dnsaddr:
- resolver = Resolver()
+ resolver = Resolver(configure=False)
resolver.nameservers = [dnsaddr[0]]
resolver.nameserver_ports[dnsaddr[0]] = dnsaddr[1]
response = resolver.query('host.example.com', 'a', tcp=True)
--- a/setup.py
+++ b/setup.py
@@ -15,7 +15,7 @@ setuptools.setup(
url='http://eventlet.net',
packages=setuptools.find_packages(exclude=['benchmarks', 'tests', 'tests.*']),
install_requires=(
- 'dnspython >= 1.15.0, < 2.0.0',
+ 'dnspython >= 1.15.0',
'greenlet >= 0.3',
'monotonic >= 1.4',
'six >= 1.10.0',
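For context: dnspython's Resolver reads the system configuration (e.g. /etc/resolv.conf) at construction time unless configure=False is passed, which is why the patched tests above build a resolver with configure=False and then assign the nameserver and port by hand. A minimal sketch of that usage (dnspython must be installed; the address and port below are illustrative only, not taken from the patch):

    import dns.resolver

    # Do not read /etc/resolv.conf; configure the resolver explicitly instead.
    resolver = dns.resolver.Resolver(configure=False)
    resolver.nameservers = ['127.0.0.1']            # hypothetical test nameserver
    resolver.nameserver_ports['127.0.0.1'] = 5353   # hypothetical non-default port
    # A TCP query against that server, mirroring the test above:
    # answer = resolver.query('host.example.com', 'a', tcp=True)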
pr_639.patch (new file, 51 lines)
@@ -0,0 +1,51 @@
commit 46fc185c8f92008c65aef2713fc1445bfc5f6fec
Author: Rodolfo Alonso Hernandez <ralonsoh@redhat.com>
Date: Mon Jul 20 17:21:30 2020 +0000

Replace dnspython "_compute_expiration" by "_compute_times".

In dnspython v2.0.0, "_compute_expiration" was replaced by
"_compute_times". Once the minimum version of dnspython is
v2.0.0, we can remove this wrapping method.

Closes-Bug: #1888258
Fixes: #629

diff --git a/eventlet/support/greendns.py b/eventlet/support/greendns.py
index 13968c2..19b83c9 100644
--- a/eventlet/support/greendns.py
+++ b/eventlet/support/greendns.py
@@ -118,6 +118,15 @@ def is_ip_addr(host):
return is_ipv4_addr(host) or is_ipv6_addr(host)


+def compute_expiration(query, timeout):
+ # NOTE(ralonsoh): in dnspython v2.0.0, "_compute_expiration" was replaced
+ # by "_compute_times".
+ if hasattr(query, '_compute_expiration'):
+ return query._compute_expiration(timeout)
+ else:
+ return query._compute_times(timeout)[1]
+
+
class HostsAnswer(dns.resolver.Answer):
"""Answer class for HostsResolver object"""

@@ -709,7 +718,7 @@ def udp(q, where, timeout=DNS_QUERY_TIMEOUT, port=53,
s = socket.socket(af, socket.SOCK_DGRAM)
s.settimeout(timeout)
try:
- expiration = dns.query._compute_expiration(timeout)
+ expiration = compute_expiration(dns.query, timeout)
if source is not None:
s.bind(source)
while True:
@@ -802,7 +811,7 @@ def tcp(q, where, timeout=DNS_QUERY_TIMEOUT, port=53,
s = socket.socket(af, socket.SOCK_STREAM)
s.settimeout(timeout)
try:
- expiration = dns.query._compute_expiration(timeout)
+ expiration = compute_expiration(dns.query, timeout)
if source is not None:
s.bind(source)
while True:
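The compute_expiration() wrapper added by this patch papers over a private-API change: dnspython 1.x exposes dns.query._compute_expiration(timeout), while 2.0.0 replaced it with dns.query._compute_times(timeout), whose second element is the expiration (as the [1] index above shows). A standalone sketch of the same feature probe (illustrative only, not part of the patch):

    import dns.query

    timeout = 5.0
    if hasattr(dns.query, '_compute_expiration'):
        # dnspython < 2.0.0
        expiration = dns.query._compute_expiration(timeout)
    else:
        # dnspython >= 2.0.0: _compute_times() returns a (start, expiration) pair
        expiration = dns.query._compute_times(timeout)[1]
    print(expiration)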
@@ -1,3 +1,46 @@
-------------------------------------------------------------------
Wed Aug 19 15:28:03 UTC 2020 - Benjamin Greiner <code@bnavigator.de>

- do not run test on python 2 (sadly no real macro expansion
  possible to allow run but failok with ||:)
- disable test discovery dir recursion gh#eventlet/eventlet#638
- simplify the pytest call, no test file deletions

-------------------------------------------------------------------
Mon Aug 17 09:38:44 UTC 2020 - John Vandenberg <jayvdb@gmail.com>

- Add pr_639.patch which fixes eventlet using dnspython 2.0.0
- Add remove_nose_part_2.patch to complete the removal of nose
- Activate test suite with 958 test cases passing on Tumbleweed

-------------------------------------------------------------------
Mon Aug 17 07:49:25 UTC 2020 - Matej Cepl <mcepl@suse.com>

- Don't limit the upper version of dnspython.

-------------------------------------------------------------------
Fri Aug 14 21:30:16 UTC 2020 - Matej Cepl <mcepl@suse.com>

- Add newdnspython.patch which makes eventlet work with new
  dnspython 2.0.0.

-------------------------------------------------------------------
Thu Aug 13 16:07:08 UTC 2020 - Matej Cepl <mcepl@suse.com>

- Add remove_nose.patch to remove dependency on nose (gh#eventlet/eventlet#638).

-------------------------------------------------------------------
Thu Aug 13 14:07:43 UTC 2020 - Dirk Mueller <dmueller@suse.com>

- update to 0.26.1:
  * pin dnspython <2.0.0 https://github.com/eventlet/eventlet/issues/619
  * Fix compatibility with SSLContext usage >= Python 3.7
  * wsgi: Fix header capitalization on py3
  * Fix #508: Py37 Deadlock ThreadPoolExecutor (#598)
  * drop Python 3.4 support
  * Fix misc SyntaxWarning's under Python 3.8
  * Remove unnecessary assignment in _recv_loop (#601)

-------------------------------------------------------------------
Wed Apr 29 10:49:14 UTC 2020 - Dirk Mueller <dmueller@suse.com>

@@ -18,33 +18,44 @@

%{?!python_module:%define python_module() python-%{**} python3-%{**}}
Name: python-eventlet
-Version: 0.25.2
+Version: 0.26.1
Release: 0
Summary: Concurrent networking library for Python
License: MIT
Group: Development/Languages/Python
URL: http://eventlet.net
Source: https://files.pythonhosted.org/packages/source/e/eventlet/eventlet-%{version}.tar.gz
# PATCH-FEATURE-UPSTREAM remove_nose.patch gh#eventlet/eventlet#638 mcepl@suse.com
# Removes dependency on nose
Patch0: remove_nose.patch
# PATCH-FIX-UPSTREAM newdnspython.patch gh#eventlet/eventlet#638 mcepl@suse.com
# patch is from gh#rthalley/dnspython#519
Patch1: newdnspython.patch
# PATCH-FEATURE-UPSTREAM pr_639.patch gh#eventlet/eventlet#639 jayvdb@gmail.com
Patch2: pr_639.patch
# Really remove the dependency on nose
Patch3: remove_nose_part_2.patch
BuildRequires: %{python_module dnspython >= 1.15.0}
BuildRequires: %{python_module greenlet >= 0.3}
BuildRequires: %{python_module monotonic >= 1.4}
BuildRequires: %{python_module nose}
BuildRequires: %{python_module pyOpenSSL}
BuildRequires: %{python_module pytest}
BuildRequires: %{python_module pyzmq}
BuildRequires: %{python_module setuptools}
BuildRequires: %{python_module six >= 1.10.0}
BuildRequires: %{python_module testsuite}
BuildRequires: fdupes
BuildRequires: netcfg
BuildRequires: python-rpm-macros
# eventlet parses /etc/protocols which is not available in normal build envs
# Tests
BuildRequires: sysconfig-netconfig
Requires: netcfg
Requires: python-dnspython >= 1.15.0
Requires: python-greenlet >= 0.3
Requires: python-monotonic >= 1.4
Requires: python-six >= 1.10.0
BuildArch: noarch
%if %{python_version_nodots} < 34
Requires: python-enum34
%endif
%python_subpackages

%description
@@ -59,7 +70,11 @@ interpreter, or as part of a larger application.

%prep
%setup -q -n eventlet-%{version}
%autopatch -p1

sed -i "s|^#!.*||" eventlet/support/greendns.py # Fix non-executable script
# https://github.com/eventlet/eventlet/issues/638
sed -i "/assert num_readers/ i \ return" tests/__init__.py

%build
%python_build
@@ -69,8 +84,22 @@ sed -i "s|^#!.*||" eventlet/support/greendns.py # Fix non-executable script
%python_expand %fdupes %{buildroot}%{$python_sitelib}

%check
# 400 out of 600 tests either fail or error out
#%%python_expand PYTHONPATH=%{buildroot}%{$python_sitelib} nosetests-%{$python_bin_suffix} -v
# python2 is required to build for Leap, but tests fail (even upstream)
python2_skipall='--collect-only'
# dnspython 1 and 2: backdoor tests fail with "take too long"
skiptests="(BackdoorTest and test_server)"
# fail only with dnspython 2:
skiptests+=" or test_dns_methods_are_green or test_noraise_dns_tcp"

# Unknown openSUSE 15.x specific errors
# TypeError: _wrap_socket() argument 1 must be _socket.socket, not SSLSocket
# https://github.com/rthalley/dnspython/issues/559#issuecomment-675274960
%if %python3_version_nodots == 36
skiptests+=" or test_connect_ssl or test_ssl_sending_messages or test_wrap_ssl"
skiptests+=" or ssl_test or wsgi_test"
%endif
# no subdir recursion https://github.com/eventlet/eventlet/issues/638#issuecomment-676085599
%pytest -o norecursedirs="tests/*" -k "not ($skiptests)" ${$python_skipall}

%files %{python_files}
%license LICENSE
remove_nose.patch (new file, 125 lines)
@@ -0,0 +1,125 @@
--- a/setup.py
+++ b/setup.py
@@ -27,7 +27,7 @@ setuptools.setup(
'README.rst'
)
).read(),
- test_suite='nose.collector',
+ test_suite='tests',
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
--- a/eventlet.egg-info/SOURCES.txt
+++ b/eventlet.egg-info/SOURCES.txt
@@ -174,7 +174,6 @@ tests/greenthread_test.py
tests/hub_test.py
tests/mock.py
tests/mysqldb_test.py
-tests/nosewrapper.py
tests/openssl_test.py
tests/os_test.py
tests/parse_results.py
@@ -270,4 +269,4 @@ tests/stdlib/test_threading_local.py
tests/stdlib/test_timeout.py
tests/stdlib/test_urllib.py
tests/stdlib/test_urllib2.py
-tests/stdlib/test_urllib2_localnet.py
\ No newline at end of file
+tests/stdlib/test_urllib2_localnet.py
--- a/tests/greenio_test.py
+++ b/tests/greenio_test.py
@@ -9,8 +9,6 @@ import socket as _orig_sock
import sys
import tempfile

-from nose.tools import eq_
-
import eventlet
from eventlet import event, greenio, debug
from eventlet.hubs import get_hub
@@ -39,7 +37,7 @@ def expect_socket_timeout(function, *arg
raise AssertionError("socket.timeout not raised")
except socket.timeout as e:
assert hasattr(e, 'args')
- eq_(e.args[0], 'timed out')
+ assert e.args[0] == 'timed out'


def min_buf_size():
@@ -674,8 +672,8 @@ class TestGreenSocket(tests.LimitedTestC
sender.sendto(b'second', 0, address)

sender_address = ('127.0.0.1', sender.getsockname()[1])
- eq_(receiver.recvfrom(1024), (b'first', sender_address))
- eq_(receiver.recvfrom(1024), (b'second', sender_address))
+ assert receiver.recvfrom(1024) == (b'first', sender_address)
+ assert receiver.recvfrom(1024) == (b'second', sender_address)


def test_get_fileno_of_a_socket_works():
--- a/tests/nosewrapper.py
+++ b/tests/nosewrapper.py
@@ -1,20 +1,13 @@
""" This script simply gets the paths correct for testing eventlet with the
hub extension for Nose."""
-import nose
from os.path import dirname, realpath, abspath
import sys
+import unittest


parent_dir = dirname(dirname(realpath(abspath(__file__))))
if parent_dir not in sys.path:
sys.path.insert(0, parent_dir)

-# hudson does a better job printing the test results if the exit value is 0
-zero_status = '--force-zero-status'
-if zero_status in sys.argv:
- sys.argv.remove(zero_status)
- launch = nose.run
-else:
- launch = nose.main
-
-launch(argv=sys.argv)
+if __name__ == '__main__':
+ unittest.main()
--- a/tests/__init__.py
+++ b/tests/__init__.py
@@ -20,7 +20,7 @@ import sys
import unittest
import warnings

-from nose.plugins.skip import SkipTest
+from unittest import SkipTest

import eventlet
from eventlet import tpool
@@ -223,7 +223,6 @@ class LimitedTestCase(unittest.TestCase)
def check_idle_cpu_usage(duration, allowed_part):
if resource is None:
# TODO: use https://code.google.com/p/psutil/
- from nose.plugins.skip import SkipTest
raise SkipTest('CPU usage testing not supported (`import resource` failed)')

r1 = resource.getrusage(resource.RUSAGE_SELF)
--- a/tests/dagpool_test.py
+++ b/tests/dagpool_test.py
@@ -5,7 +5,6 @@
@brief Test DAGPool class
"""

-from nose.tools import *
import eventlet
from eventlet.dagpool import DAGPool, Collision, PropagateError
import six
@@ -13,8 +12,8 @@ from contextlib import contextmanager
import itertools


-# Not all versions of nose.tools.assert_raises() support the usage in this
-# module, but it's straightforward enough to code that explicitly.
+# Not all versions of assert_raises() support the usage in this module,
+# but it's straightforward enough to code that explicitly.
@contextmanager
def assert_raises(exc):
"""exc is an exception class"""
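The comment kept in the last hunk notes that nose's assert_raises is easy to replace with an explicit context manager, and the patched dagpool_test.py defines one itself (the hunk above ends right at that definition). A minimal sketch of such a stand-in (an illustration of the idea, not the exact code from the test suite):

    from contextlib import contextmanager

    @contextmanager
    def assert_raises(exc):
        """exc is an exception class"""
        try:
            yield
        except exc:
            pass
        else:
            raise AssertionError('{0} was not raised'.format(exc.__name__))

    # Usage mirrors remove_nose_part_2.patch below:
    # with assert_raises(KeyError):
    #     pool.kill("a")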
remove_nose_part_2.patch (new file, 301 lines)
@@ -0,0 +1,301 @@
diff -ur eventlet-0.26.1-orig/tests/dagpool_test.py eventlet-0.26.1/tests/dagpool_test.py
--- eventlet-0.26.1-orig/tests/dagpool_test.py 2020-08-17 16:48:04.393065291 +0700
+++ eventlet-0.26.1/tests/dagpool_test.py 2020-08-17 16:48:42.049502450 +0700
@@ -162,7 +162,7 @@
# a set. Make a set containing its elements.
setlist.append(set(subseq))
# Now that we've massaged 'sequence' into 'setlist', compare.
- assert_equal(self.sequence, setlist)
+ assert self.sequence == setlist


# ****************************************************************************
@@ -190,14 +190,14 @@
with check_no_suspend():
results = pool.waitall()
# with no spawn() or post(), waitall() returns preload data
- assert_equals(results, dict(a=1, b=2, c=3))
+ assert results == dict(a=1, b=2, c=3)

# preload sequence of pairs
pool = DAGPool([("d", 4), ("e", 5), ("f", 6)])
# this must not hang
with check_no_suspend():
results = pool.waitall()
- assert_equals(results, dict(d=4, e=5, f=6))
+ assert results == dict(d=4, e=5, f=6)


def test_wait_each_empty():
@@ -215,10 +215,10 @@
with check_no_suspend():
# wait_each() may deliver in arbitrary order; collect into a dict
# for comparison
- assert_equals(dict(pool.wait_each("abc")), dict(a=1, b=2, c=3))
+ assert dict(pool.wait_each("abc")) == dict(a=1, b=2, c=3)

# while we're at it, test wait() for preloaded keys
- assert_equals(pool.wait("bc"), dict(b=2, c=3))
+ assert pool.wait("bc") == dict(b=2, c=3)


def post_each(pool, capture):
@@ -256,7 +256,7 @@
eventlet.spawn(post_each, pool, capture)
gotten = pool.wait("bcdefg")
capture.add("got all")
- assert_equals(gotten,
+ assert (gotten ==
dict(b=2, c=3,
d="dval", e="eval",
f="fval", g="gval"))
@@ -284,7 +284,7 @@
pool = DAGPool()
pool.spawn("a", (), lambda key, results: "aval")
# hasn't yet even started
- assert_equals(pool.get("a"), None)
+ assert pool.get("a") == None
with assert_raises(Collision):
# Attempting to spawn again with same key should collide even if the
# first spawned greenthread hasn't yet had a chance to run.
@@ -292,7 +292,7 @@
# now let the spawned eventlet run
eventlet.sleep(0)
# should have finished
- assert_equals(pool.get("a"), "aval")
+ assert pool.get("a") == "aval"
with assert_raises(Collision):
# Attempting to spawn with same key collides even when the greenthread
# has completed.
@@ -323,60 +323,60 @@
capture.step()
# but none of them has yet produced a result
for k in "defgh":
- assert_equals(pool.get(k), None)
- assert_equals(set(pool.keys()), set("abc"))
- assert_equals(dict(pool.items()), dict(a=1, b=2, c=3))
- assert_equals(pool.running(), 5)
- assert_equals(set(pool.running_keys()), set("defgh"))
- assert_equals(pool.waiting(), 1)
- assert_equals(pool.waiting_for(), dict(h=set("defg")))
- assert_equals(pool.waiting_for("d"), set())
- assert_equals(pool.waiting_for("c"), set())
+ assert pool.get(k) == None
+ assert set(pool.keys()) == set("abc")
+ assert dict(pool.items()) == dict(a=1, b=2, c=3)
+ assert pool.running() == 5
+ assert set(pool.running_keys()) == set("defgh")
+ assert pool.waiting() == 1
+ assert pool.waiting_for() == dict(h=set("defg"))
+ assert pool.waiting_for("d") == set()
+ assert pool.waiting_for("c") == set()
with assert_raises(KeyError):
pool.waiting_for("j")
- assert_equals(pool.waiting_for("h"), set("defg"))
+ assert pool.waiting_for("h") == set("defg")

# let one of the upstream greenthreads complete
events["f"].send("fval")
spin()
capture.step()
- assert_equals(pool.get("f"), "fval")
- assert_equals(set(pool.keys()), set("abcf"))
- assert_equals(dict(pool.items()), dict(a=1, b=2, c=3, f="fval"))
- assert_equals(pool.running(), 4)
- assert_equals(set(pool.running_keys()), set("degh"))
- assert_equals(pool.waiting(), 1)
- assert_equals(pool.waiting_for("h"), set("deg"))
+ assert pool.get("f") == "fval"
+ assert set(pool.keys()) == set("abcf")
+ assert dict(pool.items()) == dict(a=1, b=2, c=3, f="fval")
+ assert pool.running() == 4
+ assert set(pool.running_keys()) == set("degh")
+ assert pool.waiting() == 1
+ assert pool.waiting_for("h") == set("deg")

# now two others
events["e"].send("eval")
events["g"].send("gval")
spin()
capture.step()
- assert_equals(pool.get("e"), "eval")
- assert_equals(pool.get("g"), "gval")
- assert_equals(set(pool.keys()), set("abcefg"))
- assert_equals(dict(pool.items()),
+ assert pool.get("e") == "eval"
+ assert pool.get("g") == "gval"
+ assert set(pool.keys()) == set("abcefg")
+ assert (dict(pool.items()) ==
dict(a=1, b=2, c=3, e="eval", f="fval", g="gval"))
- assert_equals(pool.running(), 2)
- assert_equals(set(pool.running_keys()), set("dh"))
- assert_equals(pool.waiting(), 1)
- assert_equals(pool.waiting_for("h"), set("d"))
+ assert pool.running() == 2
+ assert set(pool.running_keys()) == set("dh")
+ assert pool.waiting() == 1
+ assert pool.waiting_for("h") == set("d")

# last one
events["d"].send("dval")
# make sure both pool greenthreads get a chance to run
spin()
capture.step()
- assert_equals(pool.get("d"), "dval")
- assert_equals(set(pool.keys()), set("abcdefgh"))
- assert_equals(dict(pool.items()),
+ assert pool.get("d") == "dval"
+ assert set(pool.keys()) == set("abcdefgh")
+ assert (dict(pool.items()) ==
dict(a=1, b=2, c=3,
d="dval", e="eval", f="fval", g="gval", h="hval"))
- assert_equals(pool.running(), 0)
- assert_false(pool.running_keys())
- assert_equals(pool.waiting(), 0)
- assert_equals(pool.waiting_for("h"), set())
+ assert pool.running() == 0
+ assert not pool.running_keys()
+ assert pool.waiting() == 0
+ assert pool.waiting_for("h") == set()

capture.validate([
["h got b", "h got c"],
@@ -431,13 +431,13 @@
spin()
# verify that e completed (also that post(key) within greenthread
# overrides implicit post of return value, which would be None)
- assert_equals(pool.get("e"), "e")
+ assert pool.get("e") == "e"

# With the dependency graph shown above, it is not guaranteed whether b or
# c will complete first. Handle either case.
sequence = capture.sequence[:]
sequence[1:3] = [set([sequence[1].pop(), sequence[2].pop()])]
- assert_equals(sequence,
+ assert (sequence ==
[set(["a done"]),
set(["b done", "c done"]),
set(["d done"]),
@@ -465,7 +465,7 @@
for pos in range(len(keys)):
# next value from wait_each()
k, v = next(each)
- assert_equals(k, keys[pos])
+ assert k == keys[pos]
# advance every pool greenlet as far as it can go
spin()
# everything from keys[:pos+1] should have a value by now
@@ -493,7 +493,7 @@
pool.kill("a")
# didn't run
spin()
- assert_equals(pool.get("a"), None)
+ assert pool.get("a") == None
# killing it forgets about it
with assert_raises(KeyError):
pool.kill("a")
@@ -504,7 +504,7 @@
with assert_raises(KeyError):
pool.kill("a")
# verify it ran to completion
- assert_equals(pool.get("a"), 2)
+ assert pool.get("a") == 2


def test_post_collision_preload():
@@ -532,7 +532,7 @@
pool.kill("a")
# now we can post
pool.post("a", 3)
- assert_equals(pool.get("a"), 3)
+ assert pool.get("a") == 3

pool = DAGPool()
pool.spawn("a", (), lambda key, result: 4)
@@ -552,10 +552,10 @@
pool = DAGPool()
pool.post("a", 1)
pool.post("a", 2, replace=True)
- assert_equals(pool.get("a"), 2)
- assert_equals(dict(pool.wait_each("a")), dict(a=2))
- assert_equals(pool.wait("a"), dict(a=2))
- assert_equals(pool["a"], 2)
+ assert pool.get("a") == 2
+ assert dict(pool.wait_each("a")) == dict(a=2)
+ assert pool.wait("a") == dict(a=2)
+ assert pool["a"] == 2


def waitfor(capture, pool, key):
@@ -597,14 +597,14 @@
try:
pool.waitall()
except PropagateError as err:
- assert_equals(err.key, "a")
+ assert err.key == "a"
assert isinstance(err.exc, BogusError), \
"exc attribute is {0}, not BogusError".format(err.exc)
- assert_equals(str(err.exc), "bogus")
+ assert str(err.exc) == "bogus"
msg = str(err)
- assert_in("PropagateError(a)", msg)
- assert_in("BogusError", msg)
- assert_in("bogus", msg)
+ assert "PropagateError(a)" in msg
+ assert "BogusError" in msg
+ assert "bogus" in msg


def test_propagate_exc():
@@ -615,20 +615,20 @@
try:
pool["c"]
except PropagateError as errc:
- assert_equals(errc.key, "c")
+ assert errc.key == "c"
errb = errc.exc
- assert_equals(errb.key, "b")
+ assert errb.key == "b"
erra = errb.exc
- assert_equals(erra.key, "a")
+ assert erra.key == "a"
assert isinstance(erra.exc, BogusError), \
"exc attribute is {0}, not BogusError".format(erra.exc)
- assert_equals(str(erra.exc), "bogus")
+ assert str(erra.exc) == "bogus"
msg = str(errc)
- assert_in("PropagateError(a)", msg)
- assert_in("PropagateError(b)", msg)
- assert_in("PropagateError(c)", msg)
- assert_in("BogusError", msg)
- assert_in("bogus", msg)
+ assert "PropagateError(a)" in msg
+ assert "PropagateError(b)" in msg
+ assert "PropagateError(c)" in msg
+ assert "BogusError" in msg
+ assert "bogus" in msg


def test_wait_each_exc():
@@ -680,13 +680,13 @@
pass

# wait_each_success() filters
- assert_equals(dict(pool.wait_each_success()), dict(a=bogua))
- assert_equals(dict(pool.wait_each_success("ab")), dict(a=bogua))
- assert_equals(dict(pool.wait_each_success("a")), dict(a=bogua))
- assert_equals(dict(pool.wait_each_success("b")), {})
+ assert dict(pool.wait_each_success()) == dict(a=bogua)
+ assert dict(pool.wait_each_success("ab")) == dict(a=bogua)
+ assert dict(pool.wait_each_success("a")) == dict(a=bogua)
+ assert dict(pool.wait_each_success("b")) == {}

# wait_each_exception() filters the other way
- assert_equals(dict(pool.wait_each_exception()), dict(b=bogub))
- assert_equals(dict(pool.wait_each_exception("ab")), dict(b=bogub))
- assert_equals(dict(pool.wait_each_exception("a")), {})
- assert_equals(dict(pool.wait_each_exception("b")), dict(b=bogub))
+ assert dict(pool.wait_each_exception()) == dict(b=bogub)
+ assert dict(pool.wait_each_exception("ab")) == dict(b=bogub)
+ assert dict(pool.wait_each_exception("a")) == {}
+ assert dict(pool.wait_each_exception("b")) == dict(b=bogub)