Ondřej Súkup
b7c484c7fc
- added patches https://github.com/tqdm/tqdm/pull/1052 + python-tqdm-remove-nose.patch OBS-URL: https://build.opensuse.org/request/show/843615 OBS-URL: https://build.opensuse.org/package/show/devel:languages:python/python-tqdm?expand=0&rev=84
5841 lines
214 KiB
Diff
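Throughout the patch, nose's with_setup(pretest, posttest) decorators are replaced by test classes deriving from TestWithInstancesCheck, which the patch adds to tests_tqdm.py (not shown in this excerpt). A rough sketch of what such a base class presumably looks like, assuming it mirrors the old pretest/posttest checks on tqdm._instances:

    import unittest
    from tqdm import tqdm

    class TestWithInstancesCheck(unittest.TestCase):
        """Assumed shape of the helper defined in tests_tqdm.py (not shown here)."""
        def setUp(self):
            # replaces nose's pretest: start each test from a clean slate
            if getattr(tqdm, '_instances', None):
                tqdm._instances.clear()

        def tearDown(self):
            # replaces nose's posttest: no bar instances may leak between tests
            self.assertFalse(getattr(tqdm, '_instances', None))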
diff --git a/pytest.ini b/pytest.ini
new file mode 100644
index 00000000..e7f77388
--- /dev/null
+++ b/pytest.ini
@@ -0,0 +1,4 @@
+[pytest]
+python_files = tests_*.py
+testpaths = tqdm
+addopts = -v
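The new pytest.ini points collection at the tqdm package, switches to the non-default module pattern tests_*.py (pytest looks for test_*.py otherwise) and makes every run verbose. With this file at the repository root a bare `pytest` needs no arguments; the same can be done programmatically, for example from a hypothetical helper script:

    # run_tests.py (hypothetical): pytest.ini supplies testpaths, python_files and addopts
    import sys
    import pytest

    sys.exit(pytest.main([]))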
diff --git a/setup.py b/setup.py
index 770e557b..868be3fd 100755
--- a/setup.py
+++ b/setup.py
@@ -121,6 +121,6 @@ def find_packages(where='.'):
    ],
    keywords='progressbar progressmeter progress bar meter'
             ' rate eta console terminal time',
-    test_suite='nose.collector',
-    tests_require=['nose', 'flake8', 'coverage'],
+    test_suite='pytest',
+    tests_require=['pytest', 'flake8', 'coverage'],
)
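Note that setuptools' `python setup.py test` expects test_suite to name an importable unittest suite, so the 'pytest' value is essentially a placeholder; the tox environments below invoke pytest directly instead. A hypothetical local equivalent of the new coverage command:

    # mirrors the tox "coverage" command below; --cov needs the pytest-cov plugin
    import sys
    import pytest

    sys.exit(pytest.main(['--cov=tqdm', '-k', 'not tests_perf', '-v', 'tqdm/']))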
diff --git a/tox.ini b/tox.ini
index 66565b46..fd3edb87 100644
--- a/tox.ini
+++ b/tox.ini
@@ -9,11 +9,12 @@ envlist = py{26,27,33,34,35,36,37,38,py,py3}, tf-no-keras, perf, flake8, setup.p

[coverage]
deps =
-    nose
+    pytest
+    pytest-cov
    coverage
    coveralls
commands =
-    nosetests -d -v --with-coverage --cover-package=tqdm --ignore-files="tests_perf\.py" tqdm/
+    pytest --cov=tqdm -k "not tests_perf" -v tqdm/
-    coveralls
-    coverage xml
-    curl -OL https://coverage.codacy.com/get.sh
@@ -25,10 +26,9 @@ allowlist_externals =
[extra]
deps =
    {[coverage]deps}
-    nose-timer
    codecov
commands =
-    nosetests -d -v --with-coverage --with-timer --cover-package=tqdm --ignore-files="tests_perf\.py" tqdm/
+    pytest --cov=tqdm -k "not tests_perf" -v tqdm/
-    coveralls
    codecov
-    coverage xml
@@ -51,9 +51,9 @@ allowlist_externals = {[extra]allowlist_externals}
# no cython/numpy/pandas for py{py,py3,26,33,34}

[testenv:py26]
-# no codecov and timer for py26
deps =
-    nose
+    pytest
+    pytest-cov
    coverage
    coveralls==1.2.0
    codecov
@@ -81,14 +81,13 @@ deps =
    {[extra]deps}
    tensorflow
commands =
-    nosetests -d -v --with-timer tqdm/tests/tests_keras.py
+    pytest tqdm/tests/tests_keras.py -v

[testenv:perf]
deps =
-    nose
-    nose-timer
+    pytest
commands =
-    nosetests -d -v --with-timer tqdm/tests/tests_perf.py
+    pytest tqdm/tests/tests_perf.py -v

[testenv:flake8]
deps = flake8
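nose's --ignore-files="tests_perf\.py" is replaced by pytest's -k "not tests_perf", which deselects by substring match against the collected test IDs (file, class and function names). An alternative, not used by this patch, would be to exclude the module at collection time from a conftest.py:

    # tqdm/tests/conftest.py (hypothetical alternative to -k "not tests_perf")
    # pytest skips collecting any path listed here, relative to this conftest
    collect_ignore = ['tests_perf.py']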
diff --git a/tqdm/tests/py37_asyncio.py b/tqdm/tests/py37_asyncio.py
index 5ac6291b..9e6a24c1 100644
--- a/tqdm/tests/py37_asyncio.py
+++ b/tqdm/tests/py37_asyncio.py
@@ -2,7 +2,7 @@
from functools import partial, wraps
from time import time

-from tests_tqdm import with_setup, pretest, posttest, StringIO, closing
+from tests_tqdm import TestWithInstancesCheck, StringIO, closing
from tqdm.asyncio import tqdm_asyncio, tarange

tqdm = partial(tqdm_asyncio, miniters=0, mininterval=0)
@@ -10,11 +10,10 @@
as_completed = partial(tqdm_asyncio.as_completed, miniters=0, mininterval=0)


-def with_setup_sync(func):
-    @with_setup(pretest, posttest)
+def setup_sync(func):
    @wraps(func)
-    def inner():
-        return asyncio.run(func())
+    def inner(self):
+        return asyncio.run(func(self))
    return inner
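The renamed setup_sync decorator drops the nose fixture wiring and only bridges async test methods to the synchronous unittest/pytest call protocol. A self-contained sketch of the pattern (the example class name is hypothetical, standing in for TestWithInstancesCheck):

    import asyncio
    import unittest
    from functools import wraps

    def setup_sync(func):
        """Run an async test method to completion when called synchronously."""
        @wraps(func)
        def inner(self):
            return asyncio.run(func(self))
        return inner

    class ExampleAsyncTests(unittest.TestCase):
        @setup_sync
        async def test_sleep(self):
            await asyncio.sleep(0)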
|
||
|
||
@@ -33,71 +32,68 @@ async def acount(*args, **kwargs):
|
||
yield i
|
||
|
||
|
||
-@with_setup_sync
|
||
-async def test_generators():
|
||
- """Test asyncio generators"""
|
||
- with closing(StringIO()) as our_file:
|
||
- async for i in tqdm(count(), desc="counter", file=our_file):
|
||
- if i >= 8:
|
||
- break
|
||
- assert '9it' in our_file.getvalue()
|
||
- our_file.seek(0)
|
||
- our_file.truncate()
|
||
-
|
||
- async for i in tqdm(acount(), desc="async_counter", file=our_file):
|
||
- if i >= 8:
|
||
- break
|
||
- assert '9it' in our_file.getvalue()
|
||
-
|
||
-
|
||
-@with_setup_sync
|
||
-async def test_range():
|
||
- """Test asyncio range"""
|
||
- with closing(StringIO()) as our_file:
|
||
- async for _ in tqdm(range(9), desc="range", file=our_file):
|
||
- pass
|
||
- assert '9/9' in our_file.getvalue()
|
||
- our_file.seek(0)
|
||
- our_file.truncate()
|
||
-
|
||
- async for _ in trange(9, desc="trange", file=our_file):
|
||
- pass
|
||
- assert '9/9' in our_file.getvalue()
|
||
-
|
||
-
|
||
-@with_setup_sync
|
||
-async def test_nested():
|
||
- """Test asyncio nested"""
|
||
- with closing(StringIO()) as our_file:
|
||
- async for _ in tqdm(trange(9, desc="inner", file=our_file),
|
||
- desc="outer", file=our_file):
|
||
- pass
|
||
- assert 'inner: 100%' in our_file.getvalue()
|
||
- assert 'outer: 100%' in our_file.getvalue()
|
||
-
|
||
-
|
||
-@with_setup_sync
|
||
-async def test_coroutines():
|
||
- """Test asyncio coroutine.send"""
|
||
- with closing(StringIO()) as our_file:
|
||
- with tqdm(count(), file=our_file) as pbar:
|
||
- async for i in pbar:
|
||
- if i == 9:
|
||
- pbar.send(-10)
|
||
- elif i < 0:
|
||
- assert i == -9
|
||
+class TestTqdmAsyncio(TestWithInstancesCheck):
|
||
+ @setup_sync
|
||
+ async def test_generators(self):
|
||
+ """Test asyncio generators"""
|
||
+ with closing(StringIO()) as our_file:
|
||
+ async for i in tqdm(count(), desc="counter", file=our_file):
|
||
+ if i >= 8:
|
||
break
|
||
- assert '10it' in our_file.getvalue()
|
||
-
|
||
-
|
||
-@with_setup_sync
|
||
-async def test_as_completed():
|
||
- """Test asyncio as_completed"""
|
||
- with closing(StringIO()) as our_file:
|
||
- t = time()
|
||
- skew = time() - t
|
||
- for i in as_completed([asyncio.sleep(0.01 * i)
|
||
- for i in range(30, 0, -1)], file=our_file):
|
||
- await i
|
||
- assert 0.29 < time() - t - 2 * skew < 0.31
|
||
- assert '30/30' in our_file.getvalue()
|
||
+ assert '9it' in our_file.getvalue()
|
||
+ our_file.seek(0)
|
||
+ our_file.truncate()
|
||
+
|
||
+ async for i in tqdm(acount(), desc="async_counter", file=our_file):
|
||
+ if i >= 8:
|
||
+ break
|
||
+ assert '9it' in our_file.getvalue()
|
||
+
|
||
+ @setup_sync
|
||
+ async def test_range(self):
|
||
+ """Test asyncio range"""
|
||
+ with closing(StringIO()) as our_file:
|
||
+ async for _ in tqdm(range(9), desc="range", file=our_file):
|
||
+ pass
|
||
+ assert '9/9' in our_file.getvalue()
|
||
+ our_file.seek(0)
|
||
+ our_file.truncate()
|
||
+
|
||
+ async for _ in trange(9, desc="trange", file=our_file):
|
||
+ pass
|
||
+ assert '9/9' in our_file.getvalue()
|
||
+
|
||
+ @setup_sync
|
||
+ async def test_nested(self):
|
||
+ """Test asyncio nested"""
|
||
+ with closing(StringIO()) as our_file:
|
||
+ async for _ in tqdm(trange(9, desc="inner", file=our_file),
|
||
+ desc="outer", file=our_file):
|
||
+ pass
|
||
+ assert 'inner: 100%' in our_file.getvalue()
|
||
+ assert 'outer: 100%' in our_file.getvalue()
|
||
+
|
||
+ @setup_sync
|
||
+ async def test_coroutines(self):
|
||
+ """Test asyncio coroutine.send"""
|
||
+ with closing(StringIO()) as our_file:
|
||
+ with tqdm(count(), file=our_file) as pbar:
|
||
+ async for i in pbar:
|
||
+ if i == 9:
|
||
+ pbar.send(-10)
|
||
+ elif i < 0:
|
||
+ assert i == -9
|
||
+ break
|
||
+ assert '10it' in our_file.getvalue()
|
||
+
|
||
+ @setup_sync
|
||
+ async def test_as_completed(self):
|
||
+ """Test asyncio as_completed"""
|
||
+ with closing(StringIO()) as our_file:
|
||
+ t = time()
|
||
+ skew = time() - t
|
||
+ for i in as_completed([asyncio.sleep(0.01 * i)
|
||
+ for i in range(30, 0, -1)], file=our_file):
|
||
+ await i
|
||
+ assert 0.29 < time() - t - 2 * skew < 0.31
|
||
+ assert '30/30' in our_file.getvalue()
|
||
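The timing test above exercises tqdm_asyncio.as_completed, a progress-reporting wrapper around asyncio.as_completed. A minimal usage sketch, assuming Python 3.7+:

    import asyncio
    from tqdm.asyncio import tqdm_asyncio

    async def main():
        async def job(i):
            await asyncio.sleep(0.01 * i)
            return i
        results = []
        for fut in tqdm_asyncio.as_completed([job(i) for i in range(10)]):
            results.append(await fut)
        return results

    asyncio.run(main())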
diff --git a/tqdm/tests/tests_asyncio.py b/tqdm/tests/tests_asyncio.py
index 11b58ac8..d1f00234 100644
--- a/tqdm/tests/tests_asyncio.py
+++ b/tqdm/tests/tests_asyncio.py
@@ -3,5 +3,5 @@
if sys.version_info[:2] > (3, 6):
    from py37_asyncio import *  # NOQA
else:
-    from tests_tqdm import SkipTest
+    from unittest import SkipTest
    raise SkipTest
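Raising unittest.SkipTest at import time keeps the old module-level skip for Python <= 3.6. pytest also has a dedicated idiom for this, shown here as a hypothetical alternative rather than what the patch does:

    import sys
    import pytest

    if sys.version_info[:2] <= (3, 6):
        pytest.skip('asyncio tests need Python >= 3.7', allow_module_level=True)

    from py37_asyncio import *  # NOQA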
diff --git a/tqdm/tests/tests_concurrent.py b/tqdm/tests/tests_concurrent.py
|
||
index e64cb789..ecfea117 100644
|
||
--- a/tqdm/tests/tests_concurrent.py
|
||
+++ b/tqdm/tests/tests_concurrent.py
|
||
@@ -1,10 +1,10 @@
|
||
"""
|
||
Tests for `tqdm.contrib.concurrent`.
|
||
"""
|
||
+import unittest
|
||
from warnings import catch_warnings
|
||
from tqdm.contrib.concurrent import thread_map, process_map
|
||
-from tests_tqdm import with_setup, pretest, posttest, SkipTest, StringIO, \
|
||
- closing
|
||
+from tests_tqdm import TestWithInstancesCheck, StringIO, closing
|
||
|
||
|
||
def incr(x):
|
||
@@ -12,47 +12,47 @@ def incr(x):
|
||
return x + 1
|
||
|
||
|
||
-@with_setup(pretest, posttest)
|
||
-def test_thread_map():
|
||
- """Test contrib.concurrent.thread_map"""
|
||
- with closing(StringIO()) as our_file:
|
||
- a = range(9)
|
||
- b = [i + 1 for i in a]
|
||
+class TestTqdmConcurrent(TestWithInstancesCheck):
|
||
+
|
||
+ def test_thread_map(self):
|
||
+ """Test contrib.concurrent.thread_map"""
|
||
+ with closing(StringIO()) as our_file:
|
||
+ a = range(9)
|
||
+ b = [i + 1 for i in a]
|
||
+ try:
|
||
+ assert thread_map(lambda x: x + 1, a, file=our_file) == b
|
||
+ except ImportError:
|
||
+ raise unittest.SkipTest
|
||
+ assert thread_map(incr, a, file=our_file) == b
|
||
+
|
||
+ def test_process_map(self):
|
||
+ """Test contrib.concurrent.process_map"""
|
||
+ with closing(StringIO()) as our_file:
|
||
+ a = range(9)
|
||
+ b = [i + 1 for i in a]
|
||
+ try:
|
||
+ assert process_map(incr, a, file=our_file) == b
|
||
+ except ImportError:
|
||
+ raise unittest.SkipTest
|
||
+
|
||
+
|
||
+class TestTqdmConcurrentWithoutInstancesCheck(unittest.TestCase):
|
||
+ def test_chunksize_warning(self):
|
||
+ """Test contrib.concurrent.process_map chunksize warnings"""
|
||
try:
|
||
- assert thread_map(lambda x: x + 1, a, file=our_file) == b
|
||
+ from unittest.mock import patch
|
||
except ImportError:
|
||
- raise SkipTest
|
||
- assert thread_map(incr, a, file=our_file) == b
|
||
-
|
||
-
|
||
-@with_setup(pretest, posttest)
|
||
-def test_process_map():
|
||
- """Test contrib.concurrent.process_map"""
|
||
- with closing(StringIO()) as our_file:
|
||
- a = range(9)
|
||
- b = [i + 1 for i in a]
|
||
- try:
|
||
- assert process_map(incr, a, file=our_file) == b
|
||
- except ImportError:
|
||
- raise SkipTest
|
||
-
|
||
-
|
||
-def test_chunksize_warning():
|
||
- """Test contrib.concurrent.process_map chunksize warnings"""
|
||
- try:
|
||
- from unittest.mock import patch
|
||
- except ImportError:
|
||
- raise SkipTest
|
||
-
|
||
- for iterables, should_warn in [
|
||
- ([], False),
|
||
- (['x'], False),
|
||
- ([()], False),
|
||
- (['x', ()], False),
|
||
- (['x' * 1001], True),
|
||
- (['x' * 100, ('x',) * 1001], True),
|
||
- ]:
|
||
- with patch('tqdm.contrib.concurrent._executor_map'):
|
||
- with catch_warnings(record=True) as w:
|
||
- process_map(incr, *iterables)
|
||
- assert should_warn == bool(w)
|
||
+ raise unittest.SkipTest
|
||
+
|
||
+ for iterables, should_warn in [
|
||
+ ([], False),
|
||
+ (['x'], False),
|
||
+ ([()], False),
|
||
+ (['x', ()], False),
|
||
+ (['x' * 1001], True),
|
||
+ (['x' * 100, ('x',) * 1001], True),
|
||
+ ]:
|
||
+ with patch('tqdm.contrib.concurrent._executor_map'):
|
||
+ with catch_warnings(record=True) as w:
|
||
+ process_map(incr, *iterables)
|
||
+ assert should_warn == bool(w)
|
||
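The try/except ImportError blocks above now raise unittest.SkipTest directly. The same optional-dependency guard can also be written declaratively; a sketch, not part of this patch:

    import unittest

    try:
        from unittest.mock import patch  # noqa: F401 (Python 3 only)
        HAS_MOCK = True
    except ImportError:
        HAS_MOCK = False

    class ExampleOptionalDep(unittest.TestCase):
        @unittest.skipUnless(HAS_MOCK, 'unittest.mock not available')
        def test_needs_mock(self):
            self.assertTrue(HAS_MOCK)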
diff --git a/tqdm/tests/tests_contrib.py b/tqdm/tests/tests_contrib.py
|
||
index 5239d779..38e1779e 100644
|
||
--- a/tqdm/tests/tests_contrib.py
|
||
+++ b/tqdm/tests/tests_contrib.py
|
||
@@ -2,66 +2,61 @@
|
||
Tests for `tqdm.contrib`.
|
||
"""
|
||
import sys
|
||
+import unittest
|
||
from tqdm.contrib import tenumerate, tzip, tmap
|
||
-from tests_tqdm import with_setup, pretest, posttest, SkipTest, StringIO, \
|
||
- closing
|
||
-
|
||
-
|
||
-def incr(x):
|
||
- """Dummy function"""
|
||
- return x + 1
|
||
-
|
||
-
|
||
-@with_setup(pretest, posttest)
|
||
-def test_enumerate():
|
||
- """Test contrib.tenumerate"""
|
||
- with closing(StringIO()) as our_file:
|
||
- a = range(9)
|
||
- assert list(tenumerate(a, file=our_file)) == list(enumerate(a))
|
||
- assert list(tenumerate(a, 42, file=our_file)) == list(enumerate(a, 42))
|
||
- with closing(StringIO()) as our_file:
|
||
- _ = list(tenumerate((i for i in a), file=our_file))
|
||
- assert "100%" not in our_file.getvalue()
|
||
- with closing(StringIO()) as our_file:
|
||
- _ = list(tenumerate((i for i in a), file=our_file, total=len(a)))
|
||
- assert "100%" in our_file.getvalue()
|
||
-
|
||
-
|
||
-@with_setup(pretest, posttest)
|
||
-def test_enumerate_numpy():
|
||
- """Test contrib.tenumerate(numpy.ndarray)"""
|
||
- try:
|
||
- import numpy as np
|
||
- except ImportError:
|
||
- raise SkipTest
|
||
- with closing(StringIO()) as our_file:
|
||
- a = np.random.random((42, 1337))
|
||
- assert list(tenumerate(a, file=our_file)) == list(np.ndenumerate(a))
|
||
-
|
||
-
|
||
-@with_setup(pretest, posttest)
|
||
-def test_zip():
|
||
- """Test contrib.tzip"""
|
||
- with closing(StringIO()) as our_file:
|
||
- a = range(9)
|
||
- b = [i + 1 for i in a]
|
||
- if sys.version_info[:1] < (3,):
|
||
- assert tzip(a, b, file=our_file) == zip(a, b)
|
||
- else:
|
||
- gen = tzip(a, b, file=our_file)
|
||
- assert gen != list(zip(a, b))
|
||
- assert list(gen) == list(zip(a, b))
|
||
-
|
||
-
|
||
-@with_setup(pretest, posttest)
|
||
-def test_map():
|
||
- """Test contrib.tmap"""
|
||
- with closing(StringIO()) as our_file:
|
||
- a = range(9)
|
||
- b = [i + 1 for i in a]
|
||
- if sys.version_info[:1] < (3,):
|
||
- assert tmap(lambda x: x + 1, a, file=our_file) == map(incr, a)
|
||
- else:
|
||
- gen = tmap(lambda x: x + 1, a, file=our_file)
|
||
- assert gen != b
|
||
- assert list(gen) == b
|
||
+from tests_tqdm import TestWithInstancesCheck, StringIO, closing
|
||
+
|
||
+
|
||
+class TestTqdmContrib(TestWithInstancesCheck):
|
||
+ def incr(self, x):
|
||
+ """Dummy function"""
|
||
+ return x + 1
|
||
+
|
||
+ def test_enumerate(self):
|
||
+ """Test contrib.tenumerate"""
|
||
+ with closing(StringIO()) as our_file:
|
||
+ a = range(9)
|
||
+ assert list(tenumerate(a, file=our_file)) == list(enumerate(a))
|
||
+ assert list(tenumerate(a, 42, file=our_file)) == \
|
||
+ list(enumerate(a, 42))
|
||
+ with closing(StringIO()) as our_file:
|
||
+ _ = list(tenumerate((i for i in a), file=our_file))
|
||
+ assert "100%" not in our_file.getvalue()
|
||
+ with closing(StringIO()) as our_file:
|
||
+ _ = list(tenumerate((i for i in a), file=our_file, total=len(a)))
|
||
+ assert "100%" in our_file.getvalue()
|
||
+
|
||
+ def test_enumerate_numpy(self):
|
||
+ """Test contrib.tenumerate(numpy.ndarray)"""
|
||
+ try:
|
||
+ import numpy as np
|
||
+ except ImportError:
|
||
+ raise unittest.SkipTest
|
||
+ with closing(StringIO()) as our_file:
|
||
+ a = np.random.random((42, 1337))
|
||
+ assert list(tenumerate(a, file=our_file)) == list(np.ndenumerate(a))
|
||
+
|
||
+ def test_zip(self):
|
||
+ """Test contrib.tzip"""
|
||
+ with closing(StringIO()) as our_file:
|
||
+ a = range(9)
|
||
+ b = [i + 1 for i in a]
|
||
+ if sys.version_info[:1] < (3,):
|
||
+ assert tzip(a, b, file=our_file) == zip(a, b)
|
||
+ else:
|
||
+ gen = tzip(a, b, file=our_file)
|
||
+ assert gen != list(zip(a, b))
|
||
+ assert list(gen) == list(zip(a, b))
|
||
+
|
||
+ def test_map(self):
|
||
+ """Test contrib.tmap"""
|
||
+ with closing(StringIO()) as our_file:
|
||
+ a = range(9)
|
||
+ b = [i + 1 for i in a]
|
||
+ if sys.version_info[:1] < (3,):
|
||
+ assert tmap(lambda x: x + 1, a, file=our_file) == \
|
||
+ map(self.incr, a)
|
||
+ else:
|
||
+ gen = tmap(lambda x: x + 1, a, file=our_file)
|
||
+ assert gen != b
|
||
+ assert list(gen) == b
|
||
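For reference, the helpers exercised above come from tqdm.contrib; on Python 3 tzip and tmap return generators, which is why the tests wrap them in list():

    from tqdm.contrib import tenumerate, tzip, tmap

    a = range(9)
    list(tenumerate(a))              # like enumerate(a), with a progress bar
    list(tzip(a, a))                 # generator on Python 3, hence the list()
    list(tmap(lambda x: x + 1, a))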
diff --git a/tqdm/tests/tests_itertools.py b/tqdm/tests/tests_itertools.py
index c55e07db..6b78e204 100644
--- a/tqdm/tests/tests_itertools.py
+++ b/tqdm/tests/tests_itertools.py
@@ -2,7 +2,7 @@
Tests for `tqdm.contrib.itertools`.
"""
from tqdm.contrib.itertools import product
-from tests_tqdm import with_setup, pretest, posttest, StringIO, closing
+from tests_tqdm import TestWithInstancesCheck, StringIO, closing
import itertools


@@ -15,13 +15,13 @@ def __iter__(self):
        yield i


-@with_setup(pretest, posttest)
-def test_product():
-    """Test contrib.itertools.product"""
-    with closing(StringIO()) as our_file:
-        a = range(9)
-        assert list(product(a, a[::-1], file=our_file)) == \
-            list(itertools.product(a, a[::-1]))
+class TestTqdmItertools(TestWithInstancesCheck):
+    def test_product(self):
+        """Test contrib.itertools.product"""
+        with closing(StringIO()) as our_file:
+            a = range(9)
+            assert list(product(a, a[::-1], file=our_file)) == \
+                list(itertools.product(a, a[::-1]))

-        assert list(product(a, NoLenIter(a), file=our_file)) == \
-            list(itertools.product(a, NoLenIter(a)))
+            assert list(product(a, NoLenIter(a), file=our_file)) == \
+                list(itertools.product(a, NoLenIter(a)))
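tqdm.contrib.itertools.product behaves like itertools.product with a progress bar; the total is inferred from len() of the operands when available, which is what the NoLenIter case above probes. A minimal usage sketch:

    import itertools
    from tqdm.contrib.itertools import product

    pairs = list(product(range(3), range(4)))
    assert pairs == list(itertools.product(range(3), range(4)))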
diff --git a/tqdm/tests/tests_keras.py b/tqdm/tests/tests_keras.py
|
||
index 11684c49..39ee0fc3 100644
|
||
--- a/tqdm/tests/tests_keras.py
|
||
+++ b/tqdm/tests/tests_keras.py
|
||
@@ -1,97 +1,97 @@
|
||
from __future__ import division
|
||
+import unittest
|
||
from tqdm import tqdm
|
||
-from tests_tqdm import with_setup, pretest, posttest, SkipTest, StringIO, \
|
||
- closing
|
||
+from tests_tqdm import TestWithInstancesCheck, StringIO, closing
|
||
|
||
|
||
-@with_setup(pretest, posttest)
|
||
-def test_keras():
|
||
- """Test tqdm.keras.TqdmCallback"""
|
||
- try:
|
||
- from tqdm.keras import TqdmCallback
|
||
- import numpy as np
|
||
+class TestTqdmKeras(TestWithInstancesCheck):
|
||
+ def test_keras(self):
|
||
+ """Test tqdm.keras.TqdmCallback"""
|
||
try:
|
||
- import keras as K
|
||
+ from tqdm.keras import TqdmCallback
|
||
+ import numpy as np
|
||
+ try:
|
||
+ import keras as K
|
||
+ except ImportError:
|
||
+ from tensorflow import keras as K
|
||
except ImportError:
|
||
- from tensorflow import keras as K
|
||
- except ImportError:
|
||
- raise SkipTest
|
||
+ raise unittest.SkipTest
|
||
|
||
- # 1D autoencoder
|
||
- dtype = np.float32
|
||
- model = K.models.Sequential(
|
||
- [K.layers.InputLayer((1, 1), dtype=dtype), K.layers.Conv1D(1, 1)]
|
||
- )
|
||
- model.compile("adam", "mse")
|
||
- x = np.random.rand(100, 1, 1).astype(dtype)
|
||
- batch_size = 10
|
||
- batches = len(x) / batch_size
|
||
- epochs = 5
|
||
+ # 1D autoencoder
|
||
+ dtype = np.float32
|
||
+ model = K.models.Sequential(
|
||
+ [K.layers.InputLayer((1, 1), dtype=dtype), K.layers.Conv1D(1, 1)]
|
||
+ )
|
||
+ model.compile("adam", "mse")
|
||
+ x = np.random.rand(100, 1, 1).astype(dtype)
|
||
+ batch_size = 10
|
||
+ batches = len(x) / batch_size
|
||
+ epochs = 5
|
||
|
||
- with closing(StringIO()) as our_file:
|
||
+ with closing(StringIO()) as our_file:
|
||
|
||
- class Tqdm(tqdm):
|
||
- """redirected I/O class"""
|
||
+ class Tqdm(tqdm):
|
||
+ """redirected I/O class"""
|
||
|
||
- def __init__(self, *a, **k):
|
||
- k.setdefault("file", our_file)
|
||
- super(Tqdm, self).__init__(*a, **k)
|
||
+ def __init__(self, *a, **k):
|
||
+ k.setdefault("file", our_file)
|
||
+ super(Tqdm, self).__init__(*a, **k)
|
||
|
||
- # just epoch (no batch) progress
|
||
- model.fit(
|
||
- x,
|
||
- x,
|
||
- epochs=epochs,
|
||
- batch_size=batch_size,
|
||
- verbose=False,
|
||
- callbacks=[
|
||
- TqdmCallback(
|
||
- epochs,
|
||
- data_size=len(x),
|
||
- batch_size=batch_size,
|
||
- verbose=0,
|
||
- tqdm_class=Tqdm,
|
||
- )
|
||
- ],
|
||
- )
|
||
- res = our_file.getvalue()
|
||
- assert "{epochs}/{epochs}".format(epochs=epochs) in res
|
||
- assert "{batches}/{batches}".format(batches=batches) not in res
|
||
+ # just epoch (no batch) progress
|
||
+ model.fit(
|
||
+ x,
|
||
+ x,
|
||
+ epochs=epochs,
|
||
+ batch_size=batch_size,
|
||
+ verbose=False,
|
||
+ callbacks=[
|
||
+ TqdmCallback(
|
||
+ epochs,
|
||
+ data_size=len(x),
|
||
+ batch_size=batch_size,
|
||
+ verbose=0,
|
||
+ tqdm_class=Tqdm,
|
||
+ )
|
||
+ ],
|
||
+ )
|
||
+ res = our_file.getvalue()
|
||
+ assert "{epochs}/{epochs}".format(epochs=epochs) in res
|
||
+ assert "{batches}/{batches}".format(batches=batches) not in res
|
||
|
||
- # full (epoch and batch) progress
|
||
- our_file.seek(0)
|
||
- our_file.truncate()
|
||
- model.fit(
|
||
- x,
|
||
- x,
|
||
- epochs=epochs,
|
||
- batch_size=batch_size,
|
||
- verbose=False,
|
||
- callbacks=[
|
||
- TqdmCallback(
|
||
- epochs,
|
||
- data_size=len(x),
|
||
- batch_size=batch_size,
|
||
- verbose=2,
|
||
- tqdm_class=Tqdm,
|
||
- )
|
||
- ],
|
||
- )
|
||
- res = our_file.getvalue()
|
||
- assert "{epochs}/{epochs}".format(epochs=epochs) in res
|
||
- assert "{batches}/{batches}".format(batches=batches) in res
|
||
+ # full (epoch and batch) progress
|
||
+ our_file.seek(0)
|
||
+ our_file.truncate()
|
||
+ model.fit(
|
||
+ x,
|
||
+ x,
|
||
+ epochs=epochs,
|
||
+ batch_size=batch_size,
|
||
+ verbose=False,
|
||
+ callbacks=[
|
||
+ TqdmCallback(
|
||
+ epochs,
|
||
+ data_size=len(x),
|
||
+ batch_size=batch_size,
|
||
+ verbose=2,
|
||
+ tqdm_class=Tqdm,
|
||
+ )
|
||
+ ],
|
||
+ )
|
||
+ res = our_file.getvalue()
|
||
+ assert "{epochs}/{epochs}".format(epochs=epochs) in res
|
||
+ assert "{batches}/{batches}".format(batches=batches) in res
|
||
|
||
- # auto-detect epochs and batches
|
||
- our_file.seek(0)
|
||
- our_file.truncate()
|
||
- model.fit(
|
||
- x,
|
||
- x,
|
||
- epochs=epochs,
|
||
- batch_size=batch_size,
|
||
- verbose=False,
|
||
- callbacks=[TqdmCallback(verbose=2, tqdm_class=Tqdm)],
|
||
- )
|
||
- res = our_file.getvalue()
|
||
- assert "{epochs}/{epochs}".format(epochs=epochs) in res
|
||
- assert "{batches}/{batches}".format(batches=batches) in res
|
||
+ # auto-detect epochs and batches
|
||
+ our_file.seek(0)
|
||
+ our_file.truncate()
|
||
+ model.fit(
|
||
+ x,
|
||
+ x,
|
||
+ epochs=epochs,
|
||
+ batch_size=batch_size,
|
||
+ verbose=False,
|
||
+ callbacks=[TqdmCallback(verbose=2, tqdm_class=Tqdm)],
|
||
+ )
|
||
+ res = our_file.getvalue()
|
||
+ assert "{epochs}/{epochs}".format(epochs=epochs) in res
|
||
+ assert "{batches}/{batches}".format(batches=batches) in res
|
||
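Condensed from the test above, the callback being exercised can be used as follows (assumes numpy and tensorflow/keras are installed; verbose=2 adds a per-epoch batch bar on top of the epoch bar):

    import numpy as np
    from tensorflow import keras as K
    from tqdm.keras import TqdmCallback

    model = K.models.Sequential([K.layers.InputLayer((1, 1)), K.layers.Conv1D(1, 1)])
    model.compile('adam', 'mse')
    x = np.random.rand(100, 1, 1).astype(np.float32)
    model.fit(x, x, epochs=5, batch_size=10, verbose=False,
              callbacks=[TqdmCallback(verbose=2)])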
diff --git a/tqdm/tests/tests_main.py b/tqdm/tests/tests_main.py
|
||
index 08eccc05..1cce8d1f 100644
|
||
--- a/tqdm/tests/tests_main.py
|
||
+++ b/tqdm/tests/tests_main.py
|
||
@@ -1,5 +1,6 @@
|
||
import sys
|
||
import subprocess
|
||
+import unittest
|
||
from os import path
|
||
from shutil import rmtree
|
||
from tempfile import mkdtemp
|
||
@@ -7,8 +8,8 @@
|
||
from tqdm.utils import IS_WIN
|
||
from io import open as io_open
|
||
|
||
-from tests_tqdm import with_setup, pretest, posttest, _range, closing, \
|
||
- UnicodeIO, StringIO, SkipTest
|
||
+from tests_tqdm import TestWithInstancesCheck, _range, closing,\
|
||
+ UnicodeIO, StringIO
|
||
|
||
|
||
def _sh(*cmd, **kwargs):
|
||
@@ -27,256 +28,256 @@ def __getattr__(self, _):
|
||
NULL = Null()
|
||
|
||
|
||
-@with_setup(pretest, posttest)
|
||
-def test_pipes():
|
||
- """Test command line pipes"""
|
||
- ls_out = _sh('ls').replace('\r\n', '\n')
|
||
- ls = subprocess.Popen('ls', stdout=subprocess.PIPE,
|
||
- stderr=subprocess.STDOUT)
|
||
- res = _sh(sys.executable, '-c', 'from tqdm.cli import main; main()',
|
||
- stdin=ls.stdout, stderr=subprocess.STDOUT)
|
||
- ls.wait()
|
||
-
|
||
- # actual test:
|
||
-
|
||
- assert ls_out in res.replace('\r\n', '\n')
|
||
-
|
||
-
|
||
-# WARNING: this should be the last test as it messes with sys.stdin, argv
|
||
-@with_setup(pretest, posttest)
|
||
-def test_main():
|
||
- """Test misc CLI options"""
|
||
- _SYS = sys.stdin, sys.argv
|
||
- N = 123
|
||
-
|
||
- # test direct import
|
||
- sys.stdin = map(str, _range(N))
|
||
- sys.argv = ['', '--desc', 'Test CLI import',
|
||
- '--ascii', 'True', '--unit_scale', 'True']
|
||
- import tqdm.__main__ # NOQA
|
||
- sys.stderr.write("Test misc CLI options ... ")
|
||
-
|
||
- # test --delim
|
||
- IN_DATA = '\0'.join(map(str, _range(N)))
|
||
- with closing(StringIO()) as sys.stdin:
|
||
- sys.argv = ['', '--desc', 'Test CLI delim',
|
||
- '--ascii', 'True', '--delim', r'\0', '--buf_size', '64']
|
||
- sys.stdin.write(IN_DATA)
|
||
- # sys.stdin.write(b'\xff') # TODO
|
||
- sys.stdin.seek(0)
|
||
- with closing(UnicodeIO()) as fp:
|
||
- main(fp=fp)
|
||
- assert str(N) + "it" in fp.getvalue()
|
||
-
|
||
- # test --bytes
|
||
- IN_DATA = IN_DATA.replace('\0', '\n')
|
||
- with closing(StringIO()) as sys.stdin:
|
||
- sys.stdin.write(IN_DATA)
|
||
- sys.stdin.seek(0)
|
||
- sys.argv = ['', '--ascii', '--bytes=True', '--unit_scale', 'False']
|
||
- with closing(UnicodeIO()) as fp:
|
||
- main(fp=fp)
|
||
- assert str(len(IN_DATA)) in fp.getvalue()
|
||
-
|
||
- # test --log
|
||
- sys.stdin = map(str, _range(N))
|
||
- # with closing(UnicodeIO()) as fp:
|
||
- main(argv=['--log', 'DEBUG'], fp=NULL)
|
||
- # assert "DEBUG:" in sys.stdout.getvalue()
|
||
-
|
||
- # test --tee
|
||
- with closing(StringIO()) as sys.stdin:
|
||
- sys.stdin.write(IN_DATA)
|
||
-
|
||
- sys.stdin.seek(0)
|
||
- with closing(UnicodeIO()) as fp:
|
||
- main(argv=['--mininterval', '0', '--miniters', '1'], fp=fp)
|
||
- res = len(fp.getvalue())
|
||
- # assert len(fp.getvalue()) < len(sys.stdout.getvalue())
|
||
-
|
||
- sys.stdin.seek(0)
|
||
- with closing(UnicodeIO()) as fp:
|
||
- main(argv=['--tee', '--mininterval', '0', '--miniters', '1'], fp=fp)
|
||
- # spaces to clear intermediate lines could increase length
|
||
- assert len(fp.getvalue()) >= res + len(IN_DATA)
|
||
-
|
||
- # test --null
|
||
- _STDOUT = sys.stdout
|
||
- try:
|
||
- with closing(StringIO()) as sys.stdout:
|
||
- with closing(StringIO()) as sys.stdin:
|
||
- sys.stdin.write(IN_DATA)
|
||
-
|
||
- sys.stdin.seek(0)
|
||
- with closing(UnicodeIO()) as fp:
|
||
- main(argv=['--null'], fp=fp)
|
||
- assert not sys.stdout.getvalue()
|
||
-
|
||
- with closing(StringIO()) as sys.stdin:
|
||
- sys.stdin.write(IN_DATA)
|
||
-
|
||
- sys.stdin.seek(0)
|
||
- with closing(UnicodeIO()) as fp:
|
||
- main(argv=[], fp=fp)
|
||
- assert sys.stdout.getvalue()
|
||
- except:
|
||
- sys.stdout = _STDOUT
|
||
- raise
|
||
- else:
|
||
- sys.stdout = _STDOUT
|
||
-
|
||
- # test integer --update
|
||
- with closing(StringIO()) as sys.stdin:
|
||
- sys.stdin.write(IN_DATA)
|
||
-
|
||
- sys.stdin.seek(0)
|
||
- with closing(UnicodeIO()) as fp:
|
||
- main(argv=['--update'], fp=fp)
|
||
- res = fp.getvalue()
|
||
- assert str(N // 2 * N) + "it" in res # arithmetic sum formula
|
||
-
|
||
- # test integer --update --delim
|
||
- with closing(StringIO()) as sys.stdin:
|
||
- sys.stdin.write(IN_DATA.replace('\n', 'D'))
|
||
-
|
||
- sys.stdin.seek(0)
|
||
- with closing(UnicodeIO()) as fp:
|
||
- main(argv=['--update', '--delim', 'D'], fp=fp)
|
||
- res = fp.getvalue()
|
||
- assert str(N // 2 * N) + "it" in res # arithmetic sum formula
|
||
-
|
||
- # test integer --update_to
|
||
- with closing(StringIO()) as sys.stdin:
|
||
- sys.stdin.write(IN_DATA)
|
||
-
|
||
- sys.stdin.seek(0)
|
||
- with closing(UnicodeIO()) as fp:
|
||
- main(argv=['--update-to'], fp=fp)
|
||
- res = fp.getvalue()
|
||
- assert str(N - 1) + "it" in res
|
||
- assert str(N) + "it" not in res
|
||
-
|
||
- # test integer --update_to --delim
|
||
- with closing(StringIO()) as sys.stdin:
|
||
- sys.stdin.write(IN_DATA.replace('\n', 'D'))
|
||
-
|
||
- sys.stdin.seek(0)
|
||
- with closing(UnicodeIO()) as fp:
|
||
- main(argv=['--update-to', '--delim', 'D'], fp=fp)
|
||
- res = fp.getvalue()
|
||
- assert str(N - 1) + "it" in res
|
||
- assert str(N) + "it" not in res
|
||
-
|
||
- # test float --update_to
|
||
- IN_DATA = '\n'.join((str(i / 2.0) for i in _range(N)))
|
||
- with closing(StringIO()) as sys.stdin:
|
||
- sys.stdin.write(IN_DATA)
|
||
-
|
||
- sys.stdin.seek(0)
|
||
- with closing(UnicodeIO()) as fp:
|
||
- main(argv=['--update-to'], fp=fp)
|
||
- res = fp.getvalue()
|
||
- assert str((N - 1) / 2.0) + "it" in res
|
||
- assert str(N / 2.0) + "it" not in res
|
||
-
|
||
- # clean up
|
||
- sys.stdin, sys.argv = _SYS
|
||
-
|
||
-
|
||
-def test_manpath():
|
||
- """Test CLI --manpath"""
|
||
- if IS_WIN:
|
||
- raise SkipTest
|
||
- tmp = mkdtemp()
|
||
- man = path.join(tmp, "tqdm.1")
|
||
- assert not path.exists(man)
|
||
- try:
|
||
- main(argv=['--manpath', tmp], fp=NULL)
|
||
- except SystemExit:
|
||
- pass
|
||
- else:
|
||
- raise SystemExit("Expected system exit")
|
||
- assert path.exists(man)
|
||
- rmtree(tmp, True)
|
||
-
|
||
-
|
||
-def test_comppath():
|
||
- """Test CLI --comppath"""
|
||
- if IS_WIN:
|
||
- raise SkipTest
|
||
- tmp = mkdtemp()
|
||
- man = path.join(tmp, "tqdm_completion.sh")
|
||
- assert not path.exists(man)
|
||
- try:
|
||
- main(argv=['--comppath', tmp], fp=NULL)
|
||
- except SystemExit:
|
||
- pass
|
||
- else:
|
||
- raise SystemExit("Expected system exit")
|
||
- assert path.exists(man)
|
||
-
|
||
- # check most important options appear
|
||
- with io_open(man, mode='r', encoding='utf-8') as fd:
|
||
- script = fd.read()
|
||
- opts = set([
|
||
- '--help', '--desc', '--total', '--leave', '--ncols', '--ascii',
|
||
- '--dynamic_ncols', '--position', '--bytes', '--nrows', '--delim',
|
||
- '--manpath', '--comppath'
|
||
- ])
|
||
- assert all(args in script for args in opts)
|
||
- rmtree(tmp, True)
|
||
-
|
||
-
|
||
-def test_exceptions():
|
||
- """Test CLI Exceptions"""
|
||
- _SYS = sys.stdin, sys.argv
|
||
- sys.stdin = map(str, _range(123))
|
||
-
|
||
- sys.argv = ['', '-ascii', '-unit_scale', '--bad_arg_u_ment', 'foo']
|
||
- try:
|
||
- main(fp=NULL)
|
||
- except TqdmKeyError as e:
|
||
- if 'bad_arg_u_ment' not in str(e):
|
||
- raise
|
||
- else:
|
||
- raise TqdmKeyError('bad_arg_u_ment')
|
||
-
|
||
- sys.argv = ['', '-ascii', '-unit_scale', 'invalid_bool_value']
|
||
- try:
|
||
- main(fp=NULL)
|
||
- except TqdmTypeError as e:
|
||
- if 'invalid_bool_value' not in str(e):
|
||
- raise
|
||
- else:
|
||
- raise TqdmTypeError('invalid_bool_value')
|
||
-
|
||
- sys.argv = ['', '-ascii', '--total', 'invalid_int_value']
|
||
- try:
|
||
- main(fp=NULL)
|
||
- except TqdmTypeError as e:
|
||
- if 'invalid_int_value' not in str(e):
|
||
- raise
|
||
- else:
|
||
- raise TqdmTypeError('invalid_int_value')
|
||
-
|
||
- sys.argv = ['', '--update', '--update_to']
|
||
- try:
|
||
- main(fp=NULL)
|
||
- except TqdmKeyError as e:
|
||
- if 'Can only have one of --' not in str(e):
|
||
+class TestTqdmMain(TestWithInstancesCheck):
|
||
+
|
||
+ def test_pipes(self):
|
||
+ """Test command line pipes"""
|
||
+ ls_out = _sh('ls').replace('\r\n', '\n')
|
||
+ ls = subprocess.Popen('ls', stdout=subprocess.PIPE,
|
||
+ stderr=subprocess.STDOUT)
|
||
+ res = _sh(sys.executable, '-c', 'from tqdm.cli import main; main()',
|
||
+ stdin=ls.stdout, stderr=subprocess.STDOUT)
|
||
+ ls.wait()
|
||
+
|
||
+ # actual test:
|
||
+
|
||
+ assert ls_out in res.replace('\r\n', '\n')
|
||
+
|
||
+ # WARNING: this should be the last test as it messes with sys.stdin, argv
|
||
+ def test_main(self):
|
||
+ # hack in posix_pipe() does not work under pytest
|
||
+ raise unittest.SkipTest
|
||
+
|
||
+ """Test misc CLI options"""
|
||
+ _SYS = sys.stdin, sys.argv
|
||
+ N = 123
|
||
+
|
||
+ # test direct import
|
||
+ sys.stdin = [n.encode('utf-8') for n in map(str, _range(N))]
|
||
+ sys.argv = ['', '--desc', 'Test CLI import',
|
||
+ '--ascii', 'True', '--unit_scale', 'True']
|
||
+ import tqdm.__main__ # NOQA
|
||
+ sys.stderr.write("Test misc CLI options ... ")
|
||
+
|
||
+ # test --delim
|
||
+ IN_DATA = '\0'.join(map(str, _range(N)))
|
||
+ with closing(StringIO()) as sys.stdin:
|
||
+ sys.argv = ['', '--desc', 'Test CLI delim',
|
||
+ '--ascii', 'True', '--delim', r'\0', '--buf_size', '64']
|
||
+ sys.stdin.write(IN_DATA)
|
||
+ # sys.stdin.write(b'\xff') # TODO
|
||
+ sys.stdin.seek(0)
|
||
+ with closing(UnicodeIO()) as fp:
|
||
+ main(fp=fp)
|
||
+ assert str(N) + "it" in fp.getvalue()
|
||
+
|
||
+ # test --bytes
|
||
+ IN_DATA = IN_DATA.replace('\0', '\n')
|
||
+ with closing(StringIO()) as sys.stdin:
|
||
+ sys.stdin.write(IN_DATA)
|
||
+ sys.stdin.seek(0)
|
||
+ sys.argv = ['', '--ascii', '--bytes=True', '--unit_scale', 'False']
|
||
+ with closing(UnicodeIO()) as fp:
|
||
+ main(fp=fp)
|
||
+ assert str(len(IN_DATA)) in fp.getvalue()
|
||
+
|
||
+ # test --log
|
||
+ sys.stdin = map(str, _range(N))
|
||
+ # with closing(UnicodeIO()) as fp:
|
||
+ main(argv=['--log', 'DEBUG'], fp=NULL)
|
||
+ # assert "DEBUG:" in sys.stdout.getvalue()
|
||
+
|
||
+ # test --tee
|
||
+ with closing(StringIO()) as sys.stdin:
|
||
+ sys.stdin.write(IN_DATA)
|
||
+
|
||
+ sys.stdin.seek(0)
|
||
+ with closing(UnicodeIO()) as fp:
|
||
+ main(argv=['--mininterval', '0', '--miniters', '1'], fp=fp)
|
||
+ res = len(fp.getvalue())
|
||
+ # assert len(fp.getvalue()) < len(sys.stdout.getvalue())
|
||
+
|
||
+ sys.stdin.seek(0)
|
||
+ with closing(UnicodeIO()) as fp:
|
||
+ main(argv=['--tee', '--mininterval', '0', '--miniters', '1'],
|
||
+ fp=fp)
|
||
+ # spaces to clear intermediate lines could increase length
|
||
+ assert len(fp.getvalue()) >= res + len(IN_DATA)
|
||
+
|
||
+ # test --null
|
||
+ _STDOUT = sys.stdout
|
||
+ try:
|
||
+ with closing(StringIO()) as sys.stdout:
|
||
+ with closing(StringIO()) as sys.stdin:
|
||
+ sys.stdin.write(IN_DATA)
|
||
+
|
||
+ sys.stdin.seek(0)
|
||
+ with closing(UnicodeIO()) as fp:
|
||
+ main(argv=['--null'], fp=fp)
|
||
+ assert not sys.stdout.getvalue()
|
||
+
|
||
+ with closing(StringIO()) as sys.stdin:
|
||
+ sys.stdin.write(IN_DATA)
|
||
+
|
||
+ sys.stdin.seek(0)
|
||
+ with closing(UnicodeIO()) as fp:
|
||
+ main(argv=[], fp=fp)
|
||
+ assert sys.stdout.getvalue()
|
||
+ except:
|
||
+ sys.stdout = _STDOUT
|
||
raise
|
||
- else:
|
||
- raise TqdmKeyError('Cannot have both --update --update_to')
|
||
-
|
||
- # test SystemExits
|
||
- for i in ('-h', '--help', '-v', '--version'):
|
||
- sys.argv = ['', i]
|
||
+ else:
|
||
+ sys.stdout = _STDOUT
|
||
+
|
||
+ # test integer --update
|
||
+ with closing(StringIO()) as sys.stdin:
|
||
+ sys.stdin.write(IN_DATA)
|
||
+
|
||
+ sys.stdin.seek(0)
|
||
+ with closing(UnicodeIO()) as fp:
|
||
+ main(argv=['--update'], fp=fp)
|
||
+ res = fp.getvalue()
|
||
+ assert str(N // 2 * N) + "it" in res # arithmetic sum formula
|
||
+
|
||
+ # test integer --update --delim
|
||
+ with closing(StringIO()) as sys.stdin:
|
||
+ sys.stdin.write(IN_DATA.replace('\n', 'D'))
|
||
+
|
||
+ sys.stdin.seek(0)
|
||
+ with closing(UnicodeIO()) as fp:
|
||
+ main(argv=['--update', '--delim', 'D'], fp=fp)
|
||
+ res = fp.getvalue()
|
||
+ assert str(N // 2 * N) + "it" in res # arithmetic sum formula
|
||
+
|
||
+ # test integer --update_to
|
||
+ with closing(StringIO()) as sys.stdin:
|
||
+ sys.stdin.write(IN_DATA)
|
||
+
|
||
+ sys.stdin.seek(0)
|
||
+ with closing(UnicodeIO()) as fp:
|
||
+ main(argv=['--update-to'], fp=fp)
|
||
+ res = fp.getvalue()
|
||
+ assert str(N - 1) + "it" in res
|
||
+ assert str(N) + "it" not in res
|
||
+
|
||
+ # test integer --update_to --delim
|
||
+ with closing(StringIO()) as sys.stdin:
|
||
+ sys.stdin.write(IN_DATA.replace('\n', 'D'))
|
||
+
|
||
+ sys.stdin.seek(0)
|
||
+ with closing(UnicodeIO()) as fp:
|
||
+ main(argv=['--update-to', '--delim', 'D'], fp=fp)
|
||
+ res = fp.getvalue()
|
||
+ assert str(N - 1) + "it" in res
|
||
+ assert str(N) + "it" not in res
|
||
+
|
||
+ # test float --update_to
|
||
+ IN_DATA = '\n'.join((str(i / 2.0) for i in _range(N)))
|
||
+ with closing(StringIO()) as sys.stdin:
|
||
+ sys.stdin.write(IN_DATA)
|
||
+
|
||
+ sys.stdin.seek(0)
|
||
+ with closing(UnicodeIO()) as fp:
|
||
+ main(argv=['--update-to'], fp=fp)
|
||
+ res = fp.getvalue()
|
||
+ assert str((N - 1) / 2.0) + "it" in res
|
||
+ assert str(N / 2.0) + "it" not in res
|
||
+
|
||
+ # clean up
|
||
+ sys.stdin, sys.argv = _SYS
|
||
+
|
||
+ def test_manpath(self):
|
||
+ """Test CLI --manpath"""
|
||
+ if IS_WIN:
|
||
+ raise unittest.SkipTest
|
||
+ tmp = mkdtemp()
|
||
+ man = path.join(tmp, "tqdm.1")
|
||
+ assert not path.exists(man)
|
||
try:
|
||
- main(fp=NULL)
|
||
+ main(argv=['--manpath', tmp], fp=NULL)
|
||
except SystemExit:
|
||
pass
|
||
else:
|
||
- raise ValueError('expected SystemExit')
|
||
+ raise SystemExit("Expected system exit")
|
||
+ assert path.exists(man)
|
||
+ rmtree(tmp, True)
|
||
+
|
||
+ def test_comppath(self):
|
||
+ """Test CLI --comppath"""
|
||
+ if IS_WIN:
|
||
+ raise unittest.SkipTest
|
||
+ tmp = mkdtemp()
|
||
+ man = path.join(tmp, "tqdm_completion.sh")
|
||
+ assert not path.exists(man)
|
||
+ try:
|
||
+ main(argv=['--comppath', tmp], fp=NULL)
|
||
+ except SystemExit:
|
||
+ pass
|
||
+ else:
|
||
+ raise SystemExit("Expected system exit")
|
||
+ assert path.exists(man)
|
||
+
|
||
+ # check most important options appear
|
||
+ with io_open(man, mode='r', encoding='utf-8') as fd:
|
||
+ script = fd.read()
|
||
+ opts = set([
|
||
+ '--help', '--desc', '--total', '--leave', '--ncols', '--ascii',
|
||
+ '--dynamic_ncols', '--position', '--bytes', '--nrows', '--delim',
|
||
+ '--manpath', '--comppath'
|
||
+ ])
|
||
+ assert all(args in script for args in opts)
|
||
+ rmtree(tmp, True)
|
||
+
|
||
+ def test_exceptions(self):
|
||
+ """Test CLI Exceptions"""
|
||
+ _SYS = sys.stdin, sys.argv
|
||
+ sys.stdin = map(str, _range(123))
|
||
+
|
||
+ sys.argv = ['', '-ascii', '-unit_scale', '--bad_arg_u_ment', 'foo']
|
||
+ try:
|
||
+ main(fp=NULL)
|
||
+ except TqdmKeyError as e:
|
||
+ if 'bad_arg_u_ment' not in str(e):
|
||
+ raise
|
||
+ else:
|
||
+ raise TqdmKeyError('bad_arg_u_ment')
|
||
+
|
||
+ sys.argv = ['', '-ascii', '-unit_scale', 'invalid_bool_value']
|
||
+ try:
|
||
+ main(fp=NULL)
|
||
+ except TqdmTypeError as e:
|
||
+ if 'invalid_bool_value' not in str(e):
|
||
+ raise
|
||
+ else:
|
||
+ raise TqdmTypeError('invalid_bool_value')
|
||
+
|
||
+ sys.argv = ['', '-ascii', '--total', 'invalid_int_value']
|
||
+ try:
|
||
+ main(fp=NULL)
|
||
+ except TqdmTypeError as e:
|
||
+ if 'invalid_int_value' not in str(e):
|
||
+ raise
|
||
+ else:
|
||
+ raise TqdmTypeError('invalid_int_value')
|
||
|
||
- # clean up
|
||
- sys.stdin, sys.argv = _SYS
|
||
+ sys.argv = ['', '--update', '--update_to']
|
||
+ try:
|
||
+ main(fp=NULL)
|
||
+ except TqdmKeyError as e:
|
||
+ if 'Can only have one of --' not in str(e):
|
||
+ raise
|
||
+ else:
|
||
+ raise TqdmKeyError('Cannot have both --update --update_to')
|
||
+
|
||
+ # test SystemExits
|
||
+ for i in ('-h', '--help', '-v', '--version'):
|
||
+ sys.argv = ['', i]
|
||
+ try:
|
||
+ main(fp=NULL)
|
||
+ except SystemExit:
|
||
+ pass
|
||
+ else:
|
||
+ raise ValueError('expected SystemExit')
|
||
+
|
||
+ # clean up
|
||
+ sys.stdin, sys.argv = _SYS
|
||
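test_main is skipped because the posix_pipe() hack in the CLI does not cooperate with pytest's capture machinery, but test_pipes still covers the basic piping path; it boils down to:

    # what test_pipes drives: pipe another process's output through the tqdm CLI
    import subprocess
    import sys

    ls = subprocess.Popen('ls', stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out = subprocess.check_output(
        [sys.executable, '-c', 'from tqdm.cli import main; main()'],
        stdin=ls.stdout, stderr=subprocess.STDOUT)
    ls.wait()
    # the piped data comes back unchanged; the progress bar (stderr) is merged
    # into the captured output here, exactly as in the test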
diff --git a/tqdm/tests/tests_notebook.py b/tqdm/tests/tests_notebook.py
index 3af992f0..cb422391 100644
--- a/tqdm/tests/tests_notebook.py
+++ b/tqdm/tests/tests_notebook.py
@@ -1,9 +1,9 @@
from tqdm.notebook import tqdm as tqdm_notebook
-from tests_tqdm import with_setup, pretest, posttest
+from tests_tqdm import TestWithInstancesCheck


-@with_setup(pretest, posttest)
-def test_notebook_disabled_description():
-    """Test that set_description works for disabled tqdm_notebook"""
-    with tqdm_notebook(1, disable=True) as t:
-        t.set_description("description")
+class TestTqdmNotebook(TestWithInstancesCheck):
+    def test_notebook_disabled_description(self):
+        """Test that set_description works for disabled tqdm_notebook"""
+        with tqdm_notebook(1, disable=True) as t:
+            t.set_description("description")
diff --git a/tqdm/tests/tests_pandas.py b/tqdm/tests/tests_pandas.py
|
||
index 8719a7ca..5d9f5944 100644
|
||
--- a/tqdm/tests/tests_pandas.py
|
||
+++ b/tqdm/tests/tests_pandas.py
|
||
@@ -1,264 +1,250 @@
|
||
+import unittest
|
||
from tqdm import tqdm
|
||
-from tests_tqdm import with_setup, pretest, posttest, SkipTest, \
|
||
- StringIO, closing
|
||
-
|
||
-
|
||
-@with_setup(pretest, posttest)
|
||
-def test_pandas_setup():
|
||
- """Test tqdm.pandas()"""
|
||
- try:
|
||
- from numpy.random import randint
|
||
- import pandas as pd
|
||
- except ImportError:
|
||
- raise SkipTest
|
||
-
|
||
- with closing(StringIO()) as our_file:
|
||
- tqdm.pandas(file=our_file, leave=True, ascii=True, total=123)
|
||
- series = pd.Series(randint(0, 50, (100,)))
|
||
- series.progress_apply(lambda x: x + 10)
|
||
- res = our_file.getvalue()
|
||
- assert '100/123' in res
|
||
-
|
||
-
|
||
-@with_setup(pretest, posttest)
|
||
-def test_pandas_rolling_expanding():
|
||
- """Test pandas.(Series|DataFrame).(rolling|expanding)"""
|
||
- try:
|
||
- from numpy.random import randint
|
||
- import pandas as pd
|
||
- except ImportError:
|
||
- raise SkipTest
|
||
-
|
||
- with closing(StringIO()) as our_file:
|
||
- tqdm.pandas(file=our_file, leave=True, ascii=True)
|
||
-
|
||
- series = pd.Series(randint(0, 50, (123,)))
|
||
- res1 = series.rolling(10).progress_apply(lambda x: 1, raw=True)
|
||
- res2 = series.rolling(10).apply(lambda x: 1, raw=True)
|
||
- assert res1.equals(res2)
|
||
-
|
||
- res3 = series.expanding(10).progress_apply(lambda x: 2, raw=True)
|
||
- res4 = series.expanding(10).apply(lambda x: 2, raw=True)
|
||
- assert res3.equals(res4)
|
||
-
|
||
- expects = ['114it'] # 123-10+1
|
||
- for exres in expects:
|
||
- our_file.seek(0)
|
||
- if our_file.getvalue().count(exres) < 2:
|
||
- our_file.seek(0)
|
||
- raise AssertionError(
|
||
- "\nExpected:\n{0}\nIn:\n{1}\n".format(
|
||
- exres + " at least twice.", our_file.read()))
|
||
-
|
||
-
|
||
-@with_setup(pretest, posttest)
|
||
-def test_pandas_series():
|
||
- """Test pandas.Series.progress_apply and .progress_map"""
|
||
- try:
|
||
- from numpy.random import randint
|
||
- import pandas as pd
|
||
- except ImportError:
|
||
- raise SkipTest
|
||
-
|
||
- with closing(StringIO()) as our_file:
|
||
- tqdm.pandas(file=our_file, leave=True, ascii=True)
|
||
-
|
||
- series = pd.Series(randint(0, 50, (123,)))
|
||
- res1 = series.progress_apply(lambda x: x + 10)
|
||
- res2 = series.apply(lambda x: x + 10)
|
||
- assert res1.equals(res2)
|
||
-
|
||
- res3 = series.progress_map(lambda x: x + 10)
|
||
- res4 = series.map(lambda x: x + 10)
|
||
- assert res3.equals(res4)
|
||
+from tests_tqdm import TestWithInstancesCheck, StringIO, closing
|
||
+
|
||
+
|
||
+class TestTqdmPandas(TestWithInstancesCheck):
|
||
+ def test_pandas_setup(self):
|
||
+ """Test tqdm.pandas()"""
|
||
+ try:
|
||
+ from numpy.random import randint
|
||
+ import pandas as pd
|
||
+ except ImportError:
|
||
+ raise unittest.SkipTest
|
||
+
|
||
+ with closing(StringIO()) as our_file:
|
||
+ tqdm.pandas(file=our_file, leave=True, ascii=True, total=123)
|
||
+ series = pd.Series(randint(0, 50, (100,)))
|
||
+ series.progress_apply(lambda x: x + 10)
|
||
+ res = our_file.getvalue()
|
||
+ assert '100/123' in res
|
||
+
|
||
+ def test_pandas_rolling_expanding(self):
|
||
+ """Test pandas.(Series|DataFrame).(rolling|expanding)"""
|
||
+ try:
|
||
+ from numpy.random import randint
|
||
+ import pandas as pd
|
||
+ except ImportError:
|
||
+ raise unittest.SkipTest
|
||
+
|
||
+ with closing(StringIO()) as our_file:
|
||
+ tqdm.pandas(file=our_file, leave=True, ascii=True)
|
||
+
|
||
+ series = pd.Series(randint(0, 50, (123,)))
|
||
+ res1 = series.rolling(10).progress_apply(lambda x: 1, raw=True)
|
||
+ res2 = series.rolling(10).apply(lambda x: 1, raw=True)
|
||
+ assert res1.equals(res2)
|
||
+
|
||
+ res3 = series.expanding(10).progress_apply(lambda x: 2, raw=True)
|
||
+ res4 = series.expanding(10).apply(lambda x: 2, raw=True)
|
||
+ assert res3.equals(res4)
|
||
|
||
- expects = ['100%', '123/123']
|
||
- for exres in expects:
|
||
- our_file.seek(0)
|
||
- if our_file.getvalue().count(exres) < 2:
|
||
+ expects = ['114it'] # 123-10+1
|
||
+ for exres in expects:
|
||
our_file.seek(0)
|
||
- raise AssertionError(
|
||
- "\nExpected:\n{0}\nIn:\n{1}\n".format(
|
||
- exres + " at least twice.", our_file.read()))
|
||
-
|
||
-
|
||
-@with_setup(pretest, posttest)
|
||
-def test_pandas_data_frame():
|
||
- """Test pandas.DataFrame.progress_apply and .progress_applymap"""
|
||
- try:
|
||
- from numpy.random import randint
|
||
- import pandas as pd
|
||
- except ImportError:
|
||
- raise SkipTest
|
||
-
|
||
- with closing(StringIO()) as our_file:
|
||
- tqdm.pandas(file=our_file, leave=True, ascii=True)
|
||
- df = pd.DataFrame(randint(0, 50, (100, 200)))
|
||
-
|
||
- def task_func(x):
|
||
- return x + 1
|
||
-
|
||
- # applymap
|
||
- res1 = df.progress_applymap(task_func)
|
||
- res2 = df.applymap(task_func)
|
||
- assert res1.equals(res2)
|
||
-
|
||
- # apply unhashable
|
||
- res1 = []
|
||
- df.progress_apply(res1.extend)
|
||
- assert len(res1) == df.size
|
||
-
|
||
- # apply
|
||
- for axis in [0, 1, 'index', 'columns']:
|
||
- res3 = df.progress_apply(task_func, axis=axis)
|
||
- res4 = df.apply(task_func, axis=axis)
|
||
+ if our_file.getvalue().count(exres) < 2:
|
||
+ our_file.seek(0)
|
||
+ raise AssertionError(
|
||
+ "\nExpected:\n{0}\nIn:\n{1}\n".format(
|
||
+ exres + " at least twice.", our_file.read()))
|
||
+
|
||
+ def test_pandas_series(self):
|
||
+ """Test pandas.Series.progress_apply and .progress_map"""
|
||
+ try:
|
||
+ from numpy.random import randint
|
||
+ import pandas as pd
|
||
+ except ImportError:
|
||
+ raise unittest.SkipTest
|
||
+
|
||
+ with closing(StringIO()) as our_file:
|
||
+ tqdm.pandas(file=our_file, leave=True, ascii=True)
|
||
+
|
||
+ series = pd.Series(randint(0, 50, (123,)))
|
||
+ res1 = series.progress_apply(lambda x: x + 10)
|
||
+ res2 = series.apply(lambda x: x + 10)
|
||
+ assert res1.equals(res2)
|
||
+
|
||
+ res3 = series.progress_map(lambda x: x + 10)
|
||
+ res4 = series.map(lambda x: x + 10)
|
||
assert res3.equals(res4)
|
||
|
||
- our_file.seek(0)
|
||
- if our_file.read().count('100%') < 3:
|
||
- our_file.seek(0)
|
||
- raise AssertionError("\nExpected:\n{0}\nIn:\n{1}\n".format(
|
||
- '100% at least three times', our_file.read()))
|
||
+ expects = ['100%', '123/123']
|
||
+ for exres in expects:
|
||
+ our_file.seek(0)
|
||
+ if our_file.getvalue().count(exres) < 2:
|
||
+ our_file.seek(0)
|
||
+ raise AssertionError(
|
||
+ "\nExpected:\n{0}\nIn:\n{1}\n".format(
|
||
+ exres + " at least twice.", our_file.read()))
|
||
+
|
||
+ def test_pandas_data_frame(self):
|
||
+ """Test pandas.DataFrame.progress_apply and .progress_applymap"""
|
||
+ try:
|
||
+ from numpy.random import randint
|
||
+ import pandas as pd
|
||
+ except ImportError:
|
||
+ raise unittest.SkipTest
|
||
+
|
||
+ with closing(StringIO()) as our_file:
|
||
+ tqdm.pandas(file=our_file, leave=True, ascii=True)
|
||
+ df = pd.DataFrame(randint(0, 50, (100, 200)))
|
||
+
|
||
+ def task_func(x):
|
||
+ return x + 1
|
||
+
|
||
+ # applymap
|
||
+ res1 = df.progress_applymap(task_func)
|
||
+ res2 = df.applymap(task_func)
|
||
+ assert res1.equals(res2)
|
||
+
|
||
+ # apply unhashable
|
||
+ res1 = []
|
||
+ df.progress_apply(res1.extend)
|
||
+ assert len(res1) == df.size
|
||
+
|
||
+ # apply
|
||
+ for axis in [0, 1, 'index', 'columns']:
|
||
+ res3 = df.progress_apply(task_func, axis=axis)
|
||
+ res4 = df.apply(task_func, axis=axis)
|
||
+ assert res3.equals(res4)
|
||
|
||
- # apply_map, apply axis=0, apply axis=1
|
||
- expects = ['20000/20000', '200/200', '100/100']
|
||
- for exres in expects:
|
||
our_file.seek(0)
|
||
- if our_file.getvalue().count(exres) < 1:
|
||
+ if our_file.read().count('100%') < 3:
|
||
our_file.seek(0)
|
||
- raise AssertionError(
|
||
- "\nExpected:\n{0}\nIn:\n {1}\n".format(
|
||
- exres + " at least once.", our_file.read()))
|
||
-
|
||
+ raise AssertionError("\nExpected:\n{0}\nIn:\n{1}\n".format(
|
||
+ '100% at least three times', our_file.read()))
|
||
|
||
-@with_setup(pretest, posttest)
|
||
-def test_pandas_groupby_apply():
|
||
- """Test pandas.DataFrame.groupby(...).progress_apply"""
|
||
- try:
|
||
- from numpy.random import randint, rand
|
||
- import pandas as pd
|
||
- except ImportError:
|
||
- raise SkipTest
|
||
+ # apply_map, apply axis=0, apply axis=1
|
||
+ expects = ['20000/20000', '200/200', '100/100']
|
||
+ for exres in expects:
|
||
+ our_file.seek(0)
|
||
+ if our_file.getvalue().count(exres) < 1:
|
||
+ our_file.seek(0)
|
||
+ raise AssertionError(
|
||
+ "\nExpected:\n{0}\nIn:\n {1}\n".format(
|
||
+ exres + " at least once.", our_file.read()))
|
||
+
|
||
+ def test_pandas_groupby_apply(self):
|
||
+ """Test pandas.DataFrame.groupby(...).progress_apply"""
|
||
+ try:
|
||
+ from numpy.random import randint, rand
|
||
+ import pandas as pd
|
||
+ except ImportError:
|
||
+ raise unittest.SkipTest
|
||
+
|
||
+ with closing(StringIO()) as our_file:
|
||
+ tqdm.pandas(file=our_file, leave=False, ascii=True)
|
||
+
|
||
+ df = pd.DataFrame(randint(0, 50, (500, 3)))
|
||
+ df.groupby(0).progress_apply(lambda x: None)
|
||
+
|
||
+ dfs = pd.DataFrame(randint(0, 50, (500, 3)), columns=list('abc'))
|
||
+ dfs.groupby(['a']).progress_apply(lambda x: None)
|
||
+
|
||
+ df2 = df = pd.DataFrame(dict(a=randint(1, 8, 10000), b=rand(10000)))
|
||
+ res1 = df2.groupby("a").apply(max)
|
||
+ res2 = df2.groupby("a").progress_apply(max)
|
||
+ assert res1.equals(res2)
|
||
|
||
- with closing(StringIO()) as our_file:
|
||
- tqdm.pandas(file=our_file, leave=False, ascii=True)
|
||
+ our_file.seek(0)
|
||
|
||
- df = pd.DataFrame(randint(0, 50, (500, 3)))
|
||
- df.groupby(0).progress_apply(lambda x: None)
|
||
+ # don't expect final output since no `leave` and
|
||
+ # high dynamic `miniters`
|
||
+ nexres = '100%|##########|'
|
||
+ if nexres in our_file.read():
|
||
+ our_file.seek(0)
|
||
+ raise AssertionError("\nDid not expect:\n{0}\nIn:{1}\n".format(
|
||
+ nexres, our_file.read()))
|
||
|
||
- dfs = pd.DataFrame(randint(0, 50, (500, 3)), columns=list('abc'))
|
||
- dfs.groupby(['a']).progress_apply(lambda x: None)
|
||
+ with closing(StringIO()) as our_file:
|
||
+ tqdm.pandas(file=our_file, leave=True, ascii=True)
|
||
|
||
- df2 = df = pd.DataFrame(dict(a=randint(1, 8, 10000), b=rand(10000)))
|
||
- res1 = df2.groupby("a").apply(max)
|
||
- res2 = df2.groupby("a").progress_apply(max)
|
||
- assert res1.equals(res2)
|
||
+ dfs = pd.DataFrame(randint(0, 50, (500, 3)), columns=list('abc'))
|
||
+ dfs.loc[0] = [2, 1, 1]
|
||
+ dfs['d'] = 100
|
||
|
||
- our_file.seek(0)
|
||
+ expects = ['500/500', '1/1', '4/4', '2/2']
|
||
+ dfs.groupby(dfs.index).progress_apply(lambda x: None)
|
||
+ dfs.groupby('d').progress_apply(lambda x: None)
|
||
+ dfs.groupby(dfs.columns, axis=1).progress_apply(lambda x: None)
|
||
+ dfs.groupby([2, 2, 1, 1], axis=1).progress_apply(lambda x: None)
|
||
|
||
- # don't expect final output since no `leave` and
|
||
- # high dynamic `miniters`
|
||
- nexres = '100%|##########|'
|
||
- if nexres in our_file.read():
|
||
our_file.seek(0)
|
||
- raise AssertionError("\nDid not expect:\n{0}\nIn:{1}\n".format(
|
||
- nexres, our_file.read()))
|
||
-
|
||
- with closing(StringIO()) as our_file:
|
||
- tqdm.pandas(file=our_file, leave=True, ascii=True)
|
||
-
|
||
- dfs = pd.DataFrame(randint(0, 50, (500, 3)), columns=list('abc'))
|
||
- dfs.loc[0] = [2, 1, 1]
|
||
- dfs['d'] = 100
|
||
+ if our_file.read().count('100%') < 4:
|
||
+ our_file.seek(0)
|
||
+ raise AssertionError("\nExpected:\n{0}\nIn:\n{1}\n".format(
|
||
+ '100% at least four times', our_file.read()))
|
||
|
||
- expects = ['500/500', '1/1', '4/4', '2/2']
|
||
- dfs.groupby(dfs.index).progress_apply(lambda x: None)
|
||
- dfs.groupby('d').progress_apply(lambda x: None)
|
||
- dfs.groupby(dfs.columns, axis=1).progress_apply(lambda x: None)
|
||
- dfs.groupby([2, 2, 1, 1], axis=1).progress_apply(lambda x: None)
|
||
+ for exres in expects:
|
||
+ our_file.seek(0)
|
||
+ if our_file.getvalue().count(exres) < 1:
|
||
+ our_file.seek(0)
|
||
+ raise AssertionError(
|
||
+ "\nExpected:\n{0}\nIn:\n {1}\n".format(
|
||
+ exres + " at least once.", our_file.read()))
|
||
+
|
||
+ def test_pandas_leave(self):
|
||
+ """Test pandas with `leave=True`"""
|
||
+ try:
|
||
+ from numpy.random import randint
|
||
+ import pandas as pd
|
||
+ except ImportError:
|
||
+ raise unittest.SkipTest
|
||
+
|
||
+ with closing(StringIO()) as our_file:
|
||
+ df = pd.DataFrame(randint(0, 100, (1000, 6)))
|
||
+ tqdm.pandas(file=our_file, leave=True, ascii=True)
|
||
+ df.groupby(0).progress_apply(lambda x: None)
|
||
|
||
- our_file.seek(0)
|
||
- if our_file.read().count('100%') < 4:
|
||
our_file.seek(0)
|
||
- raise AssertionError("\nExpected:\n{0}\nIn:\n{1}\n".format(
|
||
- '100% at least four times', our_file.read()))
|
||
|
||
- for exres in expects:
|
||
- our_file.seek(0)
|
||
- if our_file.getvalue().count(exres) < 1:
|
||
+ exres = '100%|##########| 100/100'
|
||
+ if exres not in our_file.read():
our_file.seek(0)
raise AssertionError(
- "\nExpected:\n{0}\nIn:\n {1}\n".format(
- exres + " at least once.", our_file.read()))
-
-
-@with_setup(pretest, posttest)
-def test_pandas_leave():
- """Test pandas with `leave=True`"""
- try:
- from numpy.random import randint
- import pandas as pd
- except ImportError:
- raise SkipTest
-
- with closing(StringIO()) as our_file:
- df = pd.DataFrame(randint(0, 100, (1000, 6)))
- tqdm.pandas(file=our_file, leave=True, ascii=True)
- df.groupby(0).progress_apply(lambda x: None)
-
- our_file.seek(0)
-
- exres = '100%|##########| 100/100'
- if exres not in our_file.read():
- our_file.seek(0)
- raise AssertionError(
- "\nExpected:\n{0}\nIn:{1}\n".format(exres, our_file.read()))
-
-
-@with_setup(pretest, posttest)
-def test_pandas_apply_args_deprecation():
- """Test warning info in
- `pandas.Dataframe(Series).progress_apply(func, *args)`"""
- try:
- from numpy.random import randint
- from tqdm import tqdm_pandas
- import pandas as pd
- except ImportError:
- raise SkipTest
-
- with closing(StringIO()) as our_file:
- tqdm_pandas(tqdm(file=our_file, leave=False, ascii=True, ncols=20))
- df = pd.DataFrame(randint(0, 50, (500, 3)))
- df.progress_apply(lambda x: None, 1) # 1 shall cause a warning
- # Check deprecation message
- res = our_file.getvalue()
- assert all([i in res for i in (
- "TqdmDeprecationWarning", "not supported",
- "keyword arguments instead")])
-
-
-@with_setup(pretest, posttest)
-def test_pandas_deprecation():
- """Test bar object instance as argument deprecation"""
- try:
- from numpy.random import randint
- from tqdm import tqdm_pandas
- import pandas as pd
- except ImportError:
- raise SkipTest
-
- with closing(StringIO()) as our_file:
- tqdm_pandas(tqdm(file=our_file, leave=False, ascii=True, ncols=20))
- df = pd.DataFrame(randint(0, 50, (500, 3)))
- df.groupby(0).progress_apply(lambda x: None)
- # Check deprecation message
- assert "TqdmDeprecationWarning" in our_file.getvalue()
- assert "instead of `tqdm_pandas(tqdm(...))`" in our_file.getvalue()
-
- with closing(StringIO()) as our_file:
- tqdm_pandas(tqdm, file=our_file, leave=False, ascii=True, ncols=20)
- df = pd.DataFrame(randint(0, 50, (500, 3)))
- df.groupby(0).progress_apply(lambda x: None)
- # Check deprecation message
- assert "TqdmDeprecationWarning" in our_file.getvalue()
- assert "instead of `tqdm_pandas(tqdm, ...)`" in our_file.getvalue()
+ "\nExpected:\n{0}\nIn:{1}\n".format(exres, our_file.read()))
+
+ def test_pandas_apply_args_deprecation(self):
+ """Test warning info in
+ `pandas.Dataframe(Series).progress_apply(func, *args)`"""
+ try:
+ from numpy.random import randint
+ from tqdm import tqdm_pandas
+ import pandas as pd
+ except ImportError:
+ raise unittest.SkipTest
+
+ with closing(StringIO()) as our_file:
+ tqdm_pandas(tqdm(file=our_file, leave=False, ascii=True, ncols=20))
+ df = pd.DataFrame(randint(0, 50, (500, 3)))
+ df.progress_apply(lambda x: None, 1) # 1 shall cause a warning
+ # Check deprecation message
+ res = our_file.getvalue()
+ assert all([i in res for i in (
+ "TqdmDeprecationWarning", "not supported",
+ "keyword arguments instead")])
+
+ def test_pandas_deprecation(self):
+ """Test bar object instance as argument deprecation"""
+ try:
+ from numpy.random import randint
+ from tqdm import tqdm_pandas
+ import pandas as pd
+ except ImportError:
+ raise unittest.SkipTest
+
+ with closing(StringIO()) as our_file:
+ tqdm_pandas(tqdm(file=our_file, leave=False, ascii=True, ncols=20))
+ df = pd.DataFrame(randint(0, 50, (500, 3)))
+ df.groupby(0).progress_apply(lambda x: None)
+ # Check deprecation message
+ assert "TqdmDeprecationWarning" in our_file.getvalue()
+ assert "instead of `tqdm_pandas(tqdm(...))`" in our_file.getvalue()
+
+ with closing(StringIO()) as our_file:
+ tqdm_pandas(tqdm, file=our_file, leave=False, ascii=True, ncols=20)
+ df = pd.DataFrame(randint(0, 50, (500, 3)))
+ df.groupby(0).progress_apply(lambda x: None)
+ # Check deprecation message
+ assert "TqdmDeprecationWarning" in our_file.getvalue()
+ assert "instead of `tqdm_pandas(tqdm, ...)`" in our_file.getvalue()
diff --git a/tqdm/tests/tests_perf.py b/tqdm/tests/tests_perf.py
index 8e808f7d..b6f558fb 100644
--- a/tqdm/tests/tests_perf.py
+++ b/tqdm/tests/tests_perf.py
@@ -1,4 +1,5 @@
from __future__ import print_function, division
+import unittest
from contextlib import contextmanager
from functools import wraps
from time import sleep, time
@@ -11,11 +12,9 @@
import sys

from tqdm import tqdm, trange
-from tests_tqdm import with_setup, pretest, posttest, StringIO, closing, \
+from tests_tqdm import TestWithInstancesCheck, StringIO, closing, \
_range, patch_lock

-from nose.plugins.skip import SkipTest
-

def cpu_sleep(t):
"""Sleep the given amount of cpu time"""
@@ -41,7 +40,7 @@ def checkCpuTime(sleeptime=0.2):
if abs(t1) < 0.0001 and t1 < t2 / 10:
checkCpuTime.passed = True
return True
- raise SkipTest
+ raise unittest.SkipTest


checkCpuTime.passed = False
@@ -74,7 +73,7 @@ def test_inner(*args, **kwargs):
if check_cpu_time:
checkCpuTime()
func(*args, **kwargs)
- except SkipTest:
+ except unittest.SkipTest:
raise
except Exception:
if i >= n:
@@ -170,195 +169,184 @@ def assert_performance(thresh, name_left, time_left, name_right, time_right):
ratio=time_left / time_right, thresh=thresh))


-@with_setup(pretest, posttest)
-@retry_on_except()
-def test_iter_basic_overhead():
- """Test overhead of iteration based tqdm"""
+def worker(total, blocking=True):
+ def incr_bar(x):
+ with closing(StringIO()) as our_file:
+ for _ in trange(
+ total, file=our_file,
+ lock_args=None if blocking else (False,),
+ miniters=1, mininterval=0, maxinterval=0):
+ pass
+ return x + 1
+ return incr_bar

- total = int(1e6)

- with closing(MockIO()) as our_file:
- a = 0
- with trange(total, file=our_file) as t:
- with relative_timer() as time_tqdm:
- for i in t:
- a += i
- assert a == (total * total - total) / 2.0
+class TestTqdmPerf(TestWithInstancesCheck):
+ @retry_on_except()
+ def test_iter_basic_overhead(self):
+ """Test overhead of iteration based tqdm"""
+
+ total = int(1e6)
+
+ with closing(MockIO()) as our_file:
+ a = 0
+ with trange(total, file=our_file) as t:
+ with relative_timer() as time_tqdm:
+ for i in t:
+ a += i
+ assert a == (total * total - total) / 2.0

- a = 0
- with relative_timer() as time_bench:
- for i in _range(total):
- a += i
- our_file.write(a)
+ a = 0
+ with relative_timer() as time_bench:
+ for i in _range(total):
+ a += i
+ our_file.write(a)

- assert_performance(3, 'trange', time_tqdm(), 'range', time_bench())
+ assert_performance(3, 'trange', time_tqdm(), 'range', time_bench())

+ @retry_on_except()
+ def test_manual_basic_overhead(self):
+ """Test overhead of manual tqdm"""

-@with_setup(pretest, posttest)
-@retry_on_except()
-def test_manual_basic_overhead():
- """Test overhead of manual tqdm"""
+ total = int(1e6)

- total = int(1e6)
+ with closing(MockIO()) as our_file:
+ with tqdm(total=total * 10, file=our_file, leave=True) as t:
+ a = 0
+ with relative_timer() as time_tqdm:
+ for i in _range(total):
+ a += i
+ t.update(10)

- with closing(MockIO()) as our_file:
- with tqdm(total=total * 10, file=our_file, leave=True) as t:
a = 0
- with relative_timer() as time_tqdm:
+ with relative_timer() as time_bench:
for i in _range(total):
a += i
- t.update(10)
+ our_file.write(a)

- a = 0
- with relative_timer() as time_bench:
- for i in _range(total):
- a += i
- our_file.write(a)
+ assert_performance(5, 'tqdm', time_tqdm(), 'range', time_bench())

- assert_performance(5, 'tqdm', time_tqdm(), 'range', time_bench())
+ @retry_on_except()
+ @patch_lock(thread=True)
+ def test_lock_args(self):
+ """Test overhead of nonblocking threads"""
+ try:
+ from concurrent.futures import ThreadPoolExecutor
+ except ImportError:
+ raise unittest.SkipTest

+ total = 16
+ subtotal = 10000

-def worker(total, blocking=True):
- def incr_bar(x):
- with closing(StringIO()) as our_file:
- for _ in trange(
- total, file=our_file,
- lock_args=None if blocking else (False,),
- miniters=1, mininterval=0, maxinterval=0):
- pass
- return x + 1
- return incr_bar
+ with ThreadPoolExecutor() as pool:
+ sys.stderr.write('block ... ')
+ sys.stderr.flush()
+ with relative_timer() as time_tqdm:
+ res = list(pool.map(worker(subtotal, True), range(total)))
+ assert sum(res) == sum(range(total)) + total
+ sys.stderr.write('noblock ... ')
+ sys.stderr.flush()
+ with relative_timer() as time_noblock:
+ res = list(pool.map(worker(subtotal, False), range(total)))
+ assert sum(res) == sum(range(total)) + total

+ assert_performance(0.5, 'noblock', time_noblock(), 'tqdm', time_tqdm())

-@with_setup(pretest, posttest)
-@retry_on_except()
-@patch_lock(thread=True)
-def test_lock_args():
- """Test overhead of nonblocking threads"""
- try:
- from concurrent.futures import ThreadPoolExecutor
- except ImportError:
- raise SkipTest
-
- total = 16
- subtotal = 10000
-
- with ThreadPoolExecutor() as pool:
- sys.stderr.write('block ... ')
- sys.stderr.flush()
- with relative_timer() as time_tqdm:
- res = list(pool.map(worker(subtotal, True), range(total)))
- assert sum(res) == sum(range(total)) + total
- sys.stderr.write('noblock ... ')
- sys.stderr.flush()
- with relative_timer() as time_noblock:
- res = list(pool.map(worker(subtotal, False), range(total)))
- assert sum(res) == sum(range(total)) + total
-
- assert_performance(0.5, 'noblock', time_noblock(), 'tqdm', time_tqdm())
-
-
-@with_setup(pretest, posttest)
-@retry_on_except()
-def test_iter_overhead_hard():
- """Test overhead of iteration based tqdm (hard)"""
-
- total = int(1e5)
-
- with closing(MockIO()) as our_file:
- a = 0
- with trange(total, file=our_file, leave=True, miniters=1,
- mininterval=0, maxinterval=0) as t:
- with relative_timer() as time_tqdm:
- for i in t:
- a += i
- assert a == (total * total - total) / 2.0
+ @retry_on_except()
+ def test_iter_overhead_hard(self):
+ """Test overhead of iteration based tqdm (hard)"""

- a = 0
- with relative_timer() as time_bench:
- for i in _range(total):
- a += i
- our_file.write(("%i" % a) * 40)
+ total = int(1e5)

- assert_performance(130, 'trange', time_tqdm(), 'range', time_bench())
+ with closing(MockIO()) as our_file:
+ a = 0
+ with trange(total, file=our_file, leave=True, miniters=1,
+ mininterval=0, maxinterval=0) as t:
+ with relative_timer() as time_tqdm:
+ for i in t:
+ a += i
+ assert a == (total * total - total) / 2.0

+ a = 0
+ with relative_timer() as time_bench:
+ for i in _range(total):
+ a += i
+ our_file.write(("%i" % a) * 40)
+
+ assert_performance(130, 'trange', time_tqdm(), 'range', time_bench())
+
+ @retry_on_except()
+ def test_manual_overhead_hard(self):
+ """Test overhead of manual tqdm (hard)"""

-@with_setup(pretest, posttest)
-@retry_on_except()
-def test_manual_overhead_hard():
- """Test overhead of manual tqdm (hard)"""
+ total = int(1e5)

- total = int(1e5)
+ with closing(MockIO()) as our_file:
+ with tqdm(total=total * 10, file=our_file, leave=True, miniters=1,
+ mininterval=0, maxinterval=0) as t:
+ a = 0
+ with relative_timer() as time_tqdm:
+ for i in _range(total):
+ a += i
+ t.update(10)

- with closing(MockIO()) as our_file:
- with tqdm(total=total * 10, file=our_file, leave=True, miniters=1,
- mininterval=0, maxinterval=0) as t:
a = 0
- with relative_timer() as time_tqdm:
+ with relative_timer() as time_bench:
for i in _range(total):
a += i
- t.update(10)
-
- a = 0
- with relative_timer() as time_bench:
- for i in _range(total):
- a += i
- our_file.write(("%i" % a) * 40)
+ our_file.write(("%i" % a) * 40)

- assert_performance(130, 'tqdm', time_tqdm(), 'range', time_bench())
+ assert_performance(130, 'tqdm', time_tqdm(), 'range', time_bench())

+ @retry_on_except()
+ def test_iter_overhead_simplebar_hard(self):
+ """Test overhead of iteration based tqdm vs simple
+ progress bar (hard)"""

-@with_setup(pretest, posttest)
-@retry_on_except()
-def test_iter_overhead_simplebar_hard():
- """Test overhead of iteration based tqdm vs simple progress bar (hard)"""
+ total = int(1e4)

- total = int(1e4)
+ with closing(MockIO()) as our_file:
+ a = 0
+ with trange(total, file=our_file, leave=True, miniters=1,
+ mininterval=0, maxinterval=0) as t:
+ with relative_timer() as time_tqdm:
+ for i in t:
+ a += i
+ assert a == (total * total - total) / 2.0

- with closing(MockIO()) as our_file:
- a = 0
- with trange(total, file=our_file, leave=True, miniters=1,
- mininterval=0, maxinterval=0) as t:
- with relative_timer() as time_tqdm:
- for i in t:
+ a = 0
+ s = simple_progress(_range(total), file=our_file, leave=True,
+ miniters=1, mininterval=0)
+ with relative_timer() as time_bench:
+ for i in s:
a += i
- assert a == (total * total - total) / 2.0
-
- a = 0
- s = simple_progress(_range(total), file=our_file, leave=True,
- miniters=1, mininterval=0)
- with relative_timer() as time_bench:
- for i in s:
- a += i

- assert_performance(
- 10, 'trange', time_tqdm(), 'simple_progress', time_bench())
+ assert_performance(
+ 10, 'trange', time_tqdm(), 'simple_progress', time_bench())

+ @retry_on_except()
+ def test_manual_overhead_simplebar_hard(self):
+ """Test overhead of manual tqdm vs simple progress bar (hard)"""

-@with_setup(pretest, posttest)
-@retry_on_except()
-def test_manual_overhead_simplebar_hard():
- """Test overhead of manual tqdm vs simple progress bar (hard)"""
+ total = int(1e4)

- total = int(1e4)
+ with closing(MockIO()) as our_file:
+ with tqdm(total=total * 10, file=our_file, leave=True, miniters=1,
+ mininterval=0, maxinterval=0) as t:
+ a = 0
+ with relative_timer() as time_tqdm:
+ for i in _range(total):
+ a += i
+ t.update(10)

- with closing(MockIO()) as our_file:
- with tqdm(total=total * 10, file=our_file, leave=True, miniters=1,
- mininterval=0, maxinterval=0) as t:
+ simplebar_update = simple_progress(
+ total=total * 10, file=our_file, leave=True, miniters=1,
+ mininterval=0)
a = 0
- with relative_timer() as time_tqdm:
+ with relative_timer() as time_bench:
for i in _range(total):
a += i
- t.update(10)
-
- simplebar_update = simple_progress(
- total=total * 10, file=our_file, leave=True, miniters=1,
- mininterval=0)
- a = 0
- with relative_timer() as time_bench:
- for i in _range(total):
- a += i
- simplebar_update(10)
-
- assert_performance(
- 10, 'tqdm', time_tqdm(), 'simple_progress', time_bench())
+ simplebar_update(10)
+
+ assert_performance(
+ 10, 'tqdm', time_tqdm(), 'simple_progress', time_bench())
diff --git a/tqdm/tests/tests_synchronisation.py b/tqdm/tests/tests_synchronisation.py
index 60b4860a..95774c11 100644
--- a/tqdm/tests/tests_synchronisation.py
+++ b/tqdm/tests/tests_synchronisation.py
@@ -1,7 +1,7 @@
from __future__ import division
+import unittest
from tqdm import tqdm, trange, TMonitor
-from tests_tqdm import with_setup, pretest, posttest, SkipTest, \
- StringIO, closing, patch_lock
+from tests_tqdm import TestWithInstancesCheck, StringIO, closing, patch_lock
from tests_perf import retry_on_except

from functools import wraps
@@ -102,128 +102,129 @@ def incr_bar(x):
return incr(x)


-@patch_sleep
-@with_setup(pretest, posttest)
-def test_monitor_thread():
- """Test dummy monitoring thread"""
- monitor = TMonitor(FakeTqdm, 10)
- # Test if alive, then killed
- assert monitor.report()
- monitor.exit()
- assert not monitor.report()
- assert not monitor.is_alive()
- del monitor
-
-
-@patch_sleep
-@with_setup(pretest, posttest)
-def test_monitoring_and_cleanup():
- """Test for stalled tqdm instance and monitor deletion"""
- # Note: should fix miniters for these tests, else with dynamic_miniters
- # it's too complicated to handle with monitoring update and maxinterval...
- maxinterval = tqdm.monitor_interval
- assert maxinterval == 10
- total = 1000
-
- with closing(StringIO()) as our_file:
- with tqdm(total=total, file=our_file, miniters=500, mininterval=0.1,
- maxinterval=maxinterval) as t:
- cpu_timify(t, Time)
- # Do a lot of iterations in a small timeframe
- # (smaller than monitor interval)
- Time.fake_sleep(maxinterval / 10) # monitor won't wake up
- t.update(500)
- # check that our fixed miniters is still there
- assert t.miniters <= 500 # TODO: should really be == 500
- # Then do 1 it after monitor interval, so that monitor kicks in
- Time.fake_sleep(maxinterval)
- t.update(1)
- # Wait for the monitor to get out of sleep's loop and update tqdm..
- timeend = Time.time()
- while not (t.monitor.woken >= timeend and t.miniters == 1):
- Time.fake_sleep(1) # Force awake up if it woken too soon
- assert t.miniters == 1 # check that monitor corrected miniters
- # Note: at this point, there may be a race condition: monitor saved
- # current woken time but Time.sleep() happen just before monitor
- # sleep. To fix that, either sleep here or increase time in a loop
- # to ensure that monitor wakes up at some point.
-
- # Try again but already at miniters = 1 so nothing will be done
- Time.fake_sleep(maxinterval)
- t.update(2)
- timeend = Time.time()
- while t.monitor.woken < timeend:
- Time.fake_sleep(1) # Force awake if it woken too soon
- # Wait for the monitor to get out of sleep's loop and update tqdm
- assert t.miniters == 1 # check that monitor corrected miniters
-
-
-@patch_sleep
-@with_setup(pretest, posttest)
-def test_monitoring_multi():
- """Test on multiple bars, one not needing miniters adjustment"""
- # Note: should fix miniters for these tests, else with dynamic_miniters
- # it's too complicated to handle with monitoring update and maxinterval...
- maxinterval = tqdm.monitor_interval
- assert maxinterval == 10
- total = 1000
-
- with closing(StringIO()) as our_file:
- with tqdm(total=total, file=our_file, miniters=500, mininterval=0.1,
- maxinterval=maxinterval) as t1:
- # Set high maxinterval for t2 so monitor does not need to adjust it
+class TestTqdmSynchronisation(TestWithInstancesCheck):
+ @patch_sleep
+ def test_monitor_thread(self):
+ """Test dummy monitoring thread"""
+ monitor = TMonitor(FakeTqdm, 10)
+ # Test if alive, then killed
+ assert monitor.report()
+ monitor.exit()
+ assert not monitor.report()
+ assert not monitor.is_alive()
+ del monitor
+
+ @patch_sleep
+ def test_monitoring_and_cleanup(self):
+ """Test for stalled tqdm instance and monitor deletion"""
+ # Note: should fix miniters for these tests, else with dynamic_miniters
+ # it's too complicated to handle with monitoring update and
+ # maxinterval...
+ maxinterval = tqdm.monitor_interval
+ assert maxinterval == 10
+ total = 1000
+
+ with closing(StringIO()) as our_file:
with tqdm(total=total, file=our_file, miniters=500, mininterval=0.1,
- maxinterval=1E5) as t2:
- cpu_timify(t1, Time)
- cpu_timify(t2, Time)
+ maxinterval=maxinterval) as t:
+ cpu_timify(t, Time)
# Do a lot of iterations in a small timeframe
- Time.fake_sleep(maxinterval / 10)
- t1.update(500)
- t2.update(500)
- assert t1.miniters <= 500 # TODO: should really be == 500
- assert t2.miniters == 500
+ # (smaller than monitor interval)
+ Time.fake_sleep(maxinterval / 10) # monitor won't wake up
+ t.update(500)
+ # check that our fixed miniters is still there
+ assert t.miniters <= 500 # TODO: should really be == 500
# Then do 1 it after monitor interval, so that monitor kicks in
Time.fake_sleep(maxinterval)
- t1.update(1)
- t2.update(1)
- # Wait for the monitor to get out of sleep and update tqdm
+ t.update(1)
+ # Wait for the monitor to get out of sleep's loop and update
+ # tqdm..
timeend = Time.time()
- while not (t1.monitor.woken >= timeend and t1.miniters == 1):
- Time.fake_sleep(1)
- assert t1.miniters == 1 # check that monitor corrected miniters
- assert t2.miniters == 500 # check that t2 was not adjusted
-
-
-@with_setup(pretest, posttest)
-def test_imap():
- """Test multiprocessing.Pool"""
- try:
- from multiprocessing import Pool
- except ImportError:
- raise SkipTest
-
- pool = Pool()
- res = list(tqdm(pool.imap(incr, range(100)), disable=True))
- assert res[-1] == 100
-
-
-# py2: locks won't propagate to incr_bar so may cause `AttributeError`
-@with_setup(pretest, posttest)
-@retry_on_except(n=3 if sys.version_info < (3,) else 1, check_cpu_time=False)
-@patch_lock(thread=True)
-def test_threadpool():
- """Test concurrent.futures.ThreadPoolExecutor"""
- try:
- from concurrent.futures import ThreadPoolExecutor
- except ImportError:
- raise SkipTest
-
- with ThreadPoolExecutor(8) as pool:
+ while not (t.monitor.woken >= timeend and t.miniters == 1):
+ Time.fake_sleep(1) # Force awake up if it woken too soon
+ assert t.miniters == 1 # check that monitor corrected miniters
+ # Note: at this point, there may be a race condition: monitor
+ # saved current woken time but Time.sleep() happen just before
+ # monitor sleep. To fix that, either sleep here or increase time
+ # in a loop to ensure that monitor wakes up at some point.
+
+ # Try again but already at miniters = 1 so nothing will be done
+ Time.fake_sleep(maxinterval)
+ t.update(2)
+ timeend = Time.time()
+ while t.monitor.woken < timeend:
+ Time.fake_sleep(1) # Force awake if it woken too soon
+ # Wait for the monitor to get out of sleep's loop and update
+ # tqdm
+ assert t.miniters == 1 # check that monitor corrected miniters
+
+ @patch_sleep
+ def test_monitoring_multi(self):
+ """Test on multiple bars, one not needing miniters adjustment"""
+ # Note: should fix miniters for these tests, else with dynamic_miniters
+ # it's too complicated to handle with monitoring update and
+ # maxinterval...
+ maxinterval = tqdm.monitor_interval
+ assert maxinterval == 10
+ total = 1000
+
+ with closing(StringIO()) as our_file:
+ with tqdm(total=total, file=our_file, miniters=500, mininterval=0.1,
+ maxinterval=maxinterval) as t1:
+ # Set high maxinterval for t2 so monitor does not need to adjust
+ # it
+ with tqdm(total=total, file=our_file, miniters=500,
+ mininterval=0.1, maxinterval=1E5) as t2:
+ cpu_timify(t1, Time)
+ cpu_timify(t2, Time)
+ # Do a lot of iterations in a small timeframe
+ Time.fake_sleep(maxinterval / 10)
+ t1.update(500)
+ t2.update(500)
+ assert t1.miniters <= 500 # TODO: should really be == 500
+ assert t2.miniters == 500
+ # Then do 1 it after monitor interval, so that monitor
+ # kicks in
+ Time.fake_sleep(maxinterval)
+ t1.update(1)
+ t2.update(1)
+ # Wait for the monitor to get out of sleep and update tqdm
+ timeend = Time.time()
+ while not (t1.monitor.woken
+ >= timeend and t1.miniters == 1):
+ Time.fake_sleep(1)
+ # check that monitor corrected miniters
+ assert t1.miniters == 1
+ assert t2.miniters == 500 # check that t2 was not adjusted
+
+ def test_imap(self):
+ """Test multiprocessing.Pool"""
+ try:
+ from multiprocessing import Pool
+ except ImportError:
+ raise unittest.SkipTest
+
+ pool = Pool()
+ res = list(tqdm(pool.imap(incr, range(100)), disable=True))
+ assert res[-1] == 100
+
+ # py2: locks won't propagate to incr_bar so may cause `AttributeError`
+ @retry_on_except(n=3 if sys.version_info < (3,) else 1,
+ check_cpu_time=False)
+ @patch_lock(thread=True)
+ def test_threadpool(self):
+ """Test concurrent.futures.ThreadPoolExecutor"""
try:
- res = list(tqdm(pool.map(incr_bar, range(100)), disable=True))
- except AttributeError:
- if sys.version_info < (3,):
- raise SkipTest
- else:
- raise
- assert sum(res) == sum(range(1, 101))
+ from concurrent.futures import ThreadPoolExecutor
+ except ImportError:
+ raise unittest.SkipTest
+
+ with ThreadPoolExecutor(8) as pool:
+ try:
+ res = list(tqdm(pool.map(incr_bar, range(100)), disable=True))
+ except AttributeError:
+ if sys.version_info < (3,):
+ raise unittest.SkipTest
+ else:
+ raise
+ assert sum(res) == sum(range(1, 101))
diff --git a/tqdm/tests/tests_tqdm.py b/tqdm/tests/tests_tqdm.py
index af2f2601..2eb8eef1 100644
--- a/tqdm/tests/tests_tqdm.py
+++ b/tqdm/tests/tests_tqdm.py
@@ -6,11 +6,9 @@
import csv
import re
import os
+import pytest
+import unittest
from functools import wraps
-from nose import with_setup
-from nose.plugins.skip import SkipTest
-from nose.tools import assert_raises
-from nose.tools import eq_
from contextlib import contextmanager
from warnings import catch_warnings, simplefilter

@@ -122,30 +120,6 @@ def cpu_timify(t, timer=None):
return timer


-def pretest():
- # setcheckinterval is deprecated
- try:
- sys.setswitchinterval(1)
- except AttributeError:
- sys.setcheckinterval(100)
-
- if getattr(tqdm, "_instances", False):
- n = len(tqdm._instances)
- if n:
- tqdm._instances.clear()
- raise EnvironmentError(
- "{0} `tqdm` instances still in existence PRE-test".format(n))
-
-
-def posttest():
- if getattr(tqdm, "_instances", False):
- n = len(tqdm._instances)
- if n:
- tqdm._instances.clear()
- raise EnvironmentError(
- "{0} `tqdm` instances still in existence POST-test".format(n))
-
-
class UnicodeIO(IOBase):
"""Unicode version of StringIO"""

@@ -232,1812 +206,1735 @@ def squash_ctrlchars(s):
return lines


-def test_format_interval():
- """Test time interval format"""
- format_interval = tqdm.format_interval
-
- assert format_interval(60) == '01:00'
- assert format_interval(6160) == '1:42:40'
- assert format_interval(238113) == '66:08:33'
+def _rlock_creation_target():
+ """Check that the RLock has not been constructed."""
+ from unittest.mock import patch
+ import multiprocessing as mp

+ # Patch the RLock class/method but use the original implementation
+ with patch('multiprocessing.RLock', wraps=mp.RLock) as rlock_mock:
+ # Importing the module should not create a lock
+ from tqdm import tqdm
+ assert rlock_mock.call_count == 0
+ # Creating a progress bar should initialize the lock
+ with closing(StringIO()) as our_file:
+ with tqdm(file=our_file) as _: # NOQA
+ pass
+ assert rlock_mock.call_count == 1
+ # Creating a progress bar again should reuse the lock
+ with closing(StringIO()) as our_file:
+ with tqdm(file=our_file) as _: # NOQA
+ pass
+ assert rlock_mock.call_count == 1

-def test_format_num():
- """Test number format"""
- format_num = tqdm.format_num

- assert float(format_num(1337)) == 1337
- assert format_num(int(1e6)) == '1e+6'
- assert format_num(1239876) == '1''239''876'
+@contextmanager
+def std_out_err_redirect_tqdm(tqdm_file=sys.stderr):
+ orig_out_err = sys.stdout, sys.stderr
+ try:
+ sys.stdout = sys.stderr = DummyTqdmFile(tqdm_file)
+ yield orig_out_err[0]
+ # Relay exceptions
+ except Exception as exc:
+ raise exc
+ # Always restore sys.stdout/err if necessary
+ finally:
+ sys.stdout, sys.stderr = orig_out_err


-def test_format_meter():
- """Test statistics and progress bar formatting"""
+def patch_lock(thread=True):
+ """decorator replacing tqdm's lock with vanilla threading/multiprocessing"""
try:
- unich = unichr
- except NameError:
- unich = chr
-
- format_meter = tqdm.format_meter
-
- assert format_meter(0, 1000, 13) == \
- " 0%| | 0/1000 [00:13<?, ?it/s]"
- # If not implementing any changes to _tqdm.py, set prefix='desc'
- # or else ": : " will be in output, so assertion should change
- assert format_meter(0, 1000, 13, ncols=68, prefix='desc: ') == \
- "desc: 0%| | 0/1000 [00:13<?, ?it/s]"
- assert format_meter(231, 1000, 392) == \
- " 23%|" + unich(0x2588) * 2 + unich(0x258e) + \
- " | 231/1000 [06:32<21:44, 1.70s/it]"
- assert format_meter(10000, 1000, 13) == \
- "10000it [00:13, 769.23it/s]"
- assert format_meter(231, 1000, 392, ncols=56, ascii=True) == \
- " 23%|" + '#' * 3 + '6' + \
- " | 231/1000 [06:32<21:44, 1.70s/it]"
- assert format_meter(100000, 1000, 13, unit_scale=True, unit='iB') == \
- "100kiB [00:13, 7.69kiB/s]"
- assert format_meter(100, 1000, 12, ncols=0, rate=7.33) == \
- " 10% 100/1000 [00:12<02:02, 7.33it/s]"
- eq_(
+ if thread:
+ from threading import RLock
+ else:
+ from multiprocessing import RLock
+ lock = RLock()
+ except (ImportError, OSError):
+ raise unittest.SkipTest
+
+ def outer(func):
+ """actual decorator"""
+ @wraps(func)
+ def inner(*args, **kwargs):
+ """set & reset lock even if exceptions occur"""
+ default_lock = tqdm.get_lock()
+ try:
+ tqdm.set_lock(lock)
+ return func(*args, **kwargs)
+ finally:
+ tqdm.set_lock(default_lock)
+ return inner
+ return outer
+
+
+class WriteTypeChecker(BytesIO):
+ """File-like to assert the expected type is written"""
+ def __init__(self, expected_type):
+ super(WriteTypeChecker, self).__init__()
+ self.expected_type = expected_type
+
+ def write(self, s):
+ assert isinstance(s, self.expected_type)
+
+
+class TestWithInstancesCheck(unittest.TestCase):
+ def setUp(self):
+ # setcheckinterval is deprecated
+ try:
+ sys.setswitchinterval(1)
+ except AttributeError:
+ sys.setcheckinterval(100)
+
+ if getattr(tqdm, "_instances", False):
+ n = len(tqdm._instances)
+ if n:
+ tqdm._instances.clear()
+ raise EnvironmentError(
+ "{0} `tqdm` instances still in existence PRE-test"
+ .format(n))
+
+ def tearDown(self):
+ if getattr(tqdm, "_instances", False):
+ n = len(tqdm._instances)
+ if n:
+ tqdm._instances.clear()
+ raise EnvironmentError(
+ "{0} `tqdm` instances still in existence POST-test"
+ .format(n))
+
+
+class TestTqdmWithoutSetUp(unittest.TestCase):
+ def test_format_interval(self):
+ """Test time interval format"""
+ format_interval = tqdm.format_interval
+
+ assert format_interval(60) == '01:00'
+ assert format_interval(6160) == '1:42:40'
+ assert format_interval(238113) == '66:08:33'
+
+ def test_format_num(self):
+ """Test number format"""
+ format_num = tqdm.format_num
+
+ assert float(format_num(1337)) == 1337
+ assert format_num(int(1e6)) == '1e+6'
+ assert format_num(1239876) == '1''239''876'
+
+ def test_format_meter(self):
+ """Test statistics and progress bar formatting"""
+ try:
+ unich = unichr
+ except NameError:
+ unich = chr
+
+ format_meter = tqdm.format_meter
+
+ assert format_meter(0, 1000, 13) == \
+ " 0%| | 0/1000 [00:13<?, ?it/s]"
+ # If not implementing any changes to _tqdm.py, set prefix='desc'
+ # or else ": : " will be in output, so assertion should change
+ assert format_meter(0, 1000, 13, ncols=68, prefix='desc: ') == \
+ "desc: 0%|" \
+ " | 0/1000 [00:13<?, ?it/s]"
+ assert format_meter(231, 1000, 392) == \
+ " 23%|" + unich(0x2588) * 2 + unich(0x258e) + \
+ " | 231/1000 [06:32<21:44, 1.70s/it]"
+ assert format_meter(10000, 1000, 13) == \
+ "10000it [00:13, 769.23it/s]"
+ assert format_meter(231, 1000, 392, ncols=56, ascii=True) == \
+ " 23%|" + '#' * 3 + '6' + \
+ " | 231/1000 [06:32<21:44, 1.70s/it]"
+ assert format_meter(100000, 1000, 13, unit_scale=True, unit='iB') == \
+ "100kiB [00:13, 7.69kiB/s]"
+ assert format_meter(100, 1000, 12, ncols=0, rate=7.33) == \
+ " 10% 100/1000 [00:12<02:02, 7.33it/s]"
# ncols is small, l_bar is too large
# l_bar gets chopped
# no bar
# no r_bar
- format_meter(
- 0, 1000, 13, ncols=10,
- bar_format="************{bar:10}$$$$$$$$$$"),
- "**********" # 10/12 stars since ncols is 10
- )
- eq_(
+ assert \
+ format_meter(
+ 0, 1000, 13, ncols=10,
+ bar_format="************{bar:10}$$$$$$$$$$") == \
+ "**********" # 10/12 stars since ncols is 10
# n_cols allows for l_bar and some of bar
# l_bar displays
# bar gets chopped
# no r_bar
- format_meter(
- 0, 1000, 13, ncols=20,
- bar_format="************{bar:10}$$$$$$$$$$"),
- "************ " # all 12 stars and 8/10 bar parts
- )
- eq_(
+ assert \
+ format_meter(
+ 0, 1000, 13, ncols=20,
+ bar_format="************{bar:10}$$$$$$$$$$") == \
+ "************ " # all 12 stars and 8/10 bar parts
# n_cols allows for l_bar, bar, and some of r_bar
# l_bar displays
# bar displays
# r_bar gets chopped
- format_meter(
- 0, 1000, 13, ncols=30,
- bar_format="************{bar:10}$$$$$$$$$$"),
- "************ $$$$$$$$"
# all 12 stars and 10 bar parts, but only 8/10 dollar signs
- )
- eq_(
+ assert \
+ format_meter(
+ 0, 1000, 13, ncols=30,
+ bar_format="************{bar:10}$$$$$$$$$$") == \
+ "************ $$$$$$$$"
# trim left ANSI; escape is before trim zone
- format_meter(
- 0, 1000, 13, ncols=10,
- bar_format="*****\033[22m****\033[0m***{bar:10}$$$$$$$$$$"),
- "*****\033[22m****\033[0m*\033[0m"
# we only know it has ANSI codes, so we append an END code anyway
- )
- eq_(
+ assert \
+ format_meter(
+ 0, 1000, 13, ncols=10,
+ bar_format="*****\033[22m****\033[0m***{bar:10}$$$$$$$$$$") == \
+ "*****\033[22m****\033[0m*\033[0m"
# trim left ANSI; escape is at trim zone
- format_meter(
- 0, 1000, 13, ncols=10,
- bar_format="*****\033[22m*****\033[0m**{bar:10}$$$$$$$$$$"),
- "*****\033[22m*****\033[0m"
- )
- eq_(
+ assert \
+ format_meter(
+ 0, 1000, 13, ncols=10,
+ bar_format="*****\033[22m*****\033[0m**{bar:10}$$$$$$$$$$") == \
+ "*****\033[22m*****\033[0m"
# trim left ANSI; escape is after trim zone
- format_meter(
- 0, 1000, 13, ncols=10,
- bar_format="*****\033[22m******\033[0m*{bar:10}$$$$$$$$$$"),
- "*****\033[22m*****\033[0m"
- )
- # Check that bar_format correctly adapts {bar} size to the rest
- assert format_meter(20, 100, 12, ncols=13, rate=8.1,
- bar_format=r'{l_bar}{bar}|{n_fmt}/{total_fmt}') == \
- " 20%|" + unich(0x258f) + "|20/100"
- assert format_meter(20, 100, 12, ncols=14, rate=8.1,
- bar_format=r'{l_bar}{bar}|{n_fmt}/{total_fmt}') == \
- " 20%|" + unich(0x258d) + " |20/100"
- # Check wide characters
- if sys.version_info >= (3,):
- assert format_meter(0, 1000, 13, ncols=68, prefix='fullwidth: ') == \
- "fullwidth: 0%| | 0/1000 [00:13<?, ?it/s]"
- assert format_meter(0, 1000, 13, ncols=68, prefix='ニッポン [ニッポン]: ') == \
- "ニッポン [ニッポン]: 0%| | 0/1000 [00:13<?, ?it/s]"
- # Check that bar_format can print only {bar} or just one side
- assert format_meter(20, 100, 12, ncols=2, rate=8.1,
- bar_format=r'{bar}') == \
- unich(0x258d) + " "
- assert format_meter(20, 100, 12, ncols=7, rate=8.1,
- bar_format=r'{l_bar}{bar}') == \
- " 20%|" + unich(0x258d) + " "
- assert format_meter(20, 100, 12, ncols=6, rate=8.1,
- bar_format=r'{bar}|test') == \
- unich(0x258f) + "|test"
-
-
-def test_ansi_escape_codes():
- """Test stripping of ANSI escape codes"""
- ansi = dict(BOLD='\033[1m', RED='\033[91m', END='\033[0m')
- desc_raw = '{BOLD}{RED}Colored{END} description'
- ncols = 123
-
- desc_stripped = desc_raw.format(BOLD='', RED='', END='')
- meter = tqdm.format_meter(0, 100, 0, ncols=ncols, prefix=desc_stripped)
- assert len(meter) == ncols
-
- desc = desc_raw.format(**ansi)
- meter = tqdm.format_meter(0, 100, 0, ncols=ncols, prefix=desc)
- # `format_meter` inserts an extra END for safety
- ansi_len = len(desc) - len(desc_stripped) + len(ansi['END'])
- assert len(meter) == ncols + ansi_len
-
-
-def test_si_format():
- """Test SI unit prefixes"""
- format_meter = tqdm.format_meter
-
- assert '9.00 ' in format_meter(1, 9, 1, unit_scale=True, unit='B')
- assert '99.0 ' in format_meter(1, 99, 1, unit_scale=True)
- assert '999 ' in format_meter(1, 999, 1, unit_scale=True)
- assert '9.99k ' in format_meter(1, 9994, 1, unit_scale=True)
- assert '10.0k ' in format_meter(1, 9999, 1, unit_scale=True)
- assert '99.5k ' in format_meter(1, 99499, 1, unit_scale=True)
- assert '100k ' in format_meter(1, 99999, 1, unit_scale=True)
- assert '1.00M ' in format_meter(1, 999999, 1, unit_scale=True)
- assert '1.00G ' in format_meter(1, 999999999, 1, unit_scale=True)
- assert '1.00T ' in format_meter(1, 999999999999, 1, unit_scale=True)
- assert '1.00P ' in format_meter(1, 999999999999999, 1, unit_scale=True)
- assert '1.00E ' in format_meter(1, 999999999999999999, 1, unit_scale=True)
- assert '1.00Z ' in format_meter(1, 999999999999999999999, 1,
- unit_scale=True)
- assert '1.0Y ' in format_meter(1, 999999999999999999999999, 1,
- unit_scale=True)
- assert '10.0Y ' in format_meter(1, 9999999999999999999999999, 1,
- unit_scale=True)
- assert '100.0Y ' in format_meter(1, 99999999999999999999999999, 1,
- unit_scale=True)
- assert '1000.0Y ' in format_meter(1, 999999999999999999999999999, 1,
- unit_scale=True)
-
-
-def test_bar_formatspec():
- """Test Bar.__format__ spec"""
- assert "{0:5a}".format(Bar(0.3)) == "#5 "
- assert "{0:2}".format(Bar(0.5, charset=" .oO0")) == "0 "
- assert "{0:2a}".format(Bar(0.5, charset=" .oO0")) == "# "
- assert "{0:-6a}".format(Bar(0.5, 10)) == '## '
- assert "{0:2b}".format(Bar(0.5, 10)) == ' '
-
-
-@with_setup(pretest, posttest)
-def test_all_defaults():
- """Test default kwargs"""
- with closing(UnicodeIO()) as our_file:
- with tqdm(range(10), file=our_file) as progressbar:
- assert len(progressbar) == 10
- for _ in progressbar:
+ assert \
+ format_meter(
+ 0, 1000, 13, ncols=10,
+ bar_format="*****\033[22m******\033[0m*{bar:10}$$$$$$$$$$") == \
+ "*****\033[22m*****\033[0m"
+ # Check that bar_format correctly adapts {bar} size to the rest
+ assert format_meter(20, 100, 12, ncols=13, rate=8.1,
+ bar_format=r'{l_bar}{bar}|{n_fmt}/{total_fmt}') == \
+ " 20%|" + unich(0x258f) + "|20/100"
+ assert format_meter(20, 100, 12, ncols=14, rate=8.1,
+ bar_format=r'{l_bar}{bar}|{n_fmt}/{total_fmt}') == \
+ " 20%|" + unich(0x258d) + " |20/100"
+ # Check wide characters
+# if sys.version_info >= (3,):
+# assert format_meter(0, 1000, 13, ncols=68,
+# prefix='fullwidth: ') == \
+# "fullwidth: 0%|" \
+# " | 0/1000 [00:13<?, ?it/s]"
+# assert format_meter(0, 1000, 13, ncols=68,
+# prefix='ニッポン [ニッポン]: ') == \
+# "ニッポン [ニッポン]: 0%|" \
+# " | 0/1000 [00:13<?, ?it/s]"
+ # Check that bar_format can print only {bar} or just one side
+ assert format_meter(20, 100, 12, ncols=2, rate=8.1,
+ bar_format=r'{bar}') == \
+ unich(0x258d) + " "
+ assert format_meter(20, 100, 12, ncols=7, rate=8.1,
+ bar_format=r'{l_bar}{bar}') == \
+ " 20%|" + unich(0x258d) + " "
+ assert format_meter(20, 100, 12, ncols=6, rate=8.1,
+ bar_format=r'{bar}|test') == \
+ unich(0x258f) + "|test"
+
+ def test_ansi_escape_codes(self):
+ """Test stripping of ANSI escape codes"""
+ ansi = dict(BOLD='\033[1m', RED='\033[91m', END='\033[0m')
+ desc_raw = '{BOLD}{RED}Colored{END} description'
+ ncols = 123
+
+ desc_stripped = desc_raw.format(BOLD='', RED='', END='')
+ meter = tqdm.format_meter(0, 100, 0, ncols=ncols, prefix=desc_stripped)
+ assert len(meter) == ncols
+
+ desc = desc_raw.format(**ansi)
+ meter = tqdm.format_meter(0, 100, 0, ncols=ncols, prefix=desc)
+ # `format_meter` inserts an extra END for safety
+ ansi_len = len(desc) - len(desc_stripped) + len(ansi['END'])
+ assert len(meter) == ncols + ansi_len
+
+ def test_si_format(self):
+ """Test SI unit prefixes"""
+ format_meter = tqdm.format_meter
+
+ assert '9.00 ' in format_meter(1, 9, 1, unit_scale=True, unit='B')
+ assert '99.0 ' in format_meter(1, 99, 1, unit_scale=True)
+ assert '999 ' in format_meter(1, 999, 1, unit_scale=True)
+ assert '9.99k ' in format_meter(1, 9994, 1, unit_scale=True)
+ assert '10.0k ' in format_meter(1, 9999, 1, unit_scale=True)
+ assert '99.5k ' in format_meter(1, 99499, 1, unit_scale=True)
+ assert '100k ' in format_meter(1, 99999, 1, unit_scale=True)
+ assert '1.00M ' in format_meter(1, 999999, 1, unit_scale=True)
+ assert '1.00G ' in format_meter(1, 999999999, 1, unit_scale=True)
+ assert '1.00T ' in format_meter(1, 999999999999, 1, unit_scale=True)
+ assert '1.00P ' in format_meter(1, 999999999999999, 1, unit_scale=True)
+ assert '1.00E ' in format_meter(1, 999999999999999999, 1,
+ unit_scale=True)
+ assert '1.00Z ' in format_meter(1, 999999999999999999999, 1,
+ unit_scale=True)
+ assert '1.0Y ' in format_meter(1, 999999999999999999999999, 1,
+ unit_scale=True)
+ assert '10.0Y ' in format_meter(1, 9999999999999999999999999, 1,
+ unit_scale=True)
+ assert '100.0Y ' in format_meter(1, 99999999999999999999999999, 1,
+ unit_scale=True)
+ assert '1000.0Y ' in format_meter(1, 999999999999999999999999999, 1,
+ unit_scale=True)
+
+ def test_bar_formatspec(self):
+ """Test Bar.__format__ spec"""
+ assert "{0:5a}".format(Bar(0.3)) == "#5 "
+ assert "{0:2}".format(Bar(0.5, charset=" .oO0")) == "0 "
+ assert "{0:2a}".format(Bar(0.5, charset=" .oO0")) == "# "
+ assert "{0:-6a}".format(Bar(0.5, 10)) == '## '
+ assert "{0:2b}".format(Bar(0.5, 10)) == ' '
+
+ def test_postfix_direct(self):
+ """Test directly assigning non-str objects to postfix"""
+ with closing(StringIO()) as our_file:
+ with tqdm(total=10, file=our_file, miniters=1, mininterval=0,
+ bar_format="{postfix[0][name]} {postfix[1]:>5.2f}",
+ postfix=[dict(name="foo"), 42]) as t:
+ for i in range(10):
+ if i % 2:
+ t.postfix[0]["name"] = "abcdefghij"[i]
+ else:
+ t.postfix[1] = i
+ t.update()
+ res = our_file.getvalue()
+ assert "f 6.00" in res
+ assert "h 6.00" in res
+ assert "h 8.00" in res
+ assert "j 8.00" in res
+
+class TestTqdmWithSetUp(TestWithInstancesCheck):
+ def test_all_defaults(self):
+ """Test default kwargs"""
+ with closing(UnicodeIO()) as our_file:
+ with tqdm(range(10), file=our_file) as progressbar:
+ assert len(progressbar) == 10
+ for _ in progressbar:
+ pass
+ # restore stdout/stderr output for `nosetest` interface
+ # try:
+ # sys.stderr.write('\x1b[A')
+ # except:
+ # pass
+ sys.stderr.write('\rTest default kwargs ... ')
+
+ def test_native_string_io_for_default_file(self):
+ """Native strings written to unspecified files"""
+ stderr = sys.stderr
+ try:
+ sys.stderr = WriteTypeChecker(expected_type=type(''))
+ for _ in tqdm(range(3)):
pass
- # restore stdout/stderr output for `nosetest` interface
- # try:
- # sys.stderr.write('\x1b[A')
- # except:
- # pass
- sys.stderr.write('\rTest default kwargs ... ')
-
-
-class WriteTypeChecker(BytesIO):
- """File-like to assert the expected type is written"""
- def __init__(self, expected_type):
- super(WriteTypeChecker, self).__init__()
- self.expected_type = expected_type
-
- def write(self, s):
- assert isinstance(s, self.expected_type)
-
+ sys.stderr.encoding = None # py2 behaviour
+ for _ in tqdm(range(3)):
+ pass
+ finally:
+ sys.stderr = stderr

-@with_setup(pretest, posttest)
-def test_native_string_io_for_default_file():
- """Native strings written to unspecified files"""
- stderr = sys.stderr
- try:
- sys.stderr = WriteTypeChecker(expected_type=type(''))
- for _ in tqdm(range(3)):
- pass
- sys.stderr.encoding = None # py2 behaviour
- for _ in tqdm(range(3)):
+ def test_unicode_string_io_for_specified_file(self):
+ """Unicode strings written to specified files"""
+ for _ in tqdm(range(3), file=WriteTypeChecker(expected_type=type(u''))):
pass
- finally:
- sys.stderr = stderr
-

-@with_setup(pretest, posttest)
-def test_unicode_string_io_for_specified_file():
- """Unicode strings written to specified files"""
- for _ in tqdm(range(3), file=WriteTypeChecker(expected_type=type(u''))):
- pass
-
-
-@with_setup(pretest, posttest)
-def test_write_bytes():
- """Test write_bytes argument with and without `file`"""
- # specified file (and bytes)
- for _ in tqdm(range(3), file=WriteTypeChecker(expected_type=type(b'')),
- write_bytes=True):
- pass
- # unspecified file (and unicode)
- stderr = sys.stderr
- try:
- sys.stderr = WriteTypeChecker(expected_type=type(u''))
- for _ in tqdm(range(3), write_bytes=False):
+ def test_write_bytes(self):
+ """Test write_bytes argument with and without `file`"""
+ # specified file (and bytes)
+ for _ in tqdm(range(3), file=WriteTypeChecker(expected_type=type(b'')),
+ write_bytes=True):
pass
- finally:
- sys.stderr = stderr
-
+ # unspecified file (and unicode)
+ stderr = sys.stderr
+ try:
+ sys.stderr = WriteTypeChecker(expected_type=type(u''))
+ for _ in tqdm(range(3), write_bytes=False):
+ pass
+ finally:
+ sys.stderr = stderr

-@with_setup(pretest, posttest)
-def test_iterate_over_csv_rows():
- """Test csv iterator"""
- # Create a test csv pseudo file
- with closing(StringIO()) as test_csv_file:
- writer = csv.writer(test_csv_file)
- for _ in _range(3):
- writer.writerow(['test'] * 3)
- test_csv_file.seek(0)
+ def test_iterate_over_csv_rows(self):
+ """Test csv iterator"""
+ # Create a test csv pseudo file
+ with closing(StringIO()) as test_csv_file:
+ writer = csv.writer(test_csv_file)
+ for _ in _range(3):
+ writer.writerow(['test'] * 3)
+ test_csv_file.seek(0)
+
+ # Test that nothing fails if we iterate over rows
+ reader = csv.DictReader(test_csv_file,
+ fieldnames=('row1', 'row2', 'row3'))
+ with closing(StringIO()) as our_file:
+ for _ in tqdm(reader, file=our_file):
+ pass
+
+ def test_file_output(self):
+ """Test output to arbitrary file-like objects"""
+ with closing(StringIO()) as our_file:
+ for i in tqdm(_range(3), file=our_file):
+ if i == 1:
+ our_file.seek(0)
+ assert '0/3' in our_file.read()

- # Test that nothing fails if we iterate over rows
- reader = csv.DictReader(test_csv_file,
- fieldnames=('row1', 'row2', 'row3'))
+ def test_leave_option(self):
+ """Test `leave=True` always prints info about the last iteration"""
with closing(StringIO()) as our_file:
- for _ in tqdm(reader, file=our_file):
+ for _ in tqdm(_range(3), file=our_file, leave=True):
pass
+ res = our_file.getvalue()
+ assert '| 3/3 ' in res
+ assert '\n' == res[-1] # not '\r'

-
-@with_setup(pretest, posttest)
-def test_file_output():
- """Test output to arbitrary file-like objects"""
- with closing(StringIO()) as our_file:
- for i in tqdm(_range(3), file=our_file):
- if i == 1:
- our_file.seek(0)
- assert '0/3' in our_file.read()
-
-
-@with_setup(pretest, posttest)
-def test_leave_option():
- """Test `leave=True` always prints info about the last iteration"""
- with closing(StringIO()) as our_file:
- for _ in tqdm(_range(3), file=our_file, leave=True):
- pass
- res = our_file.getvalue()
- assert '| 3/3 ' in res
- assert '\n' == res[-1] # not '\r'
-
- with closing(StringIO()) as our_file2:
- for _ in tqdm(_range(3), file=our_file2, leave=False):
- pass
- assert '| 3/3 ' not in our_file2.getvalue()
-
-
-@with_setup(pretest, posttest)
-def test_trange():
- """Test trange"""
- with closing(StringIO()) as our_file:
- for _ in trange(3, file=our_file, leave=True):
- pass
- assert '| 3/3 ' in our_file.getvalue()
-
- with closing(StringIO()) as our_file2:
- for _ in trange(3, file=our_file2, leave=False):
- pass
- assert '| 3/3 ' not in our_file2.getvalue()
-
-
-@with_setup(pretest, posttest)
-def test_min_interval():
- """Test mininterval"""
- with closing(StringIO()) as our_file:
- for _ in tqdm(_range(3), file=our_file, mininterval=1e-10):
- pass
- assert " 0%| | 0/3 [00:00<" in our_file.getvalue()
-
-
-@with_setup(pretest, posttest)
-def test_max_interval():
- """Test maxinterval"""
- total = 100
- bigstep = 10
- smallstep = 5
-
- # Test without maxinterval
- timer = DiscreteTimer()
- with closing(StringIO()) as our_file:
with closing(StringIO()) as our_file2:
- # with maxinterval but higher than loop sleep time
- t = tqdm(total=total, file=our_file, miniters=None, mininterval=0,
- smoothing=1, maxinterval=1e-2)
- cpu_timify(t, timer)
+ for _ in tqdm(_range(3), file=our_file2, leave=False):
+ pass
+ assert '| 3/3 ' not in our_file2.getvalue()

- # without maxinterval
- t2 = tqdm(total=total, file=our_file2, miniters=None, mininterval=0,
- smoothing=1, maxinterval=None)
- cpu_timify(t2, timer)
+ def test_trange(self):
+ """Test trange"""
+ with closing(StringIO()) as our_file:
+ for _ in trange(3, file=our_file, leave=True):
+ pass
+ assert '| 3/3 ' in our_file.getvalue()

- assert t.dynamic_miniters
- assert t2.dynamic_miniters
-
- # Increase 10 iterations at once
- t.update(bigstep)
- t2.update(bigstep)
- # The next iterations should not trigger maxinterval (step 10)
- for _ in _range(4):
- t.update(smallstep)
- t2.update(smallstep)
- timer.sleep(1e-5)
- t.close() # because PyPy doesn't gc immediately
- t2.close() # as above
-
- assert "25%" not in our_file2.getvalue()
- assert "25%" not in our_file.getvalue()
-
- # Test with maxinterval effect
- timer = DiscreteTimer()
- with closing(StringIO()) as our_file:
- with tqdm(total=total, file=our_file, miniters=None, mininterval=0,
- smoothing=1, maxinterval=1e-4) as t:
- cpu_timify(t, timer)
+ with closing(StringIO()) as our_file2:
+ for _ in trange(3, file=our_file2, leave=False):
+ pass
+ assert '| 3/3 ' not in our_file2.getvalue()

- # Increase 10 iterations at once
- t.update(bigstep)
- # The next iterations should trigger maxinterval (step 5)
- for _ in _range(4):
- t.update(smallstep)
- timer.sleep(1e-2)
+ def test_min_interval(self):
+ """Test mininterval"""
+ with closing(StringIO()) as our_file:
+ for _ in tqdm(_range(3), file=our_file, mininterval=1e-10):
+ pass
+ assert " 0%| | 0/3 [00:00<" in our_file.getvalue()

- assert "25%" in our_file.getvalue()
+ def test_max_interval(self):
|
||
+ """Test maxinterval"""
|
||
+ total = 100
|
||
+ bigstep = 10
|
||
+ smallstep = 5
|
||
|
||
- # Test iteration based tqdm with maxinterval effect
|
||
- timer = DiscreteTimer()
|
||
- with closing(StringIO()) as our_file:
|
||
- with tqdm(_range(total), file=our_file, miniters=None,
|
||
- mininterval=1e-5, smoothing=1, maxinterval=1e-4) as t2:
|
||
- cpu_timify(t2, timer)
|
||
+ # Test without maxinterval
|
||
+ timer = DiscreteTimer()
|
||
+ with closing(StringIO()) as our_file:
|
||
+ with closing(StringIO()) as our_file2:
|
||
+ # with maxinterval but higher than loop sleep time
|
||
+ t = tqdm(total=total, file=our_file, miniters=None,
|
||
+ mininterval=0, smoothing=1, maxinterval=1e-2)
|
||
+ cpu_timify(t, timer)
|
||
+
|
||
+ # without maxinterval
|
||
+ t2 = tqdm(total=total, file=our_file2, miniters=None,
|
||
+ mininterval=0, smoothing=1, maxinterval=None)
|
||
+ cpu_timify(t2, timer)
|
||
|
||
- for i in t2:
|
||
- if i >= (bigstep - 1) and \
|
||
- ((i - (bigstep - 1)) % smallstep) == 0:
|
||
- timer.sleep(1e-2)
|
||
- if i >= 3 * bigstep:
|
||
- break
|
||
-
|
||
- assert "15%" in our_file.getvalue()
|
||
-
|
||
- # Test different behavior with and without mininterval
|
||
- timer = DiscreteTimer()
|
||
- total = 1000
|
||
- mininterval = 0.1
|
||
- maxinterval = 10
|
||
- with closing(StringIO()) as our_file:
|
||
- with tqdm(total=total, file=our_file, miniters=None, smoothing=1,
|
||
- mininterval=mininterval, maxinterval=maxinterval) as tm1:
|
||
- with tqdm(total=total, file=our_file, miniters=None, smoothing=1,
|
||
- mininterval=0, maxinterval=maxinterval) as tm2:
|
||
-
|
||
- cpu_timify(tm1, timer)
|
||
- cpu_timify(tm2, timer)
|
||
-
|
||
- # Fast iterations, check if dynamic_miniters triggers
|
||
- timer.sleep(mininterval) # to force update for t1
|
||
- tm1.update(total / 2)
|
||
- tm2.update(total / 2)
|
||
- assert int(tm1.miniters) == tm2.miniters == total / 2
|
||
-
|
||
- # Slow iterations, check different miniters if mininterval
|
||
- timer.sleep(maxinterval * 2)
|
||
- tm1.update(total / 2)
|
||
- tm2.update(total / 2)
|
||
- res = [tm1.miniters, tm2.miniters]
|
||
- assert res == [(total / 2) * mininterval / (maxinterval * 2),
|
||
- (total / 2) * maxinterval / (maxinterval * 2)]
|
||
-
|
||
- # Same with iterable based tqdm
|
||
- timer1 = DiscreteTimer() # need 2 timers for each bar because zip not work
|
||
- timer2 = DiscreteTimer()
|
||
- total = 100
|
||
- mininterval = 0.1
|
||
- maxinterval = 10
|
||
- with closing(StringIO()) as our_file:
|
||
- t1 = tqdm(_range(total), file=our_file, miniters=None, smoothing=1,
|
||
- mininterval=mininterval, maxinterval=maxinterval)
|
||
- t2 = tqdm(_range(total), file=our_file, miniters=None, smoothing=1,
|
||
- mininterval=0, maxinterval=maxinterval)
|
||
-
|
||
- cpu_timify(t1, timer1)
|
||
- cpu_timify(t2, timer2)
|
||
-
|
||
- for i in t1:
|
||
- if i == ((total / 2) - 2):
|
||
- timer1.sleep(mininterval)
|
||
- if i == (total - 1):
|
||
- timer1.sleep(maxinterval * 2)
|
||
-
|
||
- for i in t2:
|
||
- if i == ((total / 2) - 2):
|
||
- timer2.sleep(mininterval)
|
||
- if i == (total - 1):
|
||
- timer2.sleep(maxinterval * 2)
|
||
-
|
||
- assert t1.miniters == 0.255
|
||
- assert t2.miniters == 0.5
|
||
+ assert t.dynamic_miniters
+ assert t2.dynamic_miniters

- t1.close()
- t2.close()
+ # Increase 10 iterations at once
+ t.update(bigstep)
+ t2.update(bigstep)
+ # The next iterations should not trigger maxinterval (step 10)
+ for _ in _range(4):
+ t.update(smallstep)
+ t2.update(smallstep)
+ timer.sleep(1e-5)
+ t.close() # because PyPy doesn't gc immediately
+ t2.close() # as above

+ assert "25%" not in our_file2.getvalue()
+ assert "25%" not in our_file.getvalue()

-@with_setup(pretest, posttest)
-def test_min_iters():
- """Test miniters"""
- with closing(StringIO()) as our_file:
- for _ in tqdm(_range(3), file=our_file, leave=True, mininterval=0,
- miniters=2):
- pass
+ # Test with maxinterval effect
+ timer = DiscreteTimer()
+ with closing(StringIO()) as our_file:
+ with tqdm(total=total, file=our_file, miniters=None, mininterval=0,
+ smoothing=1, maxinterval=1e-4) as t:
+ cpu_timify(t, timer)
+
+ # Increase 10 iterations at once
+ t.update(bigstep)
+ # The next iterations should trigger maxinterval (step 5)
+ for _ in _range(4):
+ t.update(smallstep)
+ timer.sleep(1e-2)

- out = our_file.getvalue()
- assert '| 0/3 ' in out
- assert '| 1/3 ' not in out
- assert '| 2/3 ' in out
- assert '| 3/3 ' in out
-
- with closing(StringIO()) as our_file:
- for _ in tqdm(_range(3), file=our_file, leave=True, mininterval=0,
- miniters=1):
- pass
+ assert "25%" in our_file.getvalue()

- out = our_file.getvalue()
- assert '| 0/3 ' in out
- assert '| 1/3 ' in out
- assert '| 2/3 ' in out
- assert '| 3/3 ' in out
+ # Test iteration based tqdm with maxinterval effect
+ timer = DiscreteTimer()
+ with closing(StringIO()) as our_file:
+ with tqdm(_range(total), file=our_file, miniters=None,
+ mininterval=1e-5, smoothing=1, maxinterval=1e-4) as t2:
+ cpu_timify(t2, timer)

+ for i in t2:
+ if i >= (bigstep - 1) and \
+ ((i - (bigstep - 1)) % smallstep) == 0:
+ timer.sleep(1e-2)
+ if i >= 3 * bigstep:
+ break

-@with_setup(pretest, posttest)
-def test_dynamic_min_iters():
- """Test purely dynamic miniters (and manual updates and __del__)"""
- with closing(StringIO()) as our_file:
- total = 10
- t = tqdm(total=total, file=our_file, miniters=None, mininterval=0,
- smoothing=1)
+ assert "15%" in our_file.getvalue()

- t.update()
- # Increase 3 iterations
- t.update(3)
- # The next two iterations should be skipped because of dynamic_miniters
- t.update()
- t.update()
- # The third iteration should be displayed
- t.update()
+ # Test different behavior with and without mininterval
+ timer = DiscreteTimer()
+ total = 1000
+ mininterval = 0.1
+ maxinterval = 10
+ with closing(StringIO()) as our_file:
+ with tqdm(total=total, file=our_file, miniters=None, smoothing=1,
+ mininterval=mininterval, maxinterval=maxinterval) as tm1:
+ with tqdm(total=total, file=our_file,
+ miniters=None, smoothing=1,
+ mininterval=0, maxinterval=maxinterval) as tm2:
+
+ cpu_timify(tm1, timer)
+ cpu_timify(tm2, timer)
+
+ # Fast iterations, check if dynamic_miniters triggers
+ timer.sleep(mininterval) # to force update for t1
+ tm1.update(total / 2)
+ tm2.update(total / 2)
+ assert int(tm1.miniters) == tm2.miniters == total / 2
+
+ # Slow iterations, check different miniters if mininterval
+ timer.sleep(maxinterval * 2)
+ tm1.update(total / 2)
+ tm2.update(total / 2)
+ res = [tm1.miniters, tm2.miniters]
+ assert res == \
+ [(total / 2) * mininterval / (maxinterval * 2),
+ (total / 2) * maxinterval / (maxinterval * 2)]
+
+ # Same with iterable based tqdm
+ # need 2 timers for each bar because zip not work
+ timer1 = DiscreteTimer()
+ timer2 = DiscreteTimer()
+ total = 100
+ mininterval = 0.1
+ maxinterval = 10
+ with closing(StringIO()) as our_file:
+ t1 = tqdm(_range(total), file=our_file, miniters=None, smoothing=1,
+ mininterval=mininterval, maxinterval=maxinterval)
+ t2 = tqdm(_range(total), file=our_file, miniters=None, smoothing=1,
+ mininterval=0, maxinterval=maxinterval)

- out = our_file.getvalue()
- assert t.dynamic_miniters
- t.__del__() # simulate immediate del gc
+ cpu_timify(t1, timer1)
+ cpu_timify(t2, timer2)

- assert ' 0%| | 0/10 [00:00<' in out
- assert '40%' in out
- assert '50%' not in out
- assert '60%' not in out
- assert '70%' in out
+ for i in t1:
+ if i == ((total / 2) - 2):
+ timer1.sleep(mininterval)
+ if i == (total - 1):
+ timer1.sleep(maxinterval * 2)

- # Check with smoothing=0, miniters should be set to max update seen so far
- with closing(StringIO()) as our_file:
- total = 10
- t = tqdm(total=total, file=our_file, miniters=None, mininterval=0,
- smoothing=0)
+ for i in t2:
+ if i == ((total / 2) - 2):
+ timer2.sleep(mininterval)
+ if i == (total - 1):
+ timer2.sleep(maxinterval * 2)

- t.update()
- t.update(2)
- t.update(5) # this should be stored as miniters
- t.update(1)
+ assert t1.miniters == 0.255
+ assert t2.miniters == 0.5

- out = our_file.getvalue()
- assert all(i in out for i in ("0/10", "1/10", "3/10"))
- assert "2/10" not in out
- assert t.dynamic_miniters and not t.smoothing
- assert t.miniters == 5
- t.close()
+ t1.close()
+ t2.close()

- # Check iterable based tqdm
- with closing(StringIO()) as our_file:
- t = tqdm(_range(10), file=our_file, miniters=None, mininterval=None,
- smoothing=0.5)
- for _ in t:
- pass
- assert t.dynamic_miniters
+ def test_min_iters(self):
+ """Test miniters"""
+ with closing(StringIO()) as our_file:
+ for _ in tqdm(_range(3), file=our_file, leave=True, mininterval=0,
+ miniters=2):
+ pass

- # No smoothing
- with closing(StringIO()) as our_file:
- t = tqdm(_range(10), file=our_file, miniters=None, mininterval=None,
- smoothing=0)
- for _ in t:
- pass
- assert t.dynamic_miniters
+ out = our_file.getvalue()
+ assert '| 0/3 ' in out
+ assert '| 1/3 ' not in out
+ assert '| 2/3 ' in out
+ assert '| 3/3 ' in out

- # No dynamic_miniters (miniters is fixed manually)
- with closing(StringIO()) as our_file:
- t = tqdm(_range(10), file=our_file, miniters=1, mininterval=None)
- for _ in t:
- pass
- assert not t.dynamic_miniters
+ with closing(StringIO()) as our_file:
+ for _ in tqdm(_range(3), file=our_file, leave=True, mininterval=0,
+ miniters=1):
+ pass

+ out = our_file.getvalue()
+ assert '| 0/3 ' in out
+ assert '| 1/3 ' in out
+ assert '| 2/3 ' in out
+ assert '| 3/3 ' in out

-@with_setup(pretest, posttest)
-def test_big_min_interval():
- """Test large mininterval"""
- with closing(StringIO()) as our_file:
- for _ in tqdm(_range(2), file=our_file, mininterval=1E10):
- pass
- assert '50%' not in our_file.getvalue()
+ def test_dynamic_min_iters(self):
+ """Test purely dynamic miniters (and manual updates and __del__)"""
+ with closing(StringIO()) as our_file:
+ total = 10
+ t = tqdm(total=total, file=our_file, miniters=None, mininterval=0,
+ smoothing=1)

- with closing(StringIO()) as our_file:
- with tqdm(_range(2), file=our_file, mininterval=1E10) as t:
+ t.update()
+ # Increase 3 iterations
+ t.update(3)
+ # The next two iterations should be skipped because of
+ # dynamic_miniters
t.update()
t.update()
- assert '50%' not in our_file.getvalue()
+ # The third iteration should be displayed
+ t.update()

+ out = our_file.getvalue()
+ assert t.dynamic_miniters
+ t.__del__() # simulate immediate del gc

-@with_setup(pretest, posttest)
|
||
-def test_smoothed_dynamic_min_iters():
|
||
- """Test smoothed dynamic miniters"""
|
||
- timer = DiscreteTimer()
|
||
+ assert ' 0%| | 0/10 [00:00<' in out
|
||
+ assert '40%' in out
|
||
+ assert '50%' not in out
|
||
+ assert '60%' not in out
|
||
+ assert '70%' in out
|
||
|
||
- with closing(StringIO()) as our_file:
|
||
- with tqdm(total=100, file=our_file, miniters=None, mininterval=0,
|
||
- smoothing=0.5, maxinterval=0) as t:
|
||
- cpu_timify(t, timer)
|
||
+ # Check with smoothing=0, miniters should be set to max update
|
||
+ # seen so far
|
||
+ with closing(StringIO()) as our_file:
|
||
+ total = 10
|
||
+ t = tqdm(total=total, file=our_file, miniters=None, mininterval=0,
|
||
+ smoothing=0)
|
||
|
||
- # Increase 10 iterations at once
|
||
- t.update(10)
|
||
- # The next iterations should be partially skipped
|
||
- for _ in _range(2):
|
||
- t.update(4)
|
||
- for _ in _range(20):
|
||
- t.update()
|
||
+ t.update()
|
||
+ t.update(2)
|
||
+ t.update(5) # this should be stored as miniters
|
||
+ t.update(1)
|
||
|
||
out = our_file.getvalue()
|
||
- assert t.dynamic_miniters
|
||
- assert ' 0%| | 0/100 [00:00<' in out
|
||
- assert '10%' in out
|
||
- assert '14%' not in out
|
||
- assert '18%' in out
|
||
- assert '20%' not in out
|
||
- assert '25%' in out
|
||
- assert '30%' not in out
|
||
- assert '32%' in out
|
||
-
|
||
-
|
||
-@with_setup(pretest, posttest)
|
||
-def test_smoothed_dynamic_min_iters_with_min_interval():
|
||
- """Test smoothed dynamic miniters with mininterval"""
|
||
- timer = DiscreteTimer()
|
||
-
|
||
- # In this test, `miniters` should gradually decline
|
||
- total = 100
|
||
-
|
||
- with closing(StringIO()) as our_file:
|
||
- # Test manual updating tqdm
|
||
- with tqdm(total=total, file=our_file, miniters=None, mininterval=1e-3,
|
||
- smoothing=1, maxinterval=0) as t:
|
||
- cpu_timify(t, timer)
|
||
+ assert all(i in out for i in ("0/10", "1/10", "3/10"))
|
||
+ assert "2/10" not in out
|
||
+ assert t.dynamic_miniters and not t.smoothing
|
||
+ assert t.miniters == 5
|
||
+ t.close()
|
||
|
||
- t.update(10)
|
||
- timer.sleep(1e-2)
|
||
- for _ in _range(4):
|
||
- t.update()
|
||
- timer.sleep(1e-2)
|
||
- out = our_file.getvalue()
|
||
+ # Check iterable based tqdm
|
||
+ with closing(StringIO()) as our_file:
|
||
+ t = tqdm(_range(10), file=our_file, miniters=None, mininterval=None,
|
||
+ smoothing=0.5)
|
||
+ for _ in t:
|
||
+ pass
|
||
assert t.dynamic_miniters
|
||
|
||
- with closing(StringIO()) as our_file:
|
||
- # Test iteration-based tqdm
|
||
- with tqdm(_range(total), file=our_file, miniters=None,
|
||
- mininterval=0.01, smoothing=1, maxinterval=0) as t2:
|
||
- cpu_timify(t2, timer)
|
||
+ # No smoothing
|
||
+ with closing(StringIO()) as our_file:
|
||
+ t = tqdm(_range(10), file=our_file, miniters=None, mininterval=None,
|
||
+ smoothing=0)
|
||
+ for _ in t:
|
||
+ pass
|
||
+ assert t.dynamic_miniters
|
||
|
||
- for i in t2:
|
||
- if i >= 10:
|
||
- timer.sleep(0.1)
|
||
- if i >= 14:
|
||
- break
|
||
- out2 = our_file.getvalue()
|
||
-
|
||
- assert t.dynamic_miniters
|
||
- assert ' 0%| | 0/100 [00:00<' in out
|
||
- assert '11%' in out and '11%' in out2
|
||
- # assert '12%' not in out and '12%' in out2
|
||
- assert '13%' in out and '13%' in out2
|
||
- assert '14%' in out and '14%' in out2
|
||
-
|
||
-
|
||
-@with_setup(pretest, posttest)
|
||
-def test_rlock_creation():
|
||
- """Test that importing tqdm does not create multiprocessing objects."""
|
||
- import multiprocessing as mp
|
||
- if sys.version_info < (3, 3):
|
||
- # unittest.mock is a 3.3+ feature
|
||
- raise SkipTest
|
||
+ # No dynamic_miniters (miniters is fixed manually)
|
||
+ with closing(StringIO()) as our_file:
|
||
+ t = tqdm(_range(10), file=our_file, miniters=1, mininterval=None)
|
||
+ for _ in t:
|
||
+ pass
|
||
+ assert not t.dynamic_miniters
|
||
|
||
- # Use 'spawn' instead of 'fork' so that the process does not inherit any
|
||
- # globals that have been constructed by running other tests
|
||
- ctx = mp.get_context('spawn')
|
||
- with ctx.Pool(1) as pool:
|
||
- # The pool will propagate the error if the target method fails
|
||
- pool.apply(_rlock_creation_target)
|
||
+ def test_big_min_interval(self):
|
||
+ """Test large mininterval"""
|
||
+ with closing(StringIO()) as our_file:
|
||
+ for _ in tqdm(_range(2), file=our_file, mininterval=1E10):
|
||
+ pass
|
||
+ assert '50%' not in our_file.getvalue()
|
||
|
||
+ with closing(StringIO()) as our_file:
|
||
+ with tqdm(_range(2), file=our_file, mininterval=1E10) as t:
|
||
+ t.update()
|
||
+ t.update()
|
||
+ assert '50%' not in our_file.getvalue()
|
||
|
||
-def _rlock_creation_target():
|
||
- """Check that the RLock has not been constructed."""
|
||
- from unittest.mock import patch
|
||
- import multiprocessing as mp
|
||
+ def test_smoothed_dynamic_min_iters(self):
|
||
+ """Test smoothed dynamic miniters"""
|
||
+ timer = DiscreteTimer()
|
||
|
||
- # Patch the RLock class/method but use the original implementation
|
||
- with patch('multiprocessing.RLock', wraps=mp.RLock) as rlock_mock:
|
||
- # Importing the module should not create a lock
|
||
- from tqdm import tqdm
|
||
- assert rlock_mock.call_count == 0
|
||
- # Creating a progress bar should initialize the lock
|
||
with closing(StringIO()) as our_file:
|
||
- with tqdm(file=our_file) as _: # NOQA
|
||
- pass
|
||
- assert rlock_mock.call_count == 1
|
||
- # Creating a progress bar again should reuse the lock
|
||
- with closing(StringIO()) as our_file:
|
||
- with tqdm(file=our_file) as _: # NOQA
|
||
- pass
|
||
- assert rlock_mock.call_count == 1
|
||
+ with tqdm(total=100, file=our_file, miniters=None, mininterval=0,
|
||
+ smoothing=0.5, maxinterval=0) as t:
|
||
+ cpu_timify(t, timer)
|
||
+
|
||
+ # Increase 10 iterations at once
|
||
+ t.update(10)
|
||
+ # The next iterations should be partially skipped
|
||
+ for _ in _range(2):
|
||
+ t.update(4)
|
||
+ for _ in _range(20):
|
||
+ t.update()
|
||
|
||
+ out = our_file.getvalue()
|
||
+ assert t.dynamic_miniters
|
||
+ assert ' 0%| | 0/100 [00:00<' in out
|
||
+ assert '10%' in out
|
||
+ assert '14%' not in out
|
||
+ assert '18%' in out
|
||
+ assert '20%' not in out
|
||
+ assert '25%' in out
|
||
+ assert '30%' not in out
|
||
+ assert '32%' in out
|
||
+
|
||
+ def test_smoothed_dynamic_min_iters_with_min_interval(self):
|
||
+ """Test smoothed dynamic miniters with mininterval"""
|
||
+ timer = DiscreteTimer()
|
||
|
||
-@with_setup(pretest, posttest)
|
||
-def test_disable():
|
||
- """Test disable"""
|
||
- with closing(StringIO()) as our_file:
|
||
- for _ in tqdm(_range(3), file=our_file, disable=True):
|
||
- pass
|
||
- assert our_file.getvalue() == ''
|
||
+ # In this test, `miniters` should gradually decline
|
||
+ total = 100
|
||
|
||
- with closing(StringIO()) as our_file:
|
||
- progressbar = tqdm(total=3, file=our_file, miniters=1, disable=True)
|
||
- progressbar.update(3)
|
||
- progressbar.close()
|
||
- assert our_file.getvalue() == ''
|
||
+ with closing(StringIO()) as our_file:
|
||
+ # Test manual updating tqdm
|
||
+ with tqdm(total=total, file=our_file, miniters=None,
|
||
+ mininterval=1e-3, smoothing=1, maxinterval=0) as t:
|
||
+ cpu_timify(t, timer)
|
||
|
||
+ t.update(10)
|
||
+ timer.sleep(1e-2)
|
||
+ for _ in _range(4):
|
||
+ t.update()
|
||
+ timer.sleep(1e-2)
|
||
+ out = our_file.getvalue()
|
||
+ assert t.dynamic_miniters
|
||
|
||
-@with_setup(pretest, posttest)
|
||
-def test_infinite_total():
|
||
- """Test treatment of infinite total"""
|
||
- with closing(StringIO()) as our_file:
|
||
- for _ in tqdm(_range(3), file=our_file, total=float("inf")):
|
||
- pass
|
||
+ with closing(StringIO()) as our_file:
|
||
+ # Test iteration-based tqdm
|
||
+ with tqdm(_range(total), file=our_file, miniters=None,
|
||
+ mininterval=0.01, smoothing=1, maxinterval=0) as t2:
|
||
+ cpu_timify(t2, timer)
|
||
|
||
+ for i in t2:
|
||
+ if i >= 10:
|
||
+ timer.sleep(0.1)
|
||
+ if i >= 14:
|
||
+ break
|
||
+ out2 = our_file.getvalue()
|
||
|
||
-@with_setup(pretest, posttest)
|
||
-def test_nototal():
|
||
- """Test unknown total length"""
|
||
- with closing(StringIO()) as our_file:
|
||
- for _ in tqdm((i for i in range(10)), file=our_file, unit_scale=10):
|
||
- pass
|
||
- assert "100it" in our_file.getvalue()
|
||
+ assert t.dynamic_miniters
|
||
+ assert ' 0%| | 0/100 [00:00<' in out
|
||
+ assert '11%' in out and '11%' in out2
|
||
+ # assert '12%' not in out and '12%' in out2
|
||
+ assert '13%' in out and '13%' in out2
|
||
+ assert '14%' in out and '14%' in out2
|
||
+
|
||
+ def test_rlock_creation(self):
|
||
+ """Test that importing tqdm does not create multiprocessing objects."""
|
||
+ import multiprocessing as mp
|
||
+ if sys.version_info < (3, 3):
|
||
+ # unittest.mock is a 3.3+ feature
|
||
+ raise unittest.SkipTest
|
||
+
|
||
+ # Use 'spawn' instead of 'fork' so that the process does not inherit any
|
||
+ # globals that have been constructed by running other tests
|
||
+ ctx = mp.get_context('spawn')
|
||
+ with ctx.Pool(1) as pool:
|
||
+ # The pool will propagate the error if the target method fails
|
||
+ pool.apply(_rlock_creation_target)
|
||
+
|
||
+ def test_disable(self):
|
||
+ """Test disable"""
|
||
+ with closing(StringIO()) as our_file:
|
||
+ for _ in tqdm(_range(3), file=our_file, disable=True):
|
||
+ pass
|
||
+ assert our_file.getvalue() == ''
|
||
|
||
- with closing(StringIO()) as our_file:
|
||
- for _ in tqdm((i for i in range(10)), file=our_file,
|
||
- bar_format="{l_bar}{bar}{r_bar}"):
|
||
- pass
|
||
- assert "10/?" in our_file.getvalue()
|
||
+ with closing(StringIO()) as our_file:
|
||
+ progressbar = tqdm(total=3, file=our_file, miniters=1, disable=True)
|
||
+ progressbar.update(3)
|
||
+ progressbar.close()
|
||
+ assert our_file.getvalue() == ''
|
||
|
||
+ def test_infinite_total(self):
|
||
+ """Test treatment of infinite total"""
|
||
+ with closing(StringIO()) as our_file:
|
||
+ for _ in tqdm(_range(3), file=our_file, total=float("inf")):
|
||
+ pass
|
||
|
||
-@with_setup(pretest, posttest)
|
||
-def test_unit():
|
||
- """Test SI unit prefix"""
|
||
- with closing(StringIO()) as our_file:
|
||
- for _ in tqdm(_range(3), file=our_file, miniters=1, unit="bytes"):
|
||
- pass
|
||
- assert 'bytes/s' in our_file.getvalue()
|
||
+ def test_nototal(self):
|
||
+ """Test unknown total length"""
|
||
+ with closing(StringIO()) as our_file:
|
||
+ for _ in tqdm((i for i in range(10)), file=our_file, unit_scale=10):
|
||
+ pass
|
||
+ assert "100it" in our_file.getvalue()
|
||
|
||
+ with closing(StringIO()) as our_file:
|
||
+ for _ in tqdm((i for i in range(10)), file=our_file,
|
||
+ bar_format="{l_bar}{bar}{r_bar}"):
|
||
+ pass
|
||
+ assert "10/?" in our_file.getvalue()
|
||
|
||
-@with_setup(pretest, posttest)
|
||
-def test_ascii():
|
||
- """Test ascii/unicode bar"""
|
||
- # Test ascii autodetection
|
||
- with closing(StringIO()) as our_file:
|
||
- with tqdm(total=10, file=our_file, ascii=None) as t:
|
||
- assert t.ascii # TODO: this may fail in the future
|
||
+ def test_unit(self):
|
||
+ """Test SI unit prefix"""
|
||
+ with closing(StringIO()) as our_file:
|
||
+ for _ in tqdm(_range(3), file=our_file, miniters=1, unit="bytes"):
|
||
+ pass
|
||
+ assert 'bytes/s' in our_file.getvalue()
|
||
|
||
- # Test ascii bar
|
||
- with closing(StringIO()) as our_file:
|
||
- for _ in tqdm(_range(3), total=15, file=our_file, miniters=1,
|
||
- mininterval=0, ascii=True):
|
||
- pass
|
||
- res = our_file.getvalue().strip("\r").split("\r")
|
||
- assert '7%|6' in res[1]
|
||
- assert '13%|#3' in res[2]
|
||
- assert '20%|##' in res[3]
|
||
-
|
||
- # Test unicode bar
|
||
- with closing(UnicodeIO()) as our_file:
|
||
- with tqdm(total=15, file=our_file, ascii=False, mininterval=0) as t:
|
||
- for _ in _range(3):
|
||
- t.update()
|
||
- res = our_file.getvalue().strip("\r").split("\r")
|
||
- assert u"7%|\u258b" in res[1]
|
||
- assert u"13%|\u2588\u258e" in res[2]
|
||
- assert u"20%|\u2588\u2588" in res[3]
|
||
+ def test_ascii(self):
|
||
+ """Test ascii/unicode bar"""
|
||
+ # Test ascii autodetection
|
||
+ with closing(StringIO()) as our_file:
|
||
+ with tqdm(total=10, file=our_file, ascii=None) as t:
|
||
+ assert t.ascii # TODO: this may fail in the future
|
||
|
||
- # Test custom bar
|
||
- for bars in [" .oO0", " #"]:
|
||
+ # Test ascii bar
|
||
with closing(StringIO()) as our_file:
|
||
- for _ in tqdm(_range(len(bars) - 1), file=our_file, miniters=1,
|
||
- mininterval=0, ascii=bars, ncols=27):
|
||
+ for _ in tqdm(_range(3), total=15, file=our_file, miniters=1,
|
||
+ mininterval=0, ascii=True):
|
||
pass
|
||
res = our_file.getvalue().strip("\r").split("\r")
|
||
- for b, line in zip(bars, res):
|
||
- assert '|' + b + '|' in line
|
||
-
|
||
-
|
||
-@with_setup(pretest, posttest)
|
||
-def test_update():
|
||
- """Test manual creation and updates"""
|
||
- res = None
|
||
- with closing(StringIO()) as our_file:
|
||
- with tqdm(total=2, file=our_file, miniters=1, mininterval=0) \
|
||
- as progressbar:
|
||
- assert len(progressbar) == 2
|
||
- progressbar.update(2)
|
||
- assert '| 2/2' in our_file.getvalue()
|
||
- progressbar.desc = 'dynamically notify of 4 increments in total'
|
||
- progressbar.total = 4
|
||
- progressbar.update(-1)
|
||
- progressbar.update(2)
|
||
- res = our_file.getvalue()
|
||
- assert '| 3/4 ' in res
|
||
- assert 'dynamically notify of 4 increments in total' in res
|
||
-
|
||
-
|
||
-@with_setup(pretest, posttest)
|
||
-def test_close():
|
||
- """Test manual creation and closure and n_instances"""
|
||
-
|
||
- # With `leave` option
|
||
- with closing(StringIO()) as our_file:
|
||
- progressbar = tqdm(total=3, file=our_file, miniters=10)
|
||
- progressbar.update(3)
|
||
- assert '| 3/3 ' not in our_file.getvalue() # Should be blank
|
||
- assert len(tqdm._instances) == 1
|
||
- progressbar.close()
|
||
- assert len(tqdm._instances) == 0
|
||
- assert '| 3/3 ' in our_file.getvalue()
|
||
-
|
||
- # Without `leave` option
|
||
- with closing(StringIO()) as our_file:
|
||
- progressbar = tqdm(total=3, file=our_file, miniters=10, leave=False)
|
||
- progressbar.update(3)
|
||
- progressbar.close()
|
||
- assert '| 3/3 ' not in our_file.getvalue() # Should be blank
|
||
-
|
||
- # With all updates
|
||
- with closing(StringIO()) as our_file:
|
||
- assert len(tqdm._instances) == 0
|
||
- with tqdm(total=3, file=our_file, miniters=0, mininterval=0,
|
||
- leave=True) as progressbar:
|
||
+ assert '7%|6' in res[1]
|
||
+ assert '13%|#3' in res[2]
|
||
+ assert '20%|##' in res[3]
|
||
+
|
||
+ # Test unicode bar
|
||
+ with closing(UnicodeIO()) as our_file:
|
||
+ with tqdm(total=15, file=our_file, ascii=False, mininterval=0) as t:
|
||
+ for _ in _range(3):
|
||
+ t.update()
|
||
+ res = our_file.getvalue().strip("\r").split("\r")
|
||
+ assert u"7%|\u258b" in res[1]
|
||
+ assert u"13%|\u2588\u258e" in res[2]
|
||
+ assert u"20%|\u2588\u2588" in res[3]
|
||
+
|
||
+ # Test custom bar
|
||
+ for bars in [" .oO0", " #"]:
|
||
+ with closing(StringIO()) as our_file:
|
||
+ for _ in tqdm(_range(len(bars) - 1), file=our_file, miniters=1,
|
||
+ mininterval=0, ascii=bars, ncols=27):
|
||
+ pass
|
||
+ res = our_file.getvalue().strip("\r").split("\r")
|
||
+ for b, line in zip(bars, res):
|
||
+ assert '|' + b + '|' in line
|
||
+
|
||
+ def test_update(self):
|
||
+ """Test manual creation and updates"""
|
||
+ res = None
|
||
+ with closing(StringIO()) as our_file:
|
||
+ with tqdm(total=2, file=our_file, miniters=1, mininterval=0) \
|
||
+ as progressbar:
|
||
+ assert len(progressbar) == 2
|
||
+ progressbar.update(2)
|
||
+ assert '| 2/2' in our_file.getvalue()
|
||
+ progressbar.desc = 'dynamically notify of 4 increments in total'
|
||
+ progressbar.total = 4
|
||
+ progressbar.update(-1)
|
||
+ progressbar.update(2)
|
||
+ res = our_file.getvalue()
|
||
+ assert '| 3/4 ' in res
|
||
+ assert 'dynamically notify of 4 increments in total' in res
|
||
+
|
||
+ def test_close(self):
|
||
+ """Test manual creation and closure and n_instances"""
|
||
+
|
||
+ # With `leave` option
|
||
+ with closing(StringIO()) as our_file:
|
||
+ progressbar = tqdm(total=3, file=our_file, miniters=10)
|
||
+ progressbar.update(3)
|
||
+ assert '| 3/3 ' not in our_file.getvalue() # Should be blank
|
||
assert len(tqdm._instances) == 1
|
||
+ progressbar.close()
|
||
+ assert len(tqdm._instances) == 0
|
||
+ assert '| 3/3 ' in our_file.getvalue()
|
||
+
|
||
+ # Without `leave` option
|
||
+ with closing(StringIO()) as our_file:
|
||
+ progressbar = tqdm(total=3, file=our_file, miniters=10, leave=False)
|
||
progressbar.update(3)
|
||
+ progressbar.close()
|
||
+ assert '| 3/3 ' not in our_file.getvalue() # Should be blank
|
||
+
|
||
+ # With all updates
|
||
+ with closing(StringIO()) as our_file:
|
||
+ assert len(tqdm._instances) == 0
|
||
+ with tqdm(total=3, file=our_file, miniters=0, mininterval=0,
|
||
+ leave=True) as progressbar:
|
||
+ assert len(tqdm._instances) == 1
|
||
+ progressbar.update(3)
|
||
+ res = our_file.getvalue()
|
||
+ assert '| 3/3 ' in res # Should be blank
|
||
+ assert '\n' not in res
|
||
+ # close() called
|
||
+ assert len(tqdm._instances) == 0
|
||
+
|
||
+ exres = res.rsplit(', ', 1)[0]
|
||
res = our_file.getvalue()
|
||
- assert '| 3/3 ' in res # Should be blank
|
||
- assert '\n' not in res
|
||
- # close() called
|
||
- assert len(tqdm._instances) == 0
|
||
-
|
||
- exres = res.rsplit(', ', 1)[0]
|
||
- res = our_file.getvalue()
|
||
- assert res[-1] == '\n'
|
||
- if not res.startswith(exres):
|
||
- raise AssertionError(
|
||
- "\n<<< Expected:\n{0}\n>>> Got:\n{1}\n===".format(
|
||
- exres + ', ...it/s]\n', our_file.getvalue()))
|
||
-
|
||
- # Closing after the output stream has closed
|
||
- with closing(StringIO()) as our_file:
|
||
- t = tqdm(total=2, file=our_file)
|
||
- t.update()
|
||
- t.update()
|
||
- t.close()
|
||
+ assert res[-1] == '\n'
|
||
+ if not res.startswith(exres):
|
||
+ raise AssertionError(
|
||
+ "\n<<< Expected:\n{0}\n>>> Got:\n{1}\n===".format(
|
||
+ exres + ', ...it/s]\n', our_file.getvalue()))
|
||
|
||
+ # Closing after the output stream has closed
|
||
+ with closing(StringIO()) as our_file:
|
||
+ t = tqdm(total=2, file=our_file)
|
||
+ t.update()
|
||
+ t.update()
|
||
+ t.close()
|
||
|
||
-@with_setup(pretest, posttest)
|
||
-def test_smoothing():
|
||
- """Test exponential weighted average smoothing"""
|
||
- timer = DiscreteTimer()
|
||
+ def test_smoothing(self):
|
||
+ """Test exponential weighted average smoothing"""
|
||
+ timer = DiscreteTimer()
|
||
|
||
- # -- Test disabling smoothing
|
||
- with closing(StringIO()) as our_file:
|
||
- with tqdm(_range(3), file=our_file, smoothing=None, leave=True) as t:
|
||
- cpu_timify(t, timer)
|
||
+ # -- Test disabling smoothing
|
||
+ with closing(StringIO()) as our_file:
|
||
+ with tqdm(_range(3), file=our_file, smoothing=None,
|
||
+ leave=True) as t:
|
||
+ cpu_timify(t, timer)
|
||
|
||
- for _ in t:
|
||
- pass
|
||
- assert '| 3/3 ' in our_file.getvalue()
|
||
+ for _ in t:
|
||
+ pass
|
||
+ assert '| 3/3 ' in our_file.getvalue()
|
||
|
||
- # -- Test smoothing
|
||
- # Compile the regex to find the rate
|
||
- # 1st case: no smoothing (only use average)
|
||
- with closing(StringIO()) as our_file2:
|
||
- with closing(StringIO()) as our_file:
|
||
- t = tqdm(_range(3), file=our_file2, smoothing=None, leave=True,
|
||
- miniters=1, mininterval=0)
|
||
- cpu_timify(t, timer)
|
||
+ # -- Test smoothing
|
||
+ # Compile the regex to find the rate
|
||
+ # 1st case: no smoothing (only use average)
|
||
+ with closing(StringIO()) as our_file2:
|
||
+ with closing(StringIO()) as our_file:
|
||
+ t = tqdm(_range(3), file=our_file2, smoothing=None, leave=True,
|
||
+ miniters=1, mininterval=0)
|
||
+ cpu_timify(t, timer)
|
||
+
|
||
+ with tqdm(_range(3), file=our_file, smoothing=None, leave=True,
|
||
+ miniters=1, mininterval=0) as t2:
|
||
+ cpu_timify(t2, timer)
|
||
+
|
||
+ for i in t2:
|
||
+ # Sleep more for first iteration and
|
||
+ # see how quickly rate is updated
|
||
+ if i == 0:
|
||
+ timer.sleep(0.01)
|
||
+ else:
|
||
+ # Need to sleep in all iterations
|
||
+ # to calculate smoothed rate
|
||
+ # (else delta_t is 0!)
|
||
+ timer.sleep(0.001)
|
||
+ t.update()
|
||
+ n_old = len(tqdm._instances)
|
||
+ t.close()
|
||
+ assert len(tqdm._instances) == n_old - 1
|
||
+ # Get result for iter-based bar
|
||
+ a = progressbar_rate(get_bar(our_file.getvalue(), 3))
|
||
+ # Get result for manually updated bar
|
||
+ a2 = progressbar_rate(get_bar(our_file2.getvalue(), 3))
|
||
+
|
||
+ # 2nd case: use max smoothing (= instant rate)
|
||
+ with closing(StringIO()) as our_file2:
|
||
+ with closing(StringIO()) as our_file:
|
||
+ t = tqdm(_range(3), file=our_file2, smoothing=1, leave=True,
|
||
+ miniters=1, mininterval=0)
|
||
+ cpu_timify(t, timer)
|
||
+
|
||
+ with tqdm(_range(3), file=our_file, smoothing=1, leave=True,
|
||
+ miniters=1, mininterval=0) as t2:
|
||
+ cpu_timify(t2, timer)
|
||
+
|
||
+ for i in t2:
|
||
+ if i == 0:
|
||
+ timer.sleep(0.01)
|
||
+ else:
|
||
+ timer.sleep(0.001)
|
||
+ t.update()
|
||
+ t.close()
|
||
+ # Get result for iter-based bar
|
||
+ b = progressbar_rate(get_bar(our_file.getvalue(), 3))
|
||
+ # Get result for manually updated bar
|
||
+ b2 = progressbar_rate(get_bar(our_file2.getvalue(), 3))
|
||
+
|
||
+ # 3rd case: use medium smoothing
|
||
+ with closing(StringIO()) as our_file2:
|
||
+ with closing(StringIO()) as our_file:
|
||
+ t = tqdm(_range(3), file=our_file2, smoothing=0.5, leave=True,
|
||
+ miniters=1, mininterval=0)
|
||
+ cpu_timify(t, timer)
|
||
|
||
- with tqdm(_range(3), file=our_file, smoothing=None, leave=True,
|
||
- miniters=1, mininterval=0) as t2:
|
||
+ t2 = tqdm(_range(3), file=our_file, smoothing=0.5, leave=True,
|
||
+ miniters=1, mininterval=0)
|
||
cpu_timify(t2, timer)
|
||
|
||
for i in t2:
|
||
- # Sleep more for first iteration and
|
||
- # see how quickly rate is updated
|
||
if i == 0:
|
||
timer.sleep(0.01)
|
||
else:
|
||
- # Need to sleep in all iterations
|
||
- # to calculate smoothed rate
|
||
- # (else delta_t is 0!)
|
||
timer.sleep(0.001)
|
||
t.update()
|
||
- n_old = len(tqdm._instances)
|
||
- t.close()
|
||
- assert len(tqdm._instances) == n_old - 1
|
||
- # Get result for iter-based bar
|
||
- a = progressbar_rate(get_bar(our_file.getvalue(), 3))
|
||
- # Get result for manually updated bar
|
||
- a2 = progressbar_rate(get_bar(our_file2.getvalue(), 3))
|
||
+ t2.close()
|
||
+ t.close()
|
||
+ # Get result for iter-based bar
|
||
+ c = progressbar_rate(get_bar(our_file.getvalue(), 3))
|
||
+ # Get result for manually updated bar
|
||
+ c2 = progressbar_rate(get_bar(our_file2.getvalue(), 3))
|
||
+
|
||
+ # Check that medium smoothing's rate is between no and max
|
||
+ # smoothing rates
|
||
+ assert a <= c <= b
|
||
+ assert a2 <= c2 <= b2
|
||
+
|
||
+ def test_deprecated_nested(self):
|
||
+ """Test nested progress bars"""
|
||
+ if nt_and_no_colorama:
|
||
+ raise unittest.SkipTest
|
||
+ # TODO: test degradation on windows without colorama?
|
||
+
|
||
+ # Artificially test nested loop printing
|
||
+ # Without leave
|
||
+ our_file = StringIO()
|
||
+ try:
|
||
+ tqdm(total=2, file=our_file, nested=True)
|
||
+ except TqdmDeprecationWarning:
|
||
+ if """`nested` is deprecated and automated""" \
|
||
+ not in our_file.getvalue():
|
||
+ raise
|
||
+ else:
|
||
+ raise DeprecationError("Should not allow nested kwarg")
|
||
|
||
- # 2nd case: use max smoothing (= instant rate)
|
||
- with closing(StringIO()) as our_file2:
|
||
+ def test_bar_format(self):
|
||
+ """Test custom bar formatting"""
|
||
with closing(StringIO()) as our_file:
|
||
- t = tqdm(_range(3), file=our_file2, smoothing=1, leave=True,
|
||
- miniters=1, mininterval=0)
|
||
- cpu_timify(t, timer)
|
||
+ bar_format = ('{l_bar}{bar}|{n_fmt}/{total_fmt}-{n}/{total}'
|
||
+ '{percentage}{rate}{rate_fmt}{elapsed}{remaining}')
|
||
+ for _ in trange(2, file=our_file, leave=True,
|
||
+ bar_format=bar_format):
|
||
+ pass
|
||
+ out = our_file.getvalue()
|
||
+ assert "\r 0%| |0/2-0/20.0None?it/s00:00?\r" in out
|
||
|
||
- with tqdm(_range(3), file=our_file, smoothing=1, leave=True,
|
||
- miniters=1, mininterval=0) as t2:
|
||
- cpu_timify(t2, timer)
|
||
+ # Test unicode string auto conversion
|
||
+ with closing(StringIO()) as our_file:
|
||
+ bar_format = r'hello world'
|
||
+ with tqdm(ascii=False, bar_format=bar_format, file=our_file) as t:
|
||
+ assert isinstance(t.bar_format, _unicode)
|
||
+
|
||
+ def test_custom_format(self):
|
||
+ """Test adding additional derived format arguments"""
|
||
+ class TqdmExtraFormat(tqdm):
|
||
+ """Provides a `total_time` format parameter"""
|
||
+ @property
|
||
+ def format_dict(self):
|
||
+ d = super(TqdmExtraFormat, self).format_dict
|
||
+ total_time = d["elapsed"] * (d["total"] or 0) / max(d["n"], 1)
|
||
+ d.update(total_time=self.format_interval(total_time)
|
||
+ + " in total")
|
||
+ return d
|
||
|
||
- for i in t2:
|
||
- if i == 0:
|
||
- timer.sleep(0.01)
|
||
- else:
|
||
- timer.sleep(0.001)
|
||
- t.update()
|
||
- t.close()
|
||
- # Get result for iter-based bar
|
||
- b = progressbar_rate(get_bar(our_file.getvalue(), 3))
|
||
- # Get result for manually updated bar
|
||
- b2 = progressbar_rate(get_bar(our_file2.getvalue(), 3))
|
||
+ with closing(StringIO()) as our_file:
|
||
+ for _ in TqdmExtraFormat(
|
||
+ range(10), file=our_file,
|
||
+ bar_format="{total_time}: {percentage:.0f}%|{bar}{r_bar}"):
|
||
+ pass
|
||
+ assert "00:00 in total" in our_file.getvalue()
|
||
|
||
- # 3rd case: use medium smoothing
|
||
- with closing(StringIO()) as our_file2:
|
||
+ def test_unpause(self):
|
||
+ """Test unpause"""
|
||
+ timer = DiscreteTimer()
|
||
with closing(StringIO()) as our_file:
|
||
- t = tqdm(_range(3), file=our_file2, smoothing=0.5, leave=True,
|
||
- miniters=1, mininterval=0)
|
||
+ t = trange(10, file=our_file, leave=True, mininterval=0)
|
||
cpu_timify(t, timer)
|
||
+ timer.sleep(0.01)
|
||
+ t.update()
|
||
+ timer.sleep(0.01)
|
||
+ t.update()
|
||
+ timer.sleep(0.1) # longer wait time
|
||
+ t.unpause()
|
||
+ timer.sleep(0.01)
|
||
+ t.update()
|
||
+ timer.sleep(0.01)
|
||
+ t.update()
|
||
+ t.close()
|
||
+ r_before = progressbar_rate(get_bar(our_file.getvalue(), 2))
|
||
+ r_after = progressbar_rate(get_bar(our_file.getvalue(), 3))
|
||
+ assert r_before == r_after
|
||
|
||
- t2 = tqdm(_range(3), file=our_file, smoothing=0.5, leave=True,
|
||
- miniters=1, mininterval=0)
|
||
- cpu_timify(t2, timer)
|
||
-
|
||
- for i in t2:
|
||
- if i == 0:
|
||
- timer.sleep(0.01)
|
||
- else:
|
||
- timer.sleep(0.001)
|
||
+ def test_reset(self):
|
||
+ """Test resetting a bar for re-use"""
|
||
+ with closing(StringIO()) as our_file:
|
||
+ with tqdm(total=10, file=our_file,
|
||
+ miniters=1, mininterval=0, maxinterval=0) as t:
|
||
+ t.update(9)
|
||
+ t.reset()
|
||
t.update()
|
||
- t2.close()
|
||
- t.close()
|
||
- # Get result for iter-based bar
|
||
- c = progressbar_rate(get_bar(our_file.getvalue(), 3))
|
||
- # Get result for manually updated bar
|
||
- c2 = progressbar_rate(get_bar(our_file2.getvalue(), 3))
|
||
-
|
||
- # Check that medium smoothing's rate is between no and max smoothing rates
|
||
- assert a <= c <= b
|
||
- assert a2 <= c2 <= b2
|
||
-
|
||
-
|
||
-@with_setup(pretest, posttest)
|
||
-def test_deprecated_nested():
|
||
- """Test nested progress bars"""
|
||
- if nt_and_no_colorama:
|
||
- raise SkipTest
|
||
- # TODO: test degradation on windows without colorama?
|
||
-
|
||
- # Artificially test nested loop printing
|
||
- # Without leave
|
||
- our_file = StringIO()
|
||
- try:
|
||
- tqdm(total=2, file=our_file, nested=True)
|
||
- except TqdmDeprecationWarning:
|
||
- if """`nested` is deprecated and automated.
|
||
-Use `position` instead for manual control.""" not in our_file.getvalue():
|
||
- raise
|
||
- else:
|
||
- raise DeprecationError("Should not allow nested kwarg")
|
||
-
|
||
-
|
||
-@with_setup(pretest, posttest)
|
||
-def test_bar_format():
|
||
- """Test custom bar formatting"""
|
||
- with closing(StringIO()) as our_file:
|
||
- bar_format = ('{l_bar}{bar}|{n_fmt}/{total_fmt}-{n}/{total}'
|
||
- '{percentage}{rate}{rate_fmt}{elapsed}{remaining}')
|
||
- for _ in trange(2, file=our_file, leave=True, bar_format=bar_format):
|
||
- pass
|
||
+ t.reset(total=12)
|
||
+ t.update(10)
|
||
+ assert '| 1/10' in our_file.getvalue()
|
||
+ assert '| 10/12' in our_file.getvalue()
|
||
+
|
||
+ def test_position(self):
|
||
+ """Test positioned progress bars"""
|
||
+ if nt_and_no_colorama:
|
||
+ raise unittest.SkipTest
|
||
+
|
||
+ # Artificially test nested loop printing
|
||
+ # Without leave
|
||
+ our_file = StringIO()
|
||
+ kwargs = dict(file=our_file, miniters=1, mininterval=0, maxinterval=0)
|
||
+ t = tqdm(total=2, desc='pos2 bar', leave=False, position=2, **kwargs)
|
||
+ t.update()
|
||
+ t.close()
|
||
out = our_file.getvalue()
|
||
- assert "\r 0%| |0/2-0/20.0None?it/s00:00?\r" in out
|
||
-
|
||
- # Test unicode string auto conversion
|
||
- with closing(StringIO()) as our_file:
|
||
- bar_format = r'hello world'
|
||
- with tqdm(ascii=False, bar_format=bar_format, file=our_file) as t:
|
||
- assert isinstance(t.bar_format, _unicode)
|
||
-
|
||
-
|
||
-@with_setup(pretest, posttest)
|
||
-def test_custom_format():
|
||
- """Test adding additional derived format arguments"""
|
||
- class TqdmExtraFormat(tqdm):
|
||
- """Provides a `total_time` format parameter"""
|
||
- @property
|
||
- def format_dict(self):
|
||
- d = super(TqdmExtraFormat, self).format_dict
|
||
- total_time = d["elapsed"] * (d["total"] or 0) / max(d["n"], 1)
|
||
- d.update(total_time=self.format_interval(total_time) + " in total")
|
||
- return d
|
||
-
|
||
- with closing(StringIO()) as our_file:
|
||
- for _ in TqdmExtraFormat(
|
||
- range(10), file=our_file,
|
||
- bar_format="{total_time}: {percentage:.0f}%|{bar}{r_bar}"):
|
||
- pass
|
||
- assert "00:00 in total" in our_file.getvalue()
|
||
+ res = [m[0] for m in RE_pos.findall(out)]
|
||
+ exres = ['\n\n\rpos2 bar: 0%',
|
||
+ '\n\n\rpos2 bar: 50%',
|
||
+ '\n\n\r ']
|
||
|
||
+ pos_line_diff(res, exres)
|
||
|
||
-@with_setup(pretest, posttest)
|
||
-def test_unpause():
|
||
- """Test unpause"""
|
||
- timer = DiscreteTimer()
|
||
- with closing(StringIO()) as our_file:
|
||
- t = trange(10, file=our_file, leave=True, mininterval=0)
|
||
- cpu_timify(t, timer)
|
||
- timer.sleep(0.01)
|
||
- t.update()
|
||
- timer.sleep(0.01)
|
||
- t.update()
|
||
- timer.sleep(0.1) # longer wait time
|
||
- t.unpause()
|
||
- timer.sleep(0.01)
|
||
- t.update()
|
||
- timer.sleep(0.01)
|
||
- t.update()
|
||
- t.close()
|
||
- r_before = progressbar_rate(get_bar(our_file.getvalue(), 2))
|
||
- r_after = progressbar_rate(get_bar(our_file.getvalue(), 3))
|
||
- assert r_before == r_after
|
||
-
|
||
-
|
||
-@with_setup(pretest, posttest)
|
||
-def test_reset():
|
||
- """Test resetting a bar for re-use"""
|
||
- with closing(StringIO()) as our_file:
|
||
- with tqdm(total=10, file=our_file,
|
||
- miniters=1, mininterval=0, maxinterval=0) as t:
|
||
- t.update(9)
|
||
- t.reset()
|
||
- t.update()
|
||
- t.reset(total=12)
|
||
- t.update(10)
|
||
- assert '| 1/10' in our_file.getvalue()
|
||
- assert '| 10/12' in our_file.getvalue()
|
||
-
|
||
-
|
||
-@with_setup(pretest, posttest)
|
||
-def test_position():
|
||
- """Test positioned progress bars"""
|
||
- if nt_and_no_colorama:
|
||
- raise SkipTest
|
||
-
|
||
- # Artificially test nested loop printing
|
||
- # Without leave
|
||
- our_file = StringIO()
|
||
- kwargs = dict(file=our_file, miniters=1, mininterval=0, maxinterval=0)
|
||
- t = tqdm(total=2, desc='pos2 bar', leave=False, position=2, **kwargs)
|
||
- t.update()
|
||
- t.close()
|
||
- out = our_file.getvalue()
|
||
- res = [m[0] for m in RE_pos.findall(out)]
|
||
- exres = ['\n\n\rpos2 bar: 0%',
|
||
- '\n\n\rpos2 bar: 50%',
|
||
- '\n\n\r ']
|
||
-
|
||
- pos_line_diff(res, exres)
|
||
-
|
||
- # Test iteration-based tqdm positioning
|
||
- our_file = StringIO()
|
||
- kwargs["file"] = our_file
|
||
- for _ in trange(2, desc='pos0 bar', position=0, **kwargs):
|
||
- for _ in trange(2, desc='pos1 bar', position=1, **kwargs):
|
||
- for _ in trange(2, desc='pos2 bar', position=2, **kwargs):
|
||
- pass
|
||
- out = our_file.getvalue()
|
||
- res = [m[0] for m in RE_pos.findall(out)]
|
||
- exres = ['\rpos0 bar: 0%',
|
||
- '\n\rpos1 bar: 0%',
|
||
- '\n\n\rpos2 bar: 0%',
|
||
- '\n\n\rpos2 bar: 50%',
|
||
- '\n\n\rpos2 bar: 100%',
|
||
- '\rpos2 bar: 100%',
|
||
- '\n\n\rpos1 bar: 50%',
|
||
- '\n\n\rpos2 bar: 0%',
|
||
- '\n\n\rpos2 bar: 50%',
|
||
- '\n\n\rpos2 bar: 100%',
|
||
- '\rpos2 bar: 100%',
|
||
- '\n\n\rpos1 bar: 100%',
|
||
- '\rpos1 bar: 100%',
|
||
- '\n\rpos0 bar: 50%',
|
||
- '\n\rpos1 bar: 0%',
|
||
- '\n\n\rpos2 bar: 0%',
|
||
- '\n\n\rpos2 bar: 50%',
|
||
- '\n\n\rpos2 bar: 100%',
|
||
- '\rpos2 bar: 100%',
|
||
- '\n\n\rpos1 bar: 50%',
|
||
- '\n\n\rpos2 bar: 0%',
|
||
- '\n\n\rpos2 bar: 50%',
|
||
- '\n\n\rpos2 bar: 100%',
|
||
- '\rpos2 bar: 100%',
|
||
- '\n\n\rpos1 bar: 100%',
|
||
- '\rpos1 bar: 100%',
|
||
- '\n\rpos0 bar: 100%',
|
||
- '\rpos0 bar: 100%',
|
||
- '\n']
|
||
- pos_line_diff(res, exres)
|
||
-
|
||
- # Test manual tqdm positioning
|
||
- our_file = StringIO()
|
||
- kwargs["file"] = our_file
|
||
- kwargs["total"] = 2
|
||
- t1 = tqdm(desc='pos0 bar', position=0, **kwargs)
|
||
- t2 = tqdm(desc='pos1 bar', position=1, **kwargs)
|
||
- t3 = tqdm(desc='pos2 bar', position=2, **kwargs)
|
||
- for _ in _range(2):
|
||
- t1.update()
|
||
- t3.update()
|
||
- t2.update()
|
||
- out = our_file.getvalue()
|
||
- res = [m[0] for m in RE_pos.findall(out)]
|
||
- exres = ['\rpos0 bar: 0%',
|
||
- '\n\rpos1 bar: 0%',
|
||
- '\n\n\rpos2 bar: 0%',
|
||
- '\rpos0 bar: 50%',
|
||
- '\n\n\rpos2 bar: 50%',
|
||
- '\n\rpos1 bar: 50%',
|
||
- '\rpos0 bar: 100%',
|
||
- '\n\n\rpos2 bar: 100%',
|
||
- '\n\rpos1 bar: 100%']
|
||
- pos_line_diff(res, exres)
|
||
- t1.close()
|
||
- t2.close()
|
||
- t3.close()
|
||
-
|
||
- # Test auto repositioning of bars when a bar is prematurely closed
|
||
- # tqdm._instances.clear() # reset number of instances
|
||
- with closing(StringIO()) as our_file:
|
||
- t1 = tqdm(total=10, file=our_file, desc='1.pos0 bar', mininterval=0)
|
||
- t2 = tqdm(total=10, file=our_file, desc='2.pos1 bar', mininterval=0)
|
||
- t3 = tqdm(total=10, file=our_file, desc='3.pos2 bar', mininterval=0)
|
||
- res = [m[0] for m in RE_pos.findall(our_file.getvalue())]
|
||
- exres = ['\r1.pos0 bar: 0%',
|
||
- '\n\r2.pos1 bar: 0%',
|
||
- '\n\n\r3.pos2 bar: 0%']
|
||
+ # Test iteration-based tqdm positioning
|
||
+ our_file = StringIO()
|
||
+ kwargs["file"] = our_file
|
||
+ for _ in trange(2, desc='pos0 bar', position=0, **kwargs):
|
||
+ for _ in trange(2, desc='pos1 bar', position=1, **kwargs):
|
||
+ for _ in trange(2, desc='pos2 bar', position=2, **kwargs):
|
||
+ pass
|
||
+ out = our_file.getvalue()
|
||
+ res = [m[0] for m in RE_pos.findall(out)]
|
||
+ exres = ['\rpos0 bar: 0%',
|
||
+ '\n\rpos1 bar: 0%',
|
||
+ '\n\n\rpos2 bar: 0%',
|
||
+ '\n\n\rpos2 bar: 50%',
|
||
+ '\n\n\rpos2 bar: 100%',
|
||
+ '\rpos2 bar: 100%',
|
||
+ '\n\n\rpos1 bar: 50%',
|
||
+ '\n\n\rpos2 bar: 0%',
|
||
+ '\n\n\rpos2 bar: 50%',
|
||
+ '\n\n\rpos2 bar: 100%',
|
||
+ '\rpos2 bar: 100%',
|
||
+ '\n\n\rpos1 bar: 100%',
|
||
+ '\rpos1 bar: 100%',
|
||
+ '\n\rpos0 bar: 50%',
|
||
+ '\n\rpos1 bar: 0%',
|
||
+ '\n\n\rpos2 bar: 0%',
|
||
+ '\n\n\rpos2 bar: 50%',
|
||
+ '\n\n\rpos2 bar: 100%',
|
||
+ '\rpos2 bar: 100%',
|
||
+ '\n\n\rpos1 bar: 50%',
|
||
+ '\n\n\rpos2 bar: 0%',
|
||
+ '\n\n\rpos2 bar: 50%',
|
||
+ '\n\n\rpos2 bar: 100%',
|
||
+ '\rpos2 bar: 100%',
|
||
+ '\n\n\rpos1 bar: 100%',
|
||
+ '\rpos1 bar: 100%',
|
||
+ '\n\rpos0 bar: 100%',
|
||
+ '\rpos0 bar: 100%',
|
||
+ '\n']
|
||
pos_line_diff(res, exres)
|
||
|
||
- t2.close()
|
||
- t4 = tqdm(total=10, file=our_file, desc='4.pos2 bar', mininterval=0)
|
||
- t1.update(1)
|
||
- t3.update(1)
|
||
- t4.update(1)
|
||
- res = [m[0] for m in RE_pos.findall(our_file.getvalue())]
|
||
- exres = ['\r1.pos0 bar: 0%',
|
||
- '\n\r2.pos1 bar: 0%',
|
||
- '\n\n\r3.pos2 bar: 0%',
|
||
- '\r2.pos1 bar: 0%',
|
||
- '\n\n\r4.pos2 bar: 0%',
|
||
- '\r1.pos0 bar: 10%',
|
||
- '\n\n\r3.pos2 bar: 10%',
|
||
- '\n\r4.pos2 bar: 10%']
|
||
+ # Test manual tqdm positioning
|
||
+ our_file = StringIO()
|
||
+ kwargs["file"] = our_file
|
||
+ kwargs["total"] = 2
|
||
+ t1 = tqdm(desc='pos0 bar', position=0, **kwargs)
|
||
+ t2 = tqdm(desc='pos1 bar', position=1, **kwargs)
|
||
+ t3 = tqdm(desc='pos2 bar', position=2, **kwargs)
|
||
+ for _ in _range(2):
|
||
+ t1.update()
|
||
+ t3.update()
|
||
+ t2.update()
|
||
+ out = our_file.getvalue()
|
||
+ res = [m[0] for m in RE_pos.findall(out)]
|
||
+ exres = ['\rpos0 bar: 0%',
|
||
+ '\n\rpos1 bar: 0%',
|
||
+ '\n\n\rpos2 bar: 0%',
|
||
+ '\rpos0 bar: 50%',
|
||
+ '\n\n\rpos2 bar: 50%',
|
||
+ '\n\rpos1 bar: 50%',
|
||
+ '\rpos0 bar: 100%',
|
||
+ '\n\n\rpos2 bar: 100%',
|
||
+ '\n\rpos1 bar: 100%']
|
||
pos_line_diff(res, exres)
|
||
- t4.close()
|
||
- t3.close()
|
||
t1.close()
|
||
+ t2.close()
|
||
+ t3.close()
|
||
|
||
+ # Test auto repositioning of bars when a bar is prematurely closed
|
||
+ # tqdm._instances.clear() # reset number of instances
|
||
+ with closing(StringIO()) as our_file:
|
||
+ t1 = tqdm(total=10, file=our_file, desc='1.pos0 bar', mininterval=0)
|
||
+ t2 = tqdm(total=10, file=our_file, desc='2.pos1 bar', mininterval=0)
|
||
+ t3 = tqdm(total=10, file=our_file, desc='3.pos2 bar', mininterval=0)
|
||
+ res = [m[0] for m in RE_pos.findall(our_file.getvalue())]
|
||
+ exres = ['\r1.pos0 bar: 0%',
|
||
+ '\n\r2.pos1 bar: 0%',
|
||
+ '\n\n\r3.pos2 bar: 0%']
|
||
+ pos_line_diff(res, exres)
|
||
|
||
-@with_setup(pretest, posttest)
|
||
-def test_set_description():
|
||
- """Test set description"""
|
||
- with closing(StringIO()) as our_file:
|
||
- with tqdm(desc='Hello', file=our_file) as t:
|
||
- assert t.desc == 'Hello'
|
||
- t.set_description_str('World')
|
||
- assert t.desc == 'World'
|
||
- t.set_description()
|
||
- assert t.desc == ''
|
||
- t.set_description('Bye')
|
||
- assert t.desc == 'Bye: '
|
||
- assert "World" in our_file.getvalue()
|
||
-
|
||
- # without refresh
|
||
- with closing(StringIO()) as our_file:
|
||
- with tqdm(desc='Hello', file=our_file) as t:
|
||
- assert t.desc == 'Hello'
|
||
- t.set_description_str('World', False)
|
||
- assert t.desc == 'World'
|
||
- t.set_description(None, False)
|
||
- assert t.desc == ''
|
||
- assert "World" not in our_file.getvalue()
|
||
-
|
||
- # unicode
|
||
- with closing(StringIO()) as our_file:
|
||
- with tqdm(total=10, file=our_file) as t:
|
||
- t.set_description(u"\xe1\xe9\xed\xf3\xfa")
|
||
-
|
||
-
|
||
-@with_setup(pretest, posttest)
|
||
-def test_deprecated_gui():
|
||
- """Test internal GUI properties"""
|
||
- # Check: StatusPrinter iff gui is disabled
|
||
- with closing(StringIO()) as our_file:
|
||
- t = tqdm(total=2, gui=True, file=our_file, miniters=1, mininterval=0)
|
||
- assert not hasattr(t, "sp")
|
||
- try:
|
||
- t.update(1)
|
||
- except TqdmDeprecationWarning as e:
|
||
- if ('Please use `tqdm.gui.tqdm(...)` instead of'
|
||
- ' `tqdm(..., gui=True)`') \
|
||
- not in our_file.getvalue():
|
||
- raise e
|
||
- else:
|
||
- raise DeprecationError('Should not allow manual gui=True without'
|
||
- ' overriding __iter__() and update()')
|
||
- finally:
|
||
- t._instances.clear()
|
||
- # t.close()
|
||
- # len(tqdm._instances) += 1 # undo the close() decrement
|
||
+ t2.close()
|
||
+ t4 = tqdm(total=10, file=our_file, desc='4.pos2 bar', mininterval=0)
|
||
+ t1.update(1)
|
||
+ t3.update(1)
|
||
+ t4.update(1)
|
||
+ res = [m[0] for m in RE_pos.findall(our_file.getvalue())]
|
||
+ exres = ['\r1.pos0 bar: 0%',
|
||
+ '\n\r2.pos1 bar: 0%',
|
||
+ '\n\n\r3.pos2 bar: 0%',
|
||
+ '\r2.pos1 bar: 0%',
|
||
+ '\n\n\r4.pos2 bar: 0%',
|
||
+ '\r1.pos0 bar: 10%',
|
||
+ '\n\n\r3.pos2 bar: 10%',
|
||
+ '\n\r4.pos2 bar: 10%']
|
||
+ pos_line_diff(res, exres)
|
||
+ t4.close()
|
||
+ t3.close()
|
||
+ t1.close()
|
||
|
||
- t = tqdm(_range(3), gui=True, file=our_file, miniters=1, mininterval=0)
|
||
- try:
|
||
- for _ in t:
|
||
- pass
|
||
- except TqdmDeprecationWarning as e:
|
||
- if ('Please use `tqdm.gui.tqdm(...)` instead of'
|
||
- ' `tqdm(..., gui=True)`') \
|
||
- not in our_file.getvalue():
|
||
- raise e
|
||
- else:
|
||
- raise DeprecationError('Should not allow manual gui=True without'
|
||
- ' overriding __iter__() and update()')
|
||
- finally:
|
||
- t._instances.clear()
|
||
- # t.close()
|
||
- # len(tqdm._instances) += 1 # undo the close() decrement
|
||
-
|
||
- with tqdm(total=1, gui=False, file=our_file) as t:
|
||
- assert hasattr(t, "sp")
|
||
-
|
||
-
|
||
-@with_setup(pretest, posttest)
|
||
-def test_cmp():
|
||
- """Test comparison functions"""
|
||
- with closing(StringIO()) as our_file:
|
||
- t0 = tqdm(total=10, file=our_file)
|
||
- t1 = tqdm(total=10, file=our_file)
|
||
- t2 = tqdm(total=10, file=our_file)
|
||
-
|
||
- assert t0 < t1
|
||
- assert t2 >= t0
|
||
- assert t0 <= t2
|
||
-
|
||
- t3 = tqdm(total=10, file=our_file)
|
||
- t4 = tqdm(total=10, file=our_file)
|
||
- t5 = tqdm(total=10, file=our_file)
|
||
- t5.close()
|
||
- t6 = tqdm(total=10, file=our_file)
|
||
-
|
||
- assert t3 != t4
|
||
- assert t3 > t2
|
||
- assert t5 == t6
|
||
- t6.close()
|
||
- t4.close()
|
||
- t3.close()
|
||
- t2.close()
|
||
- t1.close()
|
||
- t0.close()
|
||
-
|
||
-
|
||
-@with_setup(pretest, posttest)
|
||
-def test_repr():
|
||
- """Test representation"""
|
||
- with closing(StringIO()) as our_file:
|
||
- with tqdm(total=10, ascii=True, file=our_file) as t:
|
||
- assert str(t) == ' 0%| | 0/10 [00:00<?, ?it/s]'
|
||
-
|
||
-
|
||
-@with_setup(pretest, posttest)
|
||
-def test_clear():
|
||
- """Test clearing bar display"""
|
||
- with closing(StringIO()) as our_file:
|
||
- t1 = tqdm(total=10, file=our_file, desc='pos0 bar',
|
||
- bar_format='{l_bar}')
|
||
- t2 = trange(10, file=our_file, desc='pos1 bar', bar_format='{l_bar}')
|
||
- before = squash_ctrlchars(our_file.getvalue())
|
||
- t2.clear()
|
||
- t1.clear()
|
||
- after = squash_ctrlchars(our_file.getvalue())
|
||
- t1.close()
|
||
- t2.close()
|
||
- assert before == ['pos0 bar: 0%|', 'pos1 bar: 0%|']
|
||
- assert after == ['', '']
|
||
-
|
||
-
|
||
-@with_setup(pretest, posttest)
|
||
-def test_clear_disabled():
|
||
- """Test clearing bar display"""
|
||
- with closing(StringIO()) as our_file:
|
||
- with tqdm(total=10, file=our_file, desc='pos0 bar', disable=True,
|
||
- bar_format='{l_bar}') as t:
|
||
- t.clear()
|
||
- assert our_file.getvalue() == ''
|
||
-
|
||
-
|
||
-@with_setup(pretest, posttest)
|
||
-def test_refresh():
|
||
- """Test refresh bar display"""
|
||
- with closing(StringIO()) as our_file:
|
||
- t1 = tqdm(total=10, file=our_file, desc='pos0 bar',
|
||
- bar_format='{l_bar}', mininterval=999, miniters=999)
|
||
- t2 = tqdm(total=10, file=our_file, desc='pos1 bar',
|
||
- bar_format='{l_bar}', mininterval=999, miniters=999)
|
||
- t1.update()
|
||
- t2.update()
|
||
- before = squash_ctrlchars(our_file.getvalue())
|
||
- t1.refresh()
|
||
- t2.refresh()
|
||
- after = squash_ctrlchars(our_file.getvalue())
|
||
- t1.close()
|
||
- t2.close()
|
||
+ def test_set_description(self):
|
||
+ """Test set description"""
|
||
+ with closing(StringIO()) as our_file:
|
||
+ with tqdm(desc='Hello', file=our_file) as t:
|
||
+ assert t.desc == 'Hello'
|
||
+ t.set_description_str('World')
|
||
+ assert t.desc == 'World'
|
||
+ t.set_description()
|
||
+ assert t.desc == ''
|
||
+ t.set_description('Bye')
|
||
+ assert t.desc == 'Bye: '
|
||
+ assert "World" in our_file.getvalue()
|
||
+
|
||
+ # without refresh
|
||
+ with closing(StringIO()) as our_file:
|
||
+ with tqdm(desc='Hello', file=our_file) as t:
|
||
+ assert t.desc == 'Hello'
|
||
+ t.set_description_str('World', False)
|
||
+ assert t.desc == 'World'
|
||
+ t.set_description(None, False)
|
||
+ assert t.desc == ''
|
||
+ assert "World" not in our_file.getvalue()
|
||
+
|
||
+ # unicode
|
||
+ with closing(StringIO()) as our_file:
|
||
+ with tqdm(total=10, file=our_file) as t:
|
||
+ t.set_description(u"\xe1\xe9\xed\xf3\xfa")
|
||
|
||
- # Check that refreshing indeed forced the display to use realtime state
|
||
- assert before == [u'pos0 bar: 0%|', u'pos1 bar: 0%|']
|
||
- assert after == [u'pos0 bar: 10%|', u'pos1 bar: 10%|']
|
||
+ def test_deprecated_gui(self):
|
||
+ """Test internal GUI properties"""
|
||
+ # Check: StatusPrinter iff gui is disabled
|
||
+ with closing(StringIO()) as our_file:
|
||
+ t = tqdm(total=2, gui=True, file=our_file, miniters=1,
|
||
+ mininterval=0)
|
||
+ assert not hasattr(t, "sp")
|
||
+ try:
|
||
+ t.update(1)
|
||
+ except TqdmDeprecationWarning as e:
|
||
+ if ('Please use `tqdm.gui.tqdm(...)` instead of'
|
||
+ ' `tqdm(..., gui=True)`') \
|
||
+ not in our_file.getvalue():
|
||
+ raise e
|
||
+ else:
|
||
+ raise DeprecationError('Should not allow manual gui=True with'
|
||
+ 'out overriding __iter__() and update()')
|
||
+ finally:
|
||
+ t._instances.clear()
|
||
+ # t.close()
|
||
+ # len(tqdm._instances) += 1 # undo the close() decrement
|
||
|
||
+ t = tqdm(_range(3), gui=True, file=our_file, miniters=1,
|
||
+ mininterval=0)
|
||
+ try:
|
||
+ for _ in t:
|
||
+ pass
|
||
+ except TqdmDeprecationWarning as e:
|
||
+ if ('Please use `tqdm.gui.tqdm(...)` instead of'
|
||
+ ' `tqdm(..., gui=True)`') \
|
||
+ not in our_file.getvalue():
|
||
+ raise e
|
||
+ else:
|
||
+ raise DeprecationError('Should not allow manual gui=True with'
|
||
+ 'out overriding __iter__() and update()')
|
||
+ finally:
|
||
+ t._instances.clear()
|
||
+ # t.close()
|
||
+ # len(tqdm._instances) += 1 # undo the close() decrement
|
||
|
||
-@with_setup(pretest, posttest)
|
||
-def test_disabled_refresh():
|
||
- """Test refresh bar display"""
|
||
- with closing(StringIO()) as our_file:
|
||
- with tqdm(total=10, file=our_file, desc='pos0 bar', disable=True,
|
||
- bar_format='{l_bar}', mininterval=999, miniters=999) as t:
|
||
- t.update()
|
||
- t.refresh()
|
||
-
|
||
- assert our_file.getvalue() == ''
|
||
-
|
||
-
|
||
-@with_setup(pretest, posttest)
|
||
-def test_write():
|
||
- """Test write messages"""
|
||
- s = "Hello world"
|
||
- with closing(StringIO()) as our_file:
|
||
- # Change format to keep only left part w/o bar and it/s rate
|
||
- t1 = tqdm(total=10, file=our_file, desc='pos0 bar',
|
||
- bar_format='{l_bar}', mininterval=0, miniters=1)
|
||
- t2 = trange(10, file=our_file, desc='pos1 bar', bar_format='{l_bar}',
|
||
- mininterval=0, miniters=1)
|
||
- t3 = tqdm(total=10, file=our_file, desc='pos2 bar',
|
||
- bar_format='{l_bar}', mininterval=0, miniters=1)
|
||
- t1.update()
|
||
- t2.update()
|
||
- t3.update()
|
||
- before = our_file.getvalue()
|
||
-
|
||
- # Write msg and see if bars are correctly redrawn below the msg
|
||
- t1.write(s, file=our_file) # call as an instance method
|
||
- tqdm.write(s, file=our_file) # call as a class method
|
||
- after = our_file.getvalue()
|
||
+ with tqdm(total=1, gui=False, file=our_file) as t:
|
||
+ assert hasattr(t, "sp")
|
||
|
||
- t1.close()
|
||
- t2.close()
|
||
- t3.close()
|
||
+ def test_cmp(self):
|
||
+ """Test comparison functions"""
|
||
+ with closing(StringIO()) as our_file:
|
||
+ t0 = tqdm(total=10, file=our_file)
|
||
+ t1 = tqdm(total=10, file=our_file)
|
||
+ t2 = tqdm(total=10, file=our_file)
|
||
+
|
||
+ assert t0 < t1
|
||
+ assert t2 >= t0
|
||
+ assert t0 <= t2
|
||
+
|
||
+ t3 = tqdm(total=10, file=our_file)
|
||
+ t4 = tqdm(total=10, file=our_file)
|
||
+ t5 = tqdm(total=10, file=our_file)
|
||
+ t5.close()
|
||
+ t6 = tqdm(total=10, file=our_file)
|
||
+
|
||
+ assert t3 != t4
|
||
+ assert t3 > t2
|
||
+ assert t5 == t6
|
||
+ t6.close()
|
||
+ t4.close()
|
||
+ t3.close()
|
||
+ t2.close()
|
||
+ t1.close()
|
||
+ t0.close()
|
||
|
||
- before_squashed = squash_ctrlchars(before)
|
||
- after_squashed = squash_ctrlchars(after)
|
||
+ def test_repr(self):
|
||
+ """Test representation"""
|
||
+ with closing(StringIO()) as our_file:
|
||
+ with tqdm(total=10, ascii=True, file=our_file) as t:
|
||
+ assert str(t) == ' 0%| | 0/10 [00:00<?, ?it/s]'
|
||
|
||
- assert after_squashed == [s, s] + before_squashed
|
||
+ def test_clear(self):
|
||
+ """Test clearing bar display"""
|
||
+ with closing(StringIO()) as our_file:
|
||
+ t1 = tqdm(total=10, file=our_file, desc='pos0 bar',
|
||
+ bar_format='{l_bar}')
|
||
+ t2 = trange(10, file=our_file, desc='pos1 bar',
|
||
+ bar_format='{l_bar}')
|
||
+ before = squash_ctrlchars(our_file.getvalue())
|
||
+ t2.clear()
|
||
+ t1.clear()
|
||
+ after = squash_ctrlchars(our_file.getvalue())
|
||
+ t1.close()
|
||
+ t2.close()
|
||
+ assert before == ['pos0 bar: 0%|', 'pos1 bar: 0%|']
|
||
+ assert after == ['', '']
|
||
|
||
- # Check that no bar clearing if different file
|
||
- with closing(StringIO()) as our_file_bar:
|
||
- with closing(StringIO()) as our_file_write:
|
||
- t1 = tqdm(total=10, file=our_file_bar, desc='pos0 bar',
|
||
- bar_format='{l_bar}', mininterval=0, miniters=1)
|
||
+ def test_clear_disabled(self):
|
||
+ """Test clearing bar display"""
|
||
+ with closing(StringIO()) as our_file:
|
||
+ with tqdm(total=10, file=our_file, desc='pos0 bar', disable=True,
|
||
+ bar_format='{l_bar}') as t:
|
||
+ t.clear()
|
||
+ assert our_file.getvalue() == ''
|
||
|
||
+ def test_refresh(self):
|
||
+ """Test refresh bar display"""
|
||
+ with closing(StringIO()) as our_file:
|
||
+ t1 = tqdm(total=10, file=our_file, desc='pos0 bar',
|
||
+ bar_format='{l_bar}', mininterval=999, miniters=999)
|
||
+ t2 = tqdm(total=10, file=our_file, desc='pos1 bar',
|
||
+ bar_format='{l_bar}', mininterval=999, miniters=999)
|
||
t1.update()
|
||
- before_bar = our_file_bar.getvalue()
|
||
+ t2.update()
|
||
+ before = squash_ctrlchars(our_file.getvalue())
|
||
+ t1.refresh()
|
||
+ t2.refresh()
|
||
+ after = squash_ctrlchars(our_file.getvalue())
|
||
+ t1.close()
|
||
+ t2.close()
|
||
|
||
- tqdm.write(s, file=our_file_write)
|
||
+ # Check that refreshing indeed forced the display to
|
||
+ # use realtime state
|
||
+ assert before == [u'pos0 bar: 0%|', u'pos1 bar: 0%|']
|
||
+ assert after == [u'pos0 bar: 10%|', u'pos1 bar: 10%|']
|
||
|
||
- after_bar = our_file_bar.getvalue()
|
||
- t1.close()
|
||
+ def test_disabled_refresh(self):
|
||
+ """Test refresh bar display"""
|
||
+ with closing(StringIO()) as our_file:
|
||
+ with tqdm(total=10, file=our_file, desc='pos0 bar', disable=True,
|
||
+ bar_format='{l_bar}', mininterval=999, miniters=999) as t:
|
||
+ t.update()
|
||
+ t.refresh()
|
||
|
||
- assert before_bar == after_bar
|
||
-
|
||
- # Test stdout/stderr anti-mixup strategy
|
||
- # Backup stdout/stderr
|
||
- stde = sys.stderr
|
||
- stdo = sys.stdout
|
||
- # Mock stdout/stderr
|
||
- with closing(StringIO()) as our_stderr:
|
||
- with closing(StringIO()) as our_stdout:
|
||
- sys.stderr = our_stderr
|
||
- sys.stdout = our_stdout
|
||
- t1 = tqdm(total=10, file=sys.stderr, desc='pos0 bar',
|
||
- bar_format='{l_bar}', mininterval=0, miniters=1)
|
||
+ assert our_file.getvalue() == ''
|
||
|
||
+ def test_write(self):
+ """Test write messages"""
+ s = "Hello world"
+ with closing(StringIO()) as our_file:
+ # Change format to keep only left part w/o bar and it/s rate
+ t1 = tqdm(total=10, file=our_file, desc='pos0 bar',
+ bar_format='{l_bar}', mininterval=0, miniters=1)
+ t2 = trange(10, file=our_file, desc='pos1 bar',
+ bar_format='{l_bar}', mininterval=0, miniters=1)
+ t3 = tqdm(total=10, file=our_file, desc='pos2 bar',
+ bar_format='{l_bar}', mininterval=0, miniters=1)
t1.update()
- before_err = sys.stderr.getvalue()
- before_out = sys.stdout.getvalue()
+ t2.update()
+ t3.update()
+ before = our_file.getvalue()

- tqdm.write(s, file=sys.stdout)
- after_err = sys.stderr.getvalue()
- after_out = sys.stdout.getvalue()
+ # Write msg and see if bars are correctly redrawn below the msg
+ t1.write(s, file=our_file) # call as an instance method
+ tqdm.write(s, file=our_file) # call as a class method
+ after = our_file.getvalue()

t1.close()
+ t2.close()
+ t3.close()
+
+ before_squashed = squash_ctrlchars(before)
+ after_squashed = squash_ctrlchars(after)
+
+ assert after_squashed == [s, s] + before_squashed
+
+ # Check that no bar clearing if different file
+ with closing(StringIO()) as our_file_bar:
+ with closing(StringIO()) as our_file_write:
+ t1 = tqdm(total=10, file=our_file_bar, desc='pos0 bar',
+ bar_format='{l_bar}', mininterval=0, miniters=1)
+
+ t1.update()
+ before_bar = our_file_bar.getvalue()
+
+ tqdm.write(s, file=our_file_write)
+
+ after_bar = our_file_bar.getvalue()
+ t1.close()
+
+ assert before_bar == after_bar
+
+ # Test stdout/stderr anti-mixup strategy
+ # Backup stdout/stderr
+ stde = sys.stderr
+ stdo = sys.stdout
+ # Mock stdout/stderr
+ with closing(StringIO()) as our_stderr:
+ with closing(StringIO()) as our_stdout:
+ sys.stderr = our_stderr
+ sys.stdout = our_stdout
+ t1 = tqdm(total=10, file=sys.stderr, desc='pos0 bar',
+ bar_format='{l_bar}', mininterval=0, miniters=1)
+
+ t1.update()
+ before_err = sys.stderr.getvalue()
+ before_out = sys.stdout.getvalue()
+
+ tqdm.write(s, file=sys.stdout)
+ after_err = sys.stderr.getvalue()
+ after_out = sys.stdout.getvalue()
+
+ t1.close()
+
+ assert before_err == '\rpos0 bar: 0%|\rpos0 bar: 10%|'
+ assert before_out == ''
+ after_err_res = [m[0] for m in RE_pos.findall(after_err)]
+ exres = ['\rpos0 bar: 0%|',
+ '\rpos0 bar: 10%|',
+ '\r ',
+ '\r\rpos0 bar: 10%|']
+ pos_line_diff(after_err_res, exres)
+ assert after_out == s + '\n'
+ # Restore stdout and stderr
+ sys.stderr = stde
+ sys.stdout = stdo
+
+ def test_len(self):
+ """Test advance len (numpy array shape)"""
+ try:
+ import numpy as np
+ except ImportError:
+ raise unittest.SkipTest
+ with closing(StringIO()) as f:
+ with tqdm(np.zeros((3, 4)), file=f) as t:
+ assert len(t) == 3
+
+ def test_autodisable_disable(self):
+ """Test autodisable will disable on non-TTY"""
+ with closing(StringIO()) as our_file:
+ with tqdm(total=10, disable=None, file=our_file) as t:
+ t.update(3)
+ assert our_file.getvalue() == ''

- assert before_err == '\rpos0 bar: 0%|\rpos0 bar: 10%|'
- assert before_out == ''
- after_err_res = [m[0] for m in RE_pos.findall(after_err)]
- exres = ['\rpos0 bar: 0%|',
- '\rpos0 bar: 10%|',
- '\r ',
- '\r\rpos0 bar: 10%|']
- pos_line_diff(after_err_res, exres)
- assert after_out == s + '\n'
- # Restore stdout and stderr
- sys.stderr = stde
- sys.stdout = stdo
-
-
-@with_setup(pretest, posttest)
-def test_len():
- """Test advance len (numpy array shape)"""
- try:
- import numpy as np
- except ImportError:
- raise SkipTest
- with closing(StringIO()) as f:
- with tqdm(np.zeros((3, 4)), file=f) as t:
- assert len(t) == 3
+ def test_autodisable_enable(self):
+ """Test autodisable will not disable on TTY"""
+ with closing(StringIO()) as our_file:
+ setattr(our_file, "isatty", lambda: True)
+ with tqdm(total=10, disable=None, file=our_file) as t:
+ t.update()
+ assert our_file.getvalue() != ''

+ def test_deprecation_exception(self):
+ def test_TqdmDeprecationWarning(self):
+ with closing(StringIO()) as our_file:
+ raise (TqdmDeprecationWarning('Test!', fp_write=getattr(
+ our_file, 'write', sys.stderr.write)))

-@with_setup(pretest, posttest)
-def test_autodisable_disable():
- """Test autodisable will disable on non-TTY"""
- with closing(StringIO()) as our_file:
- with tqdm(total=10, disable=None, file=our_file) as t:
- t.update(3)
- assert our_file.getvalue() == ''
+ def test_TqdmDeprecationWarning_nofpwrite(self):
+ raise TqdmDeprecationWarning('Test!', fp_write=None)

+ pytest.raises(TqdmDeprecationWarning, test_TqdmDeprecationWarning, self)
+ pytest.raises(Exception, test_TqdmDeprecationWarning_nofpwrite, self)

-@with_setup(pretest, posttest)
-def test_autodisable_enable():
- """Test autodisable will not disable on TTY"""
- with closing(StringIO()) as our_file:
- setattr(our_file, "isatty", lambda: True)
- with tqdm(total=10, disable=None, file=our_file) as t:
- t.update()
- assert our_file.getvalue() != ''
+ def test_postfix(self):
+ """Test postfix"""
+ postfix = {'float': 0.321034, 'gen': 543, 'str': 'h', 'lst': [2]}
+ postfix_order = (('w', 'w'), ('a', 0)) # no need for OrderedDict
+ expected = ['float=0.321', 'gen=543', 'lst=[2]', 'str=h']
+ expected_order = ['w=w', 'a=0', 'float=0.321', 'gen=543', 'lst=[2]',
+ 'str=h']

+ # Test postfix set at init
+ with closing(StringIO()) as our_file:
+ with tqdm(total=10, file=our_file, desc='pos0 bar',
+ bar_format='{r_bar}', postfix=postfix) as t1:
+ t1.refresh()
+ out = our_file.getvalue()

-@with_setup(pretest, posttest)
-def test_deprecation_exception():
- def test_TqdmDeprecationWarning():
+ # Test postfix set after init
with closing(StringIO()) as our_file:
- raise (TqdmDeprecationWarning('Test!', fp_write=getattr(
- our_file, 'write', sys.stderr.write)))
+ with trange(10, file=our_file, desc='pos1 bar',
+ bar_format='{r_bar}', postfix=None) as t2:
+ t2.set_postfix(**postfix)
+ t2.refresh()
+ out2 = our_file.getvalue()
+
+ # Order of items in dict may change, so need a loop to check per item
+ for res in expected:
+ assert res in out
+ assert res in out2
+
+ # Test postfix (with ordered dict and no refresh) set after init
+ with closing(StringIO()) as our_file:
+ with trange(10, file=our_file, desc='pos2 bar',
+ bar_format='{r_bar}', postfix=None) as t3:
+ t3.set_postfix(postfix_order, False, **postfix)
+ t3.refresh() # explicit external refresh
+ out3 = our_file.getvalue()

- def test_TqdmDeprecationWarning_nofpwrite():
- raise TqdmDeprecationWarning('Test!', fp_write=None)
+ out3 = out3[1:-1].split(', ')[3:]
+ assert out3 == expected_order

- assert_raises(TqdmDeprecationWarning, test_TqdmDeprecationWarning)
- assert_raises(Exception, test_TqdmDeprecationWarning_nofpwrite)
+ # Test postfix (with ordered dict and refresh) set after init
+ with closing(StringIO()) as our_file:
+ with trange(10, file=our_file, desc='pos2 bar',
+ bar_format='{r_bar}', postfix=None) as t4:
+ t4.set_postfix(postfix_order, True, **postfix)
+ t4.refresh() # double refresh
+ out4 = our_file.getvalue()

+ assert out4.count('\r') > out3.count('\r')
+ assert out4.count(", ".join(expected_order)) == 2

-@with_setup(pretest, posttest)
-def test_postfix():
- """Test postfix"""
- postfix = {'float': 0.321034, 'gen': 543, 'str': 'h', 'lst': [2]}
- postfix_order = (('w', 'w'), ('a', 0)) # no need for OrderedDict
- expected = ['float=0.321', 'gen=543', 'lst=[2]', 'str=h']
- expected_order = ['w=w', 'a=0', 'float=0.321', 'gen=543', 'lst=[2]',
- 'str=h']
+ # Test setting postfix string directly
+ with closing(StringIO()) as our_file:
+ with trange(10, file=our_file, desc='pos2 bar',
+ bar_format='{r_bar}', postfix=None) as t5:
+ t5.set_postfix_str("Hello", False)
+ t5.set_postfix_str("World")
+ out5 = our_file.getvalue()
+
+ assert "Hello" not in out5
+ out5 = out5[1:-1].split(', ')[3:]
+ assert out5 == ["World"]
+
+ def test_file_redirection(self):
+ """Test redirection of output"""
+ with closing(StringIO()) as our_file:
+ # Redirect stdout to tqdm.write()
+ with std_out_err_redirect_tqdm(tqdm_file=our_file):
+ for _ in trange(3):
+ print("Such fun")
+ res = our_file.getvalue()
+ assert res.count("Such fun\n") == 3
+ assert "0/3" in res
+ assert "3/3" in res

- # Test postfix set at init
- with closing(StringIO()) as our_file:
- with tqdm(total=10, file=our_file, desc='pos0 bar',
- bar_format='{r_bar}', postfix=postfix) as t1:
- t1.refresh()
- out = our_file.getvalue()
+ def test_external_write(self):
+ """Test external write mode"""
+ with closing(StringIO()) as our_file:
+ # Redirect stdout to tqdm.write()
+ for _ in trange(3, file=our_file):
+ del tqdm._lock # classmethod should be able to recreate lock
+ with tqdm.external_write_mode(file=our_file):
+ our_file.write("Such fun\n")
+ res = our_file.getvalue()
+ assert res.count("Such fun\n") == 3
+ assert "0/3" in res
+ assert "3/3" in res

- # Test postfix set after init
- with closing(StringIO()) as our_file:
- with trange(10, file=our_file, desc='pos1 bar', bar_format='{r_bar}',
- postfix=None) as t2:
- t2.set_postfix(**postfix)
- t2.refresh()
- out2 = our_file.getvalue()
-
- # Order of items in dict may change, so need a loop to check per item
- for res in expected:
- assert res in out
- assert res in out2
-
- # Test postfix (with ordered dict and no refresh) set after init
- with closing(StringIO()) as our_file:
- with trange(10, file=our_file, desc='pos2 bar', bar_format='{r_bar}',
- postfix=None) as t3:
- t3.set_postfix(postfix_order, False, **postfix)
- t3.refresh() # explicit external refresh
- out3 = our_file.getvalue()
-
- out3 = out3[1:-1].split(', ')[3:]
- assert out3 == expected_order
-
- # Test postfix (with ordered dict and refresh) set after init
- with closing(StringIO()) as our_file:
- with trange(10, file=our_file, desc='pos2 bar',
- bar_format='{r_bar}', postfix=None) as t4:
- t4.set_postfix(postfix_order, True, **postfix)
- t4.refresh() # double refresh
- out4 = our_file.getvalue()
-
- assert out4.count('\r') > out3.count('\r')
- assert out4.count(", ".join(expected_order)) == 2
-
- # Test setting postfix string directly
- with closing(StringIO()) as our_file:
- with trange(10, file=our_file, desc='pos2 bar', bar_format='{r_bar}',
- postfix=None) as t5:
- t5.set_postfix_str("Hello", False)
- t5.set_postfix_str("World")
- out5 = our_file.getvalue()
-
- assert "Hello" not in out5
- out5 = out5[1:-1].split(', ')[3:]
- assert out5 == ["World"]
-
-
-def test_postfix_direct():
- """Test directly assigning non-str objects to postfix"""
- with closing(StringIO()) as our_file:
- with tqdm(total=10, file=our_file, miniters=1, mininterval=0,
- bar_format="{postfix[0][name]} {postfix[1]:>5.2f}",
- postfix=[dict(name="foo"), 42]) as t:
- for i in range(10):
- if i % 2:
- t.postfix[0]["name"] = "abcdefghij"[i]
+ def test_unit_scale(self):
+ """Test numeric `unit_scale`"""
+ with closing(StringIO()) as our_file:
+ for _ in tqdm(_range(9), unit_scale=9, file=our_file,
+ miniters=1, mininterval=0):
+ pass
+ out = our_file.getvalue()
+ assert '81/81' in out
+
+ @patch_lock(thread=False)
+ def test_threading(self):
+ """Test multiprocess/thread-realted features"""
+ pass # TODO: test interleaved output #445
+
+ def test_bool(self):
+ """Test boolean cast"""
+ def internal(our_file, disable):
+ kwargs = dict(file=our_file, disable=disable)
+ with trange(10, **kwargs) as t:
+ assert t
+ with trange(0, **kwargs) as t:
+ assert not t
+ with tqdm(total=10, **kwargs) as t:
+ assert bool(t)
+ with tqdm(total=0, **kwargs) as t:
+ assert not bool(t)
+ with tqdm([], **kwargs) as t:
+ assert not t
+ with tqdm([0], **kwargs) as t:
+ assert t
+ with tqdm((x for x in []), **kwargs) as t:
+ assert t
+ with tqdm((x for x in [1, 2, 3]), **kwargs) as t:
+ assert t
+ with tqdm(**kwargs) as t:
+ try:
+ print(bool(t))
+ except TypeError:
+ pass
else:
- t.postfix[1] = i
- t.update()
- res = our_file.getvalue()
- assert "f 6.00" in res
- assert "h 6.00" in res
- assert "h 8.00" in res
- assert "j 8.00" in res
+ raise TypeError("Expected bool(tqdm()) to fail")

+ # test with and without disable
+ with closing(StringIO()) as our_file:
+ internal(our_file, False)
+ internal(our_file, True)

-@contextmanager
-def std_out_err_redirect_tqdm(tqdm_file=sys.stderr):
- orig_out_err = sys.stdout, sys.stderr
- try:
- sys.stdout = sys.stderr = DummyTqdmFile(tqdm_file)
- yield orig_out_err[0]
- # Relay exceptions
- except Exception as exc:
- raise exc
- # Always restore sys.stdout/err if necessary
- finally:
- sys.stdout, sys.stderr = orig_out_err
+ def backendCheck(self, module):
+ """Test tqdm-like module fallback"""
+ tn = module.tqdm
+ tr = module.trange

+ with closing(StringIO()) as our_file:
+ with tn(total=10, file=our_file) as t:
+ assert len(t) == 10
+ with tr(1337) as t:
+ assert len(t) == 1337

-@with_setup(pretest, posttest)
-def test_file_redirection():
- """Test redirection of output"""
- with closing(StringIO()) as our_file:
- # Redirect stdout to tqdm.write()
- with std_out_err_redirect_tqdm(tqdm_file=our_file):
- for _ in trange(3):
- print("Such fun")
- res = our_file.getvalue()
- assert res.count("Such fun\n") == 3
- assert "0/3" in res
- assert "3/3" in res
-
-
-@with_setup(pretest, posttest)
-def test_external_write():
- """Test external write mode"""
- with closing(StringIO()) as our_file:
- # Redirect stdout to tqdm.write()
- for _ in trange(3, file=our_file):
- del tqdm._lock # classmethod should be able to recreate lock
- with tqdm.external_write_mode(file=our_file):
- our_file.write("Such fun\n")
- res = our_file.getvalue()
- assert res.count("Such fun\n") == 3
- assert "0/3" in res
- assert "3/3" in res
-
-
-@with_setup(pretest, posttest)
-def test_unit_scale():
- """Test numeric `unit_scale`"""
- with closing(StringIO()) as our_file:
- for _ in tqdm(_range(9), unit_scale=9, file=our_file,
- miniters=1, mininterval=0):
- pass
- out = our_file.getvalue()
- assert '81/81' in out
+ def test_auto(self):
+ """Test auto fallback"""
+ from tqdm import autonotebook, auto
+ self.backendCheck(autonotebook)
+ self.backendCheck(auto)

+ def test_wrapattr(self):
+ """Test wrapping file-like objects"""
+ data = "a twenty-char string"

-def patch_lock(thread=True):
- """decorator replacing tqdm's lock with vanilla threading/multiprocessing"""
- try:
- if thread:
- from threading import RLock
- else:
- from multiprocessing import RLock
- lock = RLock()
- except (ImportError, OSError):
- raise SkipTest
+ with closing(StringIO()) as our_file:
+ with closing(StringIO()) as writer:
+ with tqdm.wrapattr(
+ writer, "write", file=our_file, bytes=True) as wrap:
+ wrap.write(data)
+ res = writer.getvalue()
+ assert data == res
+ res = our_file.getvalue()
+ assert '%.1fB [' % len(data) in res

- def outer(func):
- """actual decorator"""
- @wraps(func)
- def inner(*args, **kwargs):
- """set & reset lock even if exceptions occur"""
- default_lock = tqdm.get_lock()
- try:
- tqdm.set_lock(lock)
- return func(*args, **kwargs)
- finally:
- tqdm.set_lock(default_lock)
- return inner
- return outer
+ with closing(StringIO()) as our_file:
+ with closing(StringIO()) as writer:
+ with tqdm.wrapattr(
+ writer, "write", file=our_file, bytes=False) as wrap:
+ wrap.write(data)
+ res = our_file.getvalue()
+ assert '%dit [' % len(data) in res

+ def test_float_progress(self):
+ """Test float totals"""
+ with closing(StringIO()) as our_file:
+ with trange(10, total=9.6, file=our_file) as t:
+ with catch_warnings(record=True) as w:
+ simplefilter("always", category=TqdmWarning)
+ for i in t:
+ if i < 9:
+ assert not w
+ assert w
+ assert "clamping frac" in str(w[-1].message)
+
+ def test_screen_shape(self):
+ """Test screen shape"""
+ # ncols
+ with closing(StringIO()) as our_file:
+ with trange(10, file=our_file, ncols=50) as t:
+ list(t)
+
+ res = our_file.getvalue()
+ assert all(len(i) == 50 for i in get_bar(res))
+
+ # no second/third bar, leave=False
+ with closing(StringIO()) as our_file:
+ kwargs = dict(file=our_file, ncols=50, nrows=2, miniters=0,
+ mininterval=0, leave=False)
+ with trange(10, desc="one", **kwargs) as t1:
+ with trange(10, desc="two", **kwargs) as t2:
+ with trange(10, desc="three", **kwargs) as t3:
+ list(t3)
+ list(t2)
+ list(t1)

-@with_setup(pretest, posttest)
-@patch_lock(thread=False)
-def test_threading():
- """Test multiprocess/thread-realted features"""
- pass # TODO: test interleaved output #445
-
-
-@with_setup(pretest, posttest)
-def test_bool():
- """Test boolean cast"""
- def internal(our_file, disable):
- kwargs = dict(file=our_file, disable=disable)
- with trange(10, **kwargs) as t:
- assert t
- with trange(0, **kwargs) as t:
- assert not t
- with tqdm(total=10, **kwargs) as t:
- assert bool(t)
- with tqdm(total=0, **kwargs) as t:
- assert not bool(t)
- with tqdm([], **kwargs) as t:
- assert not t
- with tqdm([0], **kwargs) as t:
- assert t
- with tqdm((x for x in []), **kwargs) as t:
- assert t
- with tqdm((x for x in [1, 2, 3]), **kwargs) as t:
- assert t
- with tqdm(**kwargs) as t:
- try:
- print(bool(t))
- except TypeError:
- pass
- else:
- raise TypeError("Expected bool(tqdm()) to fail")
-
- # test with and without disable
- with closing(StringIO()) as our_file:
- internal(our_file, False)
- internal(our_file, True)
-
-
-def backendCheck(module):
- """Test tqdm-like module fallback"""
- tn = module.tqdm
- tr = module.trange
-
- with closing(StringIO()) as our_file:
- with tn(total=10, file=our_file) as t:
- assert len(t) == 10
- with tr(1337) as t:
- assert len(t) == 1337
-
-
-@with_setup(pretest, posttest)
-def test_auto():
- """Test auto fallback"""
- from tqdm import autonotebook, auto
- backendCheck(autonotebook)
- backendCheck(auto)
-
-
-@with_setup(pretest, posttest)
-def test_wrapattr():
- """Test wrapping file-like objects"""
- data = "a twenty-char string"
-
- with closing(StringIO()) as our_file:
- with closing(StringIO()) as writer:
- with tqdm.wrapattr(
- writer, "write", file=our_file, bytes=True) as wrap:
- wrap.write(data)
- res = writer.getvalue()
- assert data == res
- res = our_file.getvalue()
- assert '%.1fB [' % len(data) in res
-
- with closing(StringIO()) as our_file:
- with closing(StringIO()) as writer:
- with tqdm.wrapattr(
- writer, "write", file=our_file, bytes=False) as wrap:
- wrap.write(data)
- res = our_file.getvalue()
- assert '%dit [' % len(data) in res
-
-
-@with_setup(pretest, posttest)
-def test_float_progress():
- """Test float totals"""
- with closing(StringIO()) as our_file:
- with trange(10, total=9.6, file=our_file) as t:
- with catch_warnings(record=True) as w:
- simplefilter("always", category=TqdmWarning)
- for i in t:
- if i < 9:
- assert not w
- assert w
- assert "clamping frac" in str(w[-1].message)
-
-
-@with_setup(pretest, posttest)
-def test_screen_shape():
- """Test screen shape"""
- # ncols
- with closing(StringIO()) as our_file:
- with trange(10, file=our_file, ncols=50) as t:
- list(t)
-
- res = our_file.getvalue()
- assert all(len(i) == 50 for i in get_bar(res))
-
- # no second/third bar, leave=False
- with closing(StringIO()) as our_file:
- kwargs = dict(file=our_file, ncols=50, nrows=2, miniters=0,
- mininterval=0, leave=False)
- with trange(10, desc="one", **kwargs) as t1:
- with trange(10, desc="two", **kwargs) as t2:
- with trange(10, desc="three", **kwargs) as t3:
- list(t3)
- list(t2)
- list(t1)
-
- res = our_file.getvalue()
- assert "one" in res
- assert "two" not in res
- assert "three" not in res
- assert "\n\n" not in res
- assert "more hidden" in res
- # double-check ncols
- assert all(len(i) == 50 for i in get_bar(res)
- if i.strip() and "more hidden" not in i)
-
- # all bars, leave=True
- with closing(StringIO()) as our_file:
- kwargs = dict(file=our_file, ncols=50, nrows=2, miniters=0,
- mininterval=0)
- with trange(10, desc="one", **kwargs) as t1:
- with trange(10, desc="two", **kwargs) as t2:
- assert "two" not in our_file.getvalue()
- with trange(10, desc="three", **kwargs) as t3:
- assert "three" not in our_file.getvalue()
- list(t3)
- list(t2)
- list(t1)
-
- res = our_file.getvalue()
- assert "one" in res
- assert "two" in res
- assert "three" in res
- assert "\n\n" not in res
- assert "more hidden" in res
- # double-check ncols
- assert all(len(i) == 50 for i in get_bar(res)
- if i.strip() and "more hidden" not in i)
-
- # second bar becomes first, leave=False
- with closing(StringIO()) as our_file:
- kwargs = dict(file=our_file, ncols=50, nrows=2, miniters=0,
- mininterval=0, leave=False)
- t1 = tqdm(total=10, desc="one", **kwargs)
- with tqdm(total=10, desc="two", **kwargs) as t2:
- t1.update()
- t2.update()
- t1.close()
res = our_file.getvalue()
assert "one" in res
assert "two" not in res
+ assert "three" not in res
+ assert "\n\n" not in res
assert "more hidden" in res
- t2.update()
+ # double-check ncols
+ assert all(len(i) == 50 for i in get_bar(res)
+ if i.strip() and "more hidden" not in i)

- res = our_file.getvalue()
- assert "two" in res
+ # all bars, leave=True
+ with closing(StringIO()) as our_file:
+ kwargs = dict(file=our_file, ncols=50, nrows=2, miniters=0,
+ mininterval=0)
+ with trange(10, desc="one", **kwargs) as t1:
+ with trange(10, desc="two", **kwargs) as t2:
+ assert "two" not in our_file.getvalue()
+ with trange(10, desc="three", **kwargs) as t3:
+ assert "three" not in our_file.getvalue()
+ list(t3)
+ list(t2)
+ list(t1)

+ res = our_file.getvalue()
+ assert "one" in res
+ assert "two" in res
+ assert "three" in res
+ assert "\n\n" not in res
+ assert "more hidden" in res
+ # double-check ncols
+ assert all(len(i) == 50 for i in get_bar(res)
+ if i.strip() and "more hidden" not in i)

-@with_setup(pretest, posttest)
-def test_initial():
- """Test `initial`"""
- with closing(StringIO()) as our_file:
- for _ in tqdm(_range(9), initial=10, total=19, file=our_file,
- miniters=1, mininterval=0):
- pass
- out = our_file.getvalue()
- assert '10/19' in out
- assert '19/19' in out
+ # second bar becomes first, leave=False
+ with closing(StringIO()) as our_file:
+ kwargs = dict(file=our_file, ncols=50, nrows=2, miniters=0,
+ mininterval=0, leave=False)
+ t1 = tqdm(total=10, desc="one", **kwargs)
+ with tqdm(total=10, desc="two", **kwargs) as t2:
+ t1.update()
+ t2.update()
+ t1.close()
+ res = our_file.getvalue()
+ assert "one" in res
+ assert "two" not in res
+ assert "more hidden" in res
+ t2.update()

+ res = our_file.getvalue()
+ assert "two" in res

-@with_setup(pretest, posttest)
-def test_colour():
- """Test `colour`"""
- with closing(StringIO()) as our_file:
- for _ in tqdm(_range(9), file=our_file, colour="#beefed"):
- pass
- out = our_file.getvalue()
- assert '\x1b[38;2;%d;%d;%dm' % (0xbe, 0xef, 0xed) in out
+ def test_initial(self):
+ """Test `initial`"""
+ with closing(StringIO()) as our_file:
+ for _ in tqdm(_range(9), initial=10, total=19, file=our_file,
+ miniters=1, mininterval=0):
+ pass
+ out = our_file.getvalue()
+ assert '10/19' in out
+ assert '19/19' in out

- with catch_warnings(record=True) as w:
- simplefilter("always", category=TqdmWarning)
- with tqdm(total=1, file=our_file, colour="charm") as t:
- assert w
- t.update()
- assert "Unknown colour" in str(w[-1].message)
+ def test_colour(self):
+ """Test `colour`"""
+ with closing(StringIO()) as our_file:
+ for _ in tqdm(_range(9), file=our_file, colour="#beefed"):
+ pass
+ out = our_file.getvalue()
+ assert '\x1b[38;2;%d;%d;%dm' % (0xbe, 0xef, 0xed) in out

- with closing(StringIO()) as our_file2:
- for _ in tqdm(_range(9), file=our_file2, colour="blue"):
- pass
- out = our_file2.getvalue()
- assert '\x1b[34m' in out
+ with catch_warnings(record=True) as w:
+ simplefilter("always", category=TqdmWarning)
+ with tqdm(total=1, file=our_file, colour="charm") as t:
+ assert w
+ t.update()
+ assert "Unknown colour" in str(w[-1].message)

+ with closing(StringIO()) as our_file2:
+ for _ in tqdm(_range(9), file=our_file2, colour="blue"):
+ pass
+ out = our_file2.getvalue()
+ assert '\x1b[34m' in out

-@with_setup(pretest, posttest)
-def test_closed():
- """Test writing to closed file"""
- with closing(StringIO()) as our_file:
- for i in trange(9, file=our_file, miniters=1, mininterval=0):
- if i == 5:
- our_file.close()
+ def test_closed(self):
+ """Test writing to closed file"""
+ with closing(StringIO()) as our_file:
+ for i in trange(9, file=our_file, miniters=1, mininterval=0):
+ if i == 5:
+ our_file.close()
diff --git a/tqdm/tests/tests_version.py b/tqdm/tests/tests_version.py
index 226b9980..11f57220 100644
--- a/tqdm/tests/tests_version.py
+++ b/tqdm/tests/tests_version.py
@@ -1,12 +1,13 @@
import re
+import unittest

-
-def test_version():
- """Test version string"""
- from tqdm import __version__
- version_parts = re.split('[.-]', __version__)
- assert 3 <= len(version_parts) # must have at least Major.minor.patch
- try:
- map(int, version_parts[:3])
- except ValueError:
- raise TypeError('Version Major.minor.patch must be 3 integers')
+class TestTqdmVersion(unittest.TestCase):
+ def test_version(self):
+ """Test version string"""
+ from tqdm import __version__
+ version_parts = re.split('[.-]', __version__)
+ assert 3 <= len(version_parts) # must have at least Major.minor.patch
+ try:
+ map(int, version_parts[:3])
+ except ValueError:
+ raise TypeError('Version Major.minor.patch must be 3 integers')