forked from pool/python-hdf5storage
Accepting request 949874 from devel:languages:python:numeric
- Update 0.1.18 (performance improving release):
  - Pull Request #111 from Daniel Hrisca. Many repeated calls to the
    __getitem__ methods of objects were turned into single calls.
  - Further reducionts in __getitem__ calls in the spirit of PR #111.
- Update to 0.1.17 (bugfix and deprecation workaround release):
  - Issue #109. Fixed the fix Issue #102 for 32-bit platforms
    (previous fix was segfaulting).
  - Moved to using pkg_resources.parse_version from setuptools with
    distutils.version classes as a fallback instead of just the later
    to prepare for the removal of distutils (PEP 632) and prevent
    warnings on Python versions where it is marked as deprecated.
  - Issue #110. Changed all uses of the tostring method on numpy types
    to using tobytes if available, with tostring as the fallback for
    old versions of numpy where it is not.
- Add nose-to-pytest.patch which allows use of pytest instead of nose
  (not based on the upstream solution).

OBS-URL: https://build.opensuse.org/request/show/949874
OBS-URL: https://build.opensuse.org/package/show/openSUSE:Factory/python-hdf5storage?expand=0&rev=3
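The two 0.1.17 items above (parse_version with a distutils fallback, and tobytes with a tostring fallback) follow the same version-compatibility pattern. As a rough illustration only, not the upstream hdf5storage code (the helper name array_to_bytes is made up), the pattern looks like this:

import numpy as np

# Prefer tobytes(), which replaces the deprecated tostring() in newer numpy;
# fall back to tostring() on old numpy versions that lack tobytes().
def array_to_bytes(arr):
    if hasattr(arr, 'tobytes'):
        return arr.tobytes()
    return arr.tostring()

# Prefer pkg_resources.parse_version from setuptools; keep the distutils
# classes only as a fallback, since distutils is deprecated (PEP 632).
try:
    from pkg_resources import parse_version
except ImportError:
    from distutils.version import LooseVersion as parse_version

print(array_to_bytes(np.arange(3, dtype='uint8')))
print(parse_version('0.1.18') > parse_version('0.1.17'))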
commit e3ad6bb52c
@@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5047e7d8750050e2b1d60fff88ae2cdcbc08c1450ee1bf7bde41b6308a23043b
size 124764
3
hdf5storage-0.1.18.zip
Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5bbf19aaa5a61096ed075b642f413638da0b7d465b02b04bfbe3ed8bfc120723
size 126907
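The nose-to-pytest.patch added next replaces nose-style yield test generators and @raises decorators with @pytest.mark.parametrize, parameterized.expand and pytest.raises. A minimal sketch of that conversion pattern (the test and data names here are invented for illustration, not taken from the patch):

import pytest

# nose style (what the patch removes): a generator test yielding checks
#     def test_dtypes():
#         for dt in ('uint8', 'float64'):
#             yield check_dtype, dt

# pytest style (what the patch adds): one test per parameter set, and
# expected failures asserted with pytest.raises instead of @raises.
@pytest.mark.parametrize('dt', ['uint8', 'float64'])
def test_dtype(dt):
    assert dt in ('uint8', 'float64')

def test_unsupported_case():
    with pytest.raises(NotImplementedError):
        raise NotImplementedError('illustrative unsupported case')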
840
nose-to-pytest.patch
Normal file
@@ -0,0 +1,840 @@
---
 README.rst | 2
 requirements_tests.txt | 5
 setup.py | 3
 tests/test_hdf5_filters.py | 219 ++++++++----------------
 tests/test_matlab_compatibility.py | 31 +--
 tests/test_string_utf16_conversion.py | 22 +-
 tests/test_write_readback.py | 302 ++++++++++++++++------------------
 7 files changed, 247 insertions(+), 337 deletions(-)

--- a/README.rst
+++ b/README.rst
@@ -56,7 +56,7 @@ Then to install the package, run the com
 Running Tests
 -------------

-For testing, the package nose (>= 1.0) is required as well as unittest2
+For testing, the package pytest (>= 5.0) is required as well as unittest2
 on Python 2.6. There are some tests that require Matlab and scipy to be
 installed and be in the executable path. Not having them means that
 those tests cannot be run (they will be skipped) but all the other
--- a/requirements_tests.txt
+++ b/requirements_tests.txt
@@ -1,3 +1,2 @@
--r requirements.txt
-unittest2 ; python_version == '2.6'
-nose>=1.0
+-r requirements.txt
+pytest>=5.0
--- a/setup.py
+++ b/setup.py
@@ -66,6 +66,5 @@ setup(name='hdf5storage',
          "Topic :: Database",
          "Topic :: Software Development :: Libraries :: Python Modules"
          ],
-      test_suite='nose.collector',
-      tests_require='nose>=1.0'
+      tests_require='pytest>=5.0'
      )
--- a/tests/test_hdf5_filters.py
+++ b/tests/test_hdf5_filters.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2013-2016, Freja Nordsiek
+# Copyright (c) 2013-2021, Freja Nordsiek
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -24,37 +24,39 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

-import os
import os.path
import random
+import tempfile

import h5py

-import hdf5storage
+import pytest

-from nose.tools import raises
+import hdf5storage

-from asserts import *
-from make_randoms import *
+from asserts import assert_equal
+from make_randoms import random_numpy, random_numpy_shape, \
+    max_array_axis_length, dtypes, random_name

random.seed()


-filename = 'data.mat'
+@pytest.mark.parametrize(
+    'compression,shuffle,fletcher32,gzip_level',
+    [(compression, shuffle, fletcher32, level)
+     for compression in ('gzip', 'lzf')
+     for shuffle in (True, False)
+     for fletcher32 in (True, False)
+     for level in range(10)])
+def test_read_filtered_data(compression, shuffle, fletcher32,
+                            gzip_level):
+    # Make the filters dict.
+    filts = {'compression': compression,
+             'shuffle': shuffle,
+             'fletcher32': fletcher32}
+    if compression == 'gzip':
+        filts['compression_opts'] = gzip_level

-
-def check_read_filters(filters):
-    # Read out the filter arguments.
-    filts = {'compression': 'gzip',
-             'shuffle': True,
-             'fletcher32': True,
-             'gzip_level': 7}
-    for k, v in filters.items():
-        filts[k] = v
-    if filts['compression'] == 'gzip':
-        filts['compression_opts'] = filts['gzip_level']
-        del filts['gzip_level']
-
    # Make some random data.
    dims = random.randint(1, 4)
    data = random_numpy(shape=random_numpy_shape(dims,
@@ -64,34 +66,28 @@ def check_read_filters(filters):
    # Make a random name.
    name = random_name()

-    # Write the data to the proper file with the given name with the
-    # provided filters and read it backt. The file needs to be deleted
-    # before and after to keep junk from building up.
-    if os.path.exists(filename):
-        os.remove(filename)
-    try:
+    # Write the data to the file with the given name with the provided
+    # filters and read it back.
+    with tempfile.TemporaryDirectory() as folder:
+        filename = os.path.join(folder, 'data.h5')
        with h5py.File(filename, mode='w') as f:
            f.create_dataset(name, data=data, chunks=True, **filts)
        out = hdf5storage.read(path=name, filename=filename,
                               matlab_compatible=False)
-    except:
-        raise
-    finally:
-        if os.path.exists(filename):
-            os.remove(filename)

    # Compare
    assert_equal(out, data)


-def check_write_filters(filters):
-    # Read out the filter arguments.
-    filts = {'compression': 'gzip',
-             'shuffle': True,
-             'fletcher32': True,
-             'gzip_level': 7}
-    for k, v in filters.items():
-        filts[k] = v
+@pytest.mark.parametrize(
+    'compression,shuffle,fletcher32,gzip_level',
+    [(compression, shuffle, fletcher32, level)
+     for compression in ('gzip', 'lzf')
+     for shuffle in (True, False)
+     for fletcher32 in (True, False)
+     for level in range(10)])
+def test_write_filtered_data(compression, shuffle, fletcher32,
+                             gzip_level):

    # Make some random data. The dtype must be restricted so that it can
    # be read back reliably.
@@ -105,54 +101,56 @@ def check_write_filters(filters):
    # Make a random name.
    name = random_name()

-    # Write the data to the proper file with the given name with the
-    # provided filters and read it backt. The file needs to be deleted
-    # before and after to keep junk from building up.
-    if os.path.exists(filename):
-        os.remove(filename)
-    try:
-        hdf5storage.write(data, path=name, filename=filename, \
-            store_python_metadata=False, matlab_compatible=False, \
-            compress=True, compress_size_threshold=0, \
-            compression_algorithm=filts['compression'], \
-            gzip_compression_level=filts['gzip_level'], \
-            shuffle_filter=filts['shuffle'], \
-            compressed_fletcher32_filter=filts['fletcher32'])
+    # Write the data to the file with the given name with the provided
+    # filters and read it back.
+    with tempfile.TemporaryDirectory() as folder:
+        filename = os.path.join(folder, 'data.h5')
+        hdf5storage.write(data, path=name, filename=filename,
+                          store_python_metadata=False,
+                          matlab_compatible=False,
+                          compress=True, compress_size_threshold=0,
+                          compression_algorithm=compression,
+                          gzip_compression_level=gzip_level,
+                          shuffle_filter=shuffle,
+                          compressed_fletcher32_filter=fletcher32)

        with h5py.File(filename, mode='r') as f:
            d = f[name]
-            fletcher32 = d.fletcher32
-            shuffle = d.shuffle
-            compression = d.compression
-            gzip_level = d.compression_opts
+            filts = {'fletcher32': d.fletcher32,
+                     'shuffle': d.shuffle,
+                     'compression': d.compression,
+                     'gzip_level': d.compression_opts}
            out = d[...]
-    except:
-        raise
-    finally:
-        if os.path.exists(filename):
-            os.remove(filename)

    # Check the filters
    assert fletcher32 == filts['fletcher32']
    assert shuffle == filts['shuffle']
    assert compression == filts['compression']
-    if filts['compression'] == 'gzip':
+    if compression == 'gzip':
        assert gzip_level == filts['gzip_level']

    # Compare
    assert_equal(out, data)


-def check_uncompressed_write_filters(method,
-                                     uncompressed_fletcher32_filter,
-                                     filters):
-    # Read out the filter arguments.
-    filts = {'compression': 'gzip',
-             'shuffle': True,
-             'fletcher32': True,
-             'gzip_level': 7}
-    for k, v in filters.items():
-        filts[k] = v
+@pytest.mark.parametrize(
+    'method,uncompressed_fletcher32_filter,compression,shuffle,'
+    'fletcher32,gzip_level',
+    [(method, uf, compression, shuffle, fletcher32, level)
+     for method in ('compression_disabled', 'data_too_small')
+     for uf in (True, False)
+     for compression in ('gzip', 'lzf')
+     for shuffle in (True, False)
+     for fletcher32 in (True, False)
+     for level in range(10)])
+def test_uncompressed_write_filtered_data(
+        method, uncompressed_fletcher32_filter, compression, shuffle,
+        fletcher32, gzip_level):
+    # Make the filters dict.
+    filts = {'compression': compression,
+             'shuffle': shuffle,
+             'fletcher32': fletcher32,
+             'gzip_level': gzip_level}

    # Make some random data. The dtype must be restricted so that it can
    # be read back reliably.
@@ -175,12 +173,10 @@ def check_uncompressed_write_filters(met
    opts = {'compress': True,
            'compress_size_threshold': data.nbytes + 1}

-    # Write the data to the proper file with the given name with the
-    # provided filters and read it backt. The file needs to be deleted
-    # before and after to keep junk from building up.
-    if os.path.exists(filename):
-        os.remove(filename)
-    try:
+    # Write the data to the file with the given name with the provided
+    # filters and read it back.
+    with tempfile.TemporaryDirectory() as folder:
+        filename = os.path.join(folder, 'data.h5')
        hdf5storage.write(data, path=name, filename=filename, \
            store_python_metadata=False, matlab_compatible=False, \
            compression_algorithm=filts['compression'], \
@@ -198,74 +194,11 @@ def check_uncompressed_write_filters(met
            compression = d.compression
            gzip_level = d.compression_opts
            out = d[...]
-    except:
-        raise
-    finally:
-        if os.path.exists(filename):
-            os.remove(filename)

    # Check the filters
-    assert compression == None
-    assert shuffle == False
+    assert compression is None
+    assert shuffle is False
    assert fletcher32 == uncompressed_fletcher32_filter

    # Compare
    assert_equal(out, data)
-
-
-def test_read_filtered_data():
-    for compression in ('gzip', 'lzf'):
-        for shuffle in (True, False):
-            for fletcher32 in (True, False):
-                if compression != 'gzip':
-                    filters = {'compression': compression,
-                               'shuffle': shuffle,
-                               'fletcher32': fletcher32}
-                    yield check_read_filters, filters
-                else:
-                    for level in range(10):
-                        filters = {'compression': compression,
-                                   'shuffle': shuffle,
-                                   'fletcher32': fletcher32,
-                                   'gzip_level': level}
-                        yield check_read_filters, filters
-
-
-def test_write_filtered_data():
-    for compression in ('gzip', 'lzf'):
-        for shuffle in (True, False):
-            for fletcher32 in (True, False):
-                if compression != 'gzip':
-                    filters = {'compression': compression,
-                               'shuffle': shuffle,
-                               'fletcher32': fletcher32}
-                    yield check_read_filters, filters
-                else:
-                    for level in range(10):
-                        filters = {'compression': compression,
-                                   'shuffle': shuffle,
-                                   'fletcher32': fletcher32,
-                                   'gzip_level': level}
-                        yield check_write_filters, filters
-
-
-def test_uncompressed_write_filtered_data():
-    for method in ('compression_disabled', 'data_too_small'):
-        for uncompressed_fletcher32_filter in (True, False):
-            for compression in ('gzip', 'lzf'):
-                for shuffle in (True, False):
-                    for fletcher32 in (True, False):
-                        if compression != 'gzip':
-                            filters = {'compression': compression,
-                                       'shuffle': shuffle,
-                                       'fletcher32': fletcher32}
-                            yield check_read_filters, filters
-                        else:
-                            for level in range(10):
-                                filters = {'compression': compression,
-                                           'shuffle': shuffle,
-                                           'fletcher32': fletcher32,
-                                           'gzip_level': level}
-                                yield check_uncompressed_write_filters,\
-                                    method, uncompressed_fletcher32_filter,\
-                                    filters
--- a/tests/test_matlab_compatibility.py
+++ b/tests/test_matlab_compatibility.py
@@ -28,7 +28,7 @@ import os
import os.path
import subprocess

-from nose.plugins.skip import SkipTest
+import pytest

import hdf5storage

@@ -82,25 +82,16 @@ def teardown_module():
    if os.path.exists(name):
        os.remove(name)

-
-def test_read_from_matlab():
-    if not ran_matlab_successful[0]:
-        raise SkipTest
-    for k in (set(types_v7.keys()) - set(['__version__', '__header__', \
-                                          '__globals__'])):
-        yield check_variable_from_matlab, k
-
-
-def test_to_matlab_back():
-    if not ran_matlab_successful[0]:
-        raise SkipTest
-    for k in set(types_v7p3.keys()):
-        yield check_variable_to_matlab_back, k
+@pytest.mark.skipif(not ran_matlab_successful[0], reason="Cannot run MATLAB")
+@pytest.mark.parametrize("name",
+                         (types_v7p3.keys()))
+def test_to_matlab_back(name):
+    assert_equal_from_matlab(python_v7p3[name], types_v7[name])


-def check_variable_from_matlab(name):
+@pytest.mark.skipif(not ran_matlab_successful[0], reason="Cannot run MATLAB")
+@pytest.mark.parametrize("name",
+                         (set(types_v7.keys()) -
+                          set(['__version__', '__header__', ])))
+def test_read_from_matlab(name):
    assert_equal_from_matlab(types_v7p3[name], types_v7[name])
-
-
-def check_variable_to_matlab_back(name):
-    assert_equal_from_matlab(python_v7p3[name], types_v7[name])
--- a/tests/test_string_utf16_conversion.py
+++ b/tests/test_string_utf16_conversion.py
@@ -36,7 +36,7 @@ import h5py

import hdf5storage

-import nose.tools
+import pytest


# A test to make sure that the following are written as UTF-16
@@ -46,7 +46,14 @@ import nose.tools
# * str
# * numpy.unicode_ scalars

-def check_conv_utf16(tp):
+if sys.hexversion < 0x3000000:
+    tps_tuple = (unicode, np.unicode_)
+else:
+    tps_tuple = (str, np.unicode_)
+
+
+@pytest.mark.parametrize("tp", tps_tuple)
+def test_conv_utf16(tp):
    name = '/a'
    data = tp('abcdefghijklmnopqrstuvwxyz')
    fld = None
@@ -59,18 +66,9 @@ def check_conv_utf16(tp):
                          store_python_metadata=False,
                          convert_numpy_str_to_utf16=True)
        with h5py.File(filename, mode='r') as f:
-            nose.tools.assert_equal(f[name].dtype.type, np.uint16)
+            assert f[name].dtype.type == np.uint16
    except:
        raise
    finally:
        if fld is not None:
            os.remove(fld[1])
-
-
-def test_conv_utf16():
-    if sys.hexversion < 0x3000000:
-        tps = (unicode, np.unicode_)
-    else:
-        tps = (str, np.unicode_)
-    for tp in tps:
-        yield check_conv_utf16, tp
--- a/tests/test_write_readback.py
+++ b/tests/test_write_readback.py
@@ -37,7 +37,8 @@ import numpy.random

import hdf5storage

-from nose.tools import raises
+import pytest
+from parameterized import parameterized

from asserts import *
from make_randoms import *
@@ -45,6 +46,16 @@ from make_randoms import *

random.seed()

+# Need a list of the supported numeric dtypes to test, excluding
+# those not supported by MATLAB. 'S' and 'U' dtype chars have to
+# be used for the bare byte and unicode string dtypes since the
+# dtype strings (but not chars) are not the same in Python 2 and
+# 3.
+dtypes = ['bool', 'uint8', 'uint16', 'uint32', 'uint64',
+          'int8', 'int16', 'int32', 'int64',
+          'float32', 'float64', 'complex64', 'complex128',
+          'S', 'U']
+

class TestPythonMatlabFormat(object):
    # Test for the ability to write python types to an HDF5 file that
@@ -54,16 +65,6 @@ class TestPythonMatlabFormat(object):
        self.filename = 'data.mat'
        self.options = hdf5storage.Options()

-        # Need a list of the supported numeric dtypes to test, excluding
-        # those not supported by MATLAB. 'S' and 'U' dtype chars have to
-        # be used for the bare byte and unicode string dtypes since the
-        # dtype strings (but not chars) are not the same in Python 2 and
-        # 3.
-        self.dtypes = ['bool', 'uint8', 'uint16', 'uint32', 'uint64',
-                       'int8', 'int16', 'int32', 'int64',
-                       'float32', 'float64', 'complex64', 'complex128',
-                       'S', 'U']
-
    def write_readback(self, data, name, options, read_options=None):
        # Write the data to the proper file with the given name, read it
        # back, and return the result. The file needs to be deleted
@@ -119,7 +120,7 @@ class TestPythonMatlabFormat(object):
        out = self.write_readback(data, random_name(),
                                  self.options)
        self.assert_equal(out, data)
-
+
    def check_numpy_structured_array(self, dimensions):
        # Makes a random structured ndarray of the given type, writes it
        # and reads it back, and then compares it.
@@ -129,7 +130,7 @@ class TestPythonMatlabFormat(object):
        out = self.write_readback(data, random_name(),
                                  self.options)
        self.assert_equal(out, data)
-
+
    def check_numpy_structured_array_empty(self, dimensions):
        # Makes a random structured ndarray of the given type, writes it
        # and reads it back, and then compares it.
@@ -366,15 +367,15 @@ class TestPythonMatlabFormat(object):
                                  self.options)
        self.assert_equal(out, data)

-    @raises(NotImplementedError)
    def test_int_or_long_too_big(self):
-        if sys.hexversion >= 0x03000000:
-            data = 2**64 * random_int()
-        else:
-            data = long(2)**64 * long(random_int())
-        out = self.write_readback(data, random_name(),
-                                  self.options)
-        self.assert_equal(out, data)
+        with pytest.raises(NotImplementedError):
+            if sys.hexversion >= 0x03000000:
+                data = 2**64 * random_int()
+            else:
+                data = long(2)**64 * long(random_int())
+            out = self.write_readback(data, random_name(),
+                                      self.options)
+            self.assert_equal(out, data)

    def test_float(self):
        data = random_float()
@@ -425,20 +426,20 @@ class TestPythonMatlabFormat(object):
                                  self.options)
        self.assert_equal(out, data)

-    @raises(NotImplementedError)
    def test_str_ascii_encoded_utf8(self):
-        ltrs = string.ascii_letters + string.digits
-        data = 'a'
-        if sys.hexversion < 0x03000000:
-            data = unicode(data)
-            ltrs = unicode(ltrs)
-        while all([(c in ltrs) for c in data]):
-            data = random_str_some_unicode(random.randint(1, \
-                max_string_length))
-        data = data.encode('utf-8')
-        out = self.write_readback(data, random_name(),
-                                  self.options)
-        self.assert_equal(out, data)
+        with pytest.raises(NotImplementedError):
+            ltrs = string.ascii_letters + string.digits
+            data = 'a'
+            if sys.hexversion < 0x03000000:
+                data = unicode(data)
+                ltrs = unicode(ltrs)
+            while all([(c in ltrs) for c in data]):
+                data = random_str_some_unicode(random.randint(1, \
+                    max_string_length))
+            data = data.encode('utf-8')
+            out = self.write_readback(data, random_name(),
+                                      self.options)
+            self.assert_equal(out, data)

    def test_str_unicode(self):
        data = random_str_some_unicode(random.randint(1,
@@ -479,51 +480,41 @@ class TestPythonMatlabFormat(object):
                                  self.options)
        self.assert_equal(out, data)

-    def test_numpy_scalar(self):
-        for dt in self.dtypes:
-            yield self.check_numpy_scalar, dt
-
-    def test_numpy_array_1d(self):
-        dts = copy.deepcopy(self.dtypes)
-        dts.append('object')
-        for dt in dts:
-            yield self.check_numpy_array, dt, 1
-
-    def test_numpy_array_2d(self):
-        dts = copy.deepcopy(self.dtypes)
-        dts.append('object')
-        for dt in dts:
-            yield self.check_numpy_array, dt, 2
-
-    def test_numpy_array_3d(self):
-        dts = copy.deepcopy(self.dtypes)
-        dts.append('object')
-        for dt in dts:
-            yield self.check_numpy_array, dt, 3
-
-    def test_numpy_matrix(self):
-        dts = copy.deepcopy(self.dtypes)
-        dts.append('object')
-        for dt in dts:
-            yield self.check_numpy_matrix, dt
-
-    def test_numpy_empty(self):
-        for dt in self.dtypes:
-            yield self.check_numpy_empty, dt
-
-    def test_numpy_stringlike_empty(self):
-        dts = ['S', 'U']
-        for dt in dts:
-            for n in range(1,10):
-                yield self.check_numpy_stringlike_empty, dt, n
-
-    def test_numpy_structured_array(self):
-        for i in range(1, 4):
-            yield self.check_numpy_structured_array, i
-
-    def test_numpy_structured_array_empty(self):
-        for i in range(1, 4):
-            yield self.check_numpy_structured_array_empty, i
+    @parameterized.expand((dtypes))
+    def test_numpy_scalar(self, dt):
+        self.check_numpy_scalar(dt)
+
+    @parameterized.expand((copy.deepcopy(dtypes) + ['object']))
+    def test_numpy_array_1d(self, dt):
+        self.check_numpy_array(dt, 1)
+
+    @parameterized.expand((copy.deepcopy(dtypes) + ['object']))
+    def test_numpy_array_2d(self, dt):
+        self.check_numpy_array(dt, 2)
+
+    @parameterized.expand((copy.deepcopy(dtypes) + ['object']))
+    def test_numpy_array_3d(self, dt):
+        self.check_numpy_array(dt, 3)
+
+    @parameterized.expand((copy.deepcopy(dtypes) + ['object']))
+    def test_numpy_matrix(self, dt):
+        self.check_numpy_matrix(dt)
+
+    @parameterized.expand((dtypes))
+    def test_numpy_empty(self, dt):
+        self.check_numpy_empty(dt)
+
+    @parameterized.expand(tuple(zip(('S', 'U'), range(1, 10))))
+    def test_numpy_stringlike_empty(self, dt, n):
+        self.check_numpy_stringlike_empty(dt, n)
+
+    @parameterized.expand([range(1, 4)])
+    def test_numpy_structured_array(self, i):
+        self.check_numpy_structured_array(i)
+
+    @parameterized.expand([range(1, 4)])
+    def test_numpy_structured_array_empty(self, i):
+        self.check_numpy_structured_array_empty(i)

    def test_numpy_structured_array_unicode_fields(self):
        # Makes a random 1d structured ndarray with non-ascii characters
@@ -537,27 +528,27 @@ class TestPythonMatlabFormat(object):
                                  self.options)
        self.assert_equal(out, data)

-    @raises(NotImplementedError)
    def test_numpy_structured_array_field_null_character(self):
-        self.check_numpy_structured_array_field_special_char('\x00')
+        with pytest.raises(NotImplementedError):
+            self.check_numpy_structured_array_field_special_char('\x00')

-    @raises(NotImplementedError)
    def test_numpy_structured_array_field_forward_slash(self):
-        self.check_numpy_structured_array_field_special_char('/')
+        with pytest.raises(NotImplementedError):
+            self.check_numpy_structured_array_field_special_char('/')

-    def test_numpy_recarray(self):
-        for i in range(1, 4):
-            yield self.check_numpy_recarray, i
-
-    def test_numpy_recarray_empty(self):
-        for i in range(1, 4):
-            yield self.check_numpy_recarray_empty, i
+    @parameterized.expand([range(1, 4)])
+    def test_numpy_recarray(self, i):
+        self.check_numpy_recarray(i)
+
+    @parameterized.expand([range(1, 4)])
+    def test_numpy_recarray_empty(self, i):
+        self.check_numpy_recarray_empty(i)

    def test_numpy_recarray_unicode_fields(self):
        # Makes a random 1d structured ndarray with non-ascii characters
        # in its fields, converts it to a recarray, writes it and reads
        # it back, and then compares it.
-        shape = random_numpy_shape(1, \
+        shape = random_numpy_shape(1,
                                   max_structured_ndarray_axis_length)
        data = random_structured_numpy_array(shape,
                                             nonascii_fields=True)
@@ -565,43 +556,42 @@ class TestPythonMatlabFormat(object):
                                  self.options)
        self.assert_equal(out, data)

-    @raises(NotImplementedError)
    def test_numpy_recarray_field_null_character(self):
-        self.check_numpy_recarray_field_special_char('\x00')
+        with pytest.raises(NotImplementedError):
+            self.check_numpy_recarray_field_special_char('\x00')

-    @raises(NotImplementedError)
    def test_numpy_recarray_field_forward_slash(self):
-        self.check_numpy_recarray_field_special_char('/')
+        with pytest.raises(NotImplementedError):
+            self.check_numpy_recarray_field_special_char('/')

-    def test_numpy_chararray(self):
-        dims = range(1, 4)
-        for dim in dims:
-            yield self.check_numpy_chararray, dim
-
-    def test_numpy_chararray_empty(self):
-        for n in range(1, 10):
-            yield self.check_numpy_chararray_empty, n
-
-    def test_numpy_sized_dtype_nested_0(self):
-        for zero_shaped in (False, True):
-            yield self.check_numpy_sized_dtype_nested_0, zero_shaped
-
-    def test_numpy_sized_dtype_nested_1(self):
-        for zero_shaped in (False, True):
-            yield self.check_numpy_sized_dtype_nested_1, zero_shaped
-
-    def test_numpy_sized_dtype_nested_2(self):
-        for zero_shaped in (False, True):
-            yield self.check_numpy_sized_dtype_nested_2, zero_shaped
-
-    def test_numpy_sized_dtype_nested_3(self):
-        for zero_shaped in (False, True):
-            yield self.check_numpy_sized_dtype_nested_3, zero_shaped
-
-    def test_python_collection(self):
-        for tp in (list, tuple, set, frozenset, collections.deque):
-            yield self.check_python_collection, tp, 'same-dims'
-            yield self.check_python_collection, tp, 'diff-dims'
+    @parameterized.expand([range(1, 4)])
+    def test_numpy_chararray(self, dim):
+        self.check_numpy_chararray(dim)
+
+    @parameterized.expand([range(1, 10)])
+    def test_numpy_chararray_empty(self, n):
+        self.check_numpy_chararray_empty(n)
+
+    @parameterized.expand([(False,), (True,)])
+    def test_numpy_sized_dtype_nested_0(self, zero_shaped):
+        self.check_numpy_sized_dtype_nested_0(zero_shaped)
+
+    @parameterized.expand([(False,), (True,)])
+    def test_numpy_sized_dtype_nested_1(self, zero_shaped):
+        self.check_numpy_sized_dtype_nested_1(zero_shaped)
+
+    @parameterized.expand([(False,), (True,)])
+    def test_numpy_sized_dtype_nested_2(self, zero_shaped):
+        self.check_numpy_sized_dtype_nested_2(zero_shaped)
+
+    @parameterized.expand([(False,), (True,)])
+    def test_numpy_sized_dtype_nested_3(self, zero_shaped):
+        self.check_numpy_sized_dtype_nested_3(zero_shaped)
+
+    @parameterized.expand([(list, tuple, set, frozenset, collections.deque)])
+    def test_python_collection(self, tp):
+        self.check_python_collection(tp, 'same-dims')
+        self.check_python_collection(tp, 'diff-dims')

    def test_dict(self):
        data = random_dict()
@@ -609,42 +599,42 @@ class TestPythonMatlabFormat(object):
                                  self.options)
        self.assert_equal(out, data)

-    @raises(NotImplementedError)
    def test_dict_bytes_key(self):
-        data = random_dict()
-        key = random_bytes(max_dict_key_length)
-        data[key] = random_int()
-        out = self.write_readback(data, random_name(),
-                                  self.options)
-        self.assert_equal(out, data)
+        with pytest.raises(NotImplementedError):
+            data = random_dict()
+            key = random_bytes(max_dict_key_length)
+            data[key] = random_int()
+            out = self.write_readback(data, random_name(),
+                                      self.options)
+            self.assert_equal(out, data)

-    @raises(NotImplementedError)
    def test_dict_key_null_character(self):
-        data = random_dict()
-        if sys.hexversion >= 0x03000000:
-            ch = '\x00'
-        else:
-            ch = unicode('\x00')
-        key = ch.join([random_str_ascii(max_dict_key_length)
-                       for i in range(2)])
-        data[key] = random_int()
-        out = self.write_readback(data, random_name(),
-                                  self.options)
-        self.assert_equal(out, data)
+        with pytest.raises(NotImplementedError):
+            data = random_dict()
+            if sys.hexversion >= 0x03000000:
+                ch = '\x00'
+            else:
+                ch = unicode('\x00')
+            key = ch.join([random_str_ascii(max_dict_key_length)
+                           for i in range(2)])
+            data[key] = random_int()
+            out = self.write_readback(data, random_name(),
+                                      self.options)
+            self.assert_equal(out, data)

-    @raises(NotImplementedError)
    def test_dict_key_forward_slash(self):
-        data = random_dict()
-        if sys.hexversion >= 0x03000000:
-            ch = '/'
-        else:
-            ch = unicode('/')
-        key = ch.join([random_str_ascii(max_dict_key_length)
-                       for i in range(2)])
-        data[key] = random_int()
-        out = self.write_readback(data, random_name(),
-                                  self.options)
-        self.assert_equal(out, data)
+        with pytest.raises(NotImplementedError):
+            data = random_dict()
+            if sys.hexversion >= 0x03000000:
+                ch = '/'
+            else:
+                ch = unicode('/')
+            key = ch.join([random_str_ascii(max_dict_key_length)
+                           for i in range(2)])
+            data[key] = random_int()
+            out = self.write_readback(data, random_name(),
+                                      self.options)
+            self.assert_equal(out, data)


class TestPythonFormat(TestPythonMatlabFormat):
@@ -690,7 +680,7 @@ class TestNoneFormat(TestPythonMatlabFor
                                          matlab_compatible=False)

        # Add in float16 to the set of types tested.
-        self.dtypes.append('float16')
+        # self.dtypes.append('float16') it doesn't seem to be used

    # Won't throw an exception unlike the parent.
    def test_str_ascii_encoded_utf8(self):
@@ -1,3 +1,26 @@
-------------------------------------------------------------------
Sat Jan 29 08:31:32 UTC 2022 - Matej Cepl <mcepl@suse.com>

- Update 0.1.18 (performance improving release):
  - Pull Request #111 from Daniel Hrisca. Many repeated calls to
    the __getitem__ methods of objects were turned into single
    calls.
  - Further reducionts in __getitem__ calls in the spirit of PR
    #111.
- Update to 0.1.17 (bugfix and deprecation workaround release):
  - Issue #109. Fixed the fix Issue #102 for 32-bit platforms
    (previous fix was segfaulting).
  - Moved to using pkg_resources.parse_version from setuptools
    with distutils.version classes as a fallback instead of just
    the later to prepare for the removal of distutils (PEP 632)
    and prevent warnings on Python versions where it is marked as
    deprecated.
  - Issue #110. Changed all uses of the tostring method on numpy
    types to using tobytes if available, with tostring as the
    fallback for old versions of numpy where it is not.
- Add nose-to-pytest.patch which allows use of pytest instead of
  nose (not based on the upstream solution).

-------------------------------------------------------------------
Mon Mar 8 18:40:19 UTC 2021 - Ben Greiner <code@bnavigator.de>

@@ -1,7 +1,7 @@
#
# spec file for package python-hdf5storage
#
# Copyright (c) 2021 SUSE LLC
# Copyright (c) 2022 SUSE LLC
#
# All modifications and additions to the file contributed by third parties
# remain the property of their copyright owners, unless otherwise agreed
@@ -20,13 +20,16 @@
%define skip_python2 1
%define skip_python36 1
Name:           python-hdf5storage
Version:        0.1.16
Version:        0.1.18
Release:        0
Summary:        Utilities to read/write HDF5 files, including MATLAB v7.3 MAT files
License:        BSD-3-Clause
Group:          Development/Languages/Python
URL:            https://github.com/frejanordsiek/hdf5storage
Source:         https://files.pythonhosted.org/packages/source/h/hdf5storage/hdf5storage-%{version}.zip
# PATCH-FIX-UPSTREAM nose-to-pytest.patch gh#frejanordsiek/hdf5storage#96 mcepl@suse.com
# uses pytest instead of nose
Patch0:         nose-to-pytest.patch
BuildRequires:  %{python_module h5py >= 2.1}
BuildRequires:  %{python_module numpy}
BuildRequires:  %{python_module setuptools}
@@ -35,7 +38,9 @@ BuildRequires:  python-rpm-macros
BuildRequires:  unzip
# SECTION test requirements
# next release will use pytest gh#frejanordsiek/hdf5storage#96
BuildRequires:  %{python_module nose}
BuildRequires:  %{python_module pytest >= 5.0}
# I don't know how to do @pytest.mark.parametrize on class methods
BuildRequires:  %{python_module parameterized}
BuildRequires:  %{python_module scipy}
# /SECTION
Requires:       python-h5py >= 2.1
@@ -66,10 +71,7 @@ sed -i 's/\r$//' COPYING.txt
%python_expand %fdupes %{buildroot}%{$python_sitelib}

%check
%{python_expand export PYTHONPATH=%{buildroot}%{$python_sitelib}
export PYTHONDONTWRITEBYTECODE=1
nosetests-%{$python_bin_suffix} -v
}
%pytest

%files %{python_files}
%doc README.rst