forked from pool/python-hdf5storage
- Unpin numpy 2
  * Add hdf5storage-pr134-numpy2.patch
  * gh#frejanordsiek/hdf5storage#134 (backported)
- Make it noarch again

OBS-URL: https://build.opensuse.org/package/show/devel:languages:python:numeric/python-hdf5storage?expand=0&rev=12
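For context: NumPy 2.0 removed the np.unicode_ alias, which is why the previously pinned build broke; np.str_ is the same type under its surviving name, so the backported patch below is a mechanical rename. A minimal sketch of the change (illustration only, not part of the commit):

    import numpy as np

    # np.unicode_ was an alias of np.str_; NumPy 2.0 removed the alias,
    # so touching np.unicode_ there raises an AttributeError. The rename
    # behaves identically on NumPy 1.x and 2.x.
    s = np.str_("abc")          # was: np.unicode_("abc")
    assert isinstance(s, str)   # np.str_ subclasses the builtin str
    assert np.array(["abc"]).dtype.kind == "U"  # unicode dtype unchanged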
Commit df11e96f83

.gitattributes (vendored, new file, 23 lines)
@@ -0,0 +1,23 @@
## Default LFS
*.7z filter=lfs diff=lfs merge=lfs -text
*.bsp filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.gem filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.jar filter=lfs diff=lfs merge=lfs -text
*.lz filter=lfs diff=lfs merge=lfs -text
*.lzma filter=lfs diff=lfs merge=lfs -text
*.obscpio filter=lfs diff=lfs merge=lfs -text
*.oxt filter=lfs diff=lfs merge=lfs -text
*.pdf filter=lfs diff=lfs merge=lfs -text
*.png filter=lfs diff=lfs merge=lfs -text
*.rpm filter=lfs diff=lfs merge=lfs -text
*.tbz filter=lfs diff=lfs merge=lfs -text
*.tbz2 filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.ttf filter=lfs diff=lfs merge=lfs -text
*.txz filter=lfs diff=lfs merge=lfs -text
*.whl filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
.gitignore (vendored, new file, 1 line)
@@ -0,0 +1 @@
.osc
hdf5storage-0.1.19.tar.gz (new file, stored in Git LFS, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7a1a6badf546e8942f4d22d598aee14021796bc28918519c9687a6abb0eeef86
size 100839
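The tarball itself lives in Git LFS; the pointer above records only the object's SHA-256 (oid) and size. A quick integrity check of a fetched tarball against the pointer, as a minimal sketch (assumes the file sits in the current directory):

    import hashlib

    # The oid in a Git LFS pointer is the SHA-256 of the real file
    # content, so a downloaded tarball can be verified against it.
    h = hashlib.sha256()
    with open("hdf5storage-0.1.19.tar.gz", "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    assert h.hexdigest() == ("7a1a6badf546e8942f4d22d598aee14021796"
                             "bc28918519c9687a6abb0eeef86")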
hdf5storage-pr134-numpy2.patch (new file, 226 lines)
@@ -0,0 +1,226 @@
From 9814bc28874a56757e16479186523b2b77d5c553 Mon Sep 17 00:00:00 2001
From: Jesse R Codling <codling@umich.edu>
Date: Wed, 14 Aug 2024 12:34:47 -0400
Subject: [PATCH 2/3] Numpy 2.0: Remove all np.unicode_ for np.str_

---
 doc/source/storage_format.rst           |  6 ++--
 pyproject.toml                          |  2 +-
 hdf5storage/Marshallers.py              | 20 +++++++------
 hdf5storage/__init__.py                 |  6 ++--
 hdf5storage/utilities.py                | 40 ++++++++++++-------------
 tests/asserts.py                        | 14 ++++-----
 tests/make_randoms.py                   |  4 +--
 tests/test_dict_like_storage_methods.py |  6 ++--
 tests/test_str_conv_utils.py            |  8 ++---
 tests/test_string_utf16_conversion.py   |  4 +--
 tests/test_write_readback.py            |  6 ++--
 11 files changed, 59 insertions(+), 57 deletions(-)

Index: hdf5storage-0.1.19/tests/make_randoms.py
===================================================================
--- hdf5storage-0.1.19.orig/tests/make_randoms.py
+++ hdf5storage-0.1.19/tests/make_randoms.py
@@ -156,7 +156,7 @@ def random_numpy(shape, dtype, allow_nan
chars = random_str_some_unicode(length)
else:
chars = random_str_ascii(length)
- data[index] = np.unicode_(chars)
+ data[index] = np.str_(chars)
return data
elif dtype == 'object':
data = np.zeros(shape=shape, dtype='object')
Index: hdf5storage-0.1.19/tests/test_string_utf16_conversion.py
===================================================================
--- hdf5storage-0.1.19.orig/tests/test_string_utf16_conversion.py
+++ hdf5storage-0.1.19/tests/test_string_utf16_conversion.py
@@ -44,12 +44,12 @@ import pytest
# convert_numpy_str_to_utf16 option is set.
#
# * str
-# * numpy.unicode_ scalars
+# * numpy.str_ scalars

if sys.hexversion < 0x3000000:
- tps_tuple = (unicode, np.unicode_)
+ tps_tuple = (unicode, np.str_)
else:
- tps_tuple = (str, np.unicode_)
+ tps_tuple = (str, np.str_)


@pytest.mark.parametrize("tp", tps_tuple)
Index: hdf5storage-0.1.19/hdf5storage/Marshallers.py
===================================================================
--- hdf5storage-0.1.19.orig/hdf5storage/Marshallers.py
+++ hdf5storage-0.1.19/hdf5storage/Marshallers.py
@@ -480,7 +480,7 @@ class NumpyScalarArrayMarshaller(TypeMar
'MATLAB_int_decode',
'MATLAB_fields'])
# As np.str_ is the unicode type string in Python 3 and the bare
- # bytes string in Python 2, we have to use np.unicode_ which is
+ # bytes string in Python 2, we have to use np.str_ which is
# or points to the unicode one in both versions.
self.types = [np.ndarray, np.matrix,
np.chararray, np.core.records.recarray,
@@ -489,7 +489,7 @@ class NumpyScalarArrayMarshaller(TypeMar
np.int8, np.int16, np.int32, np.int64,
np.float32, np.float64,
np.complex64, np.complex128,
- np.bytes_, np.unicode_, np.object_]
+ np.bytes_, np.str_, np.object_]
self._numpy_types = list(self.types)
# Using Python 3 type strings.
self.python_type_strings = ['numpy.ndarray', 'numpy.matrix',
@@ -525,7 +525,7 @@ class NumpyScalarArrayMarshaller(TypeMar
np.complex64: 'single',
np.complex128: 'double',
np.bytes_: 'char',
- np.unicode_: 'char',
+ np.str_: 'char',
np.object_: 'cell'}

# Make a dict to look up the opposite direction (given a matlab
@@ -542,7 +542,7 @@ class NumpyScalarArrayMarshaller(TypeMar
'int64': np.int64,
'single': np.float32,
'double': np.float64,
- 'char': np.unicode_,
+ 'char': np.str_,
'cell': np.object_,
'canonical empty': np.float64,
'struct': np.object_}
@@ -601,18 +601,7 @@ class NumpyScalarArrayMarshaller(TypeMar
raise NotImplementedError( \
'Can''t write non-ASCII numpy.bytes_.')

- # As of 2013-12-13, h5py cannot write numpy.str_ (UTF-32
- # encoding) types (its numpy.unicode_ in Python 2, which is an
- # alias for it in Python 3). If the option is set to try to
- # convert them to UTF-16, then an attempt at the conversion is
- # made. If no conversion is to be done, the conversion throws an
- # exception (a UTF-32 character had no UTF-16 equivalent), or a
- # UTF-32 character gets turned into a UTF-16 doublet (the
- # increase in the number of columns will be by a factor more
- # than the length of the strings); then it will be simply
- # converted to uint32's byte for byte instead.
-
- if data.dtype.type == np.unicode_:
+ if data.dtype.type == np.str_:
new_data = None
if options.convert_numpy_str_to_utf16:
try:
@@ -620,7 +609,7 @@ class NumpyScalarArrayMarshaller(TypeMar
data_to_store)
except:
pass
- if new_data is None or (type(data_to_store) == np.unicode_ \
+ if new_data is None or (type(data_to_store) == np.str_ \
and len(data_to_store) != len(new_data)) \
or (isinstance(data_to_store, np.ndarray) \
and new_data.shape[-1] != data_to_store.shape[-1] \
@@ -1049,7 +1038,7 @@ class NumpyScalarArrayMarshaller(TypeMar
str_attrs[attr_name] = value
elif isinstance(value, bytes):
str_attrs[attr_name] = value.decode()
- elif isinstance(value, np.unicode_):
+ elif isinstance(value, np.str_):
str_attrs[attr_name] = str(value)
elif isinstance(value, np.bytes_):
str_attrs[attr_name] = value.decode()
@@ -1313,7 +1302,7 @@ class NumpyScalarArrayMarshaller(TypeMar
elif underlying_type.startswith('str') \
or matlab_class == 'char':
if underlying_type == 'str':
- data = np.unicode_('')
+ data = np.str_('')
elif underlying_type.startswith('str'):
data = convert_to_numpy_str(data, \
length=int(underlying_type[3:])//32)
@@ -1344,7 +1333,7 @@ class NumpyScalarArrayMarshaller(TypeMar
data = data.flatten()[0]
elif underlying_type.startswith('str'):
if python_empty == 1:
- data = np.unicode_('')
+ data = np.str_('')
elif isinstance(data, np.ndarray):
data = data.flatten()[0]
else:
@@ -1511,7 +1500,7 @@ class PythonStringMarshaller(NumpyScalar
if (sys.hexversion >= 0x03000000 and isinstance(data, str)) \
or (sys.hexversion < 0x03000000 \
and isinstance(data, unicode)):
- cdata = np.unicode_(data)
+ cdata = np.str_(data)
else:
cdata = np.bytes_(data)

Index: hdf5storage-0.1.19/hdf5storage/utilities.py
===================================================================
--- hdf5storage-0.1.19.orig/hdf5storage/utilities.py
+++ hdf5storage-0.1.19/hdf5storage/utilities.py
@@ -408,7 +408,7 @@ def convert_to_str(data):
# assuming it is in UTF-8. Otherwise, data has to be returned as is.

if isinstance(data, (np.ndarray, np.uint8, np.uint16, np.uint32,
- np.bytes_, np.unicode_)):
+ np.bytes_, np.str_)):
if data.dtype.name == 'uint8':
return numpy_to_bytes(data.flatten()).decode('UTF-8')
elif data.dtype.name == 'uint16':
@@ -477,7 +477,7 @@ def convert_to_numpy_str(data, length=No

"""
# The method of conversion depends on its type.
- if isinstance(data, np.unicode_) or (isinstance(data, np.ndarray) \
+ if isinstance(data, np.str_) or (isinstance(data, np.ndarray) \
and data.dtype.char == 'U'):
# It is already an np.str_ or array of them, so nothing needs to
# be done.
@@ -486,16 +486,16 @@ def convert_to_numpy_str(data, length=No
or (sys.hexversion < 0x03000000 \
and isinstance(data, unicode)):
# Easily converted through constructor.
- return np.unicode_(data)
+ return np.str_(data)
elif isinstance(data, (bytes, bytearray, np.bytes_)):
# All of them can be decoded and then passed through the
# constructor.
- return np.unicode_(data.decode('UTF-8'))
+ return np.str_(data.decode('UTF-8'))
elif isinstance(data, (np.uint8, np.uint16)):
# They are single UTF-8 or UTF-16 scalars, and are easily
# converted to a UTF-8 string and then passed through the
# constructor.
- return np.unicode_(convert_to_str(data))
+ return np.str_(convert_to_str(data))
elif isinstance(data, np.uint32):
# It is just the uint32 version of the character, so it just
# needs to be have the dtype essentially changed by having its
@@ -507,7 +507,7 @@ def convert_to_numpy_str(data, length=No
new_data = np.zeros(shape=data.shape,
dtype='U' + str(data.dtype.itemsize))
for index, x in np.ndenumerate(data):
- new_data[index] = np.unicode_(x.decode('UTF-8'))
+ new_data[index] = np.str_(x.decode('UTF-8'))
return new_data
elif isinstance(data, np.ndarray) \
and data.dtype.name in ('uint8', 'uint16', 'uint32'):
@@ -559,7 +559,7 @@ def convert_to_numpy_str(data, length=No
dtype=new_data.dtype,
buffer=numpy_to_bytes(chunk))[()]
else:
- new_data[i] = np.unicode_(convert_to_str(chunk))
+ new_data[i] = np.str_(convert_to_str(chunk))

# Only thing is left is to reshape it.
return new_data.reshape(tuple(new_shape))
@@ -896,7 +896,7 @@ def get_attribute_string(target, name):
return value
elif isinstance(value, bytes):
return value.decode()
- elif isinstance(value, np.unicode_):
+ elif isinstance(value, np.str_):
return str(value)
elif isinstance(value, np.bytes_):
return value.decode()
nose-to-pytest.patch (new file, 840 lines)
@@ -0,0 +1,840 @@
---
 README.rst                            |    2
 requirements_tests.txt                |    5
 setup.py                              |    3
 tests/test_hdf5_filters.py            |  219 ++++++++----------------
 tests/test_matlab_compatibility.py    |   31 +--
 tests/test_string_utf16_conversion.py |   22 +-
 tests/test_write_readback.py          |  302 ++++++++++++++++------------------
 7 files changed, 247 insertions(+), 337 deletions(-)

--- a/README.rst
+++ b/README.rst
@@ -56,7 +56,7 @@ Then to install the package, run the com
Running Tests
-------------

-For testing, the package nose (>= 1.0) is required as well as unittest2
+For testing, the package pytest (>= 5.0) is required as well as unittest2
on Python 2.6. There are some tests that require Matlab and scipy to be
installed and be in the executable path. Not having them means that
those tests cannot be run (they will be skipped) but all the other
--- a/requirements_tests.txt
+++ b/requirements_tests.txt
@@ -1,3 +1,2 @@
--r requirements.txt
-unittest2 ; python_version == '2.6'
-nose>=1.0
+-r requirements.txt
+pytest>=5.0
--- a/setup.py
+++ b/setup.py
@@ -66,6 +66,5 @@ setup(name='hdf5storage',
"Topic :: Database",
"Topic :: Software Development :: Libraries :: Python Modules"
],
- test_suite='nose.collector',
- tests_require='nose>=1.0'
+ tests_require='pytest>=5.0'
)
--- a/tests/test_hdf5_filters.py
+++ b/tests/test_hdf5_filters.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2013-2016, Freja Nordsiek
+# Copyright (c) 2013-2021, Freja Nordsiek
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -24,37 +24,39 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

-import os
import os.path
import random
+import tempfile

import h5py

-import hdf5storage
+import pytest

-from nose.tools import raises
+import hdf5storage

-from asserts import *
-from make_randoms import *
+from asserts import assert_equal
+from make_randoms import random_numpy, random_numpy_shape, \
+ max_array_axis_length, dtypes, random_name

random.seed()


-filename = 'data.mat'
+@pytest.mark.parametrize(
+ 'compression,shuffle,fletcher32,gzip_level',
+ [(compression, shuffle, fletcher32, level)
+ for compression in ('gzip', 'lzf')
+ for shuffle in (True, False)
+ for fletcher32 in (True, False)
+ for level in range(10)])
+def test_read_filtered_data(compression, shuffle, fletcher32,
+ gzip_level):
+ # Make the filters dict.
+ filts = {'compression': compression,
+ 'shuffle': shuffle,
+ 'fletcher32': fletcher32}
+ if compression == 'gzip':
+ filts['compression_opts'] = gzip_level

-
-def check_read_filters(filters):
- # Read out the filter arguments.
- filts = {'compression': 'gzip',
- 'shuffle': True,
- 'fletcher32': True,
- 'gzip_level': 7}
- for k, v in filters.items():
- filts[k] = v
- if filts['compression'] == 'gzip':
- filts['compression_opts'] = filts['gzip_level']
- del filts['gzip_level']
-
# Make some random data.
dims = random.randint(1, 4)
data = random_numpy(shape=random_numpy_shape(dims,
@@ -64,34 +66,28 @@ def check_read_filters(filters):
# Make a random name.
name = random_name()

- # Write the data to the proper file with the given name with the
- # provided filters and read it backt. The file needs to be deleted
- # before and after to keep junk from building up.
- if os.path.exists(filename):
- os.remove(filename)
- try:
+ # Write the data to the file with the given name with the provided
+ # filters and read it back.
+ with tempfile.TemporaryDirectory() as folder:
+ filename = os.path.join(folder, 'data.h5')
with h5py.File(filename, mode='w') as f:
f.create_dataset(name, data=data, chunks=True, **filts)
out = hdf5storage.read(path=name, filename=filename,
matlab_compatible=False)
- except:
- raise
- finally:
- if os.path.exists(filename):
- os.remove(filename)

# Compare
assert_equal(out, data)


-def check_write_filters(filters):
- # Read out the filter arguments.
- filts = {'compression': 'gzip',
- 'shuffle': True,
- 'fletcher32': True,
- 'gzip_level': 7}
- for k, v in filters.items():
- filts[k] = v
+@pytest.mark.parametrize(
+ 'compression,shuffle,fletcher32,gzip_level',
+ [(compression, shuffle, fletcher32, level)
+ for compression in ('gzip', 'lzf')
+ for shuffle in (True, False)
+ for fletcher32 in (True, False)
+ for level in range(10)])
+def test_write_filtered_data(compression, shuffle, fletcher32,
+ gzip_level):

# Make some random data. The dtype must be restricted so that it can
# be read back reliably.
@@ -105,54 +101,56 @@ def check_write_filters(filters):
# Make a random name.
name = random_name()

- # Write the data to the proper file with the given name with the
- # provided filters and read it backt. The file needs to be deleted
- # before and after to keep junk from building up.
- if os.path.exists(filename):
- os.remove(filename)
- try:
- hdf5storage.write(data, path=name, filename=filename, \
- store_python_metadata=False, matlab_compatible=False, \
- compress=True, compress_size_threshold=0, \
- compression_algorithm=filts['compression'], \
- gzip_compression_level=filts['gzip_level'], \
- shuffle_filter=filts['shuffle'], \
- compressed_fletcher32_filter=filts['fletcher32'])
+ # Write the data to the file with the given name with the provided
+ # filters and read it back.
+ with tempfile.TemporaryDirectory() as folder:
+ filename = os.path.join(folder, 'data.h5')
+ hdf5storage.write(data, path=name, filename=filename,
+ store_python_metadata=False,
+ matlab_compatible=False,
+ compress=True, compress_size_threshold=0,
+ compression_algorithm=compression,
+ gzip_compression_level=gzip_level,
+ shuffle_filter=shuffle,
+ compressed_fletcher32_filter=fletcher32)

with h5py.File(filename, mode='r') as f:
d = f[name]
- fletcher32 = d.fletcher32
- shuffle = d.shuffle
- compression = d.compression
- gzip_level = d.compression_opts
+ filts = {'fletcher32': d.fletcher32,
+ 'shuffle': d.shuffle,
+ 'compression': d.compression,
+ 'gzip_level': d.compression_opts}
out = d[...]
- except:
- raise
- finally:
- if os.path.exists(filename):
- os.remove(filename)

# Check the filters
assert fletcher32 == filts['fletcher32']
assert shuffle == filts['shuffle']
assert compression == filts['compression']
- if filts['compression'] == 'gzip':
+ if compression == 'gzip':
assert gzip_level == filts['gzip_level']

# Compare
assert_equal(out, data)


-def check_uncompressed_write_filters(method,
- uncompressed_fletcher32_filter,
- filters):
- # Read out the filter arguments.
- filts = {'compression': 'gzip',
- 'shuffle': True,
- 'fletcher32': True,
- 'gzip_level': 7}
- for k, v in filters.items():
- filts[k] = v
+@pytest.mark.parametrize(
+ 'method,uncompressed_fletcher32_filter,compression,shuffle,'
+ 'fletcher32,gzip_level',
+ [(method, uf, compression, shuffle, fletcher32, level)
+ for method in ('compression_disabled', 'data_too_small')
+ for uf in (True, False)
+ for compression in ('gzip', 'lzf')
+ for shuffle in (True, False)
+ for fletcher32 in (True, False)
+ for level in range(10)])
+def test_uncompressed_write_filtered_data(
+ method, uncompressed_fletcher32_filter, compression, shuffle,
+ fletcher32, gzip_level):
+ # Make the filters dict.
+ filts = {'compression': compression,
+ 'shuffle': shuffle,
+ 'fletcher32': fletcher32,
+ 'gzip_level': gzip_level}

# Make some random data. The dtype must be restricted so that it can
# be read back reliably.
@@ -175,12 +173,10 @@ def check_uncompressed_write_filters(met
opts = {'compress': True,
'compress_size_threshold': data.nbytes + 1}

- # Write the data to the proper file with the given name with the
- # provided filters and read it backt. The file needs to be deleted
- # before and after to keep junk from building up.
- if os.path.exists(filename):
- os.remove(filename)
- try:
+ # Write the data to the file with the given name with the provided
+ # filters and read it back.
+ with tempfile.TemporaryDirectory() as folder:
+ filename = os.path.join(folder, 'data.h5')
hdf5storage.write(data, path=name, filename=filename, \
store_python_metadata=False, matlab_compatible=False, \
compression_algorithm=filts['compression'], \
@@ -198,74 +194,11 @@ def check_uncompressed_write_filters(met
compression = d.compression
gzip_level = d.compression_opts
out = d[...]
- except:
- raise
- finally:
- if os.path.exists(filename):
- os.remove(filename)

# Check the filters
- assert compression == None
- assert shuffle == False
+ assert compression is None
+ assert shuffle is False
assert fletcher32 == uncompressed_fletcher32_filter

# Compare
assert_equal(out, data)
-
-
-def test_read_filtered_data():
- for compression in ('gzip', 'lzf'):
- for shuffle in (True, False):
- for fletcher32 in (True, False):
- if compression != 'gzip':
- filters = {'compression': compression,
- 'shuffle': shuffle,
- 'fletcher32': fletcher32}
- yield check_read_filters, filters
- else:
- for level in range(10):
- filters = {'compression': compression,
- 'shuffle': shuffle,
- 'fletcher32': fletcher32,
- 'gzip_level': level}
- yield check_read_filters, filters
-
-
-def test_write_filtered_data():
- for compression in ('gzip', 'lzf'):
- for shuffle in (True, False):
- for fletcher32 in (True, False):
- if compression != 'gzip':
- filters = {'compression': compression,
- 'shuffle': shuffle,
- 'fletcher32': fletcher32}
- yield check_read_filters, filters
- else:
- for level in range(10):
- filters = {'compression': compression,
- 'shuffle': shuffle,
- 'fletcher32': fletcher32,
- 'gzip_level': level}
- yield check_write_filters, filters
-
-
-def test_uncompressed_write_filtered_data():
- for method in ('compression_disabled', 'data_too_small'):
- for uncompressed_fletcher32_filter in (True, False):
- for compression in ('gzip', 'lzf'):
- for shuffle in (True, False):
- for fletcher32 in (True, False):
- if compression != 'gzip':
- filters = {'compression': compression,
- 'shuffle': shuffle,
- 'fletcher32': fletcher32}
- yield check_read_filters, filters
- else:
- for level in range(10):
- filters = {'compression': compression,
- 'shuffle': shuffle,
- 'fletcher32': fletcher32,
- 'gzip_level': level}
- yield check_uncompressed_write_filters,\
- method, uncompressed_fletcher32_filter,\
- filters
--- a/tests/test_matlab_compatibility.py
+++ b/tests/test_matlab_compatibility.py
@@ -28,7 +28,7 @@ import os
import os.path
import subprocess

-from nose.plugins.skip import SkipTest
+import pytest

import hdf5storage

@@ -82,25 +82,16 @@ def teardown_module():
if os.path.exists(name):
os.remove(name)

-
-def test_read_from_matlab():
- if not ran_matlab_successful[0]:
- raise SkipTest
- for k in (set(types_v7.keys()) - set(['__version__', '__header__', \
- '__globals__'])):
- yield check_variable_from_matlab, k
-
-
-def test_to_matlab_back():
- if not ran_matlab_successful[0]:
- raise SkipTest
- for k in set(types_v7p3.keys()):
- yield check_variable_to_matlab_back, k
+@pytest.mark.skipif(not ran_matlab_successful[0], reason="Cannot run MATLAB")
+@pytest.mark.parametrize("name",
+ (types_v7p3.keys()))
+def test_to_matlab_back(name):
+ assert_equal_from_matlab(python_v7p3[name], types_v7[name])


-def check_variable_from_matlab(name):
+@pytest.mark.skipif(not ran_matlab_successful[0], reason="Cannot run MATLAB")
+@pytest.mark.parametrize("name",
+ (set(types_v7.keys()) -
+ set(['__version__', '__header__', ])))
+def test_read_from_matlab(name):
assert_equal_from_matlab(types_v7p3[name], types_v7[name])
-
-
-def check_variable_to_matlab_back(name):
- assert_equal_from_matlab(python_v7p3[name], types_v7[name])
--- a/tests/test_string_utf16_conversion.py
+++ b/tests/test_string_utf16_conversion.py
@@ -36,7 +36,7 @@ import h5py

import hdf5storage

-import nose.tools
+import pytest


# A test to make sure that the following are written as UTF-16
@@ -46,7 +46,14 @@ import nose.tools
# * str
# * numpy.unicode_ scalars

-def check_conv_utf16(tp):
+if sys.hexversion < 0x3000000:
+ tps_tuple = (unicode, np.unicode_)
+else:
+ tps_tuple = (str, np.unicode_)
+
+
+@pytest.mark.parametrize("tp", tps_tuple)
+def test_conv_utf16(tp):
name = '/a'
data = tp('abcdefghijklmnopqrstuvwxyz')
fld = None
@@ -59,18 +66,9 @@ def check_conv_utf16(tp):
store_python_metadata=False,
convert_numpy_str_to_utf16=True)
with h5py.File(filename, mode='r') as f:
- nose.tools.assert_equal(f[name].dtype.type, np.uint16)
+ assert f[name].dtype.type == np.uint16
except:
raise
finally:
if fld is not None:
os.remove(fld[1])
-
-
-def test_conv_utf16():
- if sys.hexversion < 0x3000000:
- tps = (unicode, np.unicode_)
- else:
- tps = (str, np.unicode_)
- for tp in tps:
- yield check_conv_utf16, tp
--- a/tests/test_write_readback.py
+++ b/tests/test_write_readback.py
@@ -37,7 +37,8 @@ import numpy.random

import hdf5storage

-from nose.tools import raises
+import pytest
+from parameterized import parameterized

from asserts import *
from make_randoms import *
@@ -45,6 +46,16 @@ from make_randoms import *

random.seed()

+# Need a list of the supported numeric dtypes to test, excluding
+# those not supported by MATLAB. 'S' and 'U' dtype chars have to
+# be used for the bare byte and unicode string dtypes since the
+# dtype strings (but not chars) are not the same in Python 2 and
+# 3.
+dtypes = ['bool', 'uint8', 'uint16', 'uint32', 'uint64',
+ 'int8', 'int16', 'int32', 'int64',
+ 'float32', 'float64', 'complex64', 'complex128',
+ 'S', 'U']
+

class TestPythonMatlabFormat(object):
# Test for the ability to write python types to an HDF5 file that
@@ -54,16 +65,6 @@ class TestPythonMatlabFormat(object):
self.filename = 'data.mat'
self.options = hdf5storage.Options()

- # Need a list of the supported numeric dtypes to test, excluding
- # those not supported by MATLAB. 'S' and 'U' dtype chars have to
- # be used for the bare byte and unicode string dtypes since the
- # dtype strings (but not chars) are not the same in Python 2 and
- # 3.
- self.dtypes = ['bool', 'uint8', 'uint16', 'uint32', 'uint64',
- 'int8', 'int16', 'int32', 'int64',
- 'float32', 'float64', 'complex64', 'complex128',
- 'S', 'U']
-
def write_readback(self, data, name, options, read_options=None):
# Write the data to the proper file with the given name, read it
# back, and return the result. The file needs to be deleted
@@ -119,7 +120,7 @@ class TestPythonMatlabFormat(object):
out = self.write_readback(data, random_name(),
self.options)
self.assert_equal(out, data)
-
+
def check_numpy_structured_array(self, dimensions):
# Makes a random structured ndarray of the given type, writes it
# and reads it back, and then compares it.
@@ -129,7 +130,7 @@ class TestPythonMatlabFormat(object):
out = self.write_readback(data, random_name(),
self.options)
self.assert_equal(out, data)
-
+
def check_numpy_structured_array_empty(self, dimensions):
# Makes a random structured ndarray of the given type, writes it
# and reads it back, and then compares it.
@@ -366,15 +367,15 @@ class TestPythonMatlabFormat(object):
self.options)
self.assert_equal(out, data)

- @raises(NotImplementedError)
def test_int_or_long_too_big(self):
- if sys.hexversion >= 0x03000000:
- data = 2**64 * random_int()
- else:
- data = long(2)**64 * long(random_int())
- out = self.write_readback(data, random_name(),
- self.options)
- self.assert_equal(out, data)
+ with pytest.raises(NotImplementedError):
+ if sys.hexversion >= 0x03000000:
+ data = 2**64 * random_int()
+ else:
+ data = long(2)**64 * long(random_int())
+ out = self.write_readback(data, random_name(),
+ self.options)
+ self.assert_equal(out, data)

def test_float(self):
data = random_float()
@@ -425,20 +426,20 @@ class TestPythonMatlabFormat(object):
self.options)
self.assert_equal(out, data)

- @raises(NotImplementedError)
def test_str_ascii_encoded_utf8(self):
- ltrs = string.ascii_letters + string.digits
- data = 'a'
- if sys.hexversion < 0x03000000:
- data = unicode(data)
- ltrs = unicode(ltrs)
- while all([(c in ltrs) for c in data]):
- data = random_str_some_unicode(random.randint(1, \
- max_string_length))
- data = data.encode('utf-8')
- out = self.write_readback(data, random_name(),
- self.options)
- self.assert_equal(out, data)
+ with pytest.raises(NotImplementedError):
+ ltrs = string.ascii_letters + string.digits
+ data = 'a'
+ if sys.hexversion < 0x03000000:
+ data = unicode(data)
+ ltrs = unicode(ltrs)
+ while all([(c in ltrs) for c in data]):
+ data = random_str_some_unicode(random.randint(1, \
+ max_string_length))
+ data = data.encode('utf-8')
+ out = self.write_readback(data, random_name(),
+ self.options)
+ self.assert_equal(out, data)

def test_str_unicode(self):
data = random_str_some_unicode(random.randint(1,
@@ -479,51 +480,41 @@ class TestPythonMatlabFormat(object):
self.options)
self.assert_equal(out, data)

- def test_numpy_scalar(self):
- for dt in self.dtypes:
- yield self.check_numpy_scalar, dt
-
- def test_numpy_array_1d(self):
- dts = copy.deepcopy(self.dtypes)
- dts.append('object')
- for dt in dts:
- yield self.check_numpy_array, dt, 1
-
- def test_numpy_array_2d(self):
- dts = copy.deepcopy(self.dtypes)
- dts.append('object')
- for dt in dts:
- yield self.check_numpy_array, dt, 2
-
- def test_numpy_array_3d(self):
- dts = copy.deepcopy(self.dtypes)
- dts.append('object')
- for dt in dts:
- yield self.check_numpy_array, dt, 3
-
- def test_numpy_matrix(self):
- dts = copy.deepcopy(self.dtypes)
- dts.append('object')
- for dt in dts:
- yield self.check_numpy_matrix, dt
-
- def test_numpy_empty(self):
- for dt in self.dtypes:
- yield self.check_numpy_empty, dt
-
- def test_numpy_stringlike_empty(self):
- dts = ['S', 'U']
- for dt in dts:
- for n in range(1,10):
- yield self.check_numpy_stringlike_empty, dt, n
-
- def test_numpy_structured_array(self):
- for i in range(1, 4):
- yield self.check_numpy_structured_array, i
-
- def test_numpy_structured_array_empty(self):
- for i in range(1, 4):
- yield self.check_numpy_structured_array_empty, i
+ @parameterized.expand((dtypes))
+ def test_numpy_scalar(self, dt):
+ self.check_numpy_scalar(dt)
+
+ @parameterized.expand((copy.deepcopy(dtypes) + ['object']))
+ def test_numpy_array_1d(self, dt):
+ self.check_numpy_array(dt, 1)
+
+ @parameterized.expand((copy.deepcopy(dtypes) + ['object']))
+ def test_numpy_array_2d(self, dt):
+ self.check_numpy_array(dt, 2)
+
+ @parameterized.expand((copy.deepcopy(dtypes) + ['object']))
+ def test_numpy_array_3d(self, dt):
+ self.check_numpy_array(dt, 3)
+
+ @parameterized.expand((copy.deepcopy(dtypes) + ['object']))
+ def test_numpy_matrix(self, dt):
+ self.check_numpy_matrix(dt)
+
+ @parameterized.expand((dtypes))
+ def test_numpy_empty(self, dt):
+ self.check_numpy_empty(dt)
+
+ @parameterized.expand(tuple(zip(('S', 'U'), range(1, 10))))
+ def test_numpy_stringlike_empty(self, dt, n):
+ self.check_numpy_stringlike_empty(dt, n)
+
+ @parameterized.expand([range(1, 4)])
+ def test_numpy_structured_array(self, i):
+ self.check_numpy_structured_array(i)
+
+ @parameterized.expand([range(1, 4)])
+ def test_numpy_structured_array_empty(self, i):
+ self.check_numpy_structured_array_empty(i)

def test_numpy_structured_array_unicode_fields(self):
# Makes a random 1d structured ndarray with non-ascii characters
@@ -537,27 +528,27 @@ class TestPythonMatlabFormat(object):
self.options)
self.assert_equal(out, data)

- @raises(NotImplementedError)
def test_numpy_structured_array_field_null_character(self):
- self.check_numpy_structured_array_field_special_char('\x00')
+ with pytest.raises(NotImplementedError):
+ self.check_numpy_structured_array_field_special_char('\x00')

- @raises(NotImplementedError)
def test_numpy_structured_array_field_forward_slash(self):
- self.check_numpy_structured_array_field_special_char('/')
+ with pytest.raises(NotImplementedError):
+ self.check_numpy_structured_array_field_special_char('/')

- def test_numpy_recarray(self):
- for i in range(1, 4):
- yield self.check_numpy_recarray, i
-
- def test_numpy_recarray_empty(self):
- for i in range(1, 4):
- yield self.check_numpy_recarray_empty, i
+ @parameterized.expand([range(1, 4)])
+ def test_numpy_recarray(self, i):
+ self.check_numpy_recarray(i)
+
+ @parameterized.expand([range(1, 4)])
+ def test_numpy_recarray_empty(self, i):
+ self.check_numpy_recarray_empty(i)

def test_numpy_recarray_unicode_fields(self):
# Makes a random 1d structured ndarray with non-ascii characters
# in its fields, converts it to a recarray, writes it and reads
# it back, and then compares it.
- shape = random_numpy_shape(1, \
+ shape = random_numpy_shape(1,
max_structured_ndarray_axis_length)
data = random_structured_numpy_array(shape,
nonascii_fields=True)
@@ -565,43 +556,42 @@ class TestPythonMatlabFormat(object):
self.options)
self.assert_equal(out, data)

- @raises(NotImplementedError)
def test_numpy_recarray_field_null_character(self):
- self.check_numpy_recarray_field_special_char('\x00')
+ with pytest.raises(NotImplementedError):
+ self.check_numpy_recarray_field_special_char('\x00')

- @raises(NotImplementedError)
def test_numpy_recarray_field_forward_slash(self):
- self.check_numpy_recarray_field_special_char('/')
+ with pytest.raises(NotImplementedError):
+ self.check_numpy_recarray_field_special_char('/')

- def test_numpy_chararray(self):
- dims = range(1, 4)
- for dim in dims:
- yield self.check_numpy_chararray, dim
-
- def test_numpy_chararray_empty(self):
- for n in range(1, 10):
- yield self.check_numpy_chararray_empty, n
-
- def test_numpy_sized_dtype_nested_0(self):
- for zero_shaped in (False, True):
- yield self.check_numpy_sized_dtype_nested_0, zero_shaped
-
- def test_numpy_sized_dtype_nested_1(self):
- for zero_shaped in (False, True):
- yield self.check_numpy_sized_dtype_nested_1, zero_shaped
-
- def test_numpy_sized_dtype_nested_2(self):
- for zero_shaped in (False, True):
- yield self.check_numpy_sized_dtype_nested_2, zero_shaped
-
- def test_numpy_sized_dtype_nested_3(self):
- for zero_shaped in (False, True):
- yield self.check_numpy_sized_dtype_nested_3, zero_shaped
-
- def test_python_collection(self):
- for tp in (list, tuple, set, frozenset, collections.deque):
- yield self.check_python_collection, tp, 'same-dims'
- yield self.check_python_collection, tp, 'diff-dims'
+ @parameterized.expand([range(1, 4)])
+ def test_numpy_chararray(self, dim):
+ self.check_numpy_chararray(dim)
+
+ @parameterized.expand([range(1, 10)])
+ def test_numpy_chararray_empty(self, n):
+ self.check_numpy_chararray_empty(n)
+
+ @parameterized.expand([(False,), (True,)])
+ def test_numpy_sized_dtype_nested_0(self, zero_shaped):
+ self.check_numpy_sized_dtype_nested_0(zero_shaped)
+
+ @parameterized.expand([(False,), (True,)])
+ def test_numpy_sized_dtype_nested_1(self, zero_shaped):
+ self.check_numpy_sized_dtype_nested_1(zero_shaped)
+
+ @parameterized.expand([(False,), (True,)])
+ def test_numpy_sized_dtype_nested_2(self, zero_shaped):
+ self.check_numpy_sized_dtype_nested_2(zero_shaped)
+
+ @parameterized.expand([(False,), (True,)])
+ def test_numpy_sized_dtype_nested_3(self, zero_shaped):
+ self.check_numpy_sized_dtype_nested_3(zero_shaped)
+
+ @parameterized.expand([(list, tuple, set, frozenset, collections.deque)])
+ def test_python_collection(self, tp):
+ self.check_python_collection(tp, 'same-dims')
+ self.check_python_collection(tp, 'diff-dims')

def test_dict(self):
data = random_dict()
@@ -609,42 +599,42 @@ class TestPythonMatlabFormat(object):
self.options)
self.assert_equal(out, data)

- @raises(NotImplementedError)
def test_dict_bytes_key(self):
- data = random_dict()
- key = random_bytes(max_dict_key_length)
- data[key] = random_int()
- out = self.write_readback(data, random_name(),
- self.options)
- self.assert_equal(out, data)
+ with pytest.raises(NotImplementedError):
+ data = random_dict()
+ key = random_bytes(max_dict_key_length)
+ data[key] = random_int()
+ out = self.write_readback(data, random_name(),
+ self.options)
+ self.assert_equal(out, data)

- @raises(NotImplementedError)
def test_dict_key_null_character(self):
- data = random_dict()
- if sys.hexversion >= 0x03000000:
- ch = '\x00'
- else:
- ch = unicode('\x00')
- key = ch.join([random_str_ascii(max_dict_key_length)
- for i in range(2)])
- data[key] = random_int()
- out = self.write_readback(data, random_name(),
- self.options)
- self.assert_equal(out, data)
+ with pytest.raises(NotImplementedError):
+ data = random_dict()
+ if sys.hexversion >= 0x03000000:
+ ch = '\x00'
+ else:
+ ch = unicode('\x00')
+ key = ch.join([random_str_ascii(max_dict_key_length)
+ for i in range(2)])
+ data[key] = random_int()
+ out = self.write_readback(data, random_name(),
+ self.options)
+ self.assert_equal(out, data)

- @raises(NotImplementedError)
def test_dict_key_forward_slash(self):
- data = random_dict()
- if sys.hexversion >= 0x03000000:
- ch = '/'
- else:
- ch = unicode('/')
- key = ch.join([random_str_ascii(max_dict_key_length)
- for i in range(2)])
- data[key] = random_int()
- out = self.write_readback(data, random_name(),
- self.options)
- self.assert_equal(out, data)
+ with pytest.raises(NotImplementedError):
+ data = random_dict()
+ if sys.hexversion >= 0x03000000:
+ ch = '/'
+ else:
+ ch = unicode('/')
+ key = ch.join([random_str_ascii(max_dict_key_length)
+ for i in range(2)])
+ data[key] = random_int()
+ out = self.write_readback(data, random_name(),
+ self.options)
+ self.assert_equal(out, data)


class TestPythonFormat(TestPythonMatlabFormat):
@@ -690,7 +680,7 @@ class TestNoneFormat(TestPythonMatlabFor
matlab_compatible=False)

# Add in float16 to the set of types tested.
- self.dtypes.append('float16')
+ # self.dtypes.append('float16') it doesn't seem to be used

# Won't throw an exception unlike the parent.
def test_str_ascii_encoded_utf8(self):
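In isolation, the recurring transformation in nose-to-pytest.patch looks like this; check_roundtrip is a hypothetical stand-in for the various check_* helpers above (a sketch, not code from the patch):

    import pytest

    def check_roundtrip(dt):
        # hypothetical helper standing in for the check_* functions
        assert dt in ('bool', 'int8')

    # nose collected generator tests of this shape:
    #     def test_roundtrip():
    #         for dt in ('bool', 'int8'):
    #             yield check_roundtrip, dt
    # pytest does not run yield tests, so each generator becomes a
    # parametrized test producing one independent test case per value:
    @pytest.mark.parametrize('dt', ['bool', 'int8'])
    def test_roundtrip(dt):
        check_roundtrip(dt)

For methods of test classes, the patch uses parameterized.expand instead of the decorator above; see the packager's note in the spec below.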
|
156
python-hdf5storage.changes
Normal file
156
python-hdf5storage.changes
Normal file
@@ -0,0 +1,156 @@
-------------------------------------------------------------------
Fri Aug 30 13:11:44 UTC 2024 - Ben Greiner <code@bnavigator.de>

- Unpin numpy 2
  * Add hdf5storage-pr134-numpy2.patch
  * gh#frejanordsiek/hdf5storage#134 (backported)
- Make it noarch again

-------------------------------------------------------------------
Sun Jul 7 12:31:47 UTC 2024 - Ben Greiner <code@bnavigator.de>

- Limit to numpy <2: Not compatible

-------------------------------------------------------------------
Wed Apr 5 07:07:55 UTC 2023 - Steve Kowalik <steven.kowalik@suse.com>

- Update to 0.1.19, a bugfix release:
  * Issue #122 and #124. Replaced use of deprecated ``numpy.asscalar``
    functions with the ``numpy.ndarray.item`` method.
  * Issue #123. Forced the use of English month and day of the week names
    in the HDF5 header for MATLAB compatibility.
  * Issue #125. Fixed accidental collection of
    ``pkg_resources.parse_version`` from setuptools as a Marshaller now
    that it is a class.

-------------------------------------------------------------------
Sat Jan 29 08:31:32 UTC 2022 - Matej Cepl <mcepl@suse.com>

- Update to 0.1.18 (performance-improving release):
  - Pull Request #111 from Daniel Hrisca. Many repeated calls to
    the __getitem__ methods of objects were turned into single
    calls.
  - Further reductions in __getitem__ calls in the spirit of PR
    #111.
- Update to 0.1.17 (bugfix and deprecation workaround release):
  - Issue #109. Fixed the fix for Issue #102 for 32-bit platforms
    (previous fix was segfaulting).
  - Moved to using pkg_resources.parse_version from setuptools
    with distutils.version classes as a fallback instead of just
    the latter to prepare for the removal of distutils (PEP 632)
    and prevent warnings on Python versions where it is marked as
    deprecated.
  - Issue #110. Changed all uses of the tostring method on numpy
    types to using tobytes if available, with tostring as the
    fallback for old versions of numpy where it is not.
- Add nose-to-pytest.patch which allows use of pytest instead of
  nose (not based on the upstream solution).

-------------------------------------------------------------------
Mon Mar 8 18:40:19 UTC 2021 - Ben Greiner <code@bnavigator.de>

- Update to 0.1.16
  * Issue #81 and #82. h5py.File will require the mode to be
    passed explicitly in the future. All calls without passing it
    were fixed to pass it.
  * Issue #102. Added support for h5py 3.0 and 3.1.
  * Issue #73. Fixed bug where a missing variable in loadmat would
    cause the function to think that the file is a pre v7.3 format
    MAT file and fall back to scipy.io.loadmat, which won't work
    since the file is a v7.3 format MAT file.
  * Fixed formatting issues in the docstrings and the
    documentation that prevented the documentation from building.
- Release 0.1.15
  * Issue #68. Fixed bug where str and numpy.unicode_ strings (but
    not ndarrays of them) were saved in uint32 format regardless
    of the value of Options.convert_numpy_bytes_to_utf16.
  * Issue #70. Updated setup.py and requirements.txt to specify
    the maximum versions of numpy and h5py that can be used for
    specific python versions (avoid versions with dropped support).
  * Issue #71. Fixed bug where the 'python_fields' attribute
    wouldn't always be written when doing python metadata for data
    written in a struct-like fashion. The bug caused the field
    order to not be preserved when writing and reading.
  * Fixed an assertion in the tests to handle field re-ordering
    when no metadata is used for structured dtypes that only
    worked on older versions of numpy.
  * Issue #72. Fixed bug where python collections filled with
    ndarrays that all have the same shape were converted to
    multi-dimensional object ndarrays instead of a 1D object
    ndarray of the elements.
- Skip python2 (dropped upstream) and python36 (NumPy 1.20, NEP 29)
- Don't build for 32-bit gh#frejanordsiek/hdf5storage#109

-------------------------------------------------------------------
Thu May 24 17:34:42 UTC 2018 - toddrme2178@gmail.com

- Spec file cleanups

-------------------------------------------------------------------
Sun Jun 11 06:17:53 UTC 2017 - toddrme2178@gmail.com

- Implement single-spec version
- Fix source URL
- Update to version 0.1.14.
  + Bugfix release that also added a couple features.
    * Issue #45. Fixed syntax errors in unicode strings for Python 3.0 to 3.2.
    * Issues #44 and #47. Fixed bugs in testing of conversion and storage of string types.
    * Issue #46. Fixed raising of RuntimeWarnings in tests due to signalling NaNs.
    * Added requirements files for building documentation and running tests.
    * Made it so that Matlab compatibility tests are skipped if Matlab is not found, instead of raising errors.
- Update to version 0.1.13.
  + Bugfix release fixing the following bug.
    * Issue #36. Fixed bugs in writing int and long to HDF5 and their tests on 32 bit systems.
- Update to version 0.1.12.
  + Bugfix release fixing the following bugs. In addition, copyright years were also updated and notices put in the Matlab files used for testing.
    * Issue #32. Fixed transposing before reshaping np.ndarray when reading from HDF5 files where python metadata was stored but not Matlab metadata.
    * Issue #33. Fixed the loss of the number of characters when reading empty numpy string arrays.
    * Issue #34. Fixed a conversion error when np.chararray are written with Matlab metadata.
- Update to version 0.1.11.
  + Bugfix release fixing the following.
    * Issue #30. Fixed loadmat not opening files in read mode.
- Update to version 0.1.10.
  + Minor feature/performance fix release doing the following.
    * Issue #29. Added writes and reads functions to write and read more than one piece of data at a time and made savemat and loadmat use them to increase performance. Previously, the HDF5 file was being opened and closed for each piece of data, which impacted performance, especially for large files.
- Update to version 0.1.9.
  + Bugfix and minor feature release doing the following.
    * Issue #23. Fixed bug where a structured np.ndarray with a field name of 'O' could never be written as an HDF5 COMPOUND Dataset (falsely thought a field's dtype was object).
    * Issue #6. Added optional data compression and the storage of data checksums. Controlled by several new options.
- Update to version 0.1.8.
  + Bugfix release fixing the following two bugs.
    * Issue #21. Fixed bug where the 'MATLAB_class' Attribute is not set when writing dict types when writing MATLAB metadata.
    * Issue #22. Fixed bug where null characters ('\x00') and forward slashes ('/') were allowed in dict keys and the field names of structured np.ndarray (except that forward slashes are allowed when the structured_numpy_ndarray_as_struct is not set as is the case when the matlab_compatible option is set). These cause problems for the h5py package and the HDF5 library. NotImplementedError is now thrown in these cases.
- Update to version 0.1.7.
  + Bugfix release with an added compatibility option and some added test code. Did the following.
    * Fixed an issue reading variables larger than 2 GB in MATLAB MAT v7.3 files when no explicit variable names to read are given to hdf5storage.loadmat. Fix also reduces memory consumption and processing time a little bit by removing an unneeded memory copy.
    * Options now will accept any additional keyword arguments it doesn't support, ignoring them, to be API compatible with future package versions with added options.
    * Added tests for reading data that has been compressed or had other HDF5 filters applied.
- Update to version 0.1.6.
  + Bugfix release fixing a bug with determining the maximum size of a Python 2.x int on a 32-bit system.
- Update to version 0.1.5.
  + Bugfix release fixing the following bugs.
    * Fixed bug where an int could be stored that is too big to fit into an int when read back in Python 2.x. When it is too big, it is converted to a long.
    * Fixed a bug where an int or long that is too big to fit into an np.int64 raised the wrong exception.
    * Fixed bug where field names for structured np.ndarray with non-ASCII characters (assumed to be UTF-8 encoded in Python 2.x) can't be read or written properly.
    * Fixed bug where np.bytes_ with non-ASCII characters were converted incorrectly to UTF-16 when that option is set (set implicitly when doing MATLAB compatibility). Now, it throws a NotImplementedError.
- Update to version 0.1.4.
  + Bugfix release fixing the following bugs. Thanks goes to mrdomino for writing the bug fixes.
    * Fixed bug where dtype is used as a keyword parameter of np.ndarray.astype when it is a positional argument.
    * Fixed error caused by h5py.__version__ being absent on Ubuntu 12.04.
- Update to version 0.1.3.
  + Bugfix release fixing the following bug.
    * Fixed broken ability to correctly read and write empty structured np.ndarray (has fields).
- Update to version 0.1.2.
  + Bugfix release fixing the following bugs.
    * Removed mistaken support for np.float16 for h5py versions before 2.2 since that was when support for it was introduced.
    * Structured np.ndarray where one or more fields is of the 'object' dtype can now be written without an error when the structured_numpy_ndarray_as_struct option is not set. They are written as an HDF5 Group, as if the option was set.
    * Support for the 'MATLAB_fields' Attribute for data types that are structures in MATLAB has been added for when the version of the h5py package being used is 2.3 or greater. Support is still missing for earlier versions (this package requires a minimum version of 2.1).
    * The check for non-unicode string keys (str in Python 3 and unicode in Python 2) in the type dict is done right before any changes are made to the HDF5 file instead of in the middle so that no changes are applied if an invalid key is present.
    * HDF5 userblock set with the proper metadata for MATLAB support right at the beginning of when data is being written to an HDF5 file instead of at the end, meaning the writing can crash and the file will still be a valid MATLAB file.

-------------------------------------------------------------------
Fri May 30 13:26:15 UTC 2014 - toddrme2178@gmail.com

- initial version
python-hdf5storage.spec (new file, 77 lines)
@@ -0,0 +1,77 @@
#
# spec file for package python-hdf5storage
#
# Copyright (c) 2024 SUSE LLC
#
# All modifications and additions to the file contributed by third parties
# remain the property of their copyright owners, unless otherwise agreed
# upon. The license for this file, and modifications and additions to the
# file, is the same license as for the pristine package itself (unless the
# license for the pristine package is not an Open Source License, in which
# case the license is the MIT License). An "Open Source License" is a
# license that conforms to the Open Source Definition (Version 1.9)
# published by the Open Source Initiative.

# Please submit bugfixes or comments via https://bugs.opensuse.org/
#


Name:           python-hdf5storage
Version:        0.1.19
Release:        0
Summary:        Utilities to read/write HDF5 files, including MATLAB v7.3 MAT files
License:        BSD-3-Clause
URL:            https://github.com/frejanordsiek/hdf5storage
Source:         https://files.pythonhosted.org/packages/source/h/hdf5storage/hdf5storage-%{version}.tar.gz
# PATCH-FIX-UPSTREAM nose-to-pytest.patch gh#frejanordsiek/hdf5storage#96 mcepl@suse.com, uses pytest instead of nose
Patch0:         nose-to-pytest.patch
# PATCH-FIX-UPSTREAM hdf5storage-pr134-numpy2.patch gh#frejanordsiek/hdf5storage#135 (backport to 0.1)
Patch1:         hdf5storage-pr134-numpy2.patch
BuildRequires:  %{python_module h5py >= 3.3}
BuildRequires:  %{python_module numpy}
BuildRequires:  %{python_module pip}
BuildRequires:  %{python_module setuptools}
BuildRequires:  %{python_module wheel}
BuildRequires:  fdupes
BuildRequires:  python-rpm-macros
# SECTION test requirements
BuildRequires:  %{python_module pytest >= 6.0}
# I don't know how to do @pytest.mark.parametrize on class methods
BuildRequires:  %{python_module parameterized}
BuildRequires:  %{python_module scipy}
# /SECTION
Requires:       python-h5py >= 3.3
Requires:       python-numpy
Recommends:     python-scipy
BuildArch:      noarch
%python_subpackages

%description
This Python package provides high level utilities to read/write a
variety of Python types to/from HDF5 (Hierarchical Data Format)
formatted files. This package also provides support for MATLAB MAT
v7.3 formatted files, which are just HDF5 files with a different
extension and some extra meta-data.

%prep
%autosetup -p1 -n hdf5storage-%{version}
# fix end-of-line encoding
sed -i 's/\r$//' COPYING.txt

%build
%pyproject_wheel

%install
%pyproject_install
%python_expand %fdupes %{buildroot}%{$python_sitelib}

%check
%pytest

%files %{python_files}
%doc README.rst
%license COPYING.txt
%{python_sitelib}/hdf5storage/
%{python_sitelib}/hdf5storage-%{version}.dist-info

%changelog