Sync from SUSE:SLFO:Main hdf5 revision 2b2c4d975fc4e3ab2e2093b936dc59fd
Commit: d2abe55ac0

.gitattributes (vendored, normal file, 23 lines added)
@@ -0,0 +1,23 @@
## Default LFS
*.7z filter=lfs diff=lfs merge=lfs -text
*.bsp filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.gem filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.jar filter=lfs diff=lfs merge=lfs -text
*.lz filter=lfs diff=lfs merge=lfs -text
*.lzma filter=lfs diff=lfs merge=lfs -text
*.obscpio filter=lfs diff=lfs merge=lfs -text
*.oxt filter=lfs diff=lfs merge=lfs -text
*.pdf filter=lfs diff=lfs merge=lfs -text
*.png filter=lfs diff=lfs merge=lfs -text
*.rpm filter=lfs diff=lfs merge=lfs -text
*.tbz filter=lfs diff=lfs merge=lfs -text
*.tbz2 filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.ttf filter=lfs diff=lfs merge=lfs -text
*.txz filter=lfs diff=lfs merge=lfs -text
*.whl filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
@@ -0,0 +1,66 @@
From: Egbert Eich <eich@suse.com>
Date: Sat Feb 11 13:54:17 2023 +0100
Subject: Check for overflow when calculating on-disk attribute data size (#2459)
Patch-mainline: Not yet
Git-repo: https://github.com/HDFGroup/hdf5
Git-commit: 0d026daa13a81be72495872f651c036fdc84ae5e
References:

A bogus hdf5 file may contain dataspace messages with sizes
which cause the on-disk data sizes to exceed what is addressable.
When calculating the size, make sure the multiplication does not
overflow.
The test case was crafted in a way that the overflow caused the
size to be 0.

This fixes CVE-2021-37501 / Bug #2458.

Signed-off-by: Egbert Eich <eich@suse.com>
Signed-off-by: Egbert Eich <eich@suse.de>
---
 src/H5Oattr.c   |  3 +++
 src/H5private.h | 18 ++++++++++++++++++
 2 files changed, 21 insertions(+)
diff --git a/src/H5Oattr.c b/src/H5Oattr.c
index 4dee7aa187..3ef0b99aa4 100644
--- a/src/H5Oattr.c
+++ b/src/H5Oattr.c
@@ -235,6 +235,9 @@ H5O_attr_decode(H5F_t *f, H5O_t *open_oh, unsigned H5_ATTR_UNUSED mesg_flags, un

     /* Compute the size of the data */
     H5_CHECKED_ASSIGN(attr->shared->data_size, size_t, ds_size * (hsize_t)dt_size, hsize_t);
+    H5_CHECK_MUL_OVERFLOW(attr->shared->data_size, ds_size, dt_size,
+                          HGOTO_ERROR(H5E_RESOURCE, H5E_OVERFLOW, NULL,
+                                      "data size exceeds addressable range"))

     /* Go get the data */
     if (attr->shared->data_size) {
diff --git a/src/H5private.h b/src/H5private.h
index 931d7b9046..a115aee1a4 100644
--- a/src/H5private.h
+++ b/src/H5private.h
@@ -1605,6 +1605,24 @@ H5_DLL int HDvasprintf(char **bufp, const char *fmt, va_list _ap);
 #define H5_CHECK_OVERFLOW(var, vartype, casttype)
 #endif /* NDEBUG */

+/*
+ * A macro for checking whether a multiplication has overflown
+ * r is assumed to be the result of a prior multiplication of a and b
+ */
+#define H5_CHECK_MUL_OVERFLOW(r, a, b, err)     \
+    {                                           \
+        bool mul_overflow = false;              \
+        if (r != 0) {                           \
+            if (r / a != b)                     \
+                mul_overflow = true;            \
+        } else {                                \
+            if (a != 0 && b != 0)               \
+                mul_overflow = true;            \
+        }                                       \
+        if (mul_overflow)                       \
+            err                                 \
+    }
+
 /*
  * A macro for detecting over/under-flow when assigning between types
  */
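The H5_CHECK_MUL_OVERFLOW macro above encodes a divide-back test: a product r = a * b is trusted only if dividing it by one factor gives back the other. The following is a minimal standalone C sketch of the same idea outside of HDF5; function and variable names are illustrative only and not part of the patch.

/*
 * Standalone sketch of the divide-back overflow check used above.
 * Returns true and stores a * b in *out only when the product fits in size_t.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool checked_mul_size(size_t a, size_t b, size_t *out)
{
    size_t r = a * b;            /* unsigned arithmetic wraps on overflow */

    if (r != 0) {
        if (r / a != b)          /* wrapped: quotient no longer matches */
            return false;
    }
    else if (a != 0 && b != 0) { /* a product of non-zero factors cannot be 0 */
        return false;
    }

    *out = r;
    return true;
}

int main(void)
{
    size_t ok, bad;

    printf("small: %d\n", checked_mul_size(1024, 8, &ok));          /* prints 1 */
    printf("huge:  %d\n", checked_mul_size(SIZE_MAX / 2, 3, &bad)); /* prints 0 */
    return 0;
}

The r == 0 branch is what catches the crafted test case described above, where the wrapped product happened to be exactly 0.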
Compound-datatypes-may-not-have-members-of-size-0.patch (normal file, 51 lines added)
@@ -0,0 +1,51 @@
From: Egbert Eich <eich@suse.com>
Date: Wed Oct 5 15:47:54 2022 +0200
Subject: Compound datatypes may not have members of size 0
Patch-mainline: Not yet
Git-repo: ssh://eich@192.168.122.1:/home/eich/sources/HPC/hdf5
Git-commit: 88ea94d38fdfecba173dbea18502a5f82a46601b
References:

A member size of 0 may lead to an FPE later on, as reported in
CVE-2021-46244. To avoid this, check for it as soon as the
member is decoded.
This should probably be done in H5O_dtype_decode_helper() already;
however, it is not clear whether all sizes are expected to be != 0.
This fixes CVE-2021-46244.

Signed-off-by: Egbert Eich <eich@suse.com>
Signed-off-by: Egbert Eich <eich@suse.de>
---
 src/H5Odtype.c | 6 ++++++
 src/H5T.c      | 2 ++
 2 files changed, 8 insertions(+)
diff --git a/src/H5Odtype.c b/src/H5Odtype.c
|
||||
index 9af79f4e9a..d35fc65322 100644
|
||||
--- a/src/H5Odtype.c
|
||||
+++ b/src/H5Odtype.c
|
||||
@@ -333,6 +333,12 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t
|
||||
H5MM_xfree(dt->shared->u.compnd.memb);
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTDECODE, FAIL, "unable to decode member type")
|
||||
} /* end if */
|
||||
+ if (temp_type->shared->size == 0) {
|
||||
+ for (j = 0; j <= i; j++)
|
||||
+ H5MM_xfree(dt->shared->u.compnd.memb[j].name);
|
||||
+ H5MM_xfree(dt->shared->u.compnd.memb);
|
||||
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTDECODE, FAIL, "invalid field size in member type")
|
||||
+ }
|
||||
|
||||
/* Upgrade the version if we can and it is necessary */
|
||||
if (can_upgrade && temp_type->shared->version > version) {
|
||||
diff --git a/src/H5T.c b/src/H5T.c
|
||||
index 3bb220ac26..04b96c5676 100644
|
||||
--- a/src/H5T.c
|
||||
+++ b/src/H5T.c
|
||||
@@ -3591,6 +3591,8 @@ H5T__complete_copy(H5T_t *new_dt, const H5T_t *old_dt, H5T_shared_t *reopened_fo
|
||||
if (new_dt->shared->u.compnd.memb[i].type->shared->size !=
|
||||
old_dt->shared->u.compnd.memb[old_match].type->shared->size) {
|
||||
/* Adjust the size of the member */
|
||||
+ if (old_dt->shared->u.compnd.memb[old_match].size == 0)
|
||||
+ HGOTO_ERROR(H5E_DATATYPE, H5E_BADVALUE, FAIL, "invalid field size in datatype")
|
||||
new_dt->shared->u.compnd.memb[i].size =
|
||||
(old_dt->shared->u.compnd.memb[old_match].size * tmp->shared->size) /
|
||||
old_dt->shared->u.compnd.memb[old_match].type->shared->size;
|
Disable-phdf5-tests.patch (normal file, 36 lines added)
@@ -0,0 +1,36 @@
|
||||
From: Egbert Eich <eich@suse.com>
|
||||
Date: Sat Nov 17 20:11:13 2018 +0100
|
||||
Subject: Disable phdf5 tests
|
||||
Patch-mainline: never
|
||||
Git-commit: 16d758d17d9c49ab9e34d510675929b9ccc8be5a
|
||||
References:
|
||||
|
||||
Signed-off-by: Egbert Eich <eich@suse.com>
|
||||
Signed-off-by: Egbert Eich <eich@suse.de>
|
||||
---
|
||||
Index: hdf5-1.12.2/testpar/CMakeTests.cmake
|
||||
===================================================================
|
||||
--- hdf5-1.12.2.orig/testpar/CMakeTests.cmake
|
||||
+++ hdf5-1.12.2/testpar/CMakeTests.cmake
|
||||
@@ -43,7 +43,7 @@ foreach (skiptest ${SKIP_tests})
|
||||
set (SKIP_testphdf5 "${SKIP_testphdf5};-x;${skiptest}")
|
||||
endforeach ()
|
||||
|
||||
-add_test (NAME MPI_TEST_testphdf5 COMMAND ${MPIEXEC_EXECUTABLE} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS} ${MPIEXEC_PREFLAGS} $<TARGET_FILE:testphdf5> ${MPIEXEC_POSTFLAGS} ${SKIP_testphdf5})
|
||||
+##add_test (NAME MPI_TEST_testphdf5 COMMAND ${MPIEXEC_EXECUTABLE} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS} ${MPIEXEC_PREFLAGS} $<TARGET_FILE:testphdf5> ${MPIEXEC_POSTFLAGS} ${SKIP_testphdf5})
|
||||
set_tests_properties (MPI_TEST_testphdf5 PROPERTIES
|
||||
FIXTURES_REQUIRED par_clear_testphdf5
|
||||
ENVIRONMENT "HDF5_ALARM_SECONDS=3600;srcdir=${HDF5_TEST_PAR_BINARY_DIR}"
|
||||
Index: hdf5-1.12.2/testpar/Makefile.am
|
||||
===================================================================
|
||||
--- hdf5-1.12.2.orig/testpar/Makefile.am
|
||||
+++ hdf5-1.12.2/testpar/Makefile.am
|
||||
@@ -30,7 +30,7 @@ check_SCRIPTS = $(TEST_SCRIPT_PARA)
|
||||
|
||||
# Test programs. These are our main targets.
|
||||
#
|
||||
-TEST_PROG_PARA=t_mpi t_bigio testphdf5 t_cache t_cache_image t_pread t_pshutdown t_prestart t_init_term t_shapesame t_filters_parallel t_2Gio
|
||||
+TEST_PROG_PARA=t_mpi t_bigio t_cache t_cache_image t_pread t_pshutdown t_prestart t_init_term t_shapesame t_filters_parallel t_2Gio
|
||||
|
||||
# t_pflush1 and t_pflush2 are used by testpflush.sh
|
||||
check_PROGRAMS = $(TEST_PROG_PARA) t_pflush1 t_pflush2
|
@ -0,0 +1,26 @@
|
||||
From: Egbert Eich <eich@suse.com>
|
||||
Date: Sun Oct 9 08:08:24 2022 +0200
|
||||
Subject: Fix error message: not the name but the link information is parsed
|
||||
Patch-mainline: Not yet
|
||||
Git-repo: ssh://eich@192.168.122.1:/home/eich/sources/HPC/hdf5
|
||||
Git-commit: 7b0b8bc5703ace47aec51d7f60c1149cd3e383b1
|
||||
References:
|
||||
|
||||
Signed-off-by: Egbert Eich <eich@suse.com>
|
||||
Signed-off-by: Egbert Eich <eich@suse.de>
|
||||
---
|
||||
src/H5Olink.c | 2 +-
|
||||
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||
diff --git a/src/H5Olink.c b/src/H5Olink.c
|
||||
index 51c44a36b0..ee2a413dc1 100644
|
||||
--- a/src/H5Olink.c
|
||||
+++ b/src/H5Olink.c
|
||||
@@ -245,7 +245,7 @@ H5O__link_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSE
|
||||
/* Make sure that length doesn't exceed buffer size, which could
|
||||
occur when the file is corrupted */
|
||||
if (p + len > p_end)
|
||||
- HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "name length causes read past end of buffer")
|
||||
+ HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "link information length causes read past end of buffer")
|
||||
|
||||
if (NULL == (lnk->u.ud.udata = H5MM_malloc((size_t)len)))
|
||||
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
|
@ -0,0 +1,33 @@
|
||||
From: Egbert Eich <eich@suse.com>
|
||||
Date: Tue Sep 27 10:29:56 2022 +0200
|
||||
Subject: H5IMget_image_info: H5Sget_simple_extent_dims() does not exceed array size
|
||||
Patch-mainline: Not yet
|
||||
Git-repo: ssh://eich@192.168.122.1:/home/eich/sources/HPC/hdf5
|
||||
Git-commit: c1baab0937c8956a15efc41240f68d573c7b7324
|
||||
References:
|
||||
|
||||
Malformed hdf5 files may provide more dimensions than the array dim[] is
|
||||
able to hold. Check number of elements first by calling
|
||||
H5Sget_simple_extent_dims() with NULL for both 'dims' and 'maxdims' arguments.
|
||||
This will cause the function to return only the number of dimensions.
|
||||
|
||||
This fixes CVE-2018-17439
|
||||
|
||||
Signed-off-by: Egbert Eich <eich@suse.com>
|
||||
Signed-off-by: Egbert Eich <eich@suse.de>
|
||||
---
|
||||
hl/src/H5IM.c | 2 ++
|
||||
1 file changed, 2 insertions(+)
|
||||
diff --git a/hl/src/H5IM.c b/hl/src/H5IM.c
|
||||
index ff10d573c7..e37c696e25 100644
|
||||
--- a/hl/src/H5IM.c
|
||||
+++ b/hl/src/H5IM.c
|
||||
@@ -283,6 +283,8 @@ H5IMget_image_info(hid_t loc_id, const char *dset_name, hsize_t *width, hsize_t
|
||||
if ((sid = H5Dget_space(did)) < 0)
|
||||
goto out;
|
||||
|
||||
+ if (H5Sget_simple_extent_dims(sid, NULL, NULL) > IMAGE24_RANK)
|
||||
+ goto out;
|
||||
/* Get dimensions */
|
||||
if (H5Sget_simple_extent_dims(sid, dims, NULL) < 0)
|
||||
goto out;
|
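The guard added above relies on H5Sget_simple_extent_dims() reporting only the rank when both array arguments are NULL. A hedged sketch of that calling pattern follows; MAX_RANK and read_image_dims() are illustrative names, and sid is assumed to be a valid dataspace handle obtained from H5Dget_space().

/*
 * Sketch (not library code): query the rank first, then fill a fixed-size
 * dims[] array only when it is large enough to hold all dimensions.
 */
#include "hdf5.h"

#define MAX_RANK 3 /* e.g. height, width, planes for a 24-bit image */

static int read_image_dims(hid_t sid, hsize_t dims[MAX_RANK])
{
    int rank = H5Sget_simple_extent_dims(sid, NULL, NULL); /* rank only */

    if (rank < 0 || rank > MAX_RANK)
        return -1; /* malformed file: refuse instead of overrunning dims[] */

    if (H5Sget_simple_extent_dims(sid, dims, NULL) < 0)
        return -1;

    return rank;
}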
@@ -0,0 +1,85 @@
From: Egbert Eich <eich@suse.com>
Date: Tue Oct 4 23:09:01 2022 +0200
Subject: H5O__pline_decode() Make more resilient to out-of-bounds read
Patch-mainline: Not yet
Git-repo: ssh://eich@192.168.122.1:/home/eich/sources/HPC/hdf5
Git-commit: 35b798ca7542ce45ef016859b8e70d57b7f89cfe
References:

Malformed hdf5 files may have truncated content which does not match
the expected size. When this function attempts to decode them, it will
read past the end of the allocated space, which may lead to a crash.
Make sure each element is within bounds before reading.

This fixes CVE-2019-8396.

Signed-off-by: Egbert Eich <eich@suse.com>
Signed-off-by: Egbert Eich <eich@suse.de>
---
 src/H5Opline.c  | 17 +++++++++++++--
 src/H5private.h |  3 +++
 2 files changed, 18 insertions(+), 2 deletions(-)
diff --git a/src/H5Opline.c b/src/H5Opline.c
|
||||
index ffc4557ffc..a532aa4512 100644
|
||||
--- a/src/H5Opline.c
|
||||
+++ b/src/H5Opline.c
|
||||
@@ -110,6 +110,14 @@ H5FL_DEFINE(H5O_pline_t);
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
+static char err[] = "ran off the end of the buffer: current p = %p, p_end = %p";
|
||||
+
|
||||
+#define VERIFY_LIMIT(p,s,l) \
|
||||
+ if (p + s - 1 > l) { \
|
||||
+ HCOMMON_ERROR(H5E_RESOURCE, H5E_NOSPACE, err, p + s, l); \
|
||||
+ HGOTO_DONE(NULL) \
|
||||
+ };
|
||||
+
|
||||
static void *
|
||||
H5O__pline_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSED mesg_flags,
|
||||
unsigned H5_ATTR_UNUSED *ioflags, size_t p_size, const uint8_t *p)
|
||||
@@ -159,6 +167,7 @@ H5O__pline_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, unsign
|
||||
/* Decode filters */
|
||||
for (i = 0, filter = &pline->filter[0]; i < pline->nused; i++, filter++) {
|
||||
/* Filter ID */
|
||||
+ VERIFY_LIMIT(p, 6, p_end) /* 6 bytes minimum */
|
||||
UINT16DECODE(p, filter->id);
|
||||
|
||||
/* Length of filter name */
|
||||
@@ -168,6 +177,7 @@ H5O__pline_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, unsign
|
||||
UINT16DECODE(p, name_length);
|
||||
if (pline->version == H5O_PLINE_VERSION_1 && name_length % 8)
|
||||
HGOTO_ERROR(H5E_PLINE, H5E_CANTLOAD, NULL, "filter name length is not a multiple of eight")
|
||||
+ VERIFY_LIMIT(p, 4, p_end) /* with name_length 4 bytes to go */
|
||||
} /* end if */
|
||||
|
||||
/* Filter flags */
|
||||
@@ -179,9 +189,12 @@ H5O__pline_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, unsign
|
||||
/* Filter name, if there is one */
|
||||
if (name_length) {
|
||||
size_t actual_name_length; /* Actual length of name */
|
||||
-
|
||||
+ size_t len = (size_t)(p_end - p + 1);
|
||||
/* Determine actual name length (without padding, but with null terminator) */
|
||||
- actual_name_length = HDstrlen((const char *)p) + 1;
|
||||
+ actual_name_length = HDstrnlen((const char *)p, len);
|
||||
+ if (actual_name_length == len)
|
||||
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "filter name not null terminated")
|
||||
+ actual_name_length += 1; /* include \0 byte */
|
||||
HDassert(actual_name_length <= name_length);
|
||||
|
||||
/* Allocate space for the filter name, or use the internal buffer */
|
||||
diff --git a/src/H5private.h b/src/H5private.h
|
||||
index bc00f120d2..3285c36441 100644
|
||||
--- a/src/H5private.h
|
||||
+++ b/src/H5private.h
|
||||
@@ -1485,6 +1485,9 @@ H5_DLL H5_ATTR_CONST int Nflock(int fd, int operation);
|
||||
#ifndef HDstrlen
|
||||
#define HDstrlen(S) strlen(S)
|
||||
#endif
|
||||
+#ifndef HDstrnlen
|
||||
+#define HDstrnlen(S,L) strnlen(S,L)
|
||||
+#endif
|
||||
#ifndef HDstrncat
|
||||
#define HDstrncat(X, Y, Z) strncat(X, Y, Z)
|
||||
#endif
|
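The HDstrnlen() wrapper introduced above lets the decoder measure the filter name without assuming a terminating NUL is present in the file data. A standalone C sketch of that bounded-scan pattern (names are illustrative, not HDF5 code):

/*
 * Never call strlen() on bytes read from a file: bound the scan by the number
 * of bytes actually left in the buffer and fail if no NUL was found.
 */
#include <stdio.h>
#include <string.h>

/* Returns the name length including the NUL, or 0 if it is not terminated. */
static size_t bounded_name_len(const char *p, size_t bytes_left)
{
    size_t n = strnlen(p, bytes_left);

    if (n == bytes_left)  /* reached the end of the buffer without a NUL */
        return 0;

    return n + 1;         /* include the terminating NUL, as the decoder does */
}

int main(void)
{
    char buf[8] = { 'd', 'e', 'f', 'l', 'a', 't', 'e', '!' }; /* no NUL */

    printf("%zu\n", bounded_name_len(buf, sizeof(buf)));      /* 0: rejected */
    printf("%zu\n", bounded_name_len("shuffle", 16));         /* 8 */
    return 0;
}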
@@ -0,0 +1,35 @@
From: Egbert Eich <eich@suse.com>
Date: Wed Sep 28 14:54:58 2022 +0200
Subject: H5O_dtype_decode_helper: Parent of enum needs to have same size as enum itself
Patch-mainline: Not yet
Git-repo: ssh://eich@192.168.122.1:/home/eich/sources/HPC/hdf5
Git-commit: d39a27113ef75058f236b0606a74b4af5767c4e7
References:

The size of the enumeration values is determined by the size of the parent.
Functions accessing the enumeration values use the size of the enumeration
to determine the size of each element and how much data to copy. Thus the
size of the enumeration and its parent need to match.
Check here to avoid unpleasant surprises later.

This fixes CVE-2018-14031.

Signed-off-by: Egbert Eich <eich@suse.com>
Signed-off-by: Egbert Eich <eich@suse.de>
---
 src/H5Odtype.c | 3 +++
 1 file changed, 3 insertions(+)
diff --git a/src/H5Odtype.c b/src/H5Odtype.c
|
||||
index 9af79f4e9a..dc2b904362 100644
|
||||
--- a/src/H5Odtype.c
|
||||
+++ b/src/H5Odtype.c
|
||||
@@ -472,6 +472,9 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t
|
||||
if (H5O__dtype_decode_helper(ioflags, pp, dt->shared->parent) < 0)
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTDECODE, FAIL, "unable to decode parent datatype")
|
||||
|
||||
+ if (dt->shared->parent->shared->size != dt->shared->size)
|
||||
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTDECODE, FAIL, "ENUM size does not match parent")
|
||||
+
|
||||
/* Check if the parent of this enum has a version greater than the
|
||||
* enum itself. */
|
||||
H5O_DTYPE_CHECK_VERSION(dt, version, dt->shared->parent->shared->version, ioflags, "enum", FAIL)
|
@@ -0,0 +1,76 @@
From: Egbert Eich <eich@suse.com>
Date: Wed Oct 5 07:17:24 2022 +0200
Subject: H5O_fsinfo_decode() Make more resilient to out-of-bounds read
Patch-mainline: Not yet
Git-repo: ssh://eich@192.168.122.1:/home/eich/sources/HPC/hdf5
Git-commit: 8aee14b3a19858a08e3fabdef6ff925b47d4ce2c
References:

Malformed hdf5 files may have truncated content which does not match
the expected size. When this function attempts to decode them, it will
read past the end of the allocated space, which may lead to a crash.
Make sure each element is within bounds before reading.

This fixes CVE-2021-45830.

Signed-off-by: Egbert Eich <eich@suse.com>
Additions
Signed-off-by: Egbert Eich <eich@suse.de>
---
 src/H5Ofsinfo.c | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)
diff --git a/src/H5Ofsinfo.c b/src/H5Ofsinfo.c
|
||||
index 9f6514a291..15cbb5ae7b 100644
|
||||
--- a/src/H5Ofsinfo.c
|
||||
+++ b/src/H5Ofsinfo.c
|
||||
@@ -88,6 +88,13 @@ H5FL_DEFINE_STATIC(H5O_fsinfo_t);
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
+static char err[] = "ran off end of input buffer while decoding";
|
||||
+#define VERIFY_LIMIT(p,s,l) \
|
||||
+ if (p + s - 1 > l) { \
|
||||
+ HCOMMON_ERROR(H5E_RESOURCE, H5E_NOSPACE, err); \
|
||||
+ HGOTO_DONE(NULL) \
|
||||
+ }
|
||||
+
|
||||
static void *
|
||||
H5O__fsinfo_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSED mesg_flags,
|
||||
unsigned H5_ATTR_UNUSED *ioflags, size_t p_size, const uint8_t *p)
|
||||
@@ -112,6 +119,7 @@ H5O__fsinfo_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU
|
||||
fsinfo->fs_addr[ptype - 1] = HADDR_UNDEF;
|
||||
|
||||
/* Version of message */
|
||||
+ VERIFY_LIMIT(p,1,p_end)
|
||||
vers = *p++;
|
||||
|
||||
if (vers == H5O_FSINFO_VERSION_0) {
|
||||
@@ -125,6 +133,7 @@ H5O__fsinfo_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU
|
||||
fsinfo->pgend_meta_thres = H5F_FILE_SPACE_PGEND_META_THRES;
|
||||
fsinfo->eoa_pre_fsm_fsalloc = HADDR_UNDEF;
|
||||
|
||||
+ VERIFY_LIMIT(p, 1 + H5F_SIZEOF_SIZE(f), p_end);
|
||||
strategy = (H5F_file_space_type_t)*p++; /* File space strategy */
|
||||
H5F_DECODE_LENGTH(f, p, threshold); /* Free-space section threshold */
|
||||
|
||||
@@ -170,6 +179,7 @@ H5O__fsinfo_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU
|
||||
HDassert(vers >= H5O_FSINFO_VERSION_1);
|
||||
|
||||
fsinfo->version = vers;
|
||||
+ VERIFY_LIMIT(p, 1 + 1 + 2 * H5F_SIZEOF_SIZE(f) + 2 + H5F_SIZEOF_ADDR(f), p_end);
|
||||
fsinfo->strategy = (H5F_fspace_strategy_t)*p++; /* File space strategy */
|
||||
fsinfo->persist = *p++; /* Free-space persist or not */
|
||||
H5F_DECODE_LENGTH(f, p, fsinfo->threshold); /* Free-space section threshold */
|
||||
@@ -181,9 +191,11 @@ H5O__fsinfo_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU
|
||||
|
||||
/* Decode addresses of free space managers, if persisting */
|
||||
if (fsinfo->persist)
|
||||
- for (ptype = H5F_MEM_PAGE_SUPER; ptype < H5F_MEM_PAGE_NTYPES; ptype++)
|
||||
+ for (ptype = H5F_MEM_PAGE_SUPER; ptype < H5F_MEM_PAGE_NTYPES; ptype++) {
|
||||
+ VERIFY_LIMIT(p, H5F_SIZEOF_SIZE(f), p_end);
|
||||
H5F_addr_decode(f, &p, &(fsinfo->fs_addr[ptype - 1]));
|
||||
|
||||
+ }
|
||||
fsinfo->mapped = FALSE;
|
||||
}
|
||||
|
Hot-fix-for-CVE-2020-10812.patch (normal file, 43 lines added)
@@ -0,0 +1,43 @@
From: Egbert Eich <eich@suse.com>
Date: Wed Oct 5 09:44:02 2022 +0200
Subject: Hot fix for CVE-2020-10812
Patch-mainline: Not yet
Git-repo: ssh://eich@192.168.122.1:/home/eich/sources/HPC/hdf5
Git-commit: 2465fc41d208d57eb0d7d025286a81664148fbaf
References:

CVE-2020-10812 unveils a more fundamental design flaw in H5F__dest():
this function returns FAIL if one of multiple operations fails (in this
case H5AC_prep_for_file_close()) while it still proceeds to prepare the
close operation, frees the 'shared' member in struct H5F_t and ultimately
deallocates the structure itself.
When H5F__dest() signals FAIL back to the caller, the caller itself
(H5F_try_close() in this case) will fail. This failure is signalled
up the stack, thus the file will not be considered closed and another
attempt will be made to close it, at the latest in the exit handler.
That next attempt to close will, however, need the already deallocated
H5F_t structure and the H5T_shared_t structure in its 'shared' member.
This fix papers over the failure of H5AC_prep_for_file_close() by not
changing the return status of H5F__dest() to fail. There are numerous
other places where the same problem can occur.
This may call for a more fundamental solution.

Signed-off-by: Egbert Eich <eich@suse.com>
Signed-off-by: Egbert Eich <eich@suse.de>
---
 src/H5Fint.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/H5Fint.c b/src/H5Fint.c
|
||||
index 9b5613972f..01faf33495 100644
|
||||
--- a/src/H5Fint.c
|
||||
+++ b/src/H5Fint.c
|
||||
@@ -1413,7 +1413,7 @@ H5F__dest(H5F_t *f, hbool_t flush)
|
||||
*/
|
||||
if (H5AC_prep_for_file_close(f) < 0)
|
||||
/* Push error, but keep going */
|
||||
- HDONE_ERROR(H5E_FILE, H5E_CANTFLUSH, FAIL, "metadata cache prep for close failed")
|
||||
+ HDONE_ERROR(H5E_FILE, H5E_CANTFLUSH, ret_value, "metadata cache prep for close failed")
|
||||
|
||||
/* Flush at this point since the file will be closed (phase 2).
|
||||
* Only try to flush the file if it was opened with write access, and if
|
@ -0,0 +1,34 @@
|
||||
From: Egbert Eich <eich@suse.com>
|
||||
Date: Sun Oct 9 08:07:23 2022 +0200
|
||||
Subject: Make sure info block for external links has at least 3 bytes
|
||||
Patch-mainline: Not yet
|
||||
Git-repo: ssh://eich@192.168.122.1:/home/eich/sources/HPC/hdf5
|
||||
Git-commit: 082bfe392b04b1137da9eabd1ecac76c212ab385
|
||||
References:
|
||||
|
||||
According to the specification, the information block for external links
|
||||
contains 1 byte of version/flag information and two 0 terminated strings
|
||||
for the object linked to and the full path.
|
||||
Although not very useful, the minimum string length for each would be one
|
||||
byte.
|
||||
|
||||
This fixes CVE-2018-16438.
|
||||
|
||||
Signed-off-by: Egbert Eich <eich@suse.com>
|
||||
Signed-off-by: Egbert Eich <eich@suse.de>
|
||||
---
|
||||
src/H5Olink.c | 2 ++
|
||||
1 file changed, 2 insertions(+)
|
||||
diff --git a/src/H5Olink.c b/src/H5Olink.c
|
||||
index 51c44a36b0..074744b022 100644
|
||||
--- a/src/H5Olink.c
|
||||
+++ b/src/H5Olink.c
|
||||
@@ -241,6 +241,8 @@ H5O__link_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSE
|
||||
/* A UD link. Get the user-supplied data */
|
||||
UINT16DECODE(p, len)
|
||||
lnk->u.ud.size = len;
|
||||
+ if (lnk->type == H5L_TYPE_EXTERNAL && len < 3)
|
||||
+ HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "external link information lenght < 3")
|
||||
if (len > 0) {
|
||||
/* Make sure that length doesn't exceed buffer size, which could
|
||||
occur when the file is corrupted */
|
PPC64LE-Fix-long-double-handling.patch (normal file, 92 lines added)
@@ -0,0 +1,92 @@
|
||||
From: Egbert Eich <eich@suse.com>
|
||||
Date: Tue Nov 7 14:16:53 2017 +0100
|
||||
Subject: [PATCH]PPC64LE: Fix long double handling
|
||||
Git-commit: ad6559a71b7ba3cacb4b56d4747db63f28a12f55
|
||||
References:
|
||||
Signed-off-by: Egbert Eich <eich@suse.com>
|
||||
|
||||
Signed-off-by: Egbert Eich <eich@suse.com>
|
||||
---
|
||||
hdf5-1.10.1/config/cmake/ConversionTests.c | 16 ++++++++++++++++
|
||||
hdf5-1.10.1/test/dt_arith.c | 26 ++++++++++++++++++++++++--
|
||||
2 files changed, 40 insertions(+), 2 deletions(-)
|
||||
|
||||
Index: hdf5-1.12.2/config/cmake/ConversionTests.c
|
||||
===================================================================
|
||||
--- hdf5-1.12.2.orig/config/cmake/ConversionTests.c
|
||||
+++ hdf5-1.12.2/config/cmake/ConversionTests.c
|
||||
@@ -34,6 +34,14 @@ int HDF_NO_UBSAN main(void)
|
||||
unsigned char s2[8];
|
||||
int ret = 1;
|
||||
|
||||
+#if defined __powerpc64__ && defined _LITTLE_ENDIAN
|
||||
+ /* Don't bother checking on ppc64le, we know it'll work, and
|
||||
+ that what hdf5 calls 'special algorithm' simply is
|
||||
+ IBM ldouble 128 (i.e. two seperately scaled doubles).
|
||||
+ The check below assumes big endian. */
|
||||
+ ret = 0;
|
||||
+#endif
|
||||
+
|
||||
if(sizeof(long double) == 16 && sizeof(long) == 8) {
|
||||
/*make sure the long double type has 16 bytes in size and
|
||||
* 11 bits of exponent. If it is,
|
||||
@@ -157,6 +165,14 @@ int HDF_NO_UBSAN main(void)
|
||||
unsigned char s[16];
|
||||
int ret = 0;
|
||||
|
||||
+#if defined __powerpc64__ && defined _LITTLE_ENDIAN
|
||||
+ /* Don't bother checking on ppc64le, we know it'll work, and
|
||||
+ that what hdf5 calls 'special algorithm' simply is
|
||||
+ IBM ldouble 128 (i.e. two seperately scaled doubles).
|
||||
+ The check below assumes big endian. */
|
||||
+ ret = 0;
|
||||
+#endif
|
||||
+
|
||||
if(sizeof(long double) == 16) {
|
||||
/*make sure the long double type is the same as the failing type
|
||||
*which has 16 bytes in size and 11 bits of exponent. If it is,
|
||||
Index: hdf5-1.12.2/test/dt_arith.c
|
||||
===================================================================
|
||||
--- hdf5-1.12.2.orig/test/dt_arith.c
|
||||
+++ hdf5-1.12.2/test/dt_arith.c
|
||||
@@ -3036,7 +3036,19 @@ test_conv_flt_1(const char *name, int ru
|
||||
#if H5_SIZEOF_LONG_DOUBLE != H5_SIZEOF_DOUBLE
|
||||
}
|
||||
else if (src_type == FLT_LDOUBLE) {
|
||||
- INIT_FP_SPECIAL(src_size, src_nbits, sendian, LDBL_MANT_DIG, dst_size, buf, saved, nelmts);
|
||||
+ size_t mant_dig = LDBL_MANT_DIG;
|
||||
+ if (mant_dig >= src_nbits) {
|
||||
+ /* This happens for IBM long double in little endian.
|
||||
+ The macro LDBL_MANT_DIG says 106 mantissa bits, but the
|
||||
+ HDF5 detection code actually represents it as a normal 64bit
|
||||
+ double (52 bit mantissa) with the upper double being
|
||||
+ unspec bits (which is sort of okay as the testsuite
|
||||
+ wouldn't deal with that format correctly anyway). So
|
||||
+ override the mantissa size. */
|
||||
+ mant_dig = 52;
|
||||
+ }
|
||||
+ INIT_FP_SPECIAL(src_size, src_nbits, sendian, mant_dig, dst_size,
|
||||
+ buf, saved, nelmts);
|
||||
#endif
|
||||
}
|
||||
else
|
||||
@@ -3736,7 +3748,18 @@ test_conv_int_fp(const char *name, int r
|
||||
nelmts);
|
||||
}
|
||||
else {
|
||||
- INIT_FP_SPECIAL(src_size, src_nbits, sendian, LDBL_MANT_DIG, dst_size, buf, saved, nelmts);
|
||||
+ size_t mant_dig = LDBL_MANT_DIG;
|
||||
+ if (mant_dig >= src_nbits) {
|
||||
+ /* This happens for IBM long double in little endian.
|
||||
+ The macro LDBL_MANT_DIG says 106 mantissa bits, but the
|
||||
+ HDF5 detection code actually represents it as a normal 64bit
|
||||
+ double (52 bit mantissa) with the upper double being
|
||||
+ unspec bits (which is sort of okay as the testsuite
|
||||
+ wouldn't deal with that format correctly anyway). So
|
||||
+ override the mantissa size. */
|
||||
+ mant_dig = 52;
|
||||
+ }
|
||||
+ INIT_FP_SPECIAL(src_size, src_nbits, sendian, mant_dig, dst_size, buf, saved, nelmts);
|
||||
}
|
||||
#endif
|
||||
}
|
@ -0,0 +1,258 @@
|
||||
From: Egbert Eich <eich@suse.com>
|
||||
Date: Sat Oct 1 15:13:52 2022 +0200
|
||||
Subject: Pass compact chunk size info to ensure requested elements are within bounds
|
||||
Patch-mainline: Not yet
|
||||
Git-repo: ssh://eich@192.168.122.1:/home/eich/sources/HPC/hdf5
|
||||
Git-commit: 18300944261a9fa8f0087f99d9176f3757b1ec38
|
||||
References:
|
||||
|
||||
To avoid reading/writing elements out of bounds of a compact chunk, pass
|
||||
size info and check whether all elements are within the size before attempting
|
||||
to read/write these elements. Such accesses can occur when accessing malformed
|
||||
hdf5 files.
|
||||
|
||||
This fixes CVE-2018-11205
|
||||
|
||||
Signed-off-by: Egbert Eich <eich@suse.com>
|
||||
Signed-off-by: Egbert Eich <eich@suse.de>
|
||||
---
|
||||
src/H5Dchunk.c | 34 +++++++++++++++++++++++++++-------
|
||||
src/H5Dcompact.c | 5 +++++
|
||||
src/H5Dpkg.h | 1 +
|
||||
3 files changed, 33 insertions(+), 7 deletions(-)
|
||||
diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c
|
||||
index e6bf26ce89..94ad392cb7 100644
|
||||
--- a/src/H5Dchunk.c
|
||||
+++ b/src/H5Dchunk.c
|
||||
@@ -128,6 +128,7 @@ typedef struct H5D_rdcc_ent_t {
|
||||
H5F_block_t chunk_block; /*offset/length of chunk in file */
|
||||
hsize_t chunk_idx; /*index of chunk in dataset */
|
||||
uint8_t * chunk; /*the unfiltered chunk data */
|
||||
+ size_t size; /*size of chunk */
|
||||
unsigned idx; /*index in hash table */
|
||||
struct H5D_rdcc_ent_t *next; /*next item in doubly-linked list */
|
||||
struct H5D_rdcc_ent_t *prev; /*previous item in doubly-linked list */
|
||||
@@ -303,7 +304,7 @@ static unsigned H5D__chunk_hash_val(const H5D_shared_t *shared, const hsize_t *s
|
||||
static herr_t H5D__chunk_flush_entry(const H5D_t *dset, H5D_rdcc_ent_t *ent, hbool_t reset);
|
||||
static herr_t H5D__chunk_cache_evict(const H5D_t *dset, H5D_rdcc_ent_t *ent, hbool_t flush);
|
||||
static void * H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, hbool_t relax,
|
||||
- hbool_t prev_unfilt_chunk);
|
||||
+ hbool_t prev_unfilt_chunk, size_t *ret_size);
|
||||
static herr_t H5D__chunk_unlock(const H5D_io_info_t *io_info, const H5D_chunk_ud_t *udata, hbool_t dirty,
|
||||
void *chunk, uint32_t naccessed);
|
||||
static herr_t H5D__chunk_cache_prune(const H5D_t *dset, size_t size);
|
||||
@@ -2480,6 +2481,7 @@ H5D__chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_
|
||||
uint32_t src_accessed_bytes = 0; /* Total accessed size in a chunk */
|
||||
hbool_t skip_missing_chunks = FALSE; /* Whether to skip missing chunks */
|
||||
herr_t ret_value = SUCCEED; /*return value */
|
||||
+ size_t chunk_size = 0;
|
||||
|
||||
FUNC_ENTER_STATIC
|
||||
|
||||
@@ -2565,11 +2567,12 @@ H5D__chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_
|
||||
src_accessed_bytes = chunk_info->chunk_points * (uint32_t)type_info->src_type_size;
|
||||
|
||||
/* Lock the chunk into the cache */
|
||||
- if (NULL == (chunk = H5D__chunk_lock(io_info, &udata, FALSE, FALSE)))
|
||||
+ if (NULL == (chunk = H5D__chunk_lock(io_info, &udata, FALSE, FALSE, &chunk_size)))
|
||||
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk")
|
||||
|
||||
/* Set up the storage buffer information for this chunk */
|
||||
cpt_store.compact.buf = chunk;
|
||||
+ cpt_store.compact.size = chunk_size;
|
||||
|
||||
/* Point I/O info at contiguous I/O info for this chunk */
|
||||
chk_io_info = &cpt_io_info;
|
||||
@@ -2629,6 +2632,7 @@ H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize
|
||||
hbool_t cpt_dirty; /* Temporary placeholder for compact storage "dirty" flag */
|
||||
uint32_t dst_accessed_bytes = 0; /* Total accessed size in a chunk */
|
||||
herr_t ret_value = SUCCEED; /* Return value */
|
||||
+ size_t chunk_size;
|
||||
|
||||
FUNC_ENTER_STATIC
|
||||
|
||||
@@ -2699,11 +2703,12 @@ H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize
|
||||
entire_chunk = FALSE;
|
||||
|
||||
/* Lock the chunk into the cache */
|
||||
- if (NULL == (chunk = H5D__chunk_lock(io_info, &udata, entire_chunk, FALSE)))
|
||||
+ if (NULL == (chunk = H5D__chunk_lock(io_info, &udata, entire_chunk, FALSE, &chunk_size)))
|
||||
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk")
|
||||
|
||||
/* Set up the storage buffer information for this chunk */
|
||||
cpt_store.compact.buf = chunk;
|
||||
+ cpt_store.compact.size = chunk_size;
|
||||
|
||||
/* Point I/O info at main I/O info for this chunk */
|
||||
chk_io_info = &cpt_io_info;
|
||||
@@ -3714,7 +3719,7 @@ done:
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
static void *
|
||||
-H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, hbool_t relax, hbool_t prev_unfilt_chunk)
|
||||
+H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, hbool_t relax, hbool_t prev_unfilt_chunk, size_t *ret_size)
|
||||
{
|
||||
const H5D_t * dset = io_info->dset; /* Local pointer to the dataset info */
|
||||
const H5O_pline_t *pline =
|
||||
@@ -3731,6 +3736,7 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, hbool_t rel
|
||||
hbool_t disable_filters = FALSE; /* Whether to disable filters (when adding to cache) */
|
||||
void * chunk = NULL; /*the file chunk */
|
||||
void * ret_value = NULL; /* Return value */
|
||||
+ size_t chunk_size_ret = 0;
|
||||
|
||||
FUNC_ENTER_STATIC
|
||||
|
||||
@@ -3796,6 +3802,7 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, hbool_t rel
|
||||
ent->chunk = (uint8_t *)H5D__chunk_mem_xfree(ent->chunk, old_pline);
|
||||
ent->chunk = (uint8_t *)chunk;
|
||||
chunk = NULL;
|
||||
+ ent->size = chunk_size;
|
||||
|
||||
/* Mark the chunk as having filters disabled as well as "newly
|
||||
* disabled" so it is inserted on flush */
|
||||
@@ -3823,6 +3830,7 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, hbool_t rel
|
||||
ent->chunk = (uint8_t *)H5D__chunk_mem_xfree(ent->chunk, old_pline);
|
||||
ent->chunk = (uint8_t *)chunk;
|
||||
chunk = NULL;
|
||||
+ ent->size = chunk_size;
|
||||
|
||||
/* Mark the chunk as having filters enabled */
|
||||
ent->edge_chunk_state &= ~(H5D_RDCC_DISABLE_FILTERS | H5D_RDCC_NEWLY_DISABLED_FILTERS);
|
||||
@@ -3902,6 +3910,7 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, hbool_t rel
|
||||
/* In the case that some dataset functions look through this data,
|
||||
* clear it to all 0s. */
|
||||
HDmemset(chunk, 0, chunk_size);
|
||||
+ chunk_size_ret = chunk_size;
|
||||
} /* end if */
|
||||
else {
|
||||
/*
|
||||
@@ -3924,6 +3933,7 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, hbool_t rel
|
||||
my_chunk_alloc, chunk) < 0)
|
||||
HGOTO_ERROR(H5E_IO, H5E_READERROR, NULL, "unable to read raw data chunk")
|
||||
|
||||
+ chunk_size_ret = my_chunk_alloc;
|
||||
if (old_pline && old_pline->nused) {
|
||||
H5Z_EDC_t err_detect; /* Error detection info */
|
||||
H5Z_cb_t filter_cb; /* I/O filter callback function */
|
||||
@@ -3937,6 +3947,7 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, hbool_t rel
|
||||
if (H5Z_pipeline(old_pline, H5Z_FLAG_REVERSE, &(udata->filter_mask), err_detect,
|
||||
filter_cb, &my_chunk_alloc, &buf_alloc, &chunk) < 0)
|
||||
HGOTO_ERROR(H5E_DATASET, H5E_CANTFILTER, NULL, "data pipeline read failed")
|
||||
+ chunk_size_ret = buf_alloc;
|
||||
|
||||
/* Reallocate chunk if necessary */
|
||||
if (udata->new_unfilt_chunk) {
|
||||
@@ -3947,6 +3958,7 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, hbool_t rel
|
||||
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL,
|
||||
"memory allocation failed for raw data chunk")
|
||||
} /* end if */
|
||||
+ chunk_size_ret = my_chunk_alloc;
|
||||
H5MM_memcpy(chunk, tmp_chunk, chunk_size);
|
||||
(void)H5D__chunk_mem_xfree(tmp_chunk, old_pline);
|
||||
} /* end if */
|
||||
@@ -3967,6 +3979,7 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, hbool_t rel
|
||||
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL,
|
||||
"memory allocation failed for raw data chunk")
|
||||
|
||||
+ chunk_size_ret = chunk_size;
|
||||
if (H5P_is_fill_value_defined(fill, &fill_status) < 0)
|
||||
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't tell if fill value defined")
|
||||
|
||||
@@ -4032,6 +4045,7 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, hbool_t rel
|
||||
H5_CHECKED_ASSIGN(ent->rd_count, uint32_t, chunk_size, size_t);
|
||||
H5_CHECKED_ASSIGN(ent->wr_count, uint32_t, chunk_size, size_t);
|
||||
ent->chunk = (uint8_t *)chunk;
|
||||
+ ent->size = chunk_size_ret;
|
||||
|
||||
/* Add it to the cache */
|
||||
HDassert(NULL == rdcc->slot[udata->idx_hint]);
|
||||
@@ -4065,6 +4079,7 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, hbool_t rel
|
||||
HDassert(!ent->locked);
|
||||
ent->locked = TRUE;
|
||||
chunk = ent->chunk;
|
||||
+ chunk_size_ret = ent->size;
|
||||
} /* end if */
|
||||
else
|
||||
/*
|
||||
@@ -4076,6 +4091,8 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, hbool_t rel
|
||||
|
||||
/* Set return value */
|
||||
ret_value = chunk;
|
||||
+ if (ret_size != NULL)
|
||||
+ *ret_size = chunk_size_ret;
|
||||
|
||||
done:
|
||||
/* Release the fill buffer info, if it's been initialized */
|
||||
@@ -4084,8 +4101,11 @@ done:
|
||||
|
||||
/* Release the chunk allocated, on error */
|
||||
if (!ret_value)
|
||||
- if (chunk)
|
||||
+ if (chunk) {
|
||||
chunk = H5D__chunk_mem_xfree(chunk, pline);
|
||||
+ if (ret_size != NULL)
|
||||
+ *ret_size = 0;
|
||||
+ }
|
||||
|
||||
FUNC_LEAVE_NOAPI(ret_value)
|
||||
} /* end H5D__chunk_lock() */
|
||||
@@ -4884,7 +4904,7 @@ H5D__chunk_update_old_edge_chunks(H5D_t *dset, hsize_t old_dim[])
|
||||
if (H5F_addr_defined(chk_udata.chunk_block.offset) || (UINT_MAX != chk_udata.idx_hint)) {
|
||||
/* Lock the chunk into cache. H5D__chunk_lock will take care of
|
||||
* updating the chunk to no longer be an edge chunk. */
|
||||
- if (NULL == (chunk = (void *)H5D__chunk_lock(&chk_io_info, &chk_udata, FALSE, TRUE)))
|
||||
+ if (NULL == (chunk = (void *)H5D__chunk_lock(&chk_io_info, &chk_udata, FALSE, TRUE, NULL)))
|
||||
HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "unable to lock raw data chunk")
|
||||
|
||||
/* Unlock the chunk */
|
||||
@@ -5274,7 +5294,7 @@ H5D__chunk_prune_fill(H5D_chunk_it_ud1_t *udata, hbool_t new_unfilt_chunk)
|
||||
HGOTO_ERROR(H5E_DATASET, H5E_CANTSELECT, FAIL, "unable to select hyperslab")
|
||||
|
||||
/* Lock the chunk into the cache, to get a pointer to the chunk buffer */
|
||||
- if (NULL == (chunk = (void *)H5D__chunk_lock(io_info, &chk_udata, FALSE, FALSE)))
|
||||
+ if (NULL == (chunk = (void *)H5D__chunk_lock(io_info, &chk_udata, FALSE, FALSE, NULL)))
|
||||
HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "unable to lock raw data chunk")
|
||||
|
||||
/* Fill the selection in the memory buffer */
|
||||
diff --git a/src/H5Dcompact.c b/src/H5Dcompact.c
|
||||
index b78693660d..21c37e8a08 100644
|
||||
--- a/src/H5Dcompact.c
|
||||
+++ b/src/H5Dcompact.c
|
||||
@@ -245,6 +245,7 @@ H5D__compact_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t H5_ATTR
|
||||
FUNC_ENTER_STATIC_NOERR
|
||||
|
||||
io_info->store->compact.buf = io_info->dset->shared->layout.storage.u.compact.buf;
|
||||
+ io_info->store->compact.size = io_info->dset->shared->layout.storage.u.compact.size;
|
||||
io_info->store->compact.dirty = &io_info->dset->shared->layout.storage.u.compact.dirty;
|
||||
|
||||
FUNC_LEAVE_NOAPI(SUCCEED)
|
||||
@@ -278,6 +279,8 @@ H5D__compact_readvv(const H5D_io_info_t *io_info, size_t dset_max_nseq, size_t *
|
||||
FUNC_ENTER_STATIC
|
||||
|
||||
HDassert(io_info);
|
||||
+ if (io_info->store->compact.size < *(dset_offset_arr + dset_max_nseq - 1) + *(dset_size_arr + dset_max_nseq - 1))
|
||||
+ HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "source size less than requested data")
|
||||
|
||||
/* Use the vectorized memory copy routine to do actual work */
|
||||
if ((ret_value = H5VM_memcpyvv(io_info->u.rbuf, mem_max_nseq, mem_curr_seq, mem_size_arr, mem_offset_arr,
|
||||
@@ -320,6 +323,8 @@ H5D__compact_writevv(const H5D_io_info_t *io_info, size_t dset_max_nseq, size_t
|
||||
FUNC_ENTER_STATIC
|
||||
|
||||
HDassert(io_info);
|
||||
+ if (io_info->store->compact.size < *(dset_offset_arr + dset_max_nseq - 1) + *(dset_size_arr + dset_max_nseq - 1))
|
||||
+ HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "source size less than requested data")
|
||||
|
||||
/* Use the vectorized memory copy routine to do actual work */
|
||||
if ((ret_value = H5VM_memcpyvv(io_info->store->compact.buf, dset_max_nseq, dset_curr_seq, dset_size_arr,
|
||||
diff --git a/src/H5Dpkg.h b/src/H5Dpkg.h
|
||||
index 64692c5d1d..8a4acd62e3 100644
|
||||
--- a/src/H5Dpkg.h
|
||||
+++ b/src/H5Dpkg.h
|
||||
@@ -196,6 +196,7 @@ typedef struct {
|
||||
typedef struct {
|
||||
void * buf; /* Buffer for compact dataset */
|
||||
hbool_t *dirty; /* Pointer to dirty flag to mark */
|
||||
+ size_t size; /* Buffer size for compact dataset */
|
||||
} H5D_compact_storage_t;
|
||||
|
||||
typedef union H5D_storage_t {
|
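The readvv/writevv guards above compare the end of the furthest-reaching request against the stored compact-buffer size. Below is a minimal C sketch of that check in isolation, under the same assumption the patch makes that the last sequence reaches furthest; all names are illustrative.

/*
 * Sketch: before servicing a vectored I/O request against a fixed-size
 * compact buffer, verify that the furthest-reaching element still fits.
 * The offsets and lengths are exactly what a malformed file could supply.
 */
#include <stdbool.h>
#include <stddef.h>

static bool vv_request_fits(size_t buf_size, const size_t off[], const size_t len[], size_t nseq)
{
    if (nseq == 0)
        return true;

    /* Assumes sequences are ordered so the last one ends furthest into the buffer. */
    size_t last_end = off[nseq - 1] + len[nseq - 1];

    return last_end >= off[nseq - 1] /* no wrap-around */ && last_end <= buf_size;
}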
Remove-duplicate-code.patch (normal file, 28 lines added)
@@ -0,0 +1,28 @@
|
||||
From: Egbert Eich <eich@suse.com>
|
||||
Date: Sat Feb 11 18:08:15 2023 +0100
|
||||
Subject: Remove duplicate code
|
||||
Patch-mainline: Not yet
|
||||
Git-repo: https://github.com/HDFGroup/hdf5
|
||||
Git-commit: 539bca81e2b5713b1c6c5723d742377fb92c1ac1
|
||||
References:
|
||||
|
||||
Signed-off-by: Egbert Eich <eich@suse.com>
|
||||
Signed-off-by: Egbert Eich <eich@suse.de>
|
||||
---
|
||||
src/H5Oattr.c | 4 ----
|
||||
1 file changed, 4 deletions(-)
|
||||
diff --git a/src/H5Oattr.c b/src/H5Oattr.c
|
||||
index 3ef0b99aa4..19d3abfb4c 100644
|
||||
--- a/src/H5Oattr.c
|
||||
+++ b/src/H5Oattr.c
|
||||
@@ -222,10 +222,6 @@ H5O_attr_decode(H5F_t *f, H5O_t *open_oh, unsigned H5_ATTR_UNUSED mesg_flags, un
|
||||
else
|
||||
p += attr->shared->ds_size;
|
||||
|
||||
- /* Get the datatype's size */
|
||||
- if (0 == (dt_size = H5T_get_size(attr->shared->dt)))
|
||||
- HGOTO_ERROR(H5E_ATTR, H5E_CANTGET, NULL, "unable to get datatype size")
|
||||
-
|
||||
/* Get the datatype & dataspace sizes */
|
||||
if (0 == (dt_size = H5T_get_size(attr->shared->dt)))
|
||||
HGOTO_ERROR(H5E_ATTR, H5E_CANTGET, NULL, "unable to get datatype size")
|
@@ -0,0 +1,48 @@
From: Egbert Eich <eich@suse.com>
Date: Wed Sep 28 19:11:16 2022 +0200
Subject: Report error if dimensions of chunked storage in data layout < 2
Patch-mainline: Not yet
Git-repo: ssh://eich@192.168.122.1:/home/eich/sources/HPC/hdf5
Git-commit: 34b621424504265cff3c33cf634a70efb52db180
References:

For Data Layout Messages version 1 & 2 the specification states
that the value stored in the data field is 1 greater than the
number of dimensions in the dataspace. For version 3 this is
not explicitly stated, but the implementation suggests it to be
the case.
Thus the value set needs to be at least 2. For dimensionality
< 2 an out-of-bounds access occurs, as in CVE-2021-45833.

This fixes CVE-2021-45833.

Signed-off-by: Egbert Eich <eich@suse.com>
Signed-off-by: Egbert Eich <eich@suse.de>
---
 src/H5Olayout.c | 7 +++++++
 1 file changed, 7 insertions(+)
diff --git a/src/H5Olayout.c b/src/H5Olayout.c
|
||||
index c939e72744..9fa9e36e8c 100644
|
||||
--- a/src/H5Olayout.c
|
||||
+++ b/src/H5Olayout.c
|
||||
@@ -168,6 +168,10 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU
|
||||
p += ndims * 4; /* Skip over dimension sizes (32-bit quantities) */
|
||||
} /* end if */
|
||||
else {
|
||||
+ if (ndims < 2)
|
||||
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL,
|
||||
+ "bad dimensions for chunked storage")
|
||||
+
|
||||
mesg->u.chunk.ndims = ndims;
|
||||
for (u = 0; u < ndims; u++)
|
||||
UINT32DECODE(p, mesg->u.chunk.dim[u]);
|
||||
@@ -241,6 +245,9 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU
|
||||
mesg->u.chunk.ndims = *p++;
|
||||
if (mesg->u.chunk.ndims > H5O_LAYOUT_NDIMS)
|
||||
HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "dimensionality is too large")
|
||||
+ if (mesg->u.chunk.ndims < 2)
|
||||
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL,
|
||||
+ "bad dimensions for chunked storage")
|
||||
|
||||
/* B-tree address */
|
||||
H5F_addr_decode(f, &p, &(mesg->storage.u.chunk.idx_addr));
|
@@ -0,0 +1,96 @@
From: Egbert Eich <eich@suse.com>
Date: Mon Oct 10 08:43:44 2022 +0200
Subject: Validate location (offset) of the accumulated metadata when comparing
Patch-mainline: Not yet
Git-repo: ssh://eich@192.168.122.1:/home/eich/sources/HPC/hdf5
Git-commit: 2cf9918ae66f023a2b6d44eb591ee2ac479a6e53
References:

Initially, the accumulated metadata location is initialized to HADDR_UNDEF
- the highest available address. Bogus input files may provide a location
or size matching this value. Comparing this address against such bogus
values may produce false positives. This change makes sure the value has
been initialized, or fails the comparison early and lets other parts of
the code deal with the bogus address/size.
Note: To avoid unnecessary checks, we have assumed that if the 'dirty'
member in the same structure is true, the location is valid.

This fixes CVE-2018-13867.

Signed-off-by: Egbert Eich <eich@suse.com>
Signed-off-by: Egbert Eich <eich@suse.de>
---
 src/H5Faccum.c | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)
diff --git a/src/H5Faccum.c b/src/H5Faccum.c
|
||||
index aed5812e63..73bd4b811e 100644
|
||||
--- a/src/H5Faccum.c
|
||||
+++ b/src/H5Faccum.c
|
||||
@@ -48,6 +48,7 @@
|
||||
#define H5F_ACCUM_THROTTLE 8
|
||||
#define H5F_ACCUM_THRESHOLD 2048
|
||||
#define H5F_ACCUM_MAX_SIZE (1024 * 1024) /* Max. accum. buf size (max. I/Os will be 1/2 this size) */
|
||||
+#define H5F_LOC_VALID(x) (x != HADDR_UNDEF)
|
||||
|
||||
/******************/
|
||||
/* Local Typedefs */
|
||||
@@ -126,8 +127,9 @@ H5F__accum_read(H5F_shared_t *f_sh, H5FD_mem_t map_type, haddr_t addr, size_t si
|
||||
HDassert(!accum->buf || (accum->alloc_size >= accum->size));
|
||||
|
||||
/* Current read adjoins or overlaps with metadata accumulator */
|
||||
- if (H5F_addr_overlap(addr, size, accum->loc, accum->size) || ((addr + size) == accum->loc) ||
|
||||
- (accum->loc + accum->size) == addr) {
|
||||
+ if (H5F_LOC_VALID(accum->loc) &&
|
||||
+ (H5F_addr_overlap(addr, size, accum->loc, accum->size) || ((addr + size) == accum->loc) ||
|
||||
+ (accum->loc + accum->size) == addr)) {
|
||||
size_t amount_before; /* Amount to read before current accumulator */
|
||||
haddr_t new_addr; /* New address of the accumulator buffer */
|
||||
size_t new_size; /* New size of the accumulator buffer */
|
||||
@@ -439,7 +441,8 @@ H5F__accum_write(H5F_shared_t *f_sh, H5FD_mem_t map_type, haddr_t addr, size_t s
|
||||
/* Check if there is already metadata in the accumulator */
|
||||
if (accum->size > 0) {
|
||||
/* Check if the new metadata adjoins the beginning of the current accumulator */
|
||||
- if ((addr + size) == accum->loc) {
|
||||
+ if (H5F_LOC_VALID(accum->loc)
|
||||
+ && (addr + size) == accum->loc) {
|
||||
/* Check if we need to adjust accumulator size */
|
||||
if (H5F__accum_adjust(accum, file, H5F_ACCUM_PREPEND, size) < 0)
|
||||
HGOTO_ERROR(H5E_IO, H5E_CANTRESIZE, FAIL, "can't adjust metadata accumulator")
|
||||
@@ -464,7 +467,8 @@ H5F__accum_write(H5F_shared_t *f_sh, H5FD_mem_t map_type, haddr_t addr, size_t s
|
||||
accum->dirty_off = 0;
|
||||
} /* end if */
|
||||
/* Check if the new metadata adjoins the end of the current accumulator */
|
||||
- else if (addr == (accum->loc + accum->size)) {
|
||||
+ else if (H5F_LOC_VALID(accum->loc) &&
|
||||
+ addr == (accum->loc + accum->size)) {
|
||||
/* Check if we need to adjust accumulator size */
|
||||
if (H5F__accum_adjust(accum, file, H5F_ACCUM_APPEND, size) < 0)
|
||||
HGOTO_ERROR(H5E_IO, H5E_CANTRESIZE, FAIL, "can't adjust metadata accumulator")
|
||||
@@ -485,7 +489,8 @@ H5F__accum_write(H5F_shared_t *f_sh, H5FD_mem_t map_type, haddr_t addr, size_t s
|
||||
accum->size += size;
|
||||
} /* end if */
|
||||
/* Check if the piece of metadata being written overlaps the metadata accumulator */
|
||||
- else if (H5F_addr_overlap(addr, size, accum->loc, accum->size)) {
|
||||
+ else if (H5F_LOC_VALID(accum->loc) &&
|
||||
+ H5F_addr_overlap(addr, size, accum->loc, accum->size)) {
|
||||
size_t add_size; /* New size of the accumulator buffer */
|
||||
|
||||
/* Check if the new metadata is entirely within the current accumulator */
|
||||
@@ -745,7 +750,8 @@ H5F__accum_write(H5F_shared_t *f_sh, H5FD_mem_t map_type, haddr_t addr, size_t s
|
||||
/* (Note that this could be improved by updating the accumulator
|
||||
* with [some of] the information just read in. -QAK)
|
||||
*/
|
||||
- if (H5F_addr_overlap(addr, size, accum->loc, accum->size)) {
|
||||
+ if (H5F_LOC_VALID(accum->loc) &&
|
||||
+ H5F_addr_overlap(addr, size, accum->loc, accum->size)) {
|
||||
/* Check for write starting before beginning of accumulator */
|
||||
if (H5F_addr_le(addr, accum->loc)) {
|
||||
/* Check for write ending within accumulator */
|
||||
@@ -868,6 +874,7 @@ H5F__accum_free(H5F_shared_t *f_sh, H5FD_mem_t H5_ATTR_UNUSED type, haddr_t addr
|
||||
|
||||
/* Adjust the metadata accumulator to remove the freed block, if it overlaps */
|
||||
if ((f_sh->feature_flags & H5FD_FEAT_ACCUMULATE_METADATA) &&
|
||||
+ H5F_LOC_VALID(accum->loc) &&
|
||||
H5F_addr_overlap(addr, size, accum->loc, accum->size)) {
|
||||
size_t overlap_size; /* Size of overlap with accumulator */
|
||||
|
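The H5F_LOC_VALID() test added above refuses to treat the HADDR_UNDEF sentinel as a real address before any overlap comparison. The same idea as a tiny standalone C sketch; the names below are illustrative stand-ins, not HDF5 API.

/*
 * Sketch: an accumulator whose location still holds the "undefined" sentinel
 * must never win an overlap comparison, no matter what a bogus file claims.
 */
#include <stdbool.h>
#include <stdint.h>

#define ADDR_UNDEF   UINT64_MAX             /* stand-in for HADDR_UNDEF */
#define LOC_VALID(x) ((x) != ADDR_UNDEF)    /* stand-in for H5F_LOC_VALID */

static bool ranges_overlap(uint64_t a, uint64_t a_len, uint64_t b, uint64_t b_len)
{
    return a < b + b_len && b < a + a_len;
}

static bool accum_overlaps(uint64_t accum_loc, uint64_t accum_size, uint64_t addr, uint64_t size)
{
    /* Check the sentinel first; only then compare the real ranges. */
    return LOC_VALID(accum_loc) && ranges_overlap(addr, size, accum_loc, accum_size);
}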
@@ -0,0 +1,31 @@
From: Egbert Eich <eich@suse.com>
Date: Thu Sep 29 13:47:30 2022 +0200
Subject: When evicting driver info block, NULL the corresponding entry
Patch-mainline: Not yet
Git-repo: ssh://eich@192.168.122.1:/home/eich/sources/HPC/hdf5
Git-commit: 6d5496f17ed5aa65cbb0498e0bf70b0d599dc336
References:

This prevents another attempt to unpin the entry in H5F__dest(), which may
happen with malformed hdf5 files and leads to a segfault.

This fixes CVE-2021-46242.

Signed-off-by: Egbert Eich <eich@suse.com>
Signed-off-by: Egbert Eich <eich@suse.de>
---
 src/H5Fsuper.c | 2 ++
 1 file changed, 2 insertions(+)
diff --git a/src/H5Fsuper.c b/src/H5Fsuper.c
|
||||
index 60b045ae29..1283790c57 100644
|
||||
--- a/src/H5Fsuper.c
|
||||
+++ b/src/H5Fsuper.c
|
||||
@@ -1044,6 +1044,8 @@ done:
|
||||
/* Evict the driver info block from the cache */
|
||||
if (sblock && H5AC_expunge_entry(f, H5AC_DRVRINFO, sblock->driver_addr, H5AC__NO_FLAGS_SET) < 0)
|
||||
HDONE_ERROR(H5E_FILE, H5E_CANTEXPUNGE, FAIL, "unable to expunge driver info block")
|
||||
+
|
||||
+ f->shared->drvinfo = NULL;
|
||||
} /* end if */
|
||||
|
||||
/* Unpin & discard superblock */
|
_constraints (normal file, 10 lines added)
@@ -0,0 +1,10 @@
<constraints>
  <hardware>
    <physicalmemory>
      <size unit="M">4000</size>
    </physicalmemory>
    <disk>
      <size unit="G">6</size>
    </disk>
  </hardware>
</constraints>
_multibuild (normal file, 9 lines added)
@@ -0,0 +1,9 @@
<multibuild>
  <package>serial</package>
  <package>openmpi4</package>
  <package>mvapich2</package>
  <package>gnu-openmpi4-hpc</package>
  <package>gnu-mvapich2-hpc</package>
  <package>gnu-mpich-hpc</package>
  <package>gnu-hpc</package>
</multibuild>
hdf5-1.12.2.tar.bz2 (binary, stored with Git LFS, new file; contents not shown)
hdf5-1.8.10-tests-arm.patch (normal file, 12 lines added)
@@ -0,0 +1,12 @@
|
||||
Index: hdf5-1.12.2/test/testhdf5.c
|
||||
===================================================================
|
||||
--- hdf5-1.12.2.orig/test/testhdf5.c
|
||||
+++ hdf5-1.12.2/test/testhdf5.c
|
||||
@@ -53,7 +53,6 @@ main(int argc, char *argv[])
|
||||
AddTest("coords", test_coords, cleanup_coords, "Dataspace coordinates", NULL);
|
||||
AddTest("sohm", test_sohm, cleanup_sohm, "Shared Object Header Messages", NULL);
|
||||
AddTest("attr", test_attr, cleanup_attr, "Attributes", NULL);
|
||||
- AddTest("select", test_select, cleanup_select, "Selections", NULL);
|
||||
AddTest("time", test_time, cleanup_time, "Time Datatypes", NULL);
|
||||
AddTest("ref_deprec", test_reference_deprec, cleanup_reference_deprec, "Deprecated References", NULL);
|
||||
AddTest("ref", test_reference, cleanup_reference, "References", NULL);
|
hdf5-1.8.11-abort_unknown_host_config.patch (normal file, 26 lines added)
@@ -0,0 +1,26 @@
|
||||
Index: configure
|
||||
===================================================================
|
||||
--- configure.orig
|
||||
+++ configure
|
||||
@@ -4343,6 +4343,8 @@ done
|
||||
if test "X$host_config" != "Xnone"; then
|
||||
CC_BASENAME="`echo $CC | cut -f1 -d' ' | xargs basename 2>/dev/null`"
|
||||
. $host_config
|
||||
+else
|
||||
+ as_fn_error $? "no usable host config found" "$LINENO" 5
|
||||
fi
|
||||
|
||||
## Source any special site-specific file
|
||||
Index: configure.ac
|
||||
===================================================================
|
||||
--- configure.ac.orig
|
||||
+++ configure.ac
|
||||
@@ -330,6 +330,8 @@ done
|
||||
if test "X$host_config" != "Xnone"; then
|
||||
CC_BASENAME="`echo $CC | cut -f1 -d' ' | xargs basename 2>/dev/null`"
|
||||
. $host_config
|
||||
+else
|
||||
+ AC_MSG_ERROR([no usable host config found])
|
||||
fi
|
||||
|
||||
## Source any special site-specific file
|
hdf5-LD_LIBRARY_PATH.patch (normal file, 22 lines added)
@@ -0,0 +1,22 @@
|
||||
Index: hdf5-1.12.2/src/Makefile.in
|
||||
===================================================================
|
||||
--- hdf5-1.12.2.orig/src/Makefile.in
|
||||
+++ hdf5-1.12.2/src/Makefile.in
|
||||
@@ -1998,8 +1998,6 @@ help:
|
||||
# Remove the generated .c file if errors occur unless HDF5_Make_Ignore
|
||||
# is set to ignore the error.
|
||||
H5Tinit.c: H5detect$(EXEEXT)
|
||||
- LD_LIBRARY_PATH="$$LD_LIBRARY_PATH`echo $(LDFLAGS) | \
|
||||
- sed -e 's/-L/:/g' -e 's/ //g'`" \
|
||||
$(RUNSERIAL) ./H5detect$(EXEEXT) $@ || \
|
||||
(test $$HDF5_Make_Ignore && echo "*** Error ignored") || \
|
||||
($(RM) $@ ; exit 1)
|
||||
@@ -2010,8 +2008,6 @@ H5Tinit.c: H5detect$(EXEEXT)
|
||||
# Remove the generated .c file if errors occur unless HDF5_Make_Ignore
|
||||
# is set to ignore the error.
|
||||
H5lib_settings.c: H5make_libsettings$(EXEEXT) libhdf5.settings
|
||||
- LD_LIBRARY_PATH="$$LD_LIBRARY_PATH`echo $(LDFLAGS) | \
|
||||
- sed -e 's/-L/:/g' -e 's/ //g'`" \
|
||||
$(RUNSERIAL) ./H5make_libsettings$(EXEEXT) $@ || \
|
||||
(test $$HDF5_Make_Ignore && echo "*** Error ignored") || \
|
||||
($(RM) $@ ; exit 1)
|
hdf5-Remove-timestamps-from-binaries.patch (normal file, 39 lines added)
@@ -0,0 +1,39 @@
From: Egbert Eich <eich@suse.com>
Date: Sat Nov 17 18:15:13 2018 +0100
Subject: hdf5: Remove timestamps from binaries
Patch-mainline: never
Git-commit: 3b88045491c0b43f385edce47e3aae07660cd9f3
References:

Signed-off-by: Egbert Eich <eich@suse.com>
Signed-off-by: Egbert Eich <eich@suse.de>
---
src/H5detect.c | 15 ---------------
1 file changed, 15 deletions(-)
Index: hdf5-1.12.2/src/H5detect.c
===================================================================
--- hdf5-1.12.2.orig/src/H5detect.c
+++ hdf5-1.12.2/src/H5detect.c
@@ -1224,22 +1224,6 @@ bit.\n";
fprintf(rawoutstream, "/* Generated automatically by H5detect -- do not edit */\n\n\n");
HDfputs(FileHeader, rawoutstream); /*the copyright notice--see top of this file */

- fprintf(rawoutstream, " *\n * Created:\t\t%s %2d, %4d\n", month_name[tm->tm_mon], tm->tm_mday,
- 1900 + tm->tm_year);
- if (pwd || real_name[0] || host_name[0]) {
- fprintf(rawoutstream, " *\t\t\t");
- if (real_name[0])
- fprintf(rawoutstream, "%s <", real_name);
-#ifdef H5_HAVE_GETPWUID
- if (pwd)
- HDfputs(pwd->pw_name, rawoutstream);
-#endif
- if (host_name[0])
- fprintf(rawoutstream, "@%s", host_name);
- if (real_name[0])
- fprintf(rawoutstream, ">");
- HDfputc('\n', rawoutstream);
- }
fprintf(rawoutstream, " *\n * Purpose:\t\t");
for (s = purpose; *s; s++) {
HDfputc(*s, rawoutstream);
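A quick way to see the effect (hypothetical check, assuming H5detect writes to the file named by its argument as in the Makefile rule above, and that nothing else in its output is time- or host-dependent): with the Created/user@host banner gone, two runs produce identical generated sources, so rebuilds no longer differ.

    ./H5detect run1.c
    ./H5detect run2.c
    cmp run1.c run2.c && echo "generated source is reproducible"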
30
hdf5-mpi.patch
Normal file
@ -0,0 +1,30 @@
diff -up hdf5-1.8.16/testpar/t_pflush1.c.mpi hdf5-1.8.16/testpar/t_pflush1.c
--- hdf5-1.8.16/testpar/t_pflush1.c.mpi 2015-10-23 23:13:44.000000000 -0600
+++ hdf5-1.8.16/testpar/t_pflush1.c 2016-03-20 21:46:42.089409776 -0600
@@ -171,6 +171,7 @@ main(int argc, char* argv[])
* because MPI_File_close wants to modify the file-handle variable.
*/

+#if 0
/* close file1 */
if(H5Fget_vfd_handle(file1, fapl, (void **)&mpifh_p) < 0) {
printf("H5Fget_vfd_handle for file1 failed\n");
@@ -189,14 +190,17 @@ main(int argc, char* argv[])
printf("MPI_File_close for file2 failed\n");
goto error;
} /* end if */
+#endif

fflush(stdout);
fflush(stderr);
+ MPI_Finalize();
HD_exit(0);

error:
fflush(stdout);
fflush(stderr);
+ MPI_Finalize();
HD_exit(1);
}
109
hdf5-wrappers.patch
Normal file
@ -0,0 +1,109 @@
diff -Naur hdf5-1.10.8.orig/bin/h5cc.in hdf5-1.10.8/bin/h5cc.in
--- hdf5-1.10.8.orig/bin/h5cc.in 2022-04-07 18:23:46.000000000 -0600
+++ hdf5-1.10.8/bin/h5cc.in 2022-04-07 18:24:04.000000000 -0600
@@ -89,10 +89,10 @@
# paths and libraries from AM_LDFLAGS, LDFLAGS, AM_LIBS or LIBS carried in
# from the hdf5 build. The order of the flags is intended to give precedence
# to the user's flags.
-H5BLD_CFLAGS="@AM_CFLAGS@ @CFLAGS@"
+H5BLD_CFLAGS=
H5BLD_CPPFLAGS="@AM_CPPFLAGS@ @CPPFLAGS@"
-H5BLD_LDFLAGS="@AM_LDFLAGS@ @LDFLAGS@"
-H5BLD_LIBS="@LIBS@"
+H5BLD_LDFLAGS=
+H5BLD_LIBS=

CC="${HDF5_CC:-$CCBASE}"
CLINKER="${HDF5_CLINKER:-$CLINKERBASE}"
@@ -105,7 +105,8 @@
# available library is shared, it will be used by default. The user can
# override either default, although choosing an unavailable library will result
# in link errors.
-STATIC_AVAILABLE="@enable_static@"
+# openSUSE prefers shared libraries
+STATIC_AVAILABLE=no
if test "${STATIC_AVAILABLE}" = "yes"; then
USE_SHARED_LIB="${HDF5_USE_SHLIB:-no}"
else
@@ -385,7 +386,7 @@
# paths and libraries from AM_LDFLAGS, LDFLAGS, AM_LIBS or LIBS carried in
# from the hdf5 build. The order of the flags is intended to give precedence
# to the user's flags.
- $SHOW $CLINKER $H5BLD_CPPFLAGS $CPPFLAGS $H5BLD_CFLAGS $CFLAGS $LDFLAGS $clibpath $link_objs $LIBS $link_args $shared_link
+ $SHOW $CLINKER $H5BLD_CPPFLAGS $CPPFLAGS $H5BLD_CFLAGS $CFLAGS $LDFLAGS $clibpath $link_objs $LIBS $link_args
status=$?
fi

diff -Naur hdf5-1.10.8.orig/c++/src/h5c++.in hdf5-1.10.8/c++/src/h5c++.in
--- hdf5-1.10.8.orig/c++/src/h5c++.in 2022-04-07 18:23:45.000000000 -0600
+++ hdf5-1.10.8/c++/src/h5c++.in 2022-04-07 18:24:04.000000000 -0600
@@ -87,10 +87,10 @@
# paths and libraries from AM_LDFLAGS, LDFLAGS, AM_LIBS or LIBS carried in
# from the hdf5 build. The order of the flags is intended to give precedence
# to the user's flags.
-H5BLD_CXXFLAGS="@AM_CXXFLAGS@ @CXXFLAGS@"
+H5BLD_CXXFLAGS=
H5BLD_CPPFLAGS="@AM_CPPFLAGS@ @CPPFLAGS@"
-H5BLD_LDFLAGS="@AM_LDFLAGS@ @LDFLAGS@"
-H5BLD_LIBS="@LIBS@"
+H5BLD_LDFLAGS=
+H5BLD_LIBS=

CXX="${HDF5_CXX:-$CXXBASE}"
CXXLINKER="${HDF5_CXXLINKER:-$CXXLINKERBASE}"
@@ -103,7 +103,8 @@
# available library is shared, it will be used by default. The user can
# override either default, although choosing an unavailable library will result
# in link errors.
-STATIC_AVAILABLE="@enable_static@"
+# openSUSE prefers shared libraries
+STATIC_AVAILABLE=no
if test "${STATIC_AVAILABLE}" = "yes"; then
USE_SHARED_LIB="${HDF5_USE_SHLIB:-no}"
else
@@ -385,7 +386,7 @@
# from the hdf5 build. The order of the flags is intended to give precedence
# to the user's flags.

- $SHOW $CXXLINKER $H5BLD_CPPFLAGS $CPPFLAGS $H5BLD_CXXFLAGS $CXXFLAGS $LDFLAGS $clibpath $link_objs $LIBS $link_args $shared_link
+ $SHOW $CXXLINKER $H5BLD_CPPFLAGS $CPPFLAGS $H5BLD_CXXFLAGS $CXXFLAGS $LDFLAGS $clibpath $link_objs $LIBS $link_args

status=$?
fi
diff -Naur hdf5-1.10.8.orig/fortran/src/h5fc.in hdf5-1.10.8/fortran/src/h5fc.in
--- hdf5-1.10.8.orig/fortran/src/h5fc.in 2022-04-07 18:23:46.000000000 -0600
+++ hdf5-1.10.8/fortran/src/h5fc.in 2022-04-07 18:24:04.000000000 -0600
@@ -83,11 +83,11 @@
# libraries in $link_args, followed by any external library paths and libraries
# from AM_LDFLAGS, LDFLAGS, AM_LIBS or LIBS carried in from the hdf5 build.
# The order of the flags is intended to give precedence to the user's flags.
-H5BLD_FCFLAGS="@AM_FCFLAGS@ @FCFLAGS@"
+H5BLD_FCFLAGS=
F9XMODFLAG="@F9XMODFLAG@"
F9XSUFFIXFLAG="@F9XSUFFIXFLAG@"
-H5BLD_LDFLAGS="@AM_LDFLAGS@ @LDFLAGS@"
-H5BLD_LIBS="@LIBS@"
+H5BLD_LDFLAGS=
+H5BLD_LIBS=

FC="${HDF5_FC:-$FCBASE}"
FLINKER="${HDF5_FLINKER:-$FLINKERBASE}"
@@ -99,7 +99,8 @@
# available library is shared, it will be used by default. The user can
# override either default, although choosing an unavailable library will result
# in link errors.
-STATIC_AVAILABLE="@enable_static@"
+# openSUSE prefers shared libraries
+STATIC_AVAILABLE=no
if test "${STATIC_AVAILABLE}" = "yes"; then
USE_SHARED_LIB="${HDF5_USE_SHLIB:-no}"
else
@@ -363,7 +364,7 @@
# libraries in $link_args, followed by any external library paths and libraries
# from AM_LDFLAGS, LDFLAGS, AM_LIBS or LIBS carried in from the hdf5 build.
# The order of the flags is intended to give precedence to the user's flags.
- $SHOW $FLINKER $FCFLAGS $H5BLD_FCFLAGS $F9XSUFFIXFLAG $LDFLAGS $fmodules $link_objs $LIBS $link_args $shared_link
+ $SHOW $FLINKER $FCFLAGS $H5BLD_FCFLAGS $F9XSUFFIXFLAG $LDFLAGS $fmodules $link_objs $LIBS $link_args
status=$?
fi
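Note that the wrappers still honor the environment overrides visible in the context above (HDF5_CC, HDF5_CXX, HDF5_FC, HDF5_USE_SHLIB); only the flags recorded at build time are blanked. A hypothetical invocation (h5_example.c is a placeholder source file):

    HDF5_CC=gcc-12 h5cc -o h5_example h5_example.c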
1302
hdf5.changes
Normal file
File diff suppressed because it is too large
945
hdf5.spec
Normal file
@ -0,0 +1,945 @@
#
# spec file
#
# Copyright (c) 2023 SUSE LLC
#
# All modifications and additions to the file contributed by third parties
# remain the property of their copyright owners, unless otherwise agreed
# upon. The license for this file, and modifications and additions to the
# file, is the same license as for the pristine package itself (unless the
# license for the pristine package is not an Open Source License, in which
# case the license is the MIT License). An "Open Source License" is a
# license that conforms to the Open Source Definition (Version 1.9)
# published by the Open Source Initiative.

# Please submit bugfixes or comments via https://bugs.opensuse.org/
#


%global flavor @BUILD_FLAVOR@%{nil}

%if 0%{?sle_version} >= 150200
%define DisOMPI1 ExclusiveArch: do_not_build
%endif
%if !0%{?is_opensuse} && 0%{?sle_version:1} && 0%{?sle_version} < 150200
%define DisOMPI3 ExclusiveArch: do_not_build
%endif

%if 0%{?sle_version:1} && 0%{?sle_version} < 150300
%define DisOMPI4 ExclusiveArch: do_not_build
%endif

# Disable until resource issue is resolved.
%bcond_with check

%define use_sz2 0

%define short_ver 1.12
%define vers %{short_ver}.2
%define _vers %( echo %{vers} | tr '.' '_' )
%define src_ver %{version}
%define pname hdf5
%global _lto_cflags %{_lto_cflags} -ffat-lto-objects

%if "%{flavor}" == ""
ExclusiveArch: do_not_build
%define package_name %pname
%endif

%if "%{flavor}" == "serial"
%bcond_with hpc
%endif

%if "%{flavor}" == "openmpi4"
%{?DisOMPI4}
%global mpi_flavor openmpi
%define mpi_vers 4
%bcond_with hpc
%endif

%if "%{flavor}" == "openmpi5"
%{?DisOMPI5}
%global mpi_flavor openmpi
%define mpi_vers 5
%bcond_with hpc
%endif

%if "%{flavor}" == "mvapich2"
%global mpi_flavor %{flavor}
%bcond_with hpc
%endif

%if "%{flavor}" == "gnu-hpc"
%bcond_without hpc
%global compiler_family gnu
%undefine c_f_ver
%endif

%if "%{flavor}" == "gnu-openmpi4-hpc"
%{?DisOMPI4}
%bcond_without hpc
%define compiler_family gnu
%undefine c_f_ver
%global mpi_flavor openmpi
%define mpi_vers 4
%endif

%if "%{flavor}" == "gnu-openmpi5-hpc"
%{?DisOMPI5}
%bcond_without hpc
%define compiler_family gnu
%undefine c_f_ver
%global mpi_flavor openmpi
%define mpi_vers 5
%endif

%if "%{flavor}" == "gnu-mvapich2-hpc"
%bcond_without hpc
%define compiler_family gnu
%undefine c_f_ver
%global mpi_flavor mvapich2
%endif

%if "%{flavor}" == "gnu-mpich-hpc"
%bcond_without hpc
%define compiler_family gnu
%undefine c_f_ver
%global mpi_flavor mpich
%endif

%if "%{flavor}" == "gnu7-hpc"
%bcond_without hpc
%define compiler_family gnu
%define c_f_ver 7
%undefine mpi_flavor
%endif

%if "%{flavor}" == "gnu7-openmpi4-hpc"
%{?DisOMPI4}
%bcond_without hpc
%define compiler_family gnu
%define c_f_ver 7
%global mpi_flavor openmpi
%define mpi_vers 4
%endif

%if "%{flavor}" == "gnu7-openmpi5-hpc"
%{?DisOMPI5}
%bcond_without hpc
%define compiler_family gnu
%define c_f_ver 7
%global mpi_flavor openmpi
%define mpi_vers 5
%endif

%if "%{flavor}" == "gnu7-mvapich2-hpc"
%bcond_without hpc
%define compiler_family gnu
%define c_f_ver 7
%global mpi_flavor mvapich2
%endif

%if "%{flavor}" == "gnu7-mpich-hpc"
%bcond_without hpc
%define compiler_family gnu
%define c_f_ver 7
%global mpi_flavor mpich
%endif

%if "%{flavor}" == "gnu8-hpc"
%bcond_without hpc
%define compiler_family gnu
%define c_f_ver 8
%undefine mpi_flavor
%endif

%if "%{flavor}" == "gnu8-openmpi4-hpc"
%{?DisOMPI4}
%bcond_without hpc
%define compiler_family gnu
%define c_f_ver 8
%global mpi_flavor openmpi
%define mpi_vers 4
%endif

%if "%{flavor}" == "gnu8-openmpi5-hpc"
%{?DisOMPI5}
%bcond_without hpc
%define compiler_family gnu
%define c_f_ver 8
%global mpi_flavor openmpi
%define mpi_vers 5
%endif

%if "%{flavor}" == "gnu8-mvapich2-hpc"
%bcond_without hpc
%define compiler_family gnu
%define c_f_ver 8
%global mpi_flavor mvapich2
%endif

%if "%{flavor}" == "gnu8-mpich-hpc"
%bcond_without hpc
%define compiler_family gnu
%define c_f_ver 8
%global mpi_flavor mpich
%endif

%if "%{flavor}" == "gnu9-hpc"
%bcond_without hpc
%define compiler_family gnu
%define c_f_ver 9
%undefine mpi_flavor
%endif

%if "%{flavor}" == "gnu9-openmpi4-hpc"
%{?DisOMPI4}
%bcond_without hpc
%define compiler_family gnu
%define c_f_ver 9
%global mpi_flavor openmpi
%define mpi_vers 4
%endif

%if "%{flavor}" == "gnu9-openmpi5-hpc"
%{?DisOMPI5}
%bcond_without hpc
%define compiler_family gnu
%define c_f_ver 9
%global mpi_flavor openmpi
%define mpi_vers 5
%endif

%if "%{flavor}" == "gnu9-mvapich2-hpc"
%bcond_without hpc
%define compiler_family gnu
%define c_f_ver 9
%global mpi_flavor mvapich2
%endif

%if "%{flavor}" == "gnu9-mpich-hpc"
%bcond_without hpc
%define compiler_family gnu
%define c_f_ver 9
%global mpi_flavor mpich
%endif

%if "%{flavor}" == "gnu10-hpc"
%bcond_without hpc
%define compiler_family gnu
%define c_f_ver 10
%undefine mpi_flavor
%endif

%if "%{flavor}" == "gnu10-openmpi4-hpc"
%{?DisOMPI4}
%bcond_without hpc
%define compiler_family gnu
%define c_f_ver 10
%global mpi_flavor openmpi
%define mpi_vers 4
%endif

%if "%{flavor}" == "gnu10-openmpi5-hpc"
%{?DisOMPI5}
%bcond_without hpc
%define compiler_family gnu
%define c_f_ver 10
%global mpi_flavor openmpi
%define mpi_vers 5
%endif

%if "%{flavor}" == "gnu10-mvapich2-hpc"
%bcond_without hpc
%define compiler_family gnu
%define c_f_ver 10
%global mpi_flavor mvapich2
%endif

%if "%{flavor}" == "gnu10-mpich-hpc"
%bcond_without hpc
%define compiler_family gnu
%define c_f_ver 10
%global mpi_flavor mpich
%endif

%{?mpi_flavor:%{bcond_without mpi}}%{!?mpi_flavor:%{bcond_with mpi}}
%{?with_hpc:%{!?compiler_family:%global compiler_family gnu}}
%{?with_mpi:%{!?mpi_flavor:error "No MPI family specified!"}}

# For compatibility package names
%define mpi_ext %{?mpi_vers}

%if %{with hpc}
%{hpc_init -c %compiler_family %{?with_mpi:-m %mpi_flavor} %{?c_f_ver:-v %{c_f_ver}} %{?mpi_vers:-V %{mpi_vers}} %{?ext:-e %{ext}}}
%{?with_mpi:%global hpc_module_pname p%{pname}}
%define my_prefix %hpc_prefix
%define my_bindir %hpc_bindir
%ifarch x86_64
%define my_libdir %hpc_prefix/lib64
%else
%define my_libdir %hpc_libdir
%endif
%define my_incdir %hpc_includedir
%define package_name %{hpc_package_name %_vers}
%define libname(l:s:) lib%{pname}%{-l*}%{hpc_package_name_tail %{?_vers}}
%define vname %{pname}_%{_vers}-hpc
%else
%if %{without mpi}
%define my_prefix %_prefix
%define my_bindir %_bindir
%define my_libdir %_libdir
%define my_incdir %_includedir
%else
%define my_prefix %{_libdir}/mpi/gcc/%{mpi_flavor}%{?mpi_ext}
%define my_suffix -%{mpi_flavor}%{?mpi_ext}
%define my_bindir %{my_prefix}/bin
%define my_libdir %{my_prefix}/%{_lib}/
%define my_incdir %{my_prefix}/include/
%endif