Ana Guerrero 2024-05-15 19:27:01 +00:00 committed by Git OBS Bridge
commit bcd4bd717d
24 changed files with 183 additions and 748 deletions

View File

@ -1,66 +0,0 @@
From: Egbert Eich <eich@suse.com>
Date: Sat Feb 11 13:54:17 2023 +0100
Subject: Check for overflow when calculating on-disk attribute data size (#2459)
Patch-mainline: Not yet
Git-repo: https://github.com/HDFGroup/hdf5
Git-commit: 0d026daa13a81be72495872f651c036fdc84ae5e
References:
A bogus hdf5 file may contain dataspace messages with sizes
that cause the on-disk data size to exceed what is addressable.
When calculating the size, make sure the multiplication does not
overflow.
The test case was crafted in a way that the overflow caused the
size to be 0.
This fixes CVE-2021-37501 / Bug #2458.
Signed-off-by: Egbert Eich <eich@suse.com>
Signed-off-by: Egbert Eich <eich@suse.de>
---
src/H5Oattr.c | 3 +++
src/H5private.h | 18 ++++++++++++++++++
2 files changed, 21 insertions(+)
diff --git a/src/H5Oattr.c b/src/H5Oattr.c
index 4dee7aa187..3ef0b99aa4 100644
--- a/src/H5Oattr.c
+++ b/src/H5Oattr.c
@@ -235,6 +235,9 @@ H5O_attr_decode(H5F_t *f, H5O_t *open_oh, unsigned H5_ATTR_UNUSED mesg_flags, un
/* Compute the size of the data */
H5_CHECKED_ASSIGN(attr->shared->data_size, size_t, ds_size * (hsize_t)dt_size, hsize_t);
+ H5_CHECK_MUL_OVERFLOW(attr->shared->data_size, ds_size, dt_size,
+ HGOTO_ERROR(H5E_RESOURCE, H5E_OVERFLOW, NULL,
+ "data size exceeds addressable range"))
/* Go get the data */
if (attr->shared->data_size) {
diff --git a/src/H5private.h b/src/H5private.h
index 931d7b9046..a115aee1a4 100644
--- a/src/H5private.h
+++ b/src/H5private.h
@@ -1605,6 +1605,24 @@ H5_DLL int HDvasprintf(char **bufp, const char *fmt, va_list _ap);
#define H5_CHECK_OVERFLOW(var, vartype, casttype)
#endif /* NDEBUG */
+/*
+ * A macro for checking whether a multiplication has overflown
+ * r is assumed to be the result of a prior multiplication of a and b
+ */
+#define H5_CHECK_MUL_OVERFLOW(r, a, b, err) \
+ { \
+ bool mul_overflow = false; \
+ if (r != 0) { \
+ if (r / a != b) \
+ mul_overflow = true; \
+ } else { \
+ if (a != 0 && b != 0) \
+ mul_overflow = true; \
+ } \
+ if (mul_overflow) \
+ err \
+ }
+
/*
* A macro for detecting over/under-flow when assigning between types
*/
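For illustration, a minimal standalone C sketch of the division-based test that the H5_CHECK_MUL_OVERFLOW macro above encodes; mul_overflowed is an illustrative name, not HDF5 API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Division-based overflow test: r is assumed to hold a * b computed with
 * unsigned wrap-around semantics. If no overflow occurred, r / a == b
 * exactly; the zero cases are split out to avoid dividing by zero. */
static bool mul_overflowed(uint64_t r, uint64_t a, uint64_t b)
{
    if (r != 0)
        return r / a != b;
    return a != 0 && b != 0;
}

int main(void)
{
    uint64_t a = UINT64_C(1) << 33;
    uint64_t b = UINT64_C(1) << 33;
    uint64_t r = a * b; /* 2^66 wraps to 0 in 64-bit unsigned arithmetic */

    printf("overflow detected: %s\n", mul_overflowed(r, a, b) ? "yes" : "no");
    return 0;
}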

View File

@ -1,51 +0,0 @@
From: Egbert Eich <eich@suse.com>
Date: Wed Oct 5 15:47:54 2022 +0200
Subject: Compound datatypes may not have members of size 0
Patch-mainline: Not yet
Git-repo: ssh://eich@192.168.122.1:/home/eich/sources/HPC/hdf5
Git-commit: 88ea94d38fdfecba173dbea18502a5f82a46601b
References:
A member size of 0 may lead to an FPE later on, as reported in
CVE-2021-46244. To avoid this, check for a zero size as soon as the
member is decoded.
This should probably be done in H5O_dtype_decode_helper() already;
however, it is not clear whether all sizes are expected to be != 0.
This fixes CVE-2021-46244.
Signed-off-by: Egbert Eich <eich@suse.com>
Signed-off-by: Egbert Eich <eich@suse.de>
---
src/H5Odtype.c | 6 ++++++
src/H5T.c | 2 ++
2 files changed, 8 insertions(+)
diff --git a/src/H5Odtype.c b/src/H5Odtype.c
index 9af79f4e9a..d35fc65322 100644
--- a/src/H5Odtype.c
+++ b/src/H5Odtype.c
@@ -333,6 +333,12 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t
H5MM_xfree(dt->shared->u.compnd.memb);
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTDECODE, FAIL, "unable to decode member type")
} /* end if */
+ if (temp_type->shared->size == 0) {
+ for (j = 0; j <= i; j++)
+ H5MM_xfree(dt->shared->u.compnd.memb[j].name);
+ H5MM_xfree(dt->shared->u.compnd.memb);
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTDECODE, FAIL, "invalid field size in member type")
+ }
/* Upgrade the version if we can and it is necessary */
if (can_upgrade && temp_type->shared->version > version) {
diff --git a/src/H5T.c b/src/H5T.c
index 3bb220ac26..04b96c5676 100644
--- a/src/H5T.c
+++ b/src/H5T.c
@@ -3591,6 +3591,8 @@ H5T__complete_copy(H5T_t *new_dt, const H5T_t *old_dt, H5T_shared_t *reopened_fo
if (new_dt->shared->u.compnd.memb[i].type->shared->size !=
old_dt->shared->u.compnd.memb[old_match].type->shared->size) {
/* Adjust the size of the member */
+ if (old_dt->shared->u.compnd.memb[old_match].size == 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_BADVALUE, FAIL, "invalid field size in datatype")
new_dt->shared->u.compnd.memb[i].size =
(old_dt->shared->u.compnd.memb[old_match].size * tmp->shared->size) /
old_dt->shared->u.compnd.memb[old_match].type->shared->size;
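For illustration, why the zero-size check matters: the decoded member size later ends up as a divisor, so a crafted size of 0 would turn into a division-by-zero fault (SIGFPE). A minimal sketch under that assumption; struct member and rescale_member are made-up names, not HDF5 structures:

#include <stddef.h>
#include <stdio.h>

/* Hypothetical member descriptor -- the real compound-member struct in
 * H5T carries far more fields; only the sizes matter here. */
struct member {
    size_t size;      /* on-disk size of the member    */
    size_t type_size; /* size of the member's datatype */
};

/* Rescale a member size, refusing a zero size or divisor up front
 * instead of faulting with SIGFPE at the division below. */
static int rescale_member(const struct member *m, size_t new_type_size,
                          size_t *out)
{
    if (m->size == 0 || m->type_size == 0)
        return -1; /* reject: a decoded size of 0 is never valid here */
    *out = (m->size * new_type_size) / m->type_size;
    return 0;
}

int main(void)
{
    struct member bad = {0, 0};
    size_t scaled;

    if (rescale_member(&bad, 8, &scaled) < 0)
        puts("rejected size-0 member instead of faulting");
    return 0;
}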

View File

@ -8,11 +8,11 @@ References:
Signed-off-by: Egbert Eich <eich@suse.com>
Signed-off-by: Egbert Eich <eich@suse.de>
---
Index: hdf5-1.12.2/testpar/CMakeTests.cmake
Index: hdf5-1.12.3/testpar/CMakeTests.cmake
===================================================================
--- hdf5-1.12.2.orig/testpar/CMakeTests.cmake
+++ hdf5-1.12.2/testpar/CMakeTests.cmake
@@ -43,7 +43,7 @@ foreach (skiptest ${SKIP_tests})
--- hdf5-1.12.3.orig/testpar/CMakeTests.cmake
+++ hdf5-1.12.3/testpar/CMakeTests.cmake
@@ -49,7 +49,7 @@ foreach (skiptest ${SKIP_tests})
set (SKIP_testphdf5 "${SKIP_testphdf5};-x;${skiptest}")
endforeach ()
@ -21,11 +21,11 @@ Index: hdf5-1.12.2/testpar/CMakeTests.cmake
set_tests_properties (MPI_TEST_testphdf5 PROPERTIES
FIXTURES_REQUIRED par_clear_testphdf5
ENVIRONMENT "HDF5_ALARM_SECONDS=3600;srcdir=${HDF5_TEST_PAR_BINARY_DIR}"
Index: hdf5-1.12.2/testpar/Makefile.am
Index: hdf5-1.12.3/testpar/Makefile.am
===================================================================
--- hdf5-1.12.2.orig/testpar/Makefile.am
+++ hdf5-1.12.2/testpar/Makefile.am
@@ -30,7 +30,7 @@ check_SCRIPTS = $(TEST_SCRIPT_PARA)
--- hdf5-1.12.3.orig/testpar/Makefile.am
+++ hdf5-1.12.3/testpar/Makefile.am
@@ -29,7 +29,7 @@ check_SCRIPTS = $(TEST_SCRIPT_PARA)
# Test programs. These are our main targets.
#

View File

@ -11,11 +11,11 @@ Signed-off-by: Egbert Eich <eich@suse.de>
---
src/H5Olink.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/H5Olink.c b/src/H5Olink.c
index 51c44a36b0..ee2a413dc1 100644
--- a/src/H5Olink.c
+++ b/src/H5Olink.c
@@ -245,7 +245,7 @@ H5O__link_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSE
Index: hdf5-1.12.3/src/H5Olink.c
===================================================================
--- hdf5-1.12.3.orig/src/H5Olink.c
+++ hdf5-1.12.3/src/H5Olink.c
@@ -244,7 +244,7 @@ H5O__link_decode(H5F_t *f, H5O_t H5_ATTR
/* Make sure that length doesn't exceed buffer size, which could
occur when the file is corrupted */
if (p + len > p_end)

View File

@ -1,33 +0,0 @@
From: Egbert Eich <eich@suse.com>
Date: Tue Sep 27 10:29:56 2022 +0200
Subject: H5IMget_image_info: H5Sget_simple_extent_dims() does not exceed array size
Patch-mainline: Not yet
Git-repo: ssh://eich@192.168.122.1:/home/eich/sources/HPC/hdf5
Git-commit: c1baab0937c8956a15efc41240f68d573c7b7324
References:
Malformed hdf5 files may provide more dimensions than the array dim[] is
able to hold. Check the number of dimensions first by calling
H5Sget_simple_extent_dims() with NULL for both the 'dims' and 'maxdims' arguments.
This will cause the function to return only the number of dimensions.
This fixes CVE-2018-17439.
Signed-off-by: Egbert Eich <eich@suse.com>
Signed-off-by: Egbert Eich <eich@suse.de>
---
hl/src/H5IM.c | 2 ++
1 file changed, 2 insertions(+)
diff --git a/hl/src/H5IM.c b/hl/src/H5IM.c
index ff10d573c7..e37c696e25 100644
--- a/hl/src/H5IM.c
+++ b/hl/src/H5IM.c
@@ -283,6 +283,8 @@ H5IMget_image_info(hid_t loc_id, const char *dset_name, hsize_t *width, hsize_t
if ((sid = H5Dget_space(did)) < 0)
goto out;
+ if (H5Sget_simple_extent_dims(sid, NULL, NULL) > IMAGE24_RANK)
+ goto out;
/* Get dimensions */
if (H5Sget_simple_extent_dims(sid, dims, NULL) < 0)
goto out;
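H5Sget_simple_extent_dims() returns only the rank when both dims and maxdims are NULL, which is what the added check relies on. A minimal sketch of the pattern; MAX_RANK and get_dims_checked are illustrative names (the patch compares against IMAGE24_RANK):

#include "hdf5.h"

#define MAX_RANK 3 /* placeholder for the fixed destination array size */

/* Query only the rank first (NULL dims/maxdims), reject anything larger
 * than the destination array, then fetch the actual dimensions. */
static int get_dims_checked(hid_t sid, hsize_t dims[MAX_RANK])
{
    int ndims = H5Sget_simple_extent_dims(sid, NULL, NULL);

    if (ndims < 0 || ndims > MAX_RANK)
        return -1; /* malformed dataspace or more dimensions than we can hold */
    if (H5Sget_simple_extent_dims(sid, dims, NULL) < 0)
        return -1;
    return ndims;
}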

View File

@ -1,85 +0,0 @@
From: Egbert Eich <eich@suse.com>
Date: Tue Oct 4 23:09:01 2022 +0200
Subject: H5O__pline_decode() Make more resilient to out-of-bounds read
Patch-mainline: Not yet
Git-repo: ssh://eich@192.168.122.1:/home/eich/sources/HPC/hdf5
Git-commit: 35b798ca7542ce45ef016859b8e70d57b7f89cfe
References:
Malformed hdf5 files may have truncated content which does not match
the expected size. When this function attempts to decode such files, it will
read past the end of the allocated space, which may lead to a crash. Make sure
each element is within bounds before reading.
This fixes CVE-2019-8396.
Signed-off-by: Egbert Eich <eich@suse.com>
Signed-off-by: Egbert Eich <eich@suse.de>
---
src/H5Opline.c | 17 +++++++++++++++--
src/H5private.h | 3 +++
2 files changed, 18 insertions(+), 2 deletions(-)
diff --git a/src/H5Opline.c b/src/H5Opline.c
index ffc4557ffc..a532aa4512 100644
--- a/src/H5Opline.c
+++ b/src/H5Opline.c
@@ -110,6 +110,14 @@ H5FL_DEFINE(H5O_pline_t);
*
*-------------------------------------------------------------------------
*/
+static char err[] = "ran off the end of the buffer: current p = %p, p_end = %p";
+
+#define VERIFY_LIMIT(p,s,l) \
+ if (p + s - 1 > l) { \
+ HCOMMON_ERROR(H5E_RESOURCE, H5E_NOSPACE, err, p + s, l); \
+ HGOTO_DONE(NULL) \
+ };
+
static void *
H5O__pline_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSED mesg_flags,
unsigned H5_ATTR_UNUSED *ioflags, size_t p_size, const uint8_t *p)
@@ -159,6 +167,7 @@ H5O__pline_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, unsign
/* Decode filters */
for (i = 0, filter = &pline->filter[0]; i < pline->nused; i++, filter++) {
/* Filter ID */
+ VERIFY_LIMIT(p, 6, p_end) /* 6 bytes minimum */
UINT16DECODE(p, filter->id);
/* Length of filter name */
@@ -168,6 +177,7 @@ H5O__pline_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, unsign
UINT16DECODE(p, name_length);
if (pline->version == H5O_PLINE_VERSION_1 && name_length % 8)
HGOTO_ERROR(H5E_PLINE, H5E_CANTLOAD, NULL, "filter name length is not a multiple of eight")
+ VERIFY_LIMIT(p, 4, p_end) /* with name_length 4 bytes to go */
} /* end if */
/* Filter flags */
@@ -179,9 +189,12 @@ H5O__pline_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, unsign
/* Filter name, if there is one */
if (name_length) {
size_t actual_name_length; /* Actual length of name */
-
+ size_t len = (size_t)(p_end - p + 1);
/* Determine actual name length (without padding, but with null terminator) */
- actual_name_length = HDstrlen((const char *)p) + 1;
+ actual_name_length = HDstrnlen((const char *)p, len);
+ if (actual_name_length == len)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "filter name not null terminated")
+ actual_name_length += 1; /* include \0 byte */
HDassert(actual_name_length <= name_length);
/* Allocate space for the filter name, or use the internal buffer */
diff --git a/src/H5private.h b/src/H5private.h
index bc00f120d2..3285c36441 100644
--- a/src/H5private.h
+++ b/src/H5private.h
@@ -1485,6 +1485,9 @@ H5_DLL H5_ATTR_CONST int Nflock(int fd, int operation);
#ifndef HDstrlen
#define HDstrlen(S) strlen(S)
#endif
+#ifndef HDstrnlen
+#define HDstrnlen(S,L) strnlen(S,L)
+#endif
#ifndef HDstrncat
#define HDstrncat(X, Y, Z) strncat(X, Y, Z)
#endif
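The key change is replacing an unbounded HDstrlen() with HDstrnlen() limited to the bytes remaining in the message buffer, so a missing NUL terminator is reported instead of read past. A minimal standalone sketch of that pattern; read_name_len is an illustrative helper, not HDF5 code:

#define _POSIX_C_SOURCE 200809L /* for strnlen() */
#include <stddef.h>
#include <string.h>

/* Bounded name scan: limit the scan to the bytes left in the buffer and
 * treat a missing NUL terminator as corruption. p_end points at the last
 * valid byte, as in the decode routine above. */
static int read_name_len(const unsigned char *p, const unsigned char *p_end,
                         size_t *name_len /* out: length including the NUL */)
{
    size_t avail = (size_t)(p_end - p) + 1; /* bytes available, p_end inclusive */
    size_t n     = strnlen((const char *)p, avail);

    if (n == avail)
        return -1;     /* no terminator inside the buffer: corrupt input */
    *name_len = n + 1; /* account for the '\0' byte */
    return 0;
}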

View File

@ -1,35 +0,0 @@
From: Egbert Eich <eich@suse.com>
Date: Wed Sep 28 14:54:58 2022 +0200
Subject: H5O_dtype_decode_helper: Parent of enum needs to have same size as enum itself
Patch-mainline: Not yet
Git-repo: ssh://eich@192.168.122.1:/home/eich/sources/HPC/hdf5
Git-commit: d39a27113ef75058f236b0606a74b4af5767c4e7
References:
The size of the enumeration values is determined by the size of the parent.
Functions accessing the enumeration values use the size of the enumeration
to determine the size of each element and how much data to copy. Thus the
sizes of the enumeration and its parent need to match.
Check here to avoid unpleasant surprises later.
This fixes CVE-2018-14031.
Signed-off-by: Egbert Eich <eich@suse.com>
Signed-off-by: Egbert Eich <eich@suse.de>
---
src/H5Odtype.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/src/H5Odtype.c b/src/H5Odtype.c
index 9af79f4e9a..dc2b904362 100644
--- a/src/H5Odtype.c
+++ b/src/H5Odtype.c
@@ -472,6 +472,9 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t
if (H5O__dtype_decode_helper(ioflags, pp, dt->shared->parent) < 0)
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTDECODE, FAIL, "unable to decode parent datatype")
+ if (dt->shared->parent->shared->size != dt->shared->size)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTDECODE, FAIL, "ENUM size does not match parent")
+
/* Check if the parent of this enum has a version greater than the
* enum itself. */
H5O_DTYPE_CHECK_VERSION(dt, version, dt->shared->parent->shared->version, ioflags, "enum", FAIL)

View File

@ -19,11 +19,11 @@ Signed-off-by: Egbert Eich <eich@suse.de>
---
src/H5Ofsinfo.c | 14 +++++++++++++-
1 file changed, 13 insertions(+), 1 deletion(-)
diff --git a/src/H5Ofsinfo.c b/src/H5Ofsinfo.c
index 9f6514a291..15cbb5ae7b 100644
--- a/src/H5Ofsinfo.c
+++ b/src/H5Ofsinfo.c
@@ -88,6 +88,13 @@ H5FL_DEFINE_STATIC(H5O_fsinfo_t);
Index: hdf5-1.12.3/src/H5Ofsinfo.c
===================================================================
--- hdf5-1.12.3.orig/src/H5Ofsinfo.c
+++ hdf5-1.12.3/src/H5Ofsinfo.c
@@ -87,6 +87,13 @@ H5FL_DEFINE_STATIC(H5O_fsinfo_t);
*
*-------------------------------------------------------------------------
*/
@ -37,7 +37,7 @@ index 9f6514a291..15cbb5ae7b 100644
static void *
H5O__fsinfo_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSED mesg_flags,
unsigned H5_ATTR_UNUSED *ioflags, size_t p_size, const uint8_t *p)
@@ -112,6 +119,7 @@ H5O__fsinfo_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU
@@ -111,6 +118,7 @@ H5O__fsinfo_decode(H5F_t *f, H5O_t H5_AT
fsinfo->fs_addr[ptype - 1] = HADDR_UNDEF;
/* Version of message */
@ -45,7 +45,7 @@ index 9f6514a291..15cbb5ae7b 100644
vers = *p++;
if (vers == H5O_FSINFO_VERSION_0) {
@@ -125,6 +133,7 @@ H5O__fsinfo_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU
@@ -124,6 +132,7 @@ H5O__fsinfo_decode(H5F_t *f, H5O_t H5_AT
fsinfo->pgend_meta_thres = H5F_FILE_SPACE_PGEND_META_THRES;
fsinfo->eoa_pre_fsm_fsalloc = HADDR_UNDEF;
@ -53,7 +53,7 @@ index 9f6514a291..15cbb5ae7b 100644
strategy = (H5F_file_space_type_t)*p++; /* File space strategy */
H5F_DECODE_LENGTH(f, p, threshold); /* Free-space section threshold */
@@ -170,6 +179,7 @@ H5O__fsinfo_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU
@@ -169,6 +178,7 @@ H5O__fsinfo_decode(H5F_t *f, H5O_t H5_AT
HDassert(vers >= H5O_FSINFO_VERSION_1);
fsinfo->version = vers;
@ -61,7 +61,7 @@ index 9f6514a291..15cbb5ae7b 100644
fsinfo->strategy = (H5F_fspace_strategy_t)*p++; /* File space strategy */
fsinfo->persist = *p++; /* Free-space persist or not */
H5F_DECODE_LENGTH(f, p, fsinfo->threshold); /* Free-space section threshold */
@@ -181,9 +191,11 @@ H5O__fsinfo_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU
@@ -180,9 +190,11 @@ H5O__fsinfo_decode(H5F_t *f, H5O_t H5_AT
/* Decode addresses of free space managers, if persisting */
if (fsinfo->persist)

View File

@ -28,11 +28,11 @@ Signed-off-by: Egbert Eich <eich@suse.de>
---
src/H5Fint.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/H5Fint.c b/src/H5Fint.c
index 9b5613972f..01faf33495 100644
--- a/src/H5Fint.c
+++ b/src/H5Fint.c
@@ -1413,7 +1413,7 @@ H5F__dest(H5F_t *f, hbool_t flush)
Index: hdf5-1.12.3/src/H5Fint.c
===================================================================
--- hdf5-1.12.3.orig/src/H5Fint.c
+++ hdf5-1.12.3/src/H5Fint.c
@@ -1412,7 +1412,7 @@ H5F__dest(H5F_t *f, hbool_t flush, hbool
*/
if (H5AC_prep_for_file_close(f) < 0)
/* Push error, but keep going */

View File

@ -1,34 +0,0 @@
From: Egbert Eich <eich@suse.com>
Date: Sun Oct 9 08:07:23 2022 +0200
Subject: Make sure info block for external links has at least 3 bytes
Patch-mainline: Not yet
Git-repo: ssh://eich@192.168.122.1:/home/eich/sources/HPC/hdf5
Git-commit: 082bfe392b04b1137da9eabd1ecac76c212ab385
References:
According to the specification, the information block for external links
contains 1 byte of version/flag information and two 0-terminated strings
for the object linked to and the full path.
Although not very useful, the minimum string length for each would be one
byte.
This fixes CVE-2018-16438.
Signed-off-by: Egbert Eich <eich@suse.com>
Signed-off-by: Egbert Eich <eich@suse.de>
---
src/H5Olink.c | 2 ++
1 file changed, 2 insertions(+)
diff --git a/src/H5Olink.c b/src/H5Olink.c
index 51c44a36b0..074744b022 100644
--- a/src/H5Olink.c
+++ b/src/H5Olink.c
@@ -241,6 +241,8 @@ H5O__link_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSE
/* A UD link. Get the user-supplied data */
UINT16DECODE(p, len)
lnk->u.ud.size = len;
+ if (lnk->type == H5L_TYPE_EXTERNAL && len < 3)
+ HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "external link information lenght < 3")
if (len > 0) {
/* Make sure that length doesn't exceed buffer size, which could
occur when the file is corrupted */

View File

@ -7,49 +7,15 @@ Signed-off-by: Egbert Eich <eich@suse.com>
Signed-off-by: Egbert Eich <eich@suse.com>
---
hdf5-1.10.1/config/cmake/ConversionTests.c | 16 ++++++++++++++++
hdf5-1.10.1/test/dt_arith.c | 26 ++++++++++++++++++++++++--
2 files changed, 40 insertions(+), 2 deletions(-)
config/cmake/ConversionTests.c | 16 ++++++++++++++++
test/dt_arith.c | 27 +++++++++++++++++++++++++--
2 files changed, 41 insertions(+), 2 deletions(-)
Index: hdf5-1.12.2/config/cmake/ConversionTests.c
Index: hdf5-1.12.3/test/dt_arith.c
===================================================================
--- hdf5-1.12.2.orig/config/cmake/ConversionTests.c
+++ hdf5-1.12.2/config/cmake/ConversionTests.c
@@ -34,6 +34,14 @@ int HDF_NO_UBSAN main(void)
unsigned char s2[8];
int ret = 1;
+#if defined __powerpc64__ && defined _LITTLE_ENDIAN
+ /* Don't bother checking on ppc64le, we know it'll work, and
+ that what hdf5 calls 'special algorithm' simply is
+ IBM ldouble 128 (i.e. two seperately scaled doubles).
+ The check below assumes big endian. */
+ ret = 0;
+#endif
+
if(sizeof(long double) == 16 && sizeof(long) == 8) {
/*make sure the long double type has 16 bytes in size and
* 11 bits of exponent. If it is,
@@ -157,6 +165,14 @@ int HDF_NO_UBSAN main(void)
unsigned char s[16];
int ret = 0;
+#if defined __powerpc64__ && defined _LITTLE_ENDIAN
+ /* Don't bother checking on ppc64le, we know it'll work, and
+ that what hdf5 calls 'special algorithm' simply is
+ IBM ldouble 128 (i.e. two seperately scaled doubles).
+ The check below assumes big endian. */
+ ret = 0;
+#endif
+
if(sizeof(long double) == 16) {
/*make sure the long double type is the same as the failing type
*which has 16 bytes in size and 11 bits of exponent. If it is,
Index: hdf5-1.12.2/test/dt_arith.c
===================================================================
--- hdf5-1.12.2.orig/test/dt_arith.c
+++ hdf5-1.12.2/test/dt_arith.c
@@ -3036,7 +3036,19 @@ test_conv_flt_1(const char *name, int ru
--- hdf5-1.12.3.orig/test/dt_arith.c
+++ hdf5-1.12.3/test/dt_arith.c
@@ -3035,7 +3035,19 @@ test_conv_flt_1(const char *name, int ru
#if H5_SIZEOF_LONG_DOUBLE != H5_SIZEOF_DOUBLE
}
else if (src_type == FLT_LDOUBLE) {
@ -70,7 +36,7 @@ Index: hdf5-1.12.2/test/dt_arith.c
#endif
}
else
@@ -3736,7 +3748,18 @@ test_conv_int_fp(const char *name, int r
@@ -3735,7 +3747,18 @@ test_conv_int_fp(const char *name, int r
nelmts);
}
else {
@ -90,3 +56,37 @@ Index: hdf5-1.12.2/test/dt_arith.c
}
#endif
}
Index: hdf5-1.12.3/config/cmake/ConversionTests.c
===================================================================
--- hdf5-1.12.3.orig/config/cmake/ConversionTests.c
+++ hdf5-1.12.3/config/cmake/ConversionTests.c
@@ -34,6 +34,14 @@ int HDF_NO_UBSAN main(void)
unsigned char s2[8];
int ret = 1;
+#if defined __powerpc64__ && defined _LITTLE_ENDIAN
+ /* Don't bother checking on ppc64le, we know it'll work, and
+ that what hdf5 calls 'special algorithm' simply is
+ IBM ldouble 128 (i.e. two seperately scaled doubles).
+ The check below assumes big endian. */
+ ret = 0;
+#endif
+
if (sizeof(long double) == 16 && sizeof(long) == 8) {
/* Make sure the long double type has 16 bytes in size and
* 11 bits of exponent. If it is, the bit sequence should be
@@ -174,6 +182,14 @@ int HDF_NO_UBSAN main(void)
unsigned char s[16];
int ret = 0;
+#if defined __powerpc64__ && defined _LITTLE_ENDIAN
+ /* Don't bother checking on ppc64le, we know it'll work, and
+ that what hdf5 calls 'special algorithm' simply is
+ IBM ldouble 128 (i.e. two seperately scaled doubles).
+ The check below assumes big endian. */
+ ret = 0;
+#endif
+
if (sizeof(long double) == 16) {
/* Make sure the long double type is the same as the failing type
* which has 16 bytes in size and 11 bits of exponent. If it is,

View File

@ -1,258 +0,0 @@
From: Egbert Eich <eich@suse.com>
Date: Sat Oct 1 15:13:52 2022 +0200
Subject: Pass compact chunk size info to ensure requested elements are within bounds
Patch-mainline: Not yet
Git-repo: ssh://eich@192.168.122.1:/home/eich/sources/HPC/hdf5
Git-commit: 18300944261a9fa8f0087f99d9176f3757b1ec38
References:
To avoid reading/writing elements out of bounds of a compact chunk, pass
the chunk size along and check that all requested elements lie within it before
attempting to read/write them. Such out-of-bounds accesses can occur with
malformed hdf5 files.
This fixes CVE-2018-11205.
Signed-off-by: Egbert Eich <eich@suse.com>
Signed-off-by: Egbert Eich <eich@suse.de>
---
src/H5Dchunk.c | 34 +++++++++++++++++++++++++++-------
src/H5Dcompact.c | 5 +++++
src/H5Dpkg.h | 1 +
3 files changed, 33 insertions(+), 7 deletions(-)
diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c
index e6bf26ce89..94ad392cb7 100644
--- a/src/H5Dchunk.c
+++ b/src/H5Dchunk.c
@@ -128,6 +128,7 @@ typedef struct H5D_rdcc_ent_t {
H5F_block_t chunk_block; /*offset/length of chunk in file */
hsize_t chunk_idx; /*index of chunk in dataset */
uint8_t * chunk; /*the unfiltered chunk data */
+ size_t size; /*size of chunk */
unsigned idx; /*index in hash table */
struct H5D_rdcc_ent_t *next; /*next item in doubly-linked list */
struct H5D_rdcc_ent_t *prev; /*previous item in doubly-linked list */
@@ -303,7 +304,7 @@ static unsigned H5D__chunk_hash_val(const H5D_shared_t *shared, const hsize_t *s
static herr_t H5D__chunk_flush_entry(const H5D_t *dset, H5D_rdcc_ent_t *ent, hbool_t reset);
static herr_t H5D__chunk_cache_evict(const H5D_t *dset, H5D_rdcc_ent_t *ent, hbool_t flush);
static void * H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, hbool_t relax,
- hbool_t prev_unfilt_chunk);
+ hbool_t prev_unfilt_chunk, size_t *ret_size);
static herr_t H5D__chunk_unlock(const H5D_io_info_t *io_info, const H5D_chunk_ud_t *udata, hbool_t dirty,
void *chunk, uint32_t naccessed);
static herr_t H5D__chunk_cache_prune(const H5D_t *dset, size_t size);
@@ -2480,6 +2481,7 @@ H5D__chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_
uint32_t src_accessed_bytes = 0; /* Total accessed size in a chunk */
hbool_t skip_missing_chunks = FALSE; /* Whether to skip missing chunks */
herr_t ret_value = SUCCEED; /*return value */
+ size_t chunk_size = 0;
FUNC_ENTER_STATIC
@@ -2565,11 +2567,12 @@ H5D__chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_
src_accessed_bytes = chunk_info->chunk_points * (uint32_t)type_info->src_type_size;
/* Lock the chunk into the cache */
- if (NULL == (chunk = H5D__chunk_lock(io_info, &udata, FALSE, FALSE)))
+ if (NULL == (chunk = H5D__chunk_lock(io_info, &udata, FALSE, FALSE, &chunk_size)))
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk")
/* Set up the storage buffer information for this chunk */
cpt_store.compact.buf = chunk;
+ cpt_store.compact.size = chunk_size;
/* Point I/O info at contiguous I/O info for this chunk */
chk_io_info = &cpt_io_info;
@@ -2629,6 +2632,7 @@ H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize
hbool_t cpt_dirty; /* Temporary placeholder for compact storage "dirty" flag */
uint32_t dst_accessed_bytes = 0; /* Total accessed size in a chunk */
herr_t ret_value = SUCCEED; /* Return value */
+ size_t chunk_size;
FUNC_ENTER_STATIC
@@ -2699,11 +2703,12 @@ H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize
entire_chunk = FALSE;
/* Lock the chunk into the cache */
- if (NULL == (chunk = H5D__chunk_lock(io_info, &udata, entire_chunk, FALSE)))
+ if (NULL == (chunk = H5D__chunk_lock(io_info, &udata, entire_chunk, FALSE, &chunk_size)))
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk")
/* Set up the storage buffer information for this chunk */
cpt_store.compact.buf = chunk;
+ cpt_store.compact.size = chunk_size;
/* Point I/O info at main I/O info for this chunk */
chk_io_info = &cpt_io_info;
@@ -3714,7 +3719,7 @@ done:
*-------------------------------------------------------------------------
*/
static void *
-H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, hbool_t relax, hbool_t prev_unfilt_chunk)
+H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, hbool_t relax, hbool_t prev_unfilt_chunk, size_t *ret_size)
{
const H5D_t * dset = io_info->dset; /* Local pointer to the dataset info */
const H5O_pline_t *pline =
@@ -3731,6 +3736,7 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, hbool_t rel
hbool_t disable_filters = FALSE; /* Whether to disable filters (when adding to cache) */
void * chunk = NULL; /*the file chunk */
void * ret_value = NULL; /* Return value */
+ size_t chunk_size_ret = 0;
FUNC_ENTER_STATIC
@@ -3796,6 +3802,7 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, hbool_t rel
ent->chunk = (uint8_t *)H5D__chunk_mem_xfree(ent->chunk, old_pline);
ent->chunk = (uint8_t *)chunk;
chunk = NULL;
+ ent->size = chunk_size;
/* Mark the chunk as having filters disabled as well as "newly
* disabled" so it is inserted on flush */
@@ -3823,6 +3830,7 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, hbool_t rel
ent->chunk = (uint8_t *)H5D__chunk_mem_xfree(ent->chunk, old_pline);
ent->chunk = (uint8_t *)chunk;
chunk = NULL;
+ ent->size = chunk_size;
/* Mark the chunk as having filters enabled */
ent->edge_chunk_state &= ~(H5D_RDCC_DISABLE_FILTERS | H5D_RDCC_NEWLY_DISABLED_FILTERS);
@@ -3902,6 +3910,7 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, hbool_t rel
/* In the case that some dataset functions look through this data,
* clear it to all 0s. */
HDmemset(chunk, 0, chunk_size);
+ chunk_size_ret = chunk_size;
} /* end if */
else {
/*
@@ -3924,6 +3933,7 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, hbool_t rel
my_chunk_alloc, chunk) < 0)
HGOTO_ERROR(H5E_IO, H5E_READERROR, NULL, "unable to read raw data chunk")
+ chunk_size_ret = my_chunk_alloc;
if (old_pline && old_pline->nused) {
H5Z_EDC_t err_detect; /* Error detection info */
H5Z_cb_t filter_cb; /* I/O filter callback function */
@@ -3937,6 +3947,7 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, hbool_t rel
if (H5Z_pipeline(old_pline, H5Z_FLAG_REVERSE, &(udata->filter_mask), err_detect,
filter_cb, &my_chunk_alloc, &buf_alloc, &chunk) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTFILTER, NULL, "data pipeline read failed")
+ chunk_size_ret = buf_alloc;
/* Reallocate chunk if necessary */
if (udata->new_unfilt_chunk) {
@@ -3947,6 +3958,7 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, hbool_t rel
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL,
"memory allocation failed for raw data chunk")
} /* end if */
+ chunk_size_ret = my_chunk_alloc;
H5MM_memcpy(chunk, tmp_chunk, chunk_size);
(void)H5D__chunk_mem_xfree(tmp_chunk, old_pline);
} /* end if */
@@ -3967,6 +3979,7 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, hbool_t rel
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL,
"memory allocation failed for raw data chunk")
+ chunk_size_ret = chunk_size;
if (H5P_is_fill_value_defined(fill, &fill_status) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't tell if fill value defined")
@@ -4032,6 +4045,7 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, hbool_t rel
H5_CHECKED_ASSIGN(ent->rd_count, uint32_t, chunk_size, size_t);
H5_CHECKED_ASSIGN(ent->wr_count, uint32_t, chunk_size, size_t);
ent->chunk = (uint8_t *)chunk;
+ ent->size = chunk_size_ret;
/* Add it to the cache */
HDassert(NULL == rdcc->slot[udata->idx_hint]);
@@ -4065,6 +4079,7 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, hbool_t rel
HDassert(!ent->locked);
ent->locked = TRUE;
chunk = ent->chunk;
+ chunk_size_ret = ent->size;
} /* end if */
else
/*
@@ -4076,6 +4091,8 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, hbool_t rel
/* Set return value */
ret_value = chunk;
+ if (ret_size != NULL)
+ *ret_size = chunk_size_ret;
done:
/* Release the fill buffer info, if it's been initialized */
@@ -4084,8 +4101,11 @@ done:
/* Release the chunk allocated, on error */
if (!ret_value)
- if (chunk)
+ if (chunk) {
chunk = H5D__chunk_mem_xfree(chunk, pline);
+ if (ret_size != NULL)
+ *ret_size = 0;
+ }
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__chunk_lock() */
@@ -4884,7 +4904,7 @@ H5D__chunk_update_old_edge_chunks(H5D_t *dset, hsize_t old_dim[])
if (H5F_addr_defined(chk_udata.chunk_block.offset) || (UINT_MAX != chk_udata.idx_hint)) {
/* Lock the chunk into cache. H5D__chunk_lock will take care of
* updating the chunk to no longer be an edge chunk. */
- if (NULL == (chunk = (void *)H5D__chunk_lock(&chk_io_info, &chk_udata, FALSE, TRUE)))
+ if (NULL == (chunk = (void *)H5D__chunk_lock(&chk_io_info, &chk_udata, FALSE, TRUE, NULL)))
HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "unable to lock raw data chunk")
/* Unlock the chunk */
@@ -5274,7 +5294,7 @@ H5D__chunk_prune_fill(H5D_chunk_it_ud1_t *udata, hbool_t new_unfilt_chunk)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSELECT, FAIL, "unable to select hyperslab")
/* Lock the chunk into the cache, to get a pointer to the chunk buffer */
- if (NULL == (chunk = (void *)H5D__chunk_lock(io_info, &chk_udata, FALSE, FALSE)))
+ if (NULL == (chunk = (void *)H5D__chunk_lock(io_info, &chk_udata, FALSE, FALSE, NULL)))
HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "unable to lock raw data chunk")
/* Fill the selection in the memory buffer */
diff --git a/src/H5Dcompact.c b/src/H5Dcompact.c
index b78693660d..21c37e8a08 100644
--- a/src/H5Dcompact.c
+++ b/src/H5Dcompact.c
@@ -245,6 +245,7 @@ H5D__compact_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t H5_ATTR
FUNC_ENTER_STATIC_NOERR
io_info->store->compact.buf = io_info->dset->shared->layout.storage.u.compact.buf;
+ io_info->store->compact.size = io_info->dset->shared->layout.storage.u.compact.size;
io_info->store->compact.dirty = &io_info->dset->shared->layout.storage.u.compact.dirty;
FUNC_LEAVE_NOAPI(SUCCEED)
@@ -278,6 +279,8 @@ H5D__compact_readvv(const H5D_io_info_t *io_info, size_t dset_max_nseq, size_t *
FUNC_ENTER_STATIC
HDassert(io_info);
+ if (io_info->store->compact.size < *(dset_offset_arr + dset_max_nseq - 1) + *(dset_size_arr + dset_max_nseq - 1))
+ HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "source size less than requested data")
/* Use the vectorized memory copy routine to do actual work */
if ((ret_value = H5VM_memcpyvv(io_info->u.rbuf, mem_max_nseq, mem_curr_seq, mem_size_arr, mem_offset_arr,
@@ -320,6 +323,8 @@ H5D__compact_writevv(const H5D_io_info_t *io_info, size_t dset_max_nseq, size_t
FUNC_ENTER_STATIC
HDassert(io_info);
+ if (io_info->store->compact.size < *(dset_offset_arr + dset_max_nseq - 1) + *(dset_size_arr + dset_max_nseq - 1))
+ HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "source size less than requested data")
/* Use the vectorized memory copy routine to do actual work */
if ((ret_value = H5VM_memcpyvv(io_info->store->compact.buf, dset_max_nseq, dset_curr_seq, dset_size_arr,
diff --git a/src/H5Dpkg.h b/src/H5Dpkg.h
index 64692c5d1d..8a4acd62e3 100644
--- a/src/H5Dpkg.h
+++ b/src/H5Dpkg.h
@@ -196,6 +196,7 @@ typedef struct {
typedef struct {
void * buf; /* Buffer for compact dataset */
hbool_t *dirty; /* Pointer to dirty flag to mark */
+ size_t size; /* Buffer size for compact dataset */
} H5D_compact_storage_t;
typedef union H5D_storage_t {
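A minimal sketch of the bounds test this patch adds before the vectored copy: the segment with the highest offset must end inside the compact buffer. All names below are illustrative, not the library's; segments are assumed sorted by offset, as in the readvv/writevv calls:

#include <stddef.h>
#include <string.h>

/* Validate the requested segments against the compact buffer size before
 * copying, mirroring the check added to H5D__compact_readvv/_writevv:
 * the last (highest-offset) segment must end inside the buffer. */
static int vec_copy_checked(unsigned char *buf, size_t buf_size,
                            const unsigned char *src,
                            const size_t off[], const size_t len[], size_t nseq)
{
    size_t i;

    if (nseq == 0)
        return 0;
    if (off[nseq - 1] + len[nseq - 1] > buf_size)
        return -1; /* request runs past the end of the compact chunk */
    for (i = 0; i < nseq; i++) {
        memcpy(buf + off[i], src, len[i]);
        src += len[i];
    }
    return 0;
}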

View File

@ -1,28 +0,0 @@
From: Egbert Eich <eich@suse.com>
Date: Sat Feb 11 18:08:15 2023 +0100
Subject: Remove duplicate code
Patch-mainline: Not yet
Git-repo: https://github.com/HDFGroup/hdf5
Git-commit: 539bca81e2b5713b1c6c5723d742377fb92c1ac1
References:
Signed-off-by: Egbert Eich <eich@suse.com>
Signed-off-by: Egbert Eich <eich@suse.de>
---
src/H5Oattr.c | 4 ----
1 file changed, 4 deletions(-)
diff --git a/src/H5Oattr.c b/src/H5Oattr.c
index 3ef0b99aa4..19d3abfb4c 100644
--- a/src/H5Oattr.c
+++ b/src/H5Oattr.c
@@ -222,10 +222,6 @@ H5O_attr_decode(H5F_t *f, H5O_t *open_oh, unsigned H5_ATTR_UNUSED mesg_flags, un
else
p += attr->shared->ds_size;
- /* Get the datatype's size */
- if (0 == (dt_size = H5T_get_size(attr->shared->dt)))
- HGOTO_ERROR(H5E_ATTR, H5E_CANTGET, NULL, "unable to get datatype size")
-
/* Get the datatype & dataspace sizes */
if (0 == (dt_size = H5T_get_size(attr->shared->dt)))
HGOTO_ERROR(H5E_ATTR, H5E_CANTGET, NULL, "unable to get datatype size")

View File

@ -19,30 +19,20 @@ This fixes CVE-2021-45833.
Signed-off-by: Egbert Eich <eich@suse.com>
Signed-off-by: Egbert Eich <eich@suse.de>
---
src/H5Olayout.c | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/src/H5Olayout.c b/src/H5Olayout.c
index c939e72744..9fa9e36e8c 100644
--- a/src/H5Olayout.c
+++ b/src/H5Olayout.c
@@ -168,6 +168,10 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU
p += ndims * 4; /* Skip over dimension sizes (32-bit quantities) */
} /* end if */
else {
+ if (ndims < 2)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL,
+ "bad dimensions for chunked storage")
+
mesg->u.chunk.ndims = ndims;
for (u = 0; u < ndims; u++)
UINT32DECODE(p, mesg->u.chunk.dim[u]);
@@ -241,6 +245,9 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU
mesg->u.chunk.ndims = *p++;
if (mesg->u.chunk.ndims > H5O_LAYOUT_NDIMS)
HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "dimensionality is too large")
src/H5Olayout.c | 4 ++++
1 file changed, 4 insertions(+)
Index: hdf5-1.12.3/src/H5Olayout.c
===================================================================
--- hdf5-1.12.3.orig/src/H5Olayout.c
+++ hdf5-1.12.3/src/H5Olayout.c
@@ -291,6 +291,10 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_AT
if (mesg->u.chunk.ndims < 2)
HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "bad dimensions for chunked storage")
+ if (mesg->u.chunk.ndims < 2)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL,
+ "bad dimensions for chunked storage")
+
/* B-tree address */
H5F_addr_decode(f, &p, &(mesg->storage.u.chunk.idx_addr));
if (H5_IS_BUFFER_OVERFLOW(p, H5F_SIZEOF_ADDR(f), p_end))
HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL,

View File

@ -22,11 +22,11 @@ Signed-off-by: Egbert Eich <eich@suse.de>
---
src/H5Faccum.c | 19 +++++++++++++------
1 file changed, 13 insertions(+), 6 deletions(-)
diff --git a/src/H5Faccum.c b/src/H5Faccum.c
index aed5812e63..73bd4b811e 100644
--- a/src/H5Faccum.c
+++ b/src/H5Faccum.c
@@ -48,6 +48,7 @@
Index: hdf5-1.12.3/src/H5Faccum.c
===================================================================
--- hdf5-1.12.3.orig/src/H5Faccum.c
+++ hdf5-1.12.3/src/H5Faccum.c
@@ -47,6 +47,7 @@
#define H5F_ACCUM_THROTTLE 8
#define H5F_ACCUM_THRESHOLD 2048
#define H5F_ACCUM_MAX_SIZE (1024 * 1024) /* Max. accum. buf size (max. I/Os will be 1/2 this size) */
@ -34,7 +34,7 @@ index aed5812e63..73bd4b811e 100644
/******************/
/* Local Typedefs */
@@ -126,8 +127,9 @@ H5F__accum_read(H5F_shared_t *f_sh, H5FD_mem_t map_type, haddr_t addr, size_t si
@@ -125,8 +126,9 @@ H5F__accum_read(H5F_shared_t *f_sh, H5FD
HDassert(!accum->buf || (accum->alloc_size >= accum->size));
/* Current read adjoins or overlaps with metadata accumulator */
@ -46,7 +46,7 @@ index aed5812e63..73bd4b811e 100644
size_t amount_before; /* Amount to read before current accumulator */
haddr_t new_addr; /* New address of the accumulator buffer */
size_t new_size; /* New size of the accumulator buffer */
@@ -439,7 +441,8 @@ H5F__accum_write(H5F_shared_t *f_sh, H5FD_mem_t map_type, haddr_t addr, size_t s
@@ -438,7 +440,8 @@ H5F__accum_write(H5F_shared_t *f_sh, H5F
/* Check if there is already metadata in the accumulator */
if (accum->size > 0) {
/* Check if the new metadata adjoins the beginning of the current accumulator */
@ -56,7 +56,7 @@ index aed5812e63..73bd4b811e 100644
/* Check if we need to adjust accumulator size */
if (H5F__accum_adjust(accum, file, H5F_ACCUM_PREPEND, size) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTRESIZE, FAIL, "can't adjust metadata accumulator")
@@ -464,7 +467,8 @@ H5F__accum_write(H5F_shared_t *f_sh, H5FD_mem_t map_type, haddr_t addr, size_t s
@@ -463,7 +466,8 @@ H5F__accum_write(H5F_shared_t *f_sh, H5F
accum->dirty_off = 0;
} /* end if */
/* Check if the new metadata adjoins the end of the current accumulator */
@ -66,7 +66,7 @@ index aed5812e63..73bd4b811e 100644
/* Check if we need to adjust accumulator size */
if (H5F__accum_adjust(accum, file, H5F_ACCUM_APPEND, size) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTRESIZE, FAIL, "can't adjust metadata accumulator")
@@ -485,7 +489,8 @@ H5F__accum_write(H5F_shared_t *f_sh, H5FD_mem_t map_type, haddr_t addr, size_t s
@@ -484,7 +488,8 @@ H5F__accum_write(H5F_shared_t *f_sh, H5F
accum->size += size;
} /* end if */
/* Check if the piece of metadata being written overlaps the metadata accumulator */
@ -76,7 +76,7 @@ index aed5812e63..73bd4b811e 100644
size_t add_size; /* New size of the accumulator buffer */
/* Check if the new metadata is entirely within the current accumulator */
@@ -745,7 +750,8 @@ H5F__accum_write(H5F_shared_t *f_sh, H5FD_mem_t map_type, haddr_t addr, size_t s
@@ -744,7 +749,8 @@ H5F__accum_write(H5F_shared_t *f_sh, H5F
/* (Note that this could be improved by updating the accumulator
* with [some of] the information just read in. -QAK)
*/
@ -86,7 +86,7 @@ index aed5812e63..73bd4b811e 100644
/* Check for write starting before beginning of accumulator */
if (H5F_addr_le(addr, accum->loc)) {
/* Check for write ending within accumulator */
@@ -868,6 +874,7 @@ H5F__accum_free(H5F_shared_t *f_sh, H5FD_mem_t H5_ATTR_UNUSED type, haddr_t addr
@@ -867,6 +873,7 @@ H5F__accum_free(H5F_shared_t *f_sh, H5FD
/* Adjust the metadata accumulator to remove the freed block, if it overlaps */
if ((f_sh->feature_flags & H5FD_FEAT_ACCUMULATE_METADATA) &&

View File

@ -16,11 +16,11 @@ Signed-off-by: Egbert Eich <eich@suse.de>
---
src/H5Fsuper.c | 2 ++
1 file changed, 2 insertions(+)
diff --git a/src/H5Fsuper.c b/src/H5Fsuper.c
index 60b045ae29..1283790c57 100644
--- a/src/H5Fsuper.c
+++ b/src/H5Fsuper.c
@@ -1044,6 +1044,8 @@ done:
Index: hdf5-1.12.3/src/H5Fsuper.c
===================================================================
--- hdf5-1.12.3.orig/src/H5Fsuper.c
+++ hdf5-1.12.3/src/H5Fsuper.c
@@ -1045,6 +1045,8 @@ done:
/* Evict the driver info block from the cache */
if (sblock && H5AC_expunge_entry(f, H5AC_DRVRINFO, sblock->driver_addr, H5AC__NO_FLAGS_SET) < 0)
HDONE_ERROR(H5E_FILE, H5E_CANTEXPUNGE, FAIL, "unable to expunge driver info block")

BIN
hdf5-1.12.2.tar.bz2 (Stored with Git LFS)

Binary file not shown.

hdf5-1.12.3.tar.bz2 Normal file
View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:31cdc13b1097b34f8d5bee67a8ea2ab082b7c128d817e69e04d8f59ae8aea295
size 15433382

View File

@ -2,7 +2,7 @@ Index: configure
===================================================================
--- configure.orig
+++ configure
@@ -4343,6 +4343,8 @@ done
@@ -4962,6 +4962,8 @@ done
if test "X$host_config" != "Xnone"; then
CC_BASENAME="`echo $CC | cut -f1 -d' ' | xargs basename 2>/dev/null`"
. $host_config
@ -15,7 +15,7 @@ Index: configure.ac
===================================================================
--- configure.ac.orig
+++ configure.ac
@@ -330,6 +330,8 @@ done
@@ -346,6 +346,8 @@ done
if test "X$host_config" != "Xnone"; then
CC_BASENAME="`echo $CC | cut -f1 -d' ' | xargs basename 2>/dev/null`"
. $host_config

View File

@ -1,8 +1,8 @@
Index: hdf5-1.12.2/src/Makefile.in
Index: hdf5-1.12.3/src/Makefile.in
===================================================================
--- hdf5-1.12.2.orig/src/Makefile.in
+++ hdf5-1.12.2/src/Makefile.in
@@ -1998,8 +1998,6 @@ help:
--- hdf5-1.12.3.orig/src/Makefile.in
+++ hdf5-1.12.3/src/Makefile.in
@@ -2831,8 +2831,6 @@ help:
# Remove the generated .c file if errors occur unless HDF5_Make_Ignore
# is set to ignore the error.
H5Tinit.c: H5detect$(EXEEXT)
@ -11,7 +11,7 @@ Index: hdf5-1.12.2/src/Makefile.in
$(RUNSERIAL) ./H5detect$(EXEEXT) $@ || \
(test $$HDF5_Make_Ignore && echo "*** Error ignored") || \
($(RM) $@ ; exit 1)
@@ -2010,8 +2008,6 @@ H5Tinit.c: H5detect$(EXEEXT)
@@ -2843,8 +2841,6 @@ H5Tinit.c: H5detect$(EXEEXT)
# Remove the generated .c file if errors occur unless HDF5_Make_Ignore
# is set to ignore the error.
H5lib_settings.c: H5make_libsettings$(EXEEXT) libhdf5.settings

View File

@ -10,11 +10,11 @@ Signed-off-by: Egbert Eich <eich@suse.de>
---
src/H5detect.c | 15 ---------------
1 file changed, 15 deletions(-)
Index: hdf5-1.12.2/src/H5detect.c
Index: hdf5-1.12.3/src/H5detect.c
===================================================================
--- hdf5-1.12.2.orig/src/H5detect.c
+++ hdf5-1.12.2/src/H5detect.c
@@ -1224,22 +1224,6 @@ bit.\n";
--- hdf5-1.12.3.orig/src/H5detect.c
+++ hdf5-1.12.3/src/H5detect.c
@@ -1219,22 +1219,6 @@ bit.\n";
fprintf(rawoutstream, "/* Generated automatically by H5detect -- do not edit */\n\n\n");
HDfputs(FileHeader, rawoutstream); /*the copyright notice--see top of this file */

View File

@ -1,7 +1,8 @@
diff -Naur hdf5-1.10.8.orig/bin/h5cc.in hdf5-1.10.8/bin/h5cc.in
--- hdf5-1.10.8.orig/bin/h5cc.in 2022-04-07 18:23:46.000000000 -0600
+++ hdf5-1.10.8/bin/h5cc.in 2022-04-07 18:24:04.000000000 -0600
@@ -89,10 +89,10 @@
Index: hdf5-1.12.3/bin/h5cc.in
===================================================================
--- hdf5-1.12.3.orig/bin/h5cc.in
+++ hdf5-1.12.3/bin/h5cc.in
@@ -88,10 +88,10 @@ CLINKERBASE="@CC@"
# paths and libraries from AM_LDFLAGS, LDFLAGS, AM_LIBS or LIBS carried in
# from the hdf5 build. The order of the flags is intended to give precedence
# to the user's flags.
@ -15,7 +16,7 @@ diff -Naur hdf5-1.10.8.orig/bin/h5cc.in hdf5-1.10.8/bin/h5cc.in
CC="${HDF5_CC:-$CCBASE}"
CLINKER="${HDF5_CLINKER:-$CLINKERBASE}"
@@ -105,7 +105,8 @@
@@ -104,7 +104,8 @@ LIBS="${HDF5_LIBS:-$LIBSBASE}"
# available library is shared, it will be used by default. The user can
# override either default, although choosing an unavailable library will result
# in link errors.
@ -25,7 +26,7 @@ diff -Naur hdf5-1.10.8.orig/bin/h5cc.in hdf5-1.10.8/bin/h5cc.in
if test "${STATIC_AVAILABLE}" = "yes"; then
USE_SHARED_LIB="${HDF5_USE_SHLIB:-no}"
else
@@ -385,7 +386,7 @@
@@ -384,7 +385,7 @@ if test "x$do_link" = "xyes"; then
# paths and libraries from AM_LDFLAGS, LDFLAGS, AM_LIBS or LIBS carried in
# from the hdf5 build. The order of the flags is intended to give precedence
# to the user's flags.
@ -34,10 +35,11 @@ diff -Naur hdf5-1.10.8.orig/bin/h5cc.in hdf5-1.10.8/bin/h5cc.in
status=$?
fi
diff -Naur hdf5-1.10.8.orig/c++/src/h5c++.in hdf5-1.10.8/c++/src/h5c++.in
--- hdf5-1.10.8.orig/c++/src/h5c++.in 2022-04-07 18:23:45.000000000 -0600
+++ hdf5-1.10.8/c++/src/h5c++.in 2022-04-07 18:24:04.000000000 -0600
@@ -87,10 +87,10 @@
Index: hdf5-1.12.3/c++/src/h5c++.in
===================================================================
--- hdf5-1.12.3.orig/c++/src/h5c++.in
+++ hdf5-1.12.3/c++/src/h5c++.in
@@ -86,10 +86,10 @@ CXXLINKERBASE="@CXX@"
# paths and libraries from AM_LDFLAGS, LDFLAGS, AM_LIBS or LIBS carried in
# from the hdf5 build. The order of the flags is intended to give precedence
# to the user's flags.
@ -51,7 +53,7 @@ diff -Naur hdf5-1.10.8.orig/c++/src/h5c++.in hdf5-1.10.8/c++/src/h5c++.in
CXX="${HDF5_CXX:-$CXXBASE}"
CXXLINKER="${HDF5_CXXLINKER:-$CXXLINKERBASE}"
@@ -103,7 +103,8 @@
@@ -102,7 +102,8 @@ LIBS="${HDF5_LIBS:-$LIBSBASE}"
# available library is shared, it will be used by default. The user can
# override either default, although choosing an unavailable library will result
# in link errors.
@ -61,7 +63,7 @@ diff -Naur hdf5-1.10.8.orig/c++/src/h5c++.in hdf5-1.10.8/c++/src/h5c++.in
if test "${STATIC_AVAILABLE}" = "yes"; then
USE_SHARED_LIB="${HDF5_USE_SHLIB:-no}"
else
@@ -385,7 +386,7 @@
@@ -384,7 +385,7 @@ if test "x$do_link" = "xyes"; then
# from the hdf5 build. The order of the flags is intended to give precedence
# to the user's flags.
@ -70,10 +72,11 @@ diff -Naur hdf5-1.10.8.orig/c++/src/h5c++.in hdf5-1.10.8/c++/src/h5c++.in
status=$?
fi
diff -Naur hdf5-1.10.8.orig/fortran/src/h5fc.in hdf5-1.10.8/fortran/src/h5fc.in
--- hdf5-1.10.8.orig/fortran/src/h5fc.in 2022-04-07 18:23:46.000000000 -0600
+++ hdf5-1.10.8/fortran/src/h5fc.in 2022-04-07 18:24:04.000000000 -0600
@@ -83,11 +83,11 @@
Index: hdf5-1.12.3/fortran/src/h5fc.in
===================================================================
--- hdf5-1.12.3.orig/fortran/src/h5fc.in
+++ hdf5-1.12.3/fortran/src/h5fc.in
@@ -83,11 +83,11 @@ FLINKERBASE="@FC@"
# libraries in $link_args, followed by any external library paths and libraries
# from AM_LDFLAGS, LDFLAGS, AM_LIBS or LIBS carried in from the hdf5 build.
# The order of the flags is intended to give precedence to the user's flags.
@ -88,7 +91,7 @@ diff -Naur hdf5-1.10.8.orig/fortran/src/h5fc.in hdf5-1.10.8/fortran/src/h5fc.in
FC="${HDF5_FC:-$FCBASE}"
FLINKER="${HDF5_FLINKER:-$FLINKERBASE}"
@@ -99,7 +99,8 @@
@@ -99,7 +99,8 @@ LIBS="${HDF5_LIBS:-$LIBSBASE}"
# available library is shared, it will be used by default. The user can
# override either default, although choosing an unavailable library will result
# in link errors.
@ -98,7 +101,7 @@ diff -Naur hdf5-1.10.8.orig/fortran/src/h5fc.in hdf5-1.10.8/fortran/src/h5fc.in
if test "${STATIC_AVAILABLE}" = "yes"; then
USE_SHARED_LIB="${HDF5_USE_SHLIB:-no}"
else
@@ -363,7 +364,7 @@
@@ -363,7 +364,7 @@ if test "x$do_link" = "xyes"; then
# libraries in $link_args, followed by any external library paths and libraries
# from AM_LDFLAGS, LDFLAGS, AM_LIBS or LIBS carried in from the hdf5 build.
# The order of the flags is intended to give precedence to the user's flags.

View File

@ -1,3 +1,41 @@
-------------------------------------------------------------------
Mon May 13 11:41:05 UTC 2024 - Atri Bhattacharya <badshah400@gmail.com>
- Update to version 1.12.3:
* Bumped the minimum required version of Autoconf to 2.71.
* Added new option to build libaec and zlib inline with CMake.
* Changed the error handling for a not found path in the find
plugin process.
* Removed "-commons" linking option to fix a compile error on
MacOS Sonoma that resulted in build failure.
* Doxygen User Guide documentation can be configured and
generated.
- Minor patch rebase for updated version:
* Disable-phdf5-tests.patch
* Fix-error-message-not-the-name-but-the-link-information-is-parsed.patch
* H5O_fsinfo_decode-Make-more-resilient-to-out-of-bounds-read.patch
* Hot-fix-for-CVE-2020-10812.patch
* PPC64LE-Fix-long-double-handling.patch
* Report-error-if-dimensions-of-chunked-storage-in-data-layout-2.patch
* Validate-location-offset-of-the-accumulated-metadata-when-comparing.patch
* When-evicting-driver-info-block-NULL-the-corresponding-entry.patch
* hdf5-1.8.11-abort_unknown_host_config.patch
* hdf5-LD_LIBRARY_PATH.patch
* hdf5-Remove-timestamps-from-binaries.patch
* hdf5-wrappers.patch
- Drop upstreamed patches:
* Remove-duplicate-code.patch
* H5O__pline_decode-Make-more-resilient-to-out-of-bounds-read.patch
* H5O_dtype_decode_helper-Parent-of-enum-needs-to-have-same-size-as-enum-itself.patch
* Pass-compact-chunk-size-info-to-ensure-requested-elements-are-within-bounds.patch
* Make-sure-info-block-for-external-links-has-at-least-3-bytes.patch
* Compound-datatypes-may-not-have-members-of-size-0.patch
* H5IMget_image_info-H5Sget_simple_extent_dims-does-not-exceed-array-size.patch
* Check-for-overflow-when-calculating-on-disk-attribute-data-size-2459.patch
- New BuildRequires: hostname.
- Work around an sed hack in upstream configure file by dropping
"-Werror=return-type" from RPM %optflags.
-------------------------------------------------------------------
Wed Mar 20 03:09:27 UTC 2024 - Atri Bhattacharya <badshah400@gmail.com>

View File

@ -35,7 +35,7 @@
%define use_sz2 0
%define short_ver 1.12
%define vers %{short_ver}.2
%define vers %{short_ver}.3
%define _vers %( echo %{vers} | tr '.' '_' )
%define src_ver %{version}
%define pname hdf5
@ -332,20 +332,13 @@ Patch9: Fix-error-message-not-the-name-but-the-link-information-is-parse
# Imported from Fedora, strip flags from h5cc wrapper
Patch10: hdf5-wrappers.patch
Patch101: H5O_fsinfo_decode-Make-more-resilient-to-out-of-bounds-read.patch
Patch102: H5O__pline_decode-Make-more-resilient-to-out-of-bounds-read.patch
Patch103: H5O_dtype_decode_helper-Parent-of-enum-needs-to-have-same-size-as-enum-itself.patch
Patch104: Report-error-if-dimensions-of-chunked-storage-in-data-layout-2.patch
Patch105: When-evicting-driver-info-block-NULL-the-corresponding-entry.patch
Patch106: Pass-compact-chunk-size-info-to-ensure-requested-elements-are-within-bounds.patch
Patch107: Validate-location-offset-of-the-accumulated-metadata-when-comparing.patch
Patch108: Make-sure-info-block-for-external-links-has-at-least-3-bytes.patch
Patch109: Hot-fix-for-CVE-2020-10812.patch
Patch110: Compound-datatypes-may-not-have-members-of-size-0.patch
Patch111: H5IMget_image_info-H5Sget_simple_extent_dims-does-not-exceed-array-size.patch
Patch112: Check-for-overflow-when-calculating-on-disk-attribute-data-size-2459.patch
Patch113: Remove-duplicate-code.patch
BuildRequires: fdupes
BuildRequires: hostname
%if 0%{?use_sz2}
BuildRequires: libsz2-devel
%endif
@ -583,18 +576,10 @@ library packages.
%patch -P 9 -p1
%patch -P 10 -p1
%patch -P 101 -p1
%patch -P 102 -p1
%patch -P 103 -p1
%patch -P 104 -p1
%patch -P 105 -p1
%patch -P 106 -p1
%patch -P 107 -p1
%patch -P 108 -p1
%patch -P 109 -p1
%patch -P 110 -p1
%patch -P 111 -p1
%patch -P 112 -p1
%patch -P 113 -p1
%if %{without hpc}
# baselibs looks different for different flavors - generate it on the fly
@ -624,7 +609,15 @@ EOF
export CC=gcc
export CXX=g++
export F9X=gfortran
export CFLAGS="%{optflags}"
# Ouch, how ugly! Upstream configure depends on hacking out Werror manually:
# > configure.ac:## Strip out -Werror from CFLAGS since that can cause checks to fail when
# > configure.ac:CFLAGS="`echo $CFLAGS | sed -e 's/-Werror//g'`"
# We need to clear out -Werror=return-type from our optflags otherwise we leave
# a bare '=return-type' hanging in the options passed to GCC by configure
export CFLAGS=`echo "%{optflags}" | sed -e 's/-Werror=return-type //'`
export CXXFLAGS=`echo "%{optflags}" | sed -e 's/-Werror=return-type //'`
export FFLAGS=`echo "%{optflags}" | sed -e 's/-Werror=return-type //'`
export FCFLAGS=`echo "%{optflags}" | sed -e 's/-Werror=return-type //'`
%ifarch %arm
# we want to have useful H5_CFLAGS on arm too
test -e config/linux-gnueabi || cp config/linux-gnu config/linux-gnueabi
@ -863,6 +856,7 @@ export HDF5_Make_Ignore=yes
%{my_bindir}/h5repart
%{my_bindir}/h5stat
%{my_bindir}/h5unjam
%{my_bindir}/h5watch
%files -n %{libname -s %{sonum}}
%doc ACKNOWLEDGMENTS README.md