diff --git a/openssl-3.changes b/openssl-3.changes index 9acbbba..f2bcfd3 100644 --- a/openssl-3.changes +++ b/openssl-3.changes @@ -1,3 +1,63 @@ +------------------------------------------------------------------- +Mon Jul 22 16:42:52 UTC 2024 - Pedro Monreal + +- Build with no-afalgeng [bsc#1226463] + +------------------------------------------------------------------- +Mon Jul 22 08:30:16 UTC 2024 - Pedro Monreal + +- Security fix: [bsc#1227138, CVE-2024-5535] + * SSL_select_next_proto buffer overread + * Add openssl-CVE-2024-5535.patch + +------------------------------------------------------------------- +Wed Jul 17 12:55:39 UTC 2024 - Pedro Monreal + +- Build with enabled sm2 and sm4 support [bsc#1222899] + +------------------------------------------------------------------- +Mon Jul 15 05:52:07 UTC 2024 - Bernhard Wiedemann + +- Add reproducible.patch to fix bsc#1223336 + aes-gcm-avx512.pl: fix non-reproducibility issue + +------------------------------------------------------------------- +Thu Jun 6 15:12:10 UTC 2024 - Peter Simons + +- Apply "openssl-CVE-2024-4741.patch" to fix a use-after-free + security vulnerability. Calling the function SSL_free_buffers() + potentially caused memory to be accessed that was previously + freed in some situations and a malicious attacker could attempt + to engineer a situation where this occurs to facilitate a + denial-of-service attack. 
[CVE-2024-4741, bsc#1225551] + +------------------------------------------------------------------- +Wed May 29 13:30:21 UTC 2024 - Martin Wilck + +- Fix HKDF key derivation (bsc#1225291, gh#openssl/openssl#23448, + gh#openssl/openssl#23456) + * Add openssl-Fix-EVP_PKEY_CTX_add1_hkdf_info-behavior.patch + * Add openssl-Handle-empty-param-in-EVP_PKEY_CTX_add1_hkdf_info.patch + +------------------------------------------------------------------- +Tue May 28 14:17:50 UTC 2024 - Giuliano Belinassi + +- Enable livepatching support (bsc#1223428) + +------------------------------------------------------------------- +Mon May 20 12:24:03 UTC 2024 - Otto Hollmann + +- Security fix: [bsc#1224388, CVE-2024-4603] + * Check DSA parameters for excessive sizes before validating + * Add openssl-CVE-2024-4603.patch + +------------------------------------------------------------------- +Mon May 6 12:11:02 UTC 2024 - Otto Hollmann + +- Security fix: [bsc#1222548, CVE-2024-2511] + * Fix unconstrained session cache growth in TLSv1.3 + * Add openssl-CVE-2024-2511.patch + ------------------------------------------------------------------- Fri Feb 23 11:31:44 UTC 2024 - Pedro Monreal diff --git a/openssl-3.spec b/openssl-3.spec index b66f6cc..ce2316c 100644 --- a/openssl-3.spec +++ b/openssl-3.spec @@ -22,6 +22,10 @@ %define man_suffix 3ssl %global sslengcnf %{ssletcdir}/engines%{sover}.d %global sslengdef %{ssletcdir}/engdef%{sover}.d + +# Enable userspace livepatching. +%define livepatchable 1 + Name: openssl-3 # Don't forget to update the version in the "openssl" meta-package! 
Version: 3.1.4 @@ -83,6 +87,27 @@ Patch25: openssl-CVE-2023-6237.patch Patch26: openssl-3-use-include-directive.patch # PATCH-FIX-UPSTREAM: bsc#1219243 CVE-2024-0727: denial of service via null dereference Patch27: openssl-CVE-2024-0727.patch +# PATCH-FIX-UPSTREAM: bsc#1222548 CVE-2024-2511: Unbounded memory growth with session handling in TLSv1.3 +Patch28: openssl-CVE-2024-2511.patch +# PATCH-FIX-UPSTREAM: bsc#1224388 CVE-2024-4603: excessive time spent checking DSA keys and parameters +Patch29: openssl-CVE-2024-4603.patch +# PATCH-FIX-UPSTREAM: bsc#1225291 NVMe/TCP TLS connection fails due to handshake failure +Patch30: openssl-Fix-EVP_PKEY_CTX_add1_hkdf_info-behavior.patch +Patch31: openssl-Handle-empty-param-in-EVP_PKEY_CTX_add1_hkdf_info.patch +# PATCH-FIX-UPSTREAM bsc#1225551 CVE-2024-4741: use After Free with SSL_free_buffers +Patch32: openssl-CVE-2024-4741.patch +# PATCH-FIX-UPSTREAM: bsc#1223336 aes-gcm-avx512.pl: fix non-reproducibility issue +Patch33: reproducible.patch +# PATCH-FIX-UPSTREAM: bsc#1227138 CVE-2024-5535: SSL_select_next_proto buffer overread +Patch34: openssl-CVE-2024-5535.patch +BuildRequires: pkgconfig +%if 0%{?sle_version} >= 150400 || 0%{?suse_version} >= 1550 +BuildRequires: ulp-macros +%else +# Define ulp-macros macros as empty +%define cflags_livepatching "" +%define pack_ipa_dumps echo "Livepatching is disabled in this build" +%endif BuildRequires: pkgconfig BuildRequires: pkgconfig(zlib) Requires: libopenssl3 = %{version}-%{release} @@ -174,7 +199,8 @@ export MACHINE=armv6l %endif ./Configure \ - no-mdc2 no-ec2m no-sm2 no-sm4 \ + no-mdc2 no-ec2m \ + no-afalgeng \ enable-rfc3779 enable-camellia enable-seed \ %ifarch x86_64 aarch64 ppc64le enable-ec_nistp_64_gcc_128 \ @@ -186,6 +212,7 @@ export MACHINE=armv6l --libdir=%{_lib} \ --openssldir=%{ssletcdir} \ %{optflags} \ + %{cflags_livepatching} \ -Wa,--noexecstack \ -Wl,-z,relro,-z,now \ -fno-common \ @@ -230,7 +257,6 @@ mv providers/fips.so.mac providers/fips.so 
LD_LIBRARY_PATH="$PWD" make test -j16 # Run the tests also in FIPS mode -OPENSSL_FORCE_FIPS_MODE=1 LD_LIBRARY_PATH="$PWD" make test -j16 || : # OPENSSL_FORCE_FIPS_MODE=1 LD_LIBRARY_PATH="$PWD" make TESTS='-test_evp_fetch_prov -test_tsa' test -j16 || : # Add generation of HMAC checksum of the final stripped library @@ -251,6 +277,7 @@ gcc -o showciphers %{optflags} -I%{buildroot}%{_includedir} %{SOURCE5} -L%{build LD_LIBRARY_PATH=%{buildroot}%{_libdir} ./showciphers %install +%{pack_ipa_dumps} %make_install %{?_smp_mflags} MANSUFFIX=%{man_suffix} rename so.%{sover} so.%{version} %{buildroot}%{_libdir}/*.so.%{sover} diff --git a/openssl-CVE-2024-2511.patch b/openssl-CVE-2024-2511.patch new file mode 100644 index 0000000..0ffdd7f --- /dev/null +++ b/openssl-CVE-2024-2511.patch @@ -0,0 +1,116 @@ +From 7e4d731b1c07201ad9374c1cd9ac5263bdf35bce Mon Sep 17 00:00:00 2001 +From: Matt Caswell +Date: Tue, 5 Mar 2024 15:43:53 +0000 +Subject: [PATCH] Fix unconstrained session cache growth in TLSv1.3 + +In TLSv1.3 we create a new session object for each ticket that we send. +We do this by duplicating the original session. If SSL_OP_NO_TICKET is in +use then the new session will be added to the session cache. However, if +early data is not in use (and therefore anti-replay protection is being +used), then multiple threads could be resuming from the same session +simultaneously. If this happens and a problem occurs on one of the threads, +then the original session object could be marked as not_resumable. When we +duplicate the session object this not_resumable status gets copied into the +new session object. The new session object is then added to the session +cache even though it is not_resumable. + +Subsequently, another bug means that the session_id_length is set to 0 for +sessions that are marked as not_resumable - even though that session is +still in the cache. Once this happens the session can never be removed from +the cache. 
When that object gets to be the session cache tail object the +cache never shrinks again and grows indefinitely. + +CVE-2024-2511 + +Reviewed-by: Neil Horman +Reviewed-by: Tomas Mraz +(Merged from https://github.com/openssl/openssl/pull/24044) +--- + ssl/ssl_lib.c | 5 +++-- + ssl/ssl_sess.c | 28 ++++++++++++++++++++++------ + ssl/statem/statem_srvr.c | 5 ++--- + 3 files changed, 27 insertions(+), 11 deletions(-) + +diff --git a/ssl/ssl_lib.c b/ssl/ssl_lib.c +index b5cc4af2f0302..e747b7f90aa71 100644 +--- a/ssl/ssl_lib.c ++++ b/ssl/ssl_lib.c +@@ -3737,9 +3737,10 @@ void ssl_update_cache(SSL *s, int mode) + + /* + * If the session_id_length is 0, we are not supposed to cache it, and it +- * would be rather hard to do anyway :-) ++ * would be rather hard to do anyway :-). Also if the session has already ++ * been marked as not_resumable we should not cache it for later reuse. + */ +- if (s->session->session_id_length == 0) ++ if (s->session->session_id_length == 0 || s->session->not_resumable) + return; + + /* +diff --git a/ssl/ssl_sess.c b/ssl/ssl_sess.c +index bf84e792251b8..241cf43c46296 100644 +--- a/ssl/ssl_sess.c ++++ b/ssl/ssl_sess.c +@@ -154,16 +154,11 @@ SSL_SESSION *SSL_SESSION_new(void) + return ss; + } + +-SSL_SESSION *SSL_SESSION_dup(const SSL_SESSION *src) +-{ +- return ssl_session_dup(src, 1); +-} +- + /* + * Create a new SSL_SESSION and duplicate the contents of |src| into it. If + * ticket == 0 then no ticket information is duplicated, otherwise it is. + */ +-SSL_SESSION *ssl_session_dup(const SSL_SESSION *src, int ticket) ++static SSL_SESSION *ssl_session_dup_intern(const SSL_SESSION *src, int ticket) + { + SSL_SESSION *dest; + +@@ -287,6 +282,27 @@ SSL_SESSION *ssl_session_dup(const SSL_SESSION *src, int ticket) + return NULL; + } + ++SSL_SESSION *SSL_SESSION_dup(const SSL_SESSION *src) ++{ ++ return ssl_session_dup_intern(src, 1); ++} ++ ++/* ++ * Used internally when duplicating a session which might be already shared. 
++ * We will have resumed the original session. Subsequently we might have marked ++ * it as non-resumable (e.g. in another thread) - but this copy should be ok to ++ * resume from. ++ */ ++SSL_SESSION *ssl_session_dup(const SSL_SESSION *src, int ticket) ++{ ++ SSL_SESSION *sess = ssl_session_dup_intern(src, ticket); ++ ++ if (sess != NULL) ++ sess->not_resumable = 0; ++ ++ return sess; ++} ++ + const unsigned char *SSL_SESSION_get_id(const SSL_SESSION *s, unsigned int *len) + { + if (len) +diff --git a/ssl/statem/statem_srvr.c b/ssl/statem/statem_srvr.c +index 5d59d53563ed8..8e493176f658e 100644 +--- a/ssl/statem/statem_srvr.c ++++ b/ssl/statem/statem_srvr.c +@@ -2338,9 +2338,8 @@ int tls_construct_server_hello(SSL *s, WPACKET *pkt) + * so the following won't overwrite an ID that we're supposed + * to send back. + */ +- if (s->session->not_resumable || +- (!(s->ctx->session_cache_mode & SSL_SESS_CACHE_SERVER) +- && !s->hit)) ++ if (!(s->ctx->session_cache_mode & SSL_SESS_CACHE_SERVER) ++ && !s->hit) + s->session->session_id_length = 0; + + if (usetls13) { diff --git a/openssl-CVE-2024-4603.patch b/openssl-CVE-2024-4603.patch new file mode 100644 index 0000000..23fa5d3 --- /dev/null +++ b/openssl-CVE-2024-4603.patch @@ -0,0 +1,199 @@ +From 9c39b3858091c152f52513c066ff2c5a47969f0d Mon Sep 17 00:00:00 2001 +From: Tomas Mraz +Date: Wed, 8 May 2024 15:23:45 +0200 +Subject: [PATCH] Check DSA parameters for excessive sizes before validating + +This avoids overly long computation of various validation +checks. 
+ +Fixes CVE-2024-4603 + +Reviewed-by: Paul Dale +Reviewed-by: Matt Caswell +Reviewed-by: Neil Horman +Reviewed-by: Shane Lontis +(Merged from https://github.com/openssl/openssl/pull/24346) + +(cherry picked from commit 85ccbab216da245cf9a6503dd327072f21950d9b) +--- + CHANGES.md | 17 ++++++ + crypto/dsa/dsa_check.c | 44 ++++++++++++-- + .../invalid/p10240_q256_too_big.pem | 57 +++++++++++++++++++ + 3 files changed, 114 insertions(+), 4 deletions(-) + create mode 100644 test/recipes/15-test_dsaparam_data/invalid/p10240_q256_too_big.pem + +Index: openssl-3.1.4/crypto/dsa/dsa_check.c +=================================================================== +--- openssl-3.1.4.orig/crypto/dsa/dsa_check.c ++++ openssl-3.1.4/crypto/dsa/dsa_check.c +@@ -19,8 +19,34 @@ + #include "dsa_local.h" + #include "crypto/dsa.h" + ++static int dsa_precheck_params(const DSA *dsa, int *ret) ++{ ++ if (dsa->params.p == NULL || dsa->params.q == NULL) { ++ ERR_raise(ERR_LIB_DSA, DSA_R_BAD_FFC_PARAMETERS); ++ *ret = FFC_CHECK_INVALID_PQ; ++ return 0; ++ } ++ ++ if (BN_num_bits(dsa->params.p) > OPENSSL_DSA_MAX_MODULUS_BITS) { ++ ERR_raise(ERR_LIB_DSA, DSA_R_MODULUS_TOO_LARGE); ++ *ret = FFC_CHECK_INVALID_PQ; ++ return 0; ++ } ++ ++ if (BN_num_bits(dsa->params.q) >= BN_num_bits(dsa->params.p)) { ++ ERR_raise(ERR_LIB_DSA, DSA_R_BAD_Q_VALUE); ++ *ret = FFC_CHECK_INVALID_PQ; ++ return 0; ++ } ++ ++ return 1; ++} ++ + int ossl_dsa_check_params(const DSA *dsa, int checktype, int *ret) + { ++ if (!dsa_precheck_params(dsa, ret)) ++ return 0; ++ + if (checktype == OSSL_KEYMGMT_VALIDATE_QUICK_CHECK) + return ossl_ffc_params_simple_validate(dsa->libctx, &dsa->params, + FFC_PARAM_TYPE_DSA, ret); +@@ -39,6 +65,9 @@ int ossl_dsa_check_params(const DSA *dsa + */ + int ossl_dsa_check_pub_key(const DSA *dsa, const BIGNUM *pub_key, int *ret) + { ++ if (!dsa_precheck_params(dsa, ret)) ++ return 0; ++ + return ossl_ffc_validate_public_key(&dsa->params, pub_key, ret) + && *ret == 0; + } +@@ -50,6 +79,9 @@ int 
ossl_dsa_check_pub_key(const DSA *ds + */ + int ossl_dsa_check_pub_key_partial(const DSA *dsa, const BIGNUM *pub_key, int *ret) + { ++ if (!dsa_precheck_params(dsa, ret)) ++ return 0; ++ + return ossl_ffc_validate_public_key_partial(&dsa->params, pub_key, ret) + && *ret == 0; + } +@@ -58,8 +90,10 @@ int ossl_dsa_check_priv_key(const DSA *d + { + *ret = 0; + +- return (dsa->params.q != NULL +- && ossl_ffc_validate_private_key(dsa->params.q, priv_key, ret)); ++ if (!dsa_precheck_params(dsa, ret)) ++ return 0; ++ ++ return ossl_ffc_validate_private_key(dsa->params.q, priv_key, ret); + } + + /* +@@ -72,8 +106,10 @@ int ossl_dsa_check_pairwise(const DSA *d + BN_CTX *ctx = NULL; + BIGNUM *pub_key = NULL; + +- if (dsa->params.p == NULL +- || dsa->params.g == NULL ++ if (!dsa_precheck_params(dsa, &ret)) ++ return 0; ++ ++ if (dsa->params.g == NULL + || dsa->priv_key == NULL + || dsa->pub_key == NULL) + return 0; +Index: openssl-3.1.4/test/recipes/15-test_dsaparam_data/invalid/p10240_q256_too_big.pem +=================================================================== +--- /dev/null ++++ openssl-3.1.4/test/recipes/15-test_dsaparam_data/invalid/p10240_q256_too_big.pem +@@ -0,0 +1,57 @@ ++-----BEGIN DSA PARAMETERS----- ++MIIKLAKCBQEAym47LzPFZdbz16WvjczLKuzLtsP8yRk/exxL4bBthJhP1qOwctja ++p1586SF7gDxCMn7yWVEYdfRbFefGoq0gj1XOE917XqlbnkmZhMgxut2KbNJo/xil ++XNFUjGvKs3F413U9rAodC8f07cWHP1iTcWL+vPe6u2yilKWYYfnLWHQH+Z6aPrrF ++x/R08LI6DZ6nEsIo+hxaQnEtx+iqNTJC6Q1RIjWDqxQkFVTkJ0Y7miRDXmRdneWk ++oLrMZRpaXr5l5tSjEghh1pBgJcdyOv0lh4dlDy/alAiqE2Qlb667yHl6A9dDPlpW ++dAntpffy4LwOxfbuEhISvKjjQoBwIvYE4TBPqL0Q6bC6HgQ4+tqd9b44pQjdIQjb ++Xcjc6azheITSnPEex3OdKtKoQeRq01qCeLBpMXu1c+CTf4ApKArZvT3vZSg0hM1O ++pR71bRZrEEegDj0LH2HCgI5W6H3blOS9A0kUTddCoQXr2lsVdiPtRbPKH1gcd9FQ ++P8cGrvbakpTiC0dCczOMDaCteM1QNILlkM7ZoV6VghsKvDnFPxFsiIr5GgjasXP5 ++hhbn3g7sDoq1LiTEo+IKQY28pBWx7etSOSRuXW/spnvCkivZla7lSEGljoy9QlQ2 ++UZmsEQI9G3YyzgpxHvKZBK1CiZVTywdYKTZ4TYCxvqzhYhjv2bqbpjI12HRFLojB 
++koyEmMSp53lldCzp158PrIanqSp2rksMR8SmmCL3FwfAp2OjqFMEglG9DT8x0WaN ++TLSkjGC6t2csMte7WyU1ekNoFDKfMjDSAz0+xIx21DEmZtYqFOg1DNPK1xYLS0pl ++RSMRRkJVN2mk/G7/1oxlB8Wb9wgi3GKUqqCYT11SnBjzq0NdoJ3E4GMedp5Lx3AZ ++4mFuRPUd4iV86tE0XDSHSFE7Y3ZkrOjD7Q/26/L53L/UH5z4HW6CHP5os7QERJjg ++c1S3x87wXWo9QXbB9b2xmf+c+aWwAAr1cviw38tru58jF3/IGyduj9H8claKQqBG ++cIOUF4aNe1hK2K3ArAOApUxr4KE+tCvrltRfiTmVFip0g9Jt1CPY3Zu7Bd4Z2ZkE ++DtSztpwa49HrWF5E9xpquvBL2U8jQ68E7Xd8Wp4orI/TIChriamBmdkgRz3H2LvN ++Ozb6+hsnEGrz3sp2RVAToSqA9ysa6nHZdfufPNtMEbQdO/k1ehmGRb0ljBRsO6b2 ++rsG2eYuC8tg8eCrIkua0TGRI7g6a4K32AJdzaX6NsISaaIW+OYJuoDSscvD3oOg8 ++PPEhU+zM7xJskTA+jxvPlikKx8V7MNHOCQECldJlUBwzJvqp40JvwfnDsF+8VYwd ++UaiieR3pzMzyTjpReXRmZbnRPusRcsVzxb2OhB79wmuy4UPjjQBX+7eD0rs8xxvW ++5a5q1Cjq4AvbwmmcA/wDrHDOjcbD/zodad2O1QtBWa/R4xyWea4zKsflgACE1zY9 ++wW2br7+YQFekcrXkkkEzgxd6zxv8KVEDpXRZjmAM1cI5LvkoN64To4GedN8Qe/G7 ++R9SZh9gnS17PTP64hK+aYqhFafMdu87q/+qLfxaSux727qE5hiW01u4nnWhACf9s ++xuOozowKqxZxkolMIyZv6Lddwy1Zv5qjCyd0DvM/1skpXWkb9kfabYC+OhjsjVhs ++0Ktfs6a5B3eixiw5x94hhIcTEcS4hmvhGUL72FiTca6ZeSERTKmNBy8CIQC9/ZUN ++uU/V5JTcnYyUGHzm7+XcZBjyGBagBj9rCmW3SQKCBQAJ/k9rb39f1cO+/3XDEMjy ++9bIEXSuS48g5RAc1UGd5nrrBQwuDxGWFyz0yvAY7LgyidZuJS21+MAp9EY7AOMmx ++TDttifNaBJYt4GZ8of166PcqTKkHQwq5uBpxeSDv/ZE8YbYfaCtLTcUC8KlO+l36 ++gjJHSkdkflSsGy1yObSNDQDfVAAwQs//TjDMnuEtvlNXZllsTvFFBceXVETn10K2 ++ZMmdSIJNfLnjReUKEN6PfeGqv7F4xoyGwUybEfRE4u5RmXrqCODaIjY3SNMrOq8B ++R3Ata/cCozsM1jIdIW2z+OybDJH+BYsYm2nkSZQjZS6javTYClLrntEKG/hAQwL8 ++F16YLOQXpHhgiAaWnTZzANtLppB2+5qCVy5ElzKongOwT8JTjTFXOaRnqe/ngm9W ++SSbrxfDaoWUOyK9XD8Cydzpv3n4Y8nWNGayi7/yAFCU36Ri040ufgv/TZLuKacnl +++3ga3ZUpRlSigzx0kb1+KjTSWeQ8vE/psdWjvBukVEbzdUauMLyRLo/6znSVvvPX ++UGhviThE5uhrsUg+wEPFINriSHfF7JDKVhDcJnLBdaXvfN52pkF/naLBF5Rt3Gvq ++fjCxjx0Sy9Lag1hDN4dor7dzuO7wmwOS01DJW1PtNLuuH0Bbqh1kYSaQkmyXBZWX ++qo8K3nkoDM0niOtJJubOhTNrGmSaZpNXkK3Mcy9rBbdvEs5O0Jmqaax/eOdU0Yot ++B3lX+3ddOseT2ZEFjzObqTtkWuFBeBxuYNcRTsu3qMdIBsEb8URQdsTtjoIja2fK 
++hreVgjK36GW70KXEl8V/vq5qjQulmqkBEjmilcDuiREKqQuyeagUOnhQaBplqVco ++4xznh5DMBMRbpGb5lHxKv4cPNi+uNAJ5i98zWUM1JRt6aXnRCuWcll1z8fRZ+5kD ++vK9FaZU3VRMK/eknEG49cGr8OuJ6ZRSaC+tKwV1y+amkSZpKPWnk2bUnQI3ApJv3 ++k1e1EToeECpMUkLMDgNbpKBoz4nqMEvAAlYgw9xKNbLlQlahqTVEAmaJHh4yDMDy ++i7IZ9Wrn47IGoR7s3cvhDHUpRPeW4nsmgzj+tf5EAxemI61STZJTTWo0iaPGJxct ++9nhOOhw1I38Mvm4vkAbFH7YJ0B6QrjjYL2MbOTp5JiIh4vdOeWwNo9/y4ffyaN5+ ++ADpxuuIAmcbdr6GPOhkOFFixRJa0B2eP1i032HESlLs8RB9oYtdTXdXQotnIgJGd ++Y8tSKOa1zjzeLHn3AVpRZTUW++/BxmApV3GKIeG8fsUjg/df0QRrBcdC/1uccdaG ++KKlAOwlywVn5jUlwHkTmDiTM9w5AqVVGHZ2b+4ZgQW8jnPKN0SrKf6U555D+zp7E ++x4uXoE8ojN9y8m8UKf0cTLnujH2XgZorjPfuMOt5VZEhQFMS2QaljSeni5CJJ8gk ++XtztNqfBlAtWR4V5iAHeQOfIB2YaOy8GESda89tyKraKeaez41VblpTVHTeq9IIF ++YB4cQA2PfuNaGVRGLMAgT3Dvl+mxxxeJyxnGAiUcETU/jJJt9QombiuszBlYGQ5d ++ELOSm/eQSRARV9zNSt5jaQlMSjMBqenIEM09BzYqa7jDwqoztFxNdO8bcuQPuKwa ++4z3bBZ1yYm63WFdNbQqqGEwc0OYmqg1raJ0zltgHyjFyw8IGu4g/wETs+nVQcH7D ++vKuje86bePD6kD/LH3wmkA== ++-----END DSA PARAMETERS----- +Index: openssl-3.1.4/CHANGES.md +=================================================================== +--- openssl-3.1.4.orig/CHANGES.md ++++ openssl-3.1.4/CHANGES.md +@@ -22,6 +22,23 @@ OpenSSL Releases + OpenSSL 3.1 + ----------- + ++ * Fixed an issue where checking excessively long DSA keys or parameters may ++ be very slow. ++ ++ Applications that use the functions EVP_PKEY_param_check() or ++ EVP_PKEY_public_check() to check a DSA public key or DSA parameters may ++ experience long delays. Where the key or parameters that are being checked ++ have been obtained from an untrusted source this may lead to a Denial of ++ Service. ++ ++ To resolve this issue DSA keys larger than OPENSSL_DSA_MAX_MODULUS_BITS ++ will now fail the check immediately with a DSA_R_MODULUS_TOO_LARGE error ++ reason. 
++ ++ ([CVE-2024-4603]) ++ ++ *Tomáš Mráz* ++ + ### Changes between 3.1.3 and 3.1.4 [24 Oct 2023] + + * Fix incorrect key and IV resizing issues when calling EVP_EncryptInit_ex2(), diff --git a/openssl-CVE-2024-4741.patch b/openssl-CVE-2024-4741.patch new file mode 100644 index 0000000..2e87ae8 --- /dev/null +++ b/openssl-CVE-2024-4741.patch @@ -0,0 +1,28 @@ +@@ -, +, @@ +--- + ssl/record/methods/tls_common.c | 8 ++++++++ + 1 file changed, 8 insertions(+) +--- openssl-3.0.8/ssl/record/ssl3_buffer.c ++++ openssl-3.0.8/ssl/record/ssl3_buffer.c +@@ -186,5 +186,7 @@ int ssl3_release_read_buffer(SSL *s) + OPENSSL_cleanse(b->buf, b->len); + OPENSSL_free(b->buf); + b->buf = NULL; ++ s->rlayer.packet = NULL; ++ s->rlayer.packet_length = 0; + return 1; + } +--- openssl-3.0.8/ssl/record/rec_layer_s3.c ++++ openssl-3.0.8/ssl/record/rec_layer_s3.c +@@ -238,6 +238,11 @@ int ssl3_read_n(SSL *s, size_t n, size_t + s->rlayer.packet_length = 0; + /* ... now we can act as if 'extend' was set */ + } ++ if (!ossl_assert(s->rlayer.packet != NULL)) { ++ /* does not happen */ ++ SSLfatal(s, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR); ++ return -1; ++ } + + len = s->rlayer.packet_length; + pkt = rb->buf + align; diff --git a/openssl-CVE-2024-5535.patch b/openssl-CVE-2024-5535.patch new file mode 100644 index 0000000..b8ee00a --- /dev/null +++ b/openssl-CVE-2024-5535.patch @@ -0,0 +1,326 @@ +From 4ada436a1946cbb24db5ab4ca082b69c1bc10f37 Mon Sep 17 00:00:00 2001 +From: Matt Caswell +Date: Fri, 31 May 2024 11:14:33 +0100 +Subject: [PATCH] Fix SSL_select_next_proto + +Ensure that the provided client list is non-NULL and starts with a valid +entry. When called from the ALPN callback the client list should already +have been validated by OpenSSL so this should not cause a problem. When +called from the NPN callback the client list is locally configured and +will not have already been validated. Therefore SSL_select_next_proto +should not assume that it is correctly formatted. 
+ +We implement stricter checking of the client protocol list. We also do the +same for the server list while we are about it. + +CVE-2024-5535 + +Reviewed-by: Neil Horman +Reviewed-by: Tomas Mraz +(Merged from https://github.com/openssl/openssl/pull/24718) +--- + ssl/ssl_lib.c | 63 ++++++++++++++++++++++++++++++++------------------- + 1 file changed, 40 insertions(+), 23 deletions(-) + +diff --git a/ssl/ssl_lib.c b/ssl/ssl_lib.c +index 5493d9b9c7..f218dcf1db 100644 +--- a/ssl/ssl_lib.c ++++ b/ssl/ssl_lib.c +@@ -2953,37 +2953,54 @@ int SSL_select_next_proto(unsigned char **out, unsigned char *outlen, + unsigned int server_len, + const unsigned char *client, unsigned int client_len) + { +- unsigned int i, j; +- const unsigned char *result; +- int status = OPENSSL_NPN_UNSUPPORTED; ++ PACKET cpkt, csubpkt, spkt, ssubpkt; ++ ++ if (!PACKET_buf_init(&cpkt, client, client_len) ++ || !PACKET_get_length_prefixed_1(&cpkt, &csubpkt) ++ || PACKET_remaining(&csubpkt) == 0) { ++ *out = NULL; ++ *outlen = 0; ++ return OPENSSL_NPN_NO_OVERLAP; ++ } ++ ++ /* ++ * Set the default opportunistic protocol. Will be overwritten if we find ++ * a match. ++ */ ++ *out = (unsigned char *)PACKET_data(&csubpkt); ++ *outlen = (unsigned char)PACKET_remaining(&csubpkt); + + /* + * For each protocol in server preference order, see if we support it. 
+ */ +- for (i = 0; i < server_len;) { +- for (j = 0; j < client_len;) { +- if (server[i] == client[j] && +- memcmp(&server[i + 1], &client[j + 1], server[i]) == 0) { +- /* We found a match */ +- result = &server[i]; +- status = OPENSSL_NPN_NEGOTIATED; +- goto found; ++ if (PACKET_buf_init(&spkt, server, server_len)) { ++ while (PACKET_get_length_prefixed_1(&spkt, &ssubpkt)) { ++ if (PACKET_remaining(&ssubpkt) == 0) ++ continue; /* Invalid - ignore it */ ++ if (PACKET_buf_init(&cpkt, client, client_len)) { ++ while (PACKET_get_length_prefixed_1(&cpkt, &csubpkt)) { ++ if (PACKET_equal(&csubpkt, PACKET_data(&ssubpkt), ++ PACKET_remaining(&ssubpkt))) { ++ /* We found a match */ ++ *out = (unsigned char *)PACKET_data(&ssubpkt); ++ *outlen = (unsigned char)PACKET_remaining(&ssubpkt); ++ return OPENSSL_NPN_NEGOTIATED; ++ } ++ } ++ /* Ignore spurious trailing bytes in the client list */ ++ } else { ++ /* This should never happen */ ++ return OPENSSL_NPN_NO_OVERLAP; + } +- j += client[j]; +- j++; + } +- i += server[i]; +- i++; ++ /* Ignore spurious trailing bytes in the server list */ + } + +- /* There's no overlap between our protocols and the server's list. */ +- result = client; +- status = OPENSSL_NPN_NO_OVERLAP; +- +- found: +- *out = (unsigned char *)result + 1; +- *outlen = result[0]; +- return status; ++ /* ++ * There's no overlap between our protocols and the server's list. We use ++ * the default opportunistic protocol selected earlier ++ */ ++ return OPENSSL_NPN_NO_OVERLAP; + } + + #ifndef OPENSSL_NO_NEXTPROTONEG +-- +2.45.2 + +From 4279c89a726025c758db3dafb263b17e52211304 Mon Sep 17 00:00:00 2001 +From: Matt Caswell +Date: Fri, 31 May 2024 11:18:27 +0100 +Subject: [PATCH] More correctly handle a selected_len of 0 when + processing NPN + +In the case where the NPN callback returns with SSL_TLEXT_ERR_OK, but +the selected_len is 0 we should fail. 
Previously this would fail with an +internal_error alert because calling OPENSSL_malloc(selected_len) will +return NULL when selected_len is 0. We make this error detection more +explicit and return a handshake failure alert. + +Follow on from CVE-2024-5535 + +Reviewed-by: Neil Horman +Reviewed-by: Tomas Mraz +(Merged from https://github.com/openssl/openssl/pull/24718) +--- + ssl/statem/extensions_clnt.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/ssl/statem/extensions_clnt.c b/ssl/statem/extensions_clnt.c +index 842be0722b..a07dc62e9a 100644 +--- a/ssl/statem/extensions_clnt.c ++++ b/ssl/statem/extensions_clnt.c +@@ -1536,7 +1536,8 @@ int tls_parse_stoc_npn(SSL *s, PACKET *pkt, unsigned int context, X509 *x, + PACKET_data(pkt), + PACKET_remaining(pkt), + s->ctx->ext.npn_select_cb_arg) != +- SSL_TLSEXT_ERR_OK) { ++ SSL_TLSEXT_ERR_OK ++ || selected_len == 0) { + SSLfatal(s, SSL_AD_HANDSHAKE_FAILURE, SSL_R_BAD_EXTENSION); + return 0; + } +-- +2.45.2 + +From 889ed19ba25abebd2690997acd6d4791cbe5c493 Mon Sep 17 00:00:00 2001 +From: Matt Caswell +Date: Fri, 31 May 2024 11:46:38 +0100 +Subject: [PATCH] Clarify the SSL_select_next_proto() documentation + +We clarify the input preconditions and the expected behaviour in the event +of no overlap. 
+ +Follow on from CVE-2024-5535 + +Reviewed-by: Neil Horman +Reviewed-by: Tomas Mraz +(Merged from https://github.com/openssl/openssl/pull/24718) +--- + doc/man3/SSL_CTX_set_alpn_select_cb.pod | 26 +++++++++++++++++-------- + 1 file changed, 18 insertions(+), 8 deletions(-) + +diff --git a/doc/man3/SSL_CTX_set_alpn_select_cb.pod b/doc/man3/SSL_CTX_set_alpn_select_cb.pod +index 102e657851..a29557dd91 100644 +--- a/doc/man3/SSL_CTX_set_alpn_select_cb.pod ++++ b/doc/man3/SSL_CTX_set_alpn_select_cb.pod +@@ -52,7 +52,8 @@ SSL_select_next_proto, SSL_get0_alpn_selected, SSL_get0_next_proto_negotiated + SSL_CTX_set_alpn_protos() and SSL_set_alpn_protos() are used by the client to + set the list of protocols available to be negotiated. The B must be in + protocol-list format, described below. The length of B is specified in +-B. ++B. Setting B to 0 clears any existing list of ALPN ++protocols and no ALPN extension will be sent to the server. + + SSL_CTX_set_alpn_select_cb() sets the application callback B used by a + server to select which protocol to use for the incoming connection. When B +@@ -73,9 +74,16 @@ B and B, B must be in the protocol-list format + described below. The first item in the B, B list that + matches an item in the B, B list is selected, and returned + in B, B. The B value will point into either B or +-B, so it should be copied immediately. If no match is found, the first +-item in B, B is returned in B, B. This +-function can also be used in the NPN callback. ++B, so it should be copied immediately. The client list must include at ++least one valid (nonempty) protocol entry in the list. ++ ++The SSL_select_next_proto() helper function can be useful from either the ALPN ++callback or the NPN callback (described below). If no match is found, the first ++item in B, B is returned in B, B and ++B is returned. This can be useful when implementating ++the NPN callback. 
In the ALPN case, the value returned in B and B ++must be ignored if B has been returned from ++SSL_select_next_proto(). + + SSL_CTX_set_next_proto_select_cb() sets a callback B that is called when a + client needs to select a protocol from the server's provided list, and a +@@ -85,9 +93,10 @@ must be set to point to the selected protocol (which may be within B). + The length of the protocol name must be written into B. The + server's advertised protocols are provided in B and B. The + callback can assume that B is syntactically valid. The client must +-select a protocol. It is fatal to the connection if this callback returns +-a value other than B. The B parameter is the pointer +-set via SSL_CTX_set_next_proto_select_cb(). ++select a protocol (although it may be an empty, zero length protocol). It is ++fatal to the connection if this callback returns a value other than ++B or if the zero length protocol is selected. The B ++parameter is the pointer set via SSL_CTX_set_next_proto_select_cb(). + + SSL_CTX_set_next_protos_advertised_cb() sets a callback B that is called + when a TLS server needs a list of supported protocols for Next Protocol +@@ -149,7 +158,8 @@ A match was found and is returned in B, B. + =item OPENSSL_NPN_NO_OVERLAP + + No match was found. The first item in B, B is returned in +-B, B. ++B, B (or B and 0 in the case where the first entry in ++B is invalid). + + =back + +-- +2.45.2 + +From 087501b4f572825e27ca8cc2c5874fcf6fd47cf7 Mon Sep 17 00:00:00 2001 +From: Matt Caswell +Date: Fri, 21 Jun 2024 10:41:55 +0100 +Subject: [PATCH] Correct return values for + tls_construct_stoc_next_proto_neg + +Return EXT_RETURN_NOT_SENT in the event that we don't send the extension, +rather than EXT_RETURN_SENT. This actually makes no difference at all to +the current control flow since this return value is ignored in this case +anyway. But lets make it correct anyway. 
+ +Follow on from CVE-2024-5535 + +Reviewed-by: Neil Horman +Reviewed-by: Tomas Mraz +(Merged from https://github.com/openssl/openssl/pull/24718) +--- + ssl/statem/extensions_srvr.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/ssl/statem/extensions_srvr.c b/ssl/statem/extensions_srvr.c +index 4ea085e1a1..2da880450f 100644 +--- a/ssl/statem/extensions_srvr.c ++++ b/ssl/statem/extensions_srvr.c +@@ -1476,9 +1476,10 @@ EXT_RETURN tls_construct_stoc_next_proto_neg(SSL *s, WPACKET *pkt, + return EXT_RETURN_FAIL; + } + s->s3.npn_seen = 1; ++ return EXT_RETURN_SENT; + } + +- return EXT_RETURN_SENT; ++ return EXT_RETURN_NOT_SENT; + } + #endif + +-- +2.45.2 + +From 017e54183b95617825fb9316d618c154a34c634e Mon Sep 17 00:00:00 2001 +From: Matt Caswell +Date: Fri, 21 Jun 2024 11:51:54 +0100 +Subject: [PATCH] Add ALPN validation in the client + +The ALPN protocol selected by the server must be one that we originally +advertised. We should verify that it is. + +Follow on from CVE-2024-5535 + +Reviewed-by: Neil Horman +Reviewed-by: Tomas Mraz +(Merged from https://github.com/openssl/openssl/pull/24718) +--- + ssl/statem/extensions_clnt.c | 24 ++++++++++++++++++++++++ + 1 file changed, 24 insertions(+) + +diff --git a/ssl/statem/extensions_clnt.c b/ssl/statem/extensions_clnt.c +index a07dc62e9a..b21ccf9273 100644 +--- a/ssl/statem/extensions_clnt.c ++++ b/ssl/statem/extensions_clnt.c +@@ -1566,6 +1566,8 @@ int tls_parse_stoc_alpn(SSL *s, PACKET *pkt, unsigned int context, X509 *x, + size_t chainidx) + { + size_t len; ++ PACKET confpkt, protpkt; ++ int valid = 0; + + /* We must have requested it. 
*/ + if (!s->s3.alpn_sent) { +@@ -1584,6 +1586,28 @@ int tls_parse_stoc_alpn(SSL *s, PACKET *pkt, unsigned int context, X509 *x, + SSLfatal(s, SSL_AD_DECODE_ERROR, SSL_R_BAD_EXTENSION); + return 0; + } ++ ++ /* It must be a protocol that we sent */ ++ if (!PACKET_buf_init(&confpkt, s->ext.alpn, s->ext.alpn_len)) { ++ SSLfatal(s, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR); ++ return 0; ++ } ++ while (PACKET_get_length_prefixed_1(&confpkt, &protpkt)) { ++ if (PACKET_remaining(&protpkt) != len) ++ continue; ++ if (memcmp(PACKET_data(pkt), PACKET_data(&protpkt), len) == 0) { ++ /* Valid protocol found */ ++ valid = 1; ++ break; ++ } ++ } ++ ++ if (!valid) { ++ /* The protocol sent from the server does not match one we advertised */ ++ SSLfatal(s, SSL_AD_DECODE_ERROR, SSL_R_BAD_EXTENSION); ++ return 0; ++ } ++ + OPENSSL_free(s->s3.alpn_selected); + s->s3.alpn_selected = OPENSSL_malloc(len); + if (s->s3.alpn_selected == NULL) { +-- +2.45.2 + diff --git a/openssl-Fix-EVP_PKEY_CTX_add1_hkdf_info-behavior.patch b/openssl-Fix-EVP_PKEY_CTX_add1_hkdf_info-behavior.patch new file mode 100644 index 0000000..e79c626 --- /dev/null +++ b/openssl-Fix-EVP_PKEY_CTX_add1_hkdf_info-behavior.patch @@ -0,0 +1,309 @@ +From 4580c303fa88f77a98461fee5fe26b5db725967c Mon Sep 17 00:00:00 2001 +From: Todd Short +Date: Thu, 1 Feb 2024 23:09:38 -0500 +Subject: [PATCH 1/2] Fix EVP_PKEY_CTX_add1_hkdf_info() behavior + +Fix #23448 + +`EVP_PKEY_CTX_add1_hkdf_info()` behaves like a `set1` function. + +Fix the setting of the parameter in the params code. +Update the TLS_PRF code to also use the params code. +Add tests. 
+ +Reviewed-by: Shane Lontis +Reviewed-by: Tomas Mraz +(Merged from https://github.com/openssl/openssl/pull/23456) + +(cherry picked from commit 6b566687b58fde08b28e3331377f050768fad89b) +--- + crypto/evp/pmeth_lib.c | 65 ++++++++++++++++++- + providers/implementations/exchange/kdf_exch.c | 42 ++++++++++++ + providers/implementations/kdfs/hkdf.c | 8 +++ + test/pkey_meth_kdf_test.c | 53 +++++++++++---- + 4 files changed, 156 insertions(+), 12 deletions(-) + +diff --git a/crypto/evp/pmeth_lib.c b/crypto/evp/pmeth_lib.c +index ba1971c..d0eeaf7 100644 +--- a/crypto/evp/pmeth_lib.c ++++ b/crypto/evp/pmeth_lib.c +@@ -1028,6 +1028,69 @@ static int evp_pkey_ctx_set1_octet_string(EVP_PKEY_CTX *ctx, int fallback, + return EVP_PKEY_CTX_set_params(ctx, octet_string_params); + } + ++static int evp_pkey_ctx_add1_octet_string(EVP_PKEY_CTX *ctx, int fallback, ++ const char *param, int op, int ctrl, ++ const unsigned char *data, ++ int datalen) ++{ ++ OSSL_PARAM os_params[2]; ++ unsigned char *info = NULL; ++ size_t info_len = 0; ++ size_t info_alloc = 0; ++ int ret = 0; ++ ++ if (ctx == NULL || (ctx->operation & op) == 0) { ++ ERR_raise(ERR_LIB_EVP, EVP_R_COMMAND_NOT_SUPPORTED); ++ /* Uses the same return values as EVP_PKEY_CTX_ctrl */ ++ return -2; ++ } ++ ++ /* Code below to be removed when legacy support is dropped. 
*/ ++ if (fallback) ++ return EVP_PKEY_CTX_ctrl(ctx, -1, op, ctrl, datalen, (void *)(data)); ++ /* end of legacy support */ ++ ++ if (datalen < 0) { ++ ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_LENGTH); ++ return 0; ++ } ++ ++ /* Get the original value length */ ++ os_params[0] = OSSL_PARAM_construct_octet_string(param, NULL, 0); ++ os_params[1] = OSSL_PARAM_construct_end(); ++ ++ if (!EVP_PKEY_CTX_get_params(ctx, os_params)) ++ return 0; ++ ++ /* Older provider that doesn't support getting this parameter */ ++ if (os_params[0].return_size == OSSL_PARAM_UNMODIFIED) ++ return evp_pkey_ctx_set1_octet_string(ctx, fallback, param, op, ctrl, data, datalen); ++ ++ info_alloc = os_params[0].return_size + datalen; ++ if (info_alloc == 0) ++ return 0; ++ info = OPENSSL_zalloc(info_alloc); ++ if (info == NULL) ++ return 0; ++ info_len = os_params[0].return_size; ++ ++ os_params[0] = OSSL_PARAM_construct_octet_string(param, info, info_alloc); ++ ++ /* if we have data, then go get it */ ++ if (info_len > 0) { ++ if (!EVP_PKEY_CTX_get_params(ctx, os_params)) ++ goto error; ++ } ++ ++ /* Copy the input data */ ++ memcpy(&info[info_len], data, datalen); ++ ret = EVP_PKEY_CTX_set_params(ctx, os_params); ++ ++ error: ++ OPENSSL_clear_free(info, info_alloc); ++ return ret; ++} ++ + int EVP_PKEY_CTX_set1_tls1_prf_secret(EVP_PKEY_CTX *ctx, + const unsigned char *sec, int seclen) + { +@@ -1078,7 +1141,7 @@ int EVP_PKEY_CTX_set1_hkdf_key(EVP_PKEY_CTX *ctx, + int EVP_PKEY_CTX_add1_hkdf_info(EVP_PKEY_CTX *ctx, + const unsigned char *info, int infolen) + { +- return evp_pkey_ctx_set1_octet_string(ctx, ctx->op.kex.algctx == NULL, ++ return evp_pkey_ctx_add1_octet_string(ctx, ctx->op.kex.algctx == NULL, + OSSL_KDF_PARAM_INFO, + EVP_PKEY_OP_DERIVE, + EVP_PKEY_CTRL_HKDF_INFO, +diff --git a/providers/implementations/exchange/kdf_exch.c b/providers/implementations/exchange/kdf_exch.c +index 527a866..4bc8102 100644 +--- a/providers/implementations/exchange/kdf_exch.c ++++ 
b/providers/implementations/exchange/kdf_exch.c +@@ -28,9 +28,13 @@ static OSSL_FUNC_keyexch_derive_fn kdf_derive; + static OSSL_FUNC_keyexch_freectx_fn kdf_freectx; + static OSSL_FUNC_keyexch_dupctx_fn kdf_dupctx; + static OSSL_FUNC_keyexch_set_ctx_params_fn kdf_set_ctx_params; ++static OSSL_FUNC_keyexch_get_ctx_params_fn kdf_get_ctx_params; + static OSSL_FUNC_keyexch_settable_ctx_params_fn kdf_tls1_prf_settable_ctx_params; + static OSSL_FUNC_keyexch_settable_ctx_params_fn kdf_hkdf_settable_ctx_params; + static OSSL_FUNC_keyexch_settable_ctx_params_fn kdf_scrypt_settable_ctx_params; ++static OSSL_FUNC_keyexch_gettable_ctx_params_fn kdf_tls1_prf_gettable_ctx_params; ++static OSSL_FUNC_keyexch_gettable_ctx_params_fn kdf_hkdf_gettable_ctx_params; ++static OSSL_FUNC_keyexch_gettable_ctx_params_fn kdf_scrypt_gettable_ctx_params; + + typedef struct { + void *provctx; +@@ -169,6 +173,13 @@ static int kdf_set_ctx_params(void *vpkdfctx, const OSSL_PARAM params[]) + return EVP_KDF_CTX_set_params(pkdfctx->kdfctx, params); + } + ++static int kdf_get_ctx_params(void *vpkdfctx, OSSL_PARAM params[]) ++{ ++ PROV_KDF_CTX *pkdfctx = (PROV_KDF_CTX *)vpkdfctx; ++ ++ return EVP_KDF_CTX_get_params(pkdfctx->kdfctx, params); ++} ++ + static const OSSL_PARAM *kdf_settable_ctx_params(ossl_unused void *vpkdfctx, + void *provctx, + const char *kdfname) +@@ -197,6 +208,34 @@ KDF_SETTABLE_CTX_PARAMS(tls1_prf, "TLS1-PRF") + KDF_SETTABLE_CTX_PARAMS(hkdf, "HKDF") + KDF_SETTABLE_CTX_PARAMS(scrypt, "SCRYPT") + ++static const OSSL_PARAM *kdf_gettable_ctx_params(ossl_unused void *vpkdfctx, ++ void *provctx, ++ const char *kdfname) ++{ ++ EVP_KDF *kdf = EVP_KDF_fetch(PROV_LIBCTX_OF(provctx), kdfname, ++ NULL); ++ const OSSL_PARAM *params; ++ ++ if (kdf == NULL) ++ return NULL; ++ ++ params = EVP_KDF_gettable_ctx_params(kdf); ++ EVP_KDF_free(kdf); ++ ++ return params; ++} ++ ++#define KDF_GETTABLE_CTX_PARAMS(funcname, kdfname) \ ++ static const OSSL_PARAM *kdf_##funcname##_gettable_ctx_params(void 
*vpkdfctx, \ ++ void *provctx) \ ++ { \ ++ return kdf_gettable_ctx_params(vpkdfctx, provctx, kdfname); \ ++ } ++ ++KDF_GETTABLE_CTX_PARAMS(tls1_prf, "TLS1-PRF") ++KDF_GETTABLE_CTX_PARAMS(hkdf, "HKDF") ++KDF_GETTABLE_CTX_PARAMS(scrypt, "SCRYPT") ++ + #define KDF_KEYEXCH_FUNCTIONS(funcname) \ + const OSSL_DISPATCH ossl_kdf_##funcname##_keyexch_functions[] = { \ + { OSSL_FUNC_KEYEXCH_NEWCTX, (void (*)(void))kdf_##funcname##_newctx }, \ +@@ -205,8 +244,11 @@ KDF_SETTABLE_CTX_PARAMS(scrypt, "SCRYPT") + { OSSL_FUNC_KEYEXCH_FREECTX, (void (*)(void))kdf_freectx }, \ + { OSSL_FUNC_KEYEXCH_DUPCTX, (void (*)(void))kdf_dupctx }, \ + { OSSL_FUNC_KEYEXCH_SET_CTX_PARAMS, (void (*)(void))kdf_set_ctx_params }, \ ++ { OSSL_FUNC_KEYEXCH_GET_CTX_PARAMS, (void (*)(void))kdf_get_ctx_params }, \ + { OSSL_FUNC_KEYEXCH_SETTABLE_CTX_PARAMS, \ + (void (*)(void))kdf_##funcname##_settable_ctx_params }, \ ++ { OSSL_FUNC_KEYEXCH_GETTABLE_CTX_PARAMS, \ ++ (void (*)(void))kdf_##funcname##_gettable_ctx_params }, \ + { 0, NULL } \ + }; + +diff --git a/providers/implementations/kdfs/hkdf.c b/providers/implementations/kdfs/hkdf.c +index daa619b..dd65a2a 100644 +--- a/providers/implementations/kdfs/hkdf.c ++++ b/providers/implementations/kdfs/hkdf.c +@@ -371,6 +371,13 @@ static int kdf_hkdf_get_ctx_params(void *vctx, OSSL_PARAM params[]) + return 0; + return OSSL_PARAM_set_size_t(p, sz); + } ++ if ((p = OSSL_PARAM_locate(params, OSSL_KDF_PARAM_INFO)) != NULL) { ++ if (ctx->info == NULL || ctx->info_len == 0) { ++ p->return_size = 0; ++ return 1; ++ } ++ return OSSL_PARAM_set_octet_string(p, ctx->info, ctx->info_len); ++ } + return -2; + } + +@@ -379,6 +386,7 @@ static const OSSL_PARAM *kdf_hkdf_gettable_ctx_params(ossl_unused void *ctx, + { + static const OSSL_PARAM known_gettable_ctx_params[] = { + OSSL_PARAM_size_t(OSSL_KDF_PARAM_SIZE, NULL), ++ OSSL_PARAM_octet_string(OSSL_KDF_PARAM_INFO, NULL, 0), + OSSL_PARAM_END + }; + return known_gettable_ctx_params; +diff --git a/test/pkey_meth_kdf_test.c 
b/test/pkey_meth_kdf_test.c +index f816d24..c09e2f3 100644 +--- a/test/pkey_meth_kdf_test.c ++++ b/test/pkey_meth_kdf_test.c +@@ -16,7 +16,7 @@ + #include + #include "testutil.h" + +-static int test_kdf_tls1_prf(void) ++static int test_kdf_tls1_prf(int index) + { + int ret = 0; + EVP_PKEY_CTX *pctx; +@@ -40,10 +40,23 @@ static int test_kdf_tls1_prf(void) + TEST_error("EVP_PKEY_CTX_set1_tls1_prf_secret"); + goto err; + } +- if (EVP_PKEY_CTX_add1_tls1_prf_seed(pctx, +- (unsigned char *)"seed", 4) <= 0) { +- TEST_error("EVP_PKEY_CTX_add1_tls1_prf_seed"); +- goto err; ++ if (index == 0) { ++ if (EVP_PKEY_CTX_add1_tls1_prf_seed(pctx, ++ (unsigned char *)"seed", 4) <= 0) { ++ TEST_error("EVP_PKEY_CTX_add1_tls1_prf_seed"); ++ goto err; ++ } ++ } else { ++ if (EVP_PKEY_CTX_add1_tls1_prf_seed(pctx, ++ (unsigned char *)"se", 2) <= 0) { ++ TEST_error("EVP_PKEY_CTX_add1_tls1_prf_seed"); ++ goto err; ++ } ++ if (EVP_PKEY_CTX_add1_tls1_prf_seed(pctx, ++ (unsigned char *)"ed", 2) <= 0) { ++ TEST_error("EVP_PKEY_CTX_add1_tls1_prf_seed"); ++ goto err; ++ } + } + if (EVP_PKEY_derive(pctx, out, &outlen) <= 0) { + TEST_error("EVP_PKEY_derive"); +@@ -65,7 +78,7 @@ err: + return ret; + } + +-static int test_kdf_hkdf(void) ++static int test_kdf_hkdf(int index) + { + int ret = 0; + EVP_PKEY_CTX *pctx; +@@ -94,10 +107,23 @@ static int test_kdf_hkdf(void) + TEST_error("EVP_PKEY_CTX_set1_hkdf_key"); + goto err; + } +- if (EVP_PKEY_CTX_add1_hkdf_info(pctx, (const unsigned char *)"label", 5) ++ if (index == 0) { ++ if (EVP_PKEY_CTX_add1_hkdf_info(pctx, (const unsigned char *)"label", 5) + <= 0) { +- TEST_error("EVP_PKEY_CTX_set1_hkdf_info"); +- goto err; ++ TEST_error("EVP_PKEY_CTX_add1_hkdf_info"); ++ goto err; ++ } ++ } else { ++ if (EVP_PKEY_CTX_add1_hkdf_info(pctx, (const unsigned char *)"lab", 3) ++ <= 0) { ++ TEST_error("EVP_PKEY_CTX_add1_hkdf_info"); ++ goto err; ++ } ++ if (EVP_PKEY_CTX_add1_hkdf_info(pctx, (const unsigned char *)"el", 2) ++ <= 0) { ++ 
TEST_error("EVP_PKEY_CTX_add1_hkdf_info"); ++ goto err; ++ } + } + if (EVP_PKEY_derive(pctx, out, &outlen) <= 0) { + TEST_error("EVP_PKEY_derive"); +@@ -195,8 +221,13 @@ err: + + int setup_tests(void) + { +- ADD_TEST(test_kdf_tls1_prf); +- ADD_TEST(test_kdf_hkdf); ++ int tests = 1; ++ ++ if (fips_provider_version_ge(NULL, 3, 3, 1)) ++ tests = 2; ++ ++ ADD_ALL_TESTS(test_kdf_tls1_prf, tests); ++ ADD_ALL_TESTS(test_kdf_hkdf, tests); + #ifndef OPENSSL_NO_SCRYPT + ADD_TEST(test_kdf_scrypt); + #endif +-- +2.45.1 + diff --git a/openssl-Handle-empty-param-in-EVP_PKEY_CTX_add1_hkdf_info.patch b/openssl-Handle-empty-param-in-EVP_PKEY_CTX_add1_hkdf_info.patch new file mode 100644 index 0000000..0ad7660 --- /dev/null +++ b/openssl-Handle-empty-param-in-EVP_PKEY_CTX_add1_hkdf_info.patch @@ -0,0 +1,94 @@ +From d6a9c21302e01c33a9a919e7ba380ba3b0ed65b0 Mon Sep 17 00:00:00 2001 +From: trinity-1686a +Date: Mon, 15 Apr 2024 11:13:14 +0200 +Subject: [PATCH 2/2] Handle empty param in EVP_PKEY_CTX_add1_hkdf_info + +Fixes #24130 +The regression was introduced in PR #23456. 
+ +Reviewed-by: Paul Dale +Reviewed-by: Tomas Mraz +(Merged from https://github.com/openssl/openssl/pull/24141) + +(cherry picked from commit 299996fb1fcd76eeadfd547958de2a1b822f37f5) +--- + crypto/evp/pmeth_lib.c | 2 ++ + test/evp_extra_test.c | 42 ++++++++++++++++++++++++++++++++++++++++++ + 2 files changed, 44 insertions(+) + +diff --git a/crypto/evp/pmeth_lib.c b/crypto/evp/pmeth_lib.c +index d0eeaf7..bce1ebc 100644 +--- a/crypto/evp/pmeth_lib.c ++++ b/crypto/evp/pmeth_lib.c +@@ -1053,6 +1053,8 @@ static int evp_pkey_ctx_add1_octet_string(EVP_PKEY_CTX *ctx, int fallback, + if (datalen < 0) { + ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_LENGTH); + return 0; ++ } else if (datalen == 0) { ++ return 1; + } + + /* Get the original value length */ +diff --git a/test/evp_extra_test.c b/test/evp_extra_test.c +index 9b3bee7..22121ce 100644 +--- a/test/evp_extra_test.c ++++ b/test/evp_extra_test.c +@@ -2565,6 +2565,47 @@ static int test_emptyikm_HKDF(void) + return ret; + } + ++static int test_empty_salt_info_HKDF(void) ++{ ++ EVP_PKEY_CTX *pctx; ++ unsigned char out[20]; ++ size_t outlen; ++ int ret = 0; ++ unsigned char salt[] = ""; ++ unsigned char key[] = "012345678901234567890123456789"; ++ unsigned char info[] = ""; ++ const unsigned char expected[] = { ++ 0x67, 0x12, 0xf9, 0x27, 0x8a, 0x8a, 0x3a, 0x8f, 0x7d, 0x2c, 0xa3, 0x6a, ++ 0xaa, 0xe9, 0xb3, 0xb9, 0x52, 0x5f, 0xe0, 0x06, ++ }; ++ size_t expectedlen = sizeof(expected); ++ ++ if (!TEST_ptr(pctx = EVP_PKEY_CTX_new_from_name(testctx, "HKDF", testpropq))) ++ goto done; ++ ++ outlen = sizeof(out); ++ memset(out, 0, outlen); ++ ++ if (!TEST_int_gt(EVP_PKEY_derive_init(pctx), 0) ++ || !TEST_int_gt(EVP_PKEY_CTX_set_hkdf_md(pctx, EVP_sha256()), 0) ++ || !TEST_int_gt(EVP_PKEY_CTX_set1_hkdf_salt(pctx, salt, ++ sizeof(salt) - 1), 0) ++ || !TEST_int_gt(EVP_PKEY_CTX_set1_hkdf_key(pctx, key, ++ sizeof(key) - 1), 0) ++ || !TEST_int_gt(EVP_PKEY_CTX_add1_hkdf_info(pctx, info, ++ sizeof(info) - 1), 0) ++ || 
!TEST_int_gt(EVP_PKEY_derive(pctx, out, &outlen), 0) ++ || !TEST_mem_eq(out, outlen, expected, expectedlen)) ++ goto done; ++ ++ ret = 1; ++ ++ done: ++ EVP_PKEY_CTX_free(pctx); ++ ++ return ret; ++} ++ + #ifndef OPENSSL_NO_EC + static int test_X509_PUBKEY_inplace(void) + { +@@ -5166,6 +5207,7 @@ int setup_tests(void) + #endif + ADD_TEST(test_HKDF); + ADD_TEST(test_emptyikm_HKDF); ++ ADD_TEST(test_empty_salt_info_HKDF); + #ifndef OPENSSL_NO_EC + ADD_TEST(test_X509_PUBKEY_inplace); + ADD_TEST(test_X509_PUBKEY_dup); +-- +2.45.1 + diff --git a/reproducible.patch b/reproducible.patch new file mode 100644 index 0000000..6c40942 --- /dev/null +++ b/reproducible.patch @@ -0,0 +1,929 @@ +commit 0fbc50ef0cb8894973d4739af62e95be825b7ccf +Author: trigpolynom +Date: Tue Oct 17 22:44:45 2023 -0400 + + aes-gcm-avx512.pl: fix non-reproducibility issue + + Replace the random suffix with a counter, to make the + build reproducible. + + Fixes #20954 + + Reviewed-by: Richard Levitte + Reviewed-by: Matthias St. Pierre + Reviewed-by: Tom Cosgrove + Reviewed-by: Hugo Landau + (Merged from https://github.com/openssl/openssl/pull/22415) + +diff --git a/crypto/modes/asm/aes-gcm-avx512.pl b/crypto/modes/asm/aes-gcm-avx512.pl +index afd2af941a..9f9124373b 100644 +--- a/crypto/modes/asm/aes-gcm-avx512.pl ++++ b/crypto/modes/asm/aes-gcm-avx512.pl +@@ -155,6 +155,9 @@ my $STACK_LOCAL_OFFSET = ($STACK_HKEYS_OFFSET + $HKEYS_STORAGE); + # ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + my ($arg1, $arg2, $arg3, $arg4, $arg5, $arg6, $arg7, $arg8, $arg9, $arg10, $arg11); + ++# ; Counter used for assembly label generation ++my $label_count = 0; ++ + # ; This implementation follows the convention: for non-leaf functions (they + # ; must call PROLOG) %rbp is used as a frame pointer, and has fixed offset from + # ; the function entry: $GP_STORAGE + [8 bytes alignment (Windows only)]. 
This +@@ -200,15 +203,6 @@ my $CTX_OFFSET_HTable = (16 * 6); # ; (Htable) Precomputed table (a + # ;;; Helper functions + # ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +-# ; Generates "random" local labels +-sub random_string() { +- my @chars = ('a' .. 'z', 'A' .. 'Z', '0' .. '9', '_'); +- my $length = 15; +- my $str; +- map { $str .= $chars[rand(33)] } 1 .. $length; +- return $str; +-} +- + sub BYTE { + my ($reg) = @_; + if ($reg =~ /%r[abcd]x/i) { +@@ -417,7 +411,7 @@ ___ + sub EPILOG { + my ($hkeys_storage_on_stack, $payload_len) = @_; + +- my $rndsuffix = &random_string(); ++ my $label_suffix = $label_count++; + + if ($hkeys_storage_on_stack && $CLEAR_HKEYS_STORAGE_ON_EXIT) { + +@@ -425,13 +419,13 @@ sub EPILOG { + # ; were stored in the local frame storage + $code .= <<___; + cmpq \$`16*16`,$payload_len +- jbe .Lskip_hkeys_cleanup_${rndsuffix} ++ jbe .Lskip_hkeys_cleanup_${label_suffix} + vpxor %xmm0,%xmm0,%xmm0 + ___ + for (my $i = 0; $i < int($HKEYS_STORAGE / 64); $i++) { + $code .= "vmovdqa64 %zmm0,`$STACK_HKEYS_OFFSET + 64*$i`(%rsp)\n"; + } +- $code .= ".Lskip_hkeys_cleanup_${rndsuffix}:\n"; ++ $code .= ".Lskip_hkeys_cleanup_${label_suffix}:\n"; + } + + if ($CLEAR_SCRATCH_REGISTERS) { +@@ -537,11 +531,11 @@ sub precompute_hkeys_on_stack { + && $HKEYS_RANGE ne "first32" + && $HKEYS_RANGE ne "last32"); + +- my $rndsuffix = &random_string(); ++ my $label_suffix = $label_count++; + + $code .= <<___; + test $HKEYS_READY,$HKEYS_READY +- jnz .L_skip_hkeys_precomputation_${rndsuffix} ++ jnz .L_skip_hkeys_precomputation_${label_suffix} + ___ + + if ($HKEYS_RANGE eq "first16" || $HKEYS_RANGE eq "first32" || $HKEYS_RANGE eq "all") { +@@ -615,7 +609,7 @@ ___ + } + } + +- $code .= ".L_skip_hkeys_precomputation_${rndsuffix}:\n"; ++ $code .= ".L_skip_hkeys_precomputation_${label_suffix}:\n"; + } + + # ;; ============================================================================= +@@ -1418,20 +1412,20 @@ sub CALC_AAD_HASH { + + my $SHFMSK = $ZT13; + +- my $rndsuffix = 
&random_string(); ++ my $label_suffix = $label_count++; + + $code .= <<___; + mov $A_IN,$T1 # ; T1 = AAD + mov $A_LEN,$T2 # ; T2 = aadLen + or $T2,$T2 +- jz .L_CALC_AAD_done_${rndsuffix} ++ jz .L_CALC_AAD_done_${label_suffix} + + xor $HKEYS_READY,$HKEYS_READY + vmovdqa64 SHUF_MASK(%rip),$SHFMSK + +-.L_get_AAD_loop48x16_${rndsuffix}: ++.L_get_AAD_loop48x16_${label_suffix}: + cmp \$`(48*16)`,$T2 +- jl .L_exit_AAD_loop48x16_${rndsuffix} ++ jl .L_exit_AAD_loop48x16_${label_suffix} + ___ + + $code .= <<___; +@@ -1499,15 +1493,15 @@ ___ + + $code .= <<___; + sub \$`(48*16)`,$T2 +- je .L_CALC_AAD_done_${rndsuffix} ++ je .L_CALC_AAD_done_${label_suffix} + + add \$`(48*16)`,$T1 +- jmp .L_get_AAD_loop48x16_${rndsuffix} ++ jmp .L_get_AAD_loop48x16_${label_suffix} + +-.L_exit_AAD_loop48x16_${rndsuffix}: ++.L_exit_AAD_loop48x16_${label_suffix}: + # ; Less than 48x16 bytes remaining + cmp \$`(32*16)`,$T2 +- jl .L_less_than_32x16_${rndsuffix} ++ jl .L_less_than_32x16_${label_suffix} + ___ + + $code .= <<___; +@@ -1556,14 +1550,14 @@ ___ + + $code .= <<___; + sub \$`(32*16)`,$T2 +- je .L_CALC_AAD_done_${rndsuffix} ++ je .L_CALC_AAD_done_${label_suffix} + + add \$`(32*16)`,$T1 +- jmp .L_less_than_16x16_${rndsuffix} ++ jmp .L_less_than_16x16_${label_suffix} + +-.L_less_than_32x16_${rndsuffix}: ++.L_less_than_32x16_${label_suffix}: + cmp \$`(16*16)`,$T2 +- jl .L_less_than_16x16_${rndsuffix} ++ jl .L_less_than_16x16_${label_suffix} + # ; Get next 16 blocks + vmovdqu64 `64*0`($T1),$ZT1 + vmovdqu64 `64*1`($T1),$ZT2 +@@ -1588,11 +1582,11 @@ ___ + + $code .= <<___; + sub \$`(16*16)`,$T2 +- je .L_CALC_AAD_done_${rndsuffix} ++ je .L_CALC_AAD_done_${label_suffix} + + add \$`(16*16)`,$T1 + # ; Less than 16x16 bytes remaining +-.L_less_than_16x16_${rndsuffix}: ++.L_less_than_16x16_${label_suffix}: + # ;; prep mask source address + lea byte64_len_to_mask_table(%rip),$T3 + lea ($T3,$T2,8),$T3 +@@ -1601,28 +1595,28 @@ ___ + add \$15,@{[DWORD($T2)]} + shr \$4,@{[DWORD($T2)]} + cmp 
\$2,@{[DWORD($T2)]} +- jb .L_AAD_blocks_1_${rndsuffix} +- je .L_AAD_blocks_2_${rndsuffix} ++ jb .L_AAD_blocks_1_${label_suffix} ++ je .L_AAD_blocks_2_${label_suffix} + cmp \$4,@{[DWORD($T2)]} +- jb .L_AAD_blocks_3_${rndsuffix} +- je .L_AAD_blocks_4_${rndsuffix} ++ jb .L_AAD_blocks_3_${label_suffix} ++ je .L_AAD_blocks_4_${label_suffix} + cmp \$6,@{[DWORD($T2)]} +- jb .L_AAD_blocks_5_${rndsuffix} +- je .L_AAD_blocks_6_${rndsuffix} ++ jb .L_AAD_blocks_5_${label_suffix} ++ je .L_AAD_blocks_6_${label_suffix} + cmp \$8,@{[DWORD($T2)]} +- jb .L_AAD_blocks_7_${rndsuffix} +- je .L_AAD_blocks_8_${rndsuffix} ++ jb .L_AAD_blocks_7_${label_suffix} ++ je .L_AAD_blocks_8_${label_suffix} + cmp \$10,@{[DWORD($T2)]} +- jb .L_AAD_blocks_9_${rndsuffix} +- je .L_AAD_blocks_10_${rndsuffix} ++ jb .L_AAD_blocks_9_${label_suffix} ++ je .L_AAD_blocks_10_${label_suffix} + cmp \$12,@{[DWORD($T2)]} +- jb .L_AAD_blocks_11_${rndsuffix} +- je .L_AAD_blocks_12_${rndsuffix} ++ jb .L_AAD_blocks_11_${label_suffix} ++ je .L_AAD_blocks_12_${label_suffix} + cmp \$14,@{[DWORD($T2)]} +- jb .L_AAD_blocks_13_${rndsuffix} +- je .L_AAD_blocks_14_${rndsuffix} ++ jb .L_AAD_blocks_13_${label_suffix} ++ je .L_AAD_blocks_14_${label_suffix} + cmp \$15,@{[DWORD($T2)]} +- je .L_AAD_blocks_15_${rndsuffix} ++ je .L_AAD_blocks_15_${label_suffix} + ___ + + # ;; fall through for 16 blocks +@@ -1635,7 +1629,7 @@ ___ + # ;; - jump to reduction code + + for (my $aad_blocks = 16; $aad_blocks > 0; $aad_blocks--) { +- $code .= ".L_AAD_blocks_${aad_blocks}_${rndsuffix}:\n"; ++ $code .= ".L_AAD_blocks_${aad_blocks}_${label_suffix}:\n"; + if ($aad_blocks > 12) { + $code .= "sub \$`12*16*8`, $T3\n"; + } elsif ($aad_blocks > 8) { +@@ -1656,11 +1650,11 @@ ___ + if ($aad_blocks > 1) { + + # ;; fall through to CALC_AAD_done in 1 block case +- $code .= "jmp .L_CALC_AAD_done_${rndsuffix}\n"; ++ $code .= "jmp .L_CALC_AAD_done_${label_suffix}\n"; + } + + } +- $code .= ".L_CALC_AAD_done_${rndsuffix}:\n"; ++ $code .= 
".L_CALC_AAD_done_${label_suffix}:\n"; + + # ;; result in AAD_HASH + } +@@ -1710,13 +1704,13 @@ sub PARTIAL_BLOCK { + my $IA1 = $GPTMP2; + my $IA2 = $GPTMP0; + +- my $rndsuffix = &random_string(); ++ my $label_suffix = $label_count++; + + $code .= <<___; + # ;; if no partial block present then LENGTH/DATA_OFFSET will be set to zero + mov ($PBLOCK_LEN),$LENGTH + or $LENGTH,$LENGTH +- je .L_partial_block_done_${rndsuffix} # ;Leave Macro if no partial blocks ++ je .L_partial_block_done_${label_suffix} # ;Leave Macro if no partial blocks + ___ + + &READ_SMALL_DATA_INPUT($XTMP0, $PLAIN_CIPH_IN, $PLAIN_CIPH_LEN, $IA0, $IA2, $MASKREG); +@@ -1755,9 +1749,9 @@ ___ + } + $code .= <<___; + sub \$16,$IA1 +- jge .L_no_extra_mask_${rndsuffix} ++ jge .L_no_extra_mask_${label_suffix} + sub $IA1,$IA0 +-.L_no_extra_mask_${rndsuffix}: ++.L_no_extra_mask_${label_suffix}: + # ;; get the appropriate mask to mask out bottom $LENGTH bytes of $XTMP1 + # ;; - mask out bottom $LENGTH bytes of $XTMP1 + # ;; sizeof(SHIFT_MASK) == 16 bytes +@@ -1781,7 +1775,7 @@ ___ + } + $code .= <<___; + cmp \$0,$IA1 +- jl .L_partial_incomplete_${rndsuffix} ++ jl .L_partial_incomplete_${label_suffix} + ___ + + # ;; GHASH computation for the last <16 Byte block +@@ -1793,9 +1787,9 @@ ___ + mov $LENGTH,$IA0 + mov \$16,$LENGTH + sub $IA0,$LENGTH +- jmp .L_enc_dec_done_${rndsuffix} ++ jmp .L_enc_dec_done_${label_suffix} + +-.L_partial_incomplete_${rndsuffix}: ++.L_partial_incomplete_${label_suffix}: + ___ + if ($win64) { + $code .= <<___; +@@ -1808,7 +1802,7 @@ ___ + $code .= <<___; + mov $PLAIN_CIPH_LEN,$LENGTH + +-.L_enc_dec_done_${rndsuffix}: ++.L_enc_dec_done_${label_suffix}: + # ;; output encrypted Bytes + + lea byte_len_to_mask_table(%rip),$IA0 +@@ -1826,7 +1820,7 @@ ___ + $code .= <<___; + mov $CIPH_PLAIN_OUT,$IA0 + vmovdqu8 $XTMP1,($IA0){$MASKREG} +-.L_partial_block_done_${rndsuffix}: ++.L_partial_block_done_${label_suffix}: + ___ + } + +@@ -2016,7 +2010,7 @@ sub INITIAL_BLOCKS_PARTIAL_GHASH { + my $GM = 
$_[23]; # [in] ZMM with mid prodcut part + my $GL = $_[24]; # [in] ZMM with lo product part + +- my $rndsuffix = &random_string(); ++ my $label_suffix = $label_count++; + + # ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + # ;;; - Hash all but the last partial block of data +@@ -2034,7 +2028,7 @@ sub INITIAL_BLOCKS_PARTIAL_GHASH { + # ;; NOTE: the 'jl' is always taken for num_initial_blocks = 16. + # ;; This is run in the context of GCM_ENC_DEC_SMALL for length < 256. + cmp \$16,$LENGTH +- jl .L_small_initial_partial_block_${rndsuffix} ++ jl .L_small_initial_partial_block_${label_suffix} + + # ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + # ;;; Handle a full length final block - encrypt and hash all blocks +@@ -2056,11 +2050,11 @@ ___ + &GHASH_1_TO_16($GCM128_CTX, $HASH_IN_OUT, $ZT0, $ZT1, $ZT2, $ZT3, $ZT4, + $ZT5, $ZT6, $ZT7, $ZT8, &ZWORD($HASH_IN_OUT), $DAT0, $DAT1, $DAT2, $DAT3, $NUM_BLOCKS, $GH, $GM, $GL); + } +- $code .= "jmp .L_small_initial_compute_done_${rndsuffix}\n"; ++ $code .= "jmp .L_small_initial_compute_done_${label_suffix}\n"; + } + + $code .= <<___; +-.L_small_initial_partial_block_${rndsuffix}: ++.L_small_initial_partial_block_${label_suffix}: + + # ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + # ;;; Handle ghash for a <16B final block +@@ -2125,7 +2119,7 @@ ___ + # ;; a partial block of data, so xor that into the hash. + vpxorq $LAST_GHASH_BLK,$HASH_IN_OUT,$HASH_IN_OUT + # ;; The result is in $HASH_IN_OUT +- jmp .L_after_reduction_${rndsuffix} ++ jmp .L_after_reduction_${label_suffix} + ___ + } + +@@ -2133,7 +2127,7 @@ ___ + # ;;; After GHASH reduction + # ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +- $code .= ".L_small_initial_compute_done_${rndsuffix}:\n"; ++ $code .= ".L_small_initial_compute_done_${label_suffix}:\n"; + + # ;; If using init/update/finalize, we need to xor any partial block data + # ;; into the hash. 
+@@ -2144,13 +2138,13 @@ ___ + $code .= <<___; + # ;; NOTE: for $NUM_BLOCKS = 16, $LENGTH, stored in [PBlockLen] is never zero + or $LENGTH,$LENGTH +- je .L_after_reduction_${rndsuffix} ++ je .L_after_reduction_${label_suffix} + ___ + } + $code .= "vpxorq $LAST_GHASH_BLK,$HASH_IN_OUT,$HASH_IN_OUT\n"; + } + +- $code .= ".L_after_reduction_${rndsuffix}:\n"; ++ $code .= ".L_after_reduction_${label_suffix}:\n"; + + # ;; Final hash is now in HASH_IN_OUT + } +@@ -2266,7 +2260,7 @@ sub GHASH_16_ENCRYPT_N_GHASH_N { + die "GHASH_16_ENCRYPT_N_GHASH_N: num_blocks is out of bounds = $NUM_BLOCKS\n" + if ($NUM_BLOCKS > 16 || $NUM_BLOCKS < 0); + +- my $rndsuffix = &random_string(); ++ my $label_suffix = $label_count++; + + my $GH1H = $HASH_IN_OUT; + +@@ -2326,16 +2320,16 @@ ___ + + $code .= <<___; + cmp \$`(256 - $NUM_BLOCKS)`,@{[DWORD($CTR_CHECK)]} +- jae .L_16_blocks_overflow_${rndsuffix} ++ jae .L_16_blocks_overflow_${label_suffix} + ___ + + &ZMM_OPCODE3_DSTR_SRC1R_SRC2R_BLOCKS_0_16( + $NUM_BLOCKS, "vpaddd", $B00_03, $B04_07, $B08_11, $B12_15, $CTR_BE, + $B00_03, $B04_07, $B08_11, $ADDBE_1234, $ADDBE_4x4, $ADDBE_4x4, $ADDBE_4x4); + $code .= <<___; +- jmp .L_16_blocks_ok_${rndsuffix} ++ jmp .L_16_blocks_ok_${label_suffix} + +-.L_16_blocks_overflow_${rndsuffix}: ++.L_16_blocks_overflow_${label_suffix}: + vpshufb $SHFMSK,$CTR_BE,$CTR_BE + vpaddd ddq_add_1234(%rip),$CTR_BE,$B00_03 + ___ +@@ -2355,7 +2349,7 @@ ___ + $NUM_BLOCKS, "vpshufb", $B00_03, $B04_07, $B08_11, $B12_15, $B00_03, + $B04_07, $B08_11, $B12_15, $SHFMSK, $SHFMSK, $SHFMSK, $SHFMSK); + $code .= <<___; +-.L_16_blocks_ok_${rndsuffix}: ++.L_16_blocks_ok_${label_suffix}: + + # ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + # ;; - pre-load constants +@@ -2805,53 +2799,53 @@ sub GCM_ENC_DEC_LAST { + my $MASKREG = $_[44]; # [clobbered] mask register + my $PBLOCK_LEN = $_[45]; # [in] partial block length + +- my $rndsuffix = &random_string(); ++ my $label_suffix = $label_count++; + + $code .= <<___; + mov 
@{[DWORD($LENGTH)]},@{[DWORD($IA0)]} + add \$15,@{[DWORD($IA0)]} + shr \$4,@{[DWORD($IA0)]} +- je .L_last_num_blocks_is_0_${rndsuffix} ++ je .L_last_num_blocks_is_0_${label_suffix} + + cmp \$8,@{[DWORD($IA0)]} +- je .L_last_num_blocks_is_8_${rndsuffix} +- jb .L_last_num_blocks_is_7_1_${rndsuffix} ++ je .L_last_num_blocks_is_8_${label_suffix} ++ jb .L_last_num_blocks_is_7_1_${label_suffix} + + + cmp \$12,@{[DWORD($IA0)]} +- je .L_last_num_blocks_is_12_${rndsuffix} +- jb .L_last_num_blocks_is_11_9_${rndsuffix} ++ je .L_last_num_blocks_is_12_${label_suffix} ++ jb .L_last_num_blocks_is_11_9_${label_suffix} + + # ;; 16, 15, 14 or 13 + cmp \$15,@{[DWORD($IA0)]} +- je .L_last_num_blocks_is_15_${rndsuffix} +- ja .L_last_num_blocks_is_16_${rndsuffix} ++ je .L_last_num_blocks_is_15_${label_suffix} ++ ja .L_last_num_blocks_is_16_${label_suffix} + cmp \$14,@{[DWORD($IA0)]} +- je .L_last_num_blocks_is_14_${rndsuffix} +- jmp .L_last_num_blocks_is_13_${rndsuffix} ++ je .L_last_num_blocks_is_14_${label_suffix} ++ jmp .L_last_num_blocks_is_13_${label_suffix} + +-.L_last_num_blocks_is_11_9_${rndsuffix}: ++.L_last_num_blocks_is_11_9_${label_suffix}: + # ;; 11, 10 or 9 + cmp \$10,@{[DWORD($IA0)]} +- je .L_last_num_blocks_is_10_${rndsuffix} +- ja .L_last_num_blocks_is_11_${rndsuffix} +- jmp .L_last_num_blocks_is_9_${rndsuffix} ++ je .L_last_num_blocks_is_10_${label_suffix} ++ ja .L_last_num_blocks_is_11_${label_suffix} ++ jmp .L_last_num_blocks_is_9_${label_suffix} + +-.L_last_num_blocks_is_7_1_${rndsuffix}: ++.L_last_num_blocks_is_7_1_${label_suffix}: + cmp \$4,@{[DWORD($IA0)]} +- je .L_last_num_blocks_is_4_${rndsuffix} +- jb .L_last_num_blocks_is_3_1_${rndsuffix} ++ je .L_last_num_blocks_is_4_${label_suffix} ++ jb .L_last_num_blocks_is_3_1_${label_suffix} + # ;; 7, 6 or 5 + cmp \$6,@{[DWORD($IA0)]} +- ja .L_last_num_blocks_is_7_${rndsuffix} +- je .L_last_num_blocks_is_6_${rndsuffix} +- jmp .L_last_num_blocks_is_5_${rndsuffix} ++ ja .L_last_num_blocks_is_7_${label_suffix} ++ je 
.L_last_num_blocks_is_6_${label_suffix} ++ jmp .L_last_num_blocks_is_5_${label_suffix} + +-.L_last_num_blocks_is_3_1_${rndsuffix}: ++.L_last_num_blocks_is_3_1_${label_suffix}: + # ;; 3, 2 or 1 + cmp \$2,@{[DWORD($IA0)]} +- ja .L_last_num_blocks_is_3_${rndsuffix} +- je .L_last_num_blocks_is_2_${rndsuffix} ++ ja .L_last_num_blocks_is_3_${label_suffix} ++ je .L_last_num_blocks_is_2_${label_suffix} + ___ + + # ;; fall through for `jmp .L_last_num_blocks_is_1` +@@ -2859,7 +2853,7 @@ ___ + # ;; Use rep to generate different block size variants + # ;; - one block size has to be the first one + for my $num_blocks (1 .. 16) { +- $code .= ".L_last_num_blocks_is_${num_blocks}_${rndsuffix}:\n"; ++ $code .= ".L_last_num_blocks_is_${num_blocks}_${label_suffix}:\n"; + &GHASH_16_ENCRYPT_N_GHASH_N( + $AES_KEYS, $GCM128_CTX, $CIPH_PLAIN_OUT, $PLAIN_CIPH_IN, $DATA_OFFSET, + $LENGTH, $CTR_BE, $CTR_CHECK, $HASHKEY_OFFSET, $GHASHIN_BLK_OFFSET, +@@ -2872,10 +2866,10 @@ ___ + $ENC_DEC, $HASH_IN_OUT, $IA0, $IA1, $MASKREG, + $num_blocks, $PBLOCK_LEN); + +- $code .= "jmp .L_last_blocks_done_${rndsuffix}\n"; ++ $code .= "jmp .L_last_blocks_done_${label_suffix}\n"; + } + +- $code .= ".L_last_num_blocks_is_0_${rndsuffix}:\n"; ++ $code .= ".L_last_num_blocks_is_0_${label_suffix}:\n"; + + # ;; if there is 0 blocks to cipher then there are only 16 blocks for ghash and reduction + # ;; - convert mid into end_reduce +@@ -2891,7 +2885,7 @@ ___ + $GHASHIN_BLK_OFFSET, 0, "%rsp", $HASHKEY_OFFSET, 0, $HASH_IN_OUT, $ZT00, $ZT01, + $ZT02, $ZT03, $ZT04, $ZT05, $ZT06, $ZT07, $ZT08, $ZT09); + +- $code .= ".L_last_blocks_done_${rndsuffix}:\n"; ++ $code .= ".L_last_blocks_done_${label_suffix}:\n"; + } + + # ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +@@ -2985,20 +2979,20 @@ sub GHASH_16_ENCRYPT_16_PARALLEL { + my $GHDAT1 = $ZT21; + my $GHDAT2 = $ZT22; + +- my $rndsuffix = &random_string(); ++ my $label_suffix = $label_count++; + + # 
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + # ;; prepare counter blocks + + $code .= <<___; + cmpb \$`(256 - 16)`,@{[BYTE($CTR_CHECK)]} +- jae .L_16_blocks_overflow_${rndsuffix} ++ jae .L_16_blocks_overflow_${label_suffix} + vpaddd $ADDBE_1234,$CTR_BE,$B00_03 + vpaddd $ADDBE_4x4,$B00_03,$B04_07 + vpaddd $ADDBE_4x4,$B04_07,$B08_11 + vpaddd $ADDBE_4x4,$B08_11,$B12_15 +- jmp .L_16_blocks_ok_${rndsuffix} +-.L_16_blocks_overflow_${rndsuffix}: ++ jmp .L_16_blocks_ok_${label_suffix} ++.L_16_blocks_overflow_${label_suffix}: + vpshufb $SHFMSK,$CTR_BE,$CTR_BE + vmovdqa64 ddq_add_4444(%rip),$B12_15 + vpaddd ddq_add_1234(%rip),$CTR_BE,$B00_03 +@@ -3009,7 +3003,7 @@ sub GHASH_16_ENCRYPT_16_PARALLEL { + vpshufb $SHFMSK,$B04_07,$B04_07 + vpshufb $SHFMSK,$B08_11,$B08_11 + vpshufb $SHFMSK,$B12_15,$B12_15 +-.L_16_blocks_ok_${rndsuffix}: ++.L_16_blocks_ok_${label_suffix}: + ___ + + # ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +@@ -3338,25 +3332,25 @@ sub ENCRYPT_SINGLE_BLOCK { + my $XMM0 = $_[1]; # ; [in/out] + my $GPR1 = $_[2]; # ; [clobbered] + +- my $rndsuffix = &random_string(); ++ my $label_suffix = $label_count++; + + $code .= <<___; + # ; load number of rounds from AES_KEY structure (offset in bytes is + # ; size of the |rd_key| buffer) + mov `4*15*4`($AES_KEY),@{[DWORD($GPR1)]} + cmp \$9,@{[DWORD($GPR1)]} +- je .Laes_128_${rndsuffix} ++ je .Laes_128_${label_suffix} + cmp \$11,@{[DWORD($GPR1)]} +- je .Laes_192_${rndsuffix} ++ je .Laes_192_${label_suffix} + cmp \$13,@{[DWORD($GPR1)]} +- je .Laes_256_${rndsuffix} +- jmp .Lexit_aes_${rndsuffix} ++ je .Laes_256_${label_suffix} ++ jmp .Lexit_aes_${label_suffix} + ___ + for my $keylen (sort keys %aes_rounds) { + my $nr = $aes_rounds{$keylen}; + $code .= <<___; + .align 32 +-.Laes_${keylen}_${rndsuffix}: ++.Laes_${keylen}_${label_suffix}: + ___ + $code .= "vpxorq `16*0`($AES_KEY),$XMM0, $XMM0\n\n"; + for (my $i = 1; $i <= $nr; $i++) { +@@ -3364,10 +3358,10 @@ ___ + } + $code .= <<___; + vaesenclast 
`16*($nr+1)`($AES_KEY),$XMM0,$XMM0 +- jmp .Lexit_aes_${rndsuffix} ++ jmp .Lexit_aes_${label_suffix} + ___ + } +- $code .= ".Lexit_aes_${rndsuffix}:\n\n"; ++ $code .= ".Lexit_aes_${label_suffix}:\n\n"; + } + + sub CALC_J0 { +@@ -3562,52 +3556,52 @@ sub GCM_ENC_DEC_SMALL { + my $SHUFMASK = $_[29]; # [in] ZMM with BE/LE shuffle mask + my $PBLOCK_LEN = $_[30]; # [in] partial block length + +- my $rndsuffix = &random_string(); ++ my $label_suffix = $label_count++; + + $code .= <<___; + cmp \$8,$NUM_BLOCKS +- je .L_small_initial_num_blocks_is_8_${rndsuffix} +- jl .L_small_initial_num_blocks_is_7_1_${rndsuffix} ++ je .L_small_initial_num_blocks_is_8_${label_suffix} ++ jl .L_small_initial_num_blocks_is_7_1_${label_suffix} + + + cmp \$12,$NUM_BLOCKS +- je .L_small_initial_num_blocks_is_12_${rndsuffix} +- jl .L_small_initial_num_blocks_is_11_9_${rndsuffix} ++ je .L_small_initial_num_blocks_is_12_${label_suffix} ++ jl .L_small_initial_num_blocks_is_11_9_${label_suffix} + + # ;; 16, 15, 14 or 13 + cmp \$16,$NUM_BLOCKS +- je .L_small_initial_num_blocks_is_16_${rndsuffix} ++ je .L_small_initial_num_blocks_is_16_${label_suffix} + cmp \$15,$NUM_BLOCKS +- je .L_small_initial_num_blocks_is_15_${rndsuffix} ++ je .L_small_initial_num_blocks_is_15_${label_suffix} + cmp \$14,$NUM_BLOCKS +- je .L_small_initial_num_blocks_is_14_${rndsuffix} +- jmp .L_small_initial_num_blocks_is_13_${rndsuffix} ++ je .L_small_initial_num_blocks_is_14_${label_suffix} ++ jmp .L_small_initial_num_blocks_is_13_${label_suffix} + +-.L_small_initial_num_blocks_is_11_9_${rndsuffix}: ++.L_small_initial_num_blocks_is_11_9_${label_suffix}: + # ;; 11, 10 or 9 + cmp \$11,$NUM_BLOCKS +- je .L_small_initial_num_blocks_is_11_${rndsuffix} ++ je .L_small_initial_num_blocks_is_11_${label_suffix} + cmp \$10,$NUM_BLOCKS +- je .L_small_initial_num_blocks_is_10_${rndsuffix} +- jmp .L_small_initial_num_blocks_is_9_${rndsuffix} ++ je .L_small_initial_num_blocks_is_10_${label_suffix} ++ jmp 
.L_small_initial_num_blocks_is_9_${label_suffix} + +-.L_small_initial_num_blocks_is_7_1_${rndsuffix}: ++.L_small_initial_num_blocks_is_7_1_${label_suffix}: + cmp \$4,$NUM_BLOCKS +- je .L_small_initial_num_blocks_is_4_${rndsuffix} +- jl .L_small_initial_num_blocks_is_3_1_${rndsuffix} ++ je .L_small_initial_num_blocks_is_4_${label_suffix} ++ jl .L_small_initial_num_blocks_is_3_1_${label_suffix} + # ;; 7, 6 or 5 + cmp \$7,$NUM_BLOCKS +- je .L_small_initial_num_blocks_is_7_${rndsuffix} ++ je .L_small_initial_num_blocks_is_7_${label_suffix} + cmp \$6,$NUM_BLOCKS +- je .L_small_initial_num_blocks_is_6_${rndsuffix} +- jmp .L_small_initial_num_blocks_is_5_${rndsuffix} ++ je .L_small_initial_num_blocks_is_6_${label_suffix} ++ jmp .L_small_initial_num_blocks_is_5_${label_suffix} + +-.L_small_initial_num_blocks_is_3_1_${rndsuffix}: ++.L_small_initial_num_blocks_is_3_1_${label_suffix}: + # ;; 3, 2 or 1 + cmp \$3,$NUM_BLOCKS +- je .L_small_initial_num_blocks_is_3_${rndsuffix} ++ je .L_small_initial_num_blocks_is_3_${label_suffix} + cmp \$2,$NUM_BLOCKS +- je .L_small_initial_num_blocks_is_2_${rndsuffix} ++ je .L_small_initial_num_blocks_is_2_${label_suffix} + + # ;; for $NUM_BLOCKS == 1, just fall through and no 'jmp' needed + +@@ -3616,7 +3610,7 @@ sub GCM_ENC_DEC_SMALL { + ___ + + for (my $num_blocks = 1; $num_blocks <= 16; $num_blocks++) { +- $code .= ".L_small_initial_num_blocks_is_${num_blocks}_${rndsuffix}:\n"; ++ $code .= ".L_small_initial_num_blocks_is_${num_blocks}_${label_suffix}:\n"; + &INITIAL_BLOCKS_PARTIAL( + $AES_KEYS, $GCM128_CTX, $CIPH_PLAIN_OUT, $PLAIN_CIPH_IN, $LENGTH, $DATA_OFFSET, + $num_blocks, $CTR, $HASH_IN_OUT, $ENC_DEC, $ZTMP0, $ZTMP1, +@@ -3625,11 +3619,11 @@ ___ + $ZTMP14, $IA0, $IA1, $MASKREG, $SHUFMASK, $PBLOCK_LEN); + + if ($num_blocks != 16) { +- $code .= "jmp .L_small_initial_blocks_encrypted_${rndsuffix}\n"; ++ $code .= "jmp .L_small_initial_blocks_encrypted_${label_suffix}\n"; + } + } + +- $code .= 
".L_small_initial_blocks_encrypted_${rndsuffix}:\n"; ++ $code .= ".L_small_initial_blocks_encrypted_${label_suffix}:\n"; + } + + # ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +@@ -3710,7 +3704,7 @@ sub GCM_ENC_DEC { + + my $MASKREG = "%k1"; + +- my $rndsuffix = &random_string(); ++ my $label_suffix = $label_count++; + + # ;; reduction every 48 blocks, depth 32 blocks + # ;; @note 48 blocks is the maximum capacity of the stack frame +@@ -3751,7 +3745,7 @@ sub GCM_ENC_DEC { + } else { + $code .= "or $PLAIN_CIPH_LEN,$PLAIN_CIPH_LEN\n"; + } +- $code .= "je .L_enc_dec_done_${rndsuffix}\n"; ++ $code .= "je .L_enc_dec_done_${label_suffix}\n"; + + # Length value from context $CTX_OFFSET_InLen`($GCM128_CTX) is updated in + # 'providers/implementations/ciphers/cipher_aes_gcm_hw_vaes_avx512.inc' +@@ -3778,12 +3772,12 @@ sub GCM_ENC_DEC { + # ;; There may be no more data if it was consumed in the partial block. + $code .= <<___; + sub $DATA_OFFSET,$LENGTH +- je .L_enc_dec_done_${rndsuffix} ++ je .L_enc_dec_done_${label_suffix} + ___ + + $code .= <<___; + cmp \$`(16 * 16)`,$LENGTH +- jbe .L_message_below_equal_16_blocks_${rndsuffix} ++ jbe .L_message_below_equal_16_blocks_${label_suffix} + + vmovdqa64 SHUF_MASK(%rip),$SHUF_MASK + vmovdqa64 ddq_addbe_4444(%rip),$ADDBE_4x4 +@@ -3815,7 +3809,7 @@ ___ + + $code .= <<___; + cmp \$`(32 * 16)`,$LENGTH +- jb .L_message_below_32_blocks_${rndsuffix} ++ jb .L_message_below_32_blocks_${label_suffix} + ___ + + # ;; ==== AES-CTR - next 16 blocks +@@ -3836,13 +3830,13 @@ ___ + sub \$`(32 * 16)`,$LENGTH + + cmp \$`($big_loop_nblocks * 16)`,$LENGTH +- jb .L_no_more_big_nblocks_${rndsuffix} ++ jb .L_no_more_big_nblocks_${label_suffix} + ___ + + # ;; ==== + # ;; ==== AES-CTR + GHASH - 48 blocks loop + # ;; ==== +- $code .= ".L_encrypt_big_nblocks_${rndsuffix}:\n"; ++ $code .= ".L_encrypt_big_nblocks_${label_suffix}:\n"; + + # ;; ==== AES-CTR + GHASH - 16 blocks, start + $aesout_offset = 
($STACK_LOCAL_OFFSET + (32 * 16)); +@@ -3893,15 +3887,15 @@ ___ + add \$`($big_loop_nblocks * 16)`,$DATA_OFFSET + sub \$`($big_loop_nblocks * 16)`,$LENGTH + cmp \$`($big_loop_nblocks * 16)`,$LENGTH +- jae .L_encrypt_big_nblocks_${rndsuffix} ++ jae .L_encrypt_big_nblocks_${label_suffix} + +-.L_no_more_big_nblocks_${rndsuffix}: ++.L_no_more_big_nblocks_${label_suffix}: + + cmp \$`(32 * 16)`,$LENGTH +- jae .L_encrypt_32_blocks_${rndsuffix} ++ jae .L_encrypt_32_blocks_${label_suffix} + + cmp \$`(16 * 16)`,$LENGTH +- jae .L_encrypt_16_blocks_${rndsuffix} ++ jae .L_encrypt_16_blocks_${label_suffix} + ___ + + # ;; ===================================================== +@@ -3909,7 +3903,7 @@ ___ + # ;; ==== GHASH 1 x 16 blocks + # ;; ==== GHASH 1 x 16 blocks (reduction) & encrypt N blocks + # ;; ==== then GHASH N blocks +- $code .= ".L_encrypt_0_blocks_ghash_32_${rndsuffix}:\n"; ++ $code .= ".L_encrypt_0_blocks_ghash_32_${label_suffix}:\n"; + + # ;; calculate offset to the right hash key + $code .= <<___; +@@ -3937,7 +3931,7 @@ ___ + $IA0, $IA5, $MASKREG, $PBLOCK_LEN); + + $code .= "vpshufb @{[XWORD($SHUF_MASK)]},$CTR_BLOCKx,$CTR_BLOCKx\n"; +- $code .= "jmp .L_ghash_done_${rndsuffix}\n"; ++ $code .= "jmp .L_ghash_done_${label_suffix}\n"; + + # ;; ===================================================== + # ;; ===================================================== +@@ -3946,7 +3940,7 @@ ___ + # ;; ==== GHASH 1 x 16 blocks (reduction) + # ;; ==== GHASH 1 x 16 blocks (reduction) & encrypt N blocks + # ;; ==== then GHASH N blocks +- $code .= ".L_encrypt_32_blocks_${rndsuffix}:\n"; ++ $code .= ".L_encrypt_32_blocks_${label_suffix}:\n"; + + # ;; ==== AES-CTR + GHASH - 16 blocks, start + $aesout_offset = ($STACK_LOCAL_OFFSET + (32 * 16)); +@@ -4007,7 +4001,7 @@ ___ + $IA0, $IA5, $MASKREG, $PBLOCK_LEN); + + $code .= "vpshufb @{[XWORD($SHUF_MASK)]},$CTR_BLOCKx,$CTR_BLOCKx\n"; +- $code .= "jmp .L_ghash_done_${rndsuffix}\n"; ++ $code .= "jmp .L_ghash_done_${label_suffix}\n"; + + # ;; 
===================================================== + # ;; ===================================================== +@@ -4015,7 +4009,7 @@ ___ + # ;; ==== GHASH 1 x 16 blocks + # ;; ==== GHASH 1 x 16 blocks (reduction) & encrypt N blocks + # ;; ==== then GHASH N blocks +- $code .= ".L_encrypt_16_blocks_${rndsuffix}:\n"; ++ $code .= ".L_encrypt_16_blocks_${label_suffix}:\n"; + + # ;; ==== AES-CTR + GHASH - 16 blocks, start + $aesout_offset = ($STACK_LOCAL_OFFSET + (32 * 16)); +@@ -4059,9 +4053,9 @@ ___ + + $code .= "vpshufb @{[XWORD($SHUF_MASK)]},$CTR_BLOCKx,$CTR_BLOCKx\n"; + $code .= <<___; +- jmp .L_ghash_done_${rndsuffix} ++ jmp .L_ghash_done_${label_suffix} + +-.L_message_below_32_blocks_${rndsuffix}: ++.L_message_below_32_blocks_${label_suffix}: + # ;; 32 > number of blocks > 16 + + sub \$`(16 * 16)`,$LENGTH +@@ -4094,9 +4088,9 @@ ___ + + $code .= "vpshufb @{[XWORD($SHUF_MASK)]},$CTR_BLOCKx,$CTR_BLOCKx\n"; + $code .= <<___; +- jmp .L_ghash_done_${rndsuffix} ++ jmp .L_ghash_done_${label_suffix} + +-.L_message_below_equal_16_blocks_${rndsuffix}: ++.L_message_below_equal_16_blocks_${label_suffix}: + # ;; Determine how many blocks to process + # ;; - process one additional block if there is a partial block + mov @{[DWORD($LENGTH)]},@{[DWORD($IA1)]} +@@ -4113,13 +4107,13 @@ ___ + + # ;; fall through to exit + +- $code .= ".L_ghash_done_${rndsuffix}:\n"; ++ $code .= ".L_ghash_done_${label_suffix}:\n"; + + # ;; save the last counter block + $code .= "vmovdqu64 $CTR_BLOCKx,`$CTX_OFFSET_CurCount`($GCM128_CTX)\n"; + $code .= <<___; + vmovdqu64 $AAD_HASHx,`$CTX_OFFSET_AadHash`($GCM128_CTX) +-.L_enc_dec_done_${rndsuffix}: ++.L_enc_dec_done_${label_suffix}: + ___ + } + +@@ -4155,7 +4149,7 @@ sub INITIAL_BLOCKS_16 { + my $B08_11 = $T7; + my $B12_15 = $T8; + +- my $rndsuffix = &random_string(); ++ my $label_suffix = $label_count++; + + my $stack_offset = $BLK_OFFSET; + $code .= <<___; +@@ -4163,13 +4157,13 @@ sub INITIAL_BLOCKS_16 { + # ;; prepare counter blocks + + cmpb 
\$`(256 - 16)`,@{[BYTE($CTR_CHECK)]} +- jae .L_next_16_overflow_${rndsuffix} ++ jae .L_next_16_overflow_${label_suffix} + vpaddd $ADDBE_1234,$CTR,$B00_03 + vpaddd $ADDBE_4x4,$B00_03,$B04_07 + vpaddd $ADDBE_4x4,$B04_07,$B08_11 + vpaddd $ADDBE_4x4,$B08_11,$B12_15 +- jmp .L_next_16_ok_${rndsuffix} +-.L_next_16_overflow_${rndsuffix}: ++ jmp .L_next_16_ok_${label_suffix} ++.L_next_16_overflow_${label_suffix}: + vpshufb $SHUF_MASK,$CTR,$CTR + vmovdqa64 ddq_add_4444(%rip),$B12_15 + vpaddd ddq_add_1234(%rip),$CTR,$B00_03 +@@ -4180,7 +4174,7 @@ sub INITIAL_BLOCKS_16 { + vpshufb $SHUF_MASK,$B04_07,$B04_07 + vpshufb $SHUF_MASK,$B08_11,$B08_11 + vpshufb $SHUF_MASK,$B12_15,$B12_15 +-.L_next_16_ok_${rndsuffix}: ++.L_next_16_ok_${label_suffix}: + vshufi64x2 \$0b11111111,$B12_15,$B12_15,$CTR + addb \$16,@{[BYTE($CTR_CHECK)]} + # ;; === load 16 blocks of data +@@ -4264,7 +4258,7 @@ sub GCM_COMPLETE { + my $GCM128_CTX = $_[0]; + my $PBLOCK_LEN = $_[1]; + +- my $rndsuffix = &random_string(); ++ my $label_suffix = $label_count++; + + $code .= <<___; + vmovdqu @{[HashKeyByIdx(1,$GCM128_CTX)]},%xmm2 +@@ -4276,14 +4270,14 @@ ___ + + # ;; Process the final partial block. + cmp \$0,$PBLOCK_LEN +- je .L_partial_done_${rndsuffix} ++ je .L_partial_done_${label_suffix} + ___ + + # ;GHASH computation for the last <16 Byte block + &GHASH_MUL("%xmm4", "%xmm2", "%xmm0", "%xmm16", "%xmm17"); + + $code .= <<___; +-.L_partial_done_${rndsuffix}: ++.L_partial_done_${label_suffix}: + vmovq `$CTX_OFFSET_InLen`($GCM128_CTX), %xmm5 + vpinsrq \$1, `$CTX_OFFSET_AadLen`($GCM128_CTX), %xmm5, %xmm5 # ; xmm5 = len(A)||len(C) + vpsllq \$3, %xmm5, %xmm5 # ; convert bytes into bits +@@ -4297,7 +4291,7 @@ ___ + vpshufb SHUF_MASK(%rip),%xmm4,%xmm4 # ; perform a 16Byte swap + vpxor %xmm4,%xmm3,%xmm3 + +-.L_return_T_${rndsuffix}: ++.L_return_T_${label_suffix}: + vmovdqu %xmm3,`$CTX_OFFSET_AadHash`($GCM128_CTX) + ___ + }