forked from pool/openssl-1_1
Dominique Leuenberger 2022-12-18 16:22:17 +00:00 committed by Git OBS Bridge
commit 1db74d6acf
7 changed files with 3588 additions and 0 deletions

openssl-1_1-AES-GCM-performance-optimzation-with-stitched-method.patch: file diff suppressed because it is too large.

openssl-1_1-Fix-AES-GCM-on-Power-8-CPUs.patch

@@ -0,0 +1,208 @@
From 9ab6b64ac856157a31a54c0d12207c2338bfa8e2 Mon Sep 17 00:00:00 2001
From: Tomas Mraz <tomas@openssl.org>
Date: Fri, 9 Sep 2022 14:46:24 +0200
Subject: [PATCH] Fix AES-GCM on Power 8 CPUs

Properly fallback to the default implementation on CPUs
missing necessary instructions.

Fixes #19163

Reviewed-by: Dmitry Belyavskiy <beldmit@gmail.com>
Reviewed-by: Paul Dale <pauli@openssl.org>
(Merged from https://github.com/openssl/openssl/pull/19182)
---
crypto/evp/e_aes.c | 146 ++++++++++++++++++++++++++---------------------------
1 file changed, 74 insertions(+), 72 deletions(-)
--- a/crypto/evp/e_aes.c
+++ b/crypto/evp/e_aes.c
@@ -181,30 +181,16 @@ static void ctr64_inc(unsigned char *cou
# define PPC_AES_GCM_CAPABLE (OPENSSL_ppccap_P & PPC_MADD300)
# define AES_GCM_ENC_BYTES 128
# define AES_GCM_DEC_BYTES 128
-# if PPC_AES_GCM_CAPABLE
size_t ppc_aes_gcm_encrypt(const unsigned char *in, unsigned char *out,
size_t len, const void *key, unsigned char ivec[16],
u64 *Xi);
size_t ppc_aes_gcm_decrypt(const unsigned char *in, unsigned char *out,
size_t len, const void *key, unsigned char ivec[16],
u64 *Xi);
-size_t ppc_aes_gcm_encrypt_wrap(const unsigned char *in, unsigned char *out,
- size_t len, const void *key,
- unsigned char ivec[16], u64 *Xi);
-size_t ppc_aes_gcm_decrypt_wrap(const unsigned char *in, unsigned char *out,
- size_t len, const void *key,
- unsigned char ivec[16], u64 *Xi);
-# define AES_gcm_encrypt ppc_aes_gcm_encrypt_wrap
-# define AES_gcm_decrypt ppc_aes_gcm_decrypt_wrap
-# define AES_GCM_ASM(gctx) ((gctx)->ctr==aes_p8_ctr32_encrypt_blocks && \
- (gctx)->gcm.ghash==gcm_ghash_p8)
+# define AES_GCM_ASM_PPC(gctx) ((gctx)->ctr==aes_p8_ctr32_encrypt_blocks && \
+ (gctx)->gcm.ghash==gcm_ghash_p8)
void gcm_ghash_p8(u64 Xi[2],const u128 Htable[16],const u8 *inp, size_t len);
-extern size_t ppc_aes_gcm_encrypt(const unsigned char *in, unsigned char *out, size_t len,
- const void *key, unsigned char ivec[16], u64 *Xi);
-extern size_t ppc_aes_gcm_decrypt(const unsigned char *in, unsigned char *out, size_t len,
- const void *key, unsigned char ivec[16], u64 *Xi);
-
static inline u32 UTO32(unsigned char *buf)
{
return ((u32) buf[0] << 24) | ((u32) buf[1] << 16) | ((u32) buf[2] << 8) | ((u32) buf[3]);
@@ -223,62 +209,6 @@ static inline u32 add32TOU(unsigned char
return r;
}
-static size_t aes_p10_gcm_crypt(const unsigned char *in, unsigned char *out, size_t len,
- const void *key, unsigned char ivec[16], u64 *Xi, int encrypt)
-{
- int s = 0;
- int ndone = 0;
- int ctr_reset = 0;
- u64 blocks_unused;
- u64 nb = len / 16;
- u64 next_ctr = 0;
- unsigned char ctr_saved[12];
-
- memcpy(ctr_saved, ivec, 12);
-
- while (nb) {
- blocks_unused = (u64) 0xffffffffU + 1 - (u64) UTO32 (ivec + 12);
- if (nb > blocks_unused) {
- len = blocks_unused * 16;
- nb -= blocks_unused;
- next_ctr = blocks_unused;
- ctr_reset = 1;
- } else {
- len = nb * 16;
- next_ctr = nb;
- nb = 0;
- }
-
- s = encrypt ? ppc_aes_gcm_encrypt(in, out, len, key, ivec, Xi)
- : ppc_aes_gcm_decrypt(in, out, len, key, ivec, Xi);
-
- /* add counter to ivec */
- add32TOU(ivec + 12, (u32) next_ctr);
- if (ctr_reset) {
- ctr_reset = 0;
- in += len;
- out += len;
- }
- memcpy(ivec, ctr_saved, 12);
- ndone += s;
- }
-
- return ndone;
-}
-
-size_t ppc_aes_gcm_encrypt_wrap(const unsigned char *in, unsigned char *out, size_t len,
- const void *key, unsigned char ivec[16], u64 *Xi)
-{
- return aes_p10_gcm_crypt(in, out, len, key, ivec, Xi, 1);
-}
-
-size_t ppc_aes_gcm_decrypt_wrap(const unsigned char *in, unsigned char *out, size_t len,
- const void *key, unsigned char ivec[16], u64 *Xi)
-{
- return aes_p10_gcm_crypt(in, out, len, key, ivec, Xi, 0);
-}
-
-# endif
#endif
#if defined(OPENSSL_CPUID_OBJ) && ( \
@@ -3294,6 +3224,51 @@ static int aes_gcm_tls_cipher(EVP_CIPHER
return rv;
}
+#if defined(OPENSSL_CPUID_OBJ) && (defined(__powerpc__) || defined(__ppc__) || defined(_ARCH_PPC))
+static size_t ppc_aes_gcm_crypt(const unsigned char *in, unsigned char *out, size_t len,
+ const void *key, unsigned char ivec[16], u64 *Xi, int encrypt)
+{
+ int s = 0;
+ int ndone = 0;
+ int ctr_reset = 0;
+ u64 blocks_unused;
+ u64 nb = len / 16;
+ u64 next_ctr = 0;
+ unsigned char ctr_saved[12];
+
+ memcpy(ctr_saved, ivec, 12);
+
+ while (nb) {
+ blocks_unused = (u64) 0xffffffffU + 1 - (u64) UTO32 (ivec + 12);
+ if (nb > blocks_unused) {
+ len = blocks_unused * 16;
+ nb -= blocks_unused;
+ next_ctr = blocks_unused;
+ ctr_reset = 1;
+ } else {
+ len = nb * 16;
+ next_ctr = nb;
+ nb = 0;
+ }
+
+ s = encrypt ? ppc_aes_gcm_encrypt(in, out, len, key, ivec, Xi)
+ : ppc_aes_gcm_decrypt(in, out, len, key, ivec, Xi);
+
+ /* add counter to ivec */
+ add32TOU(ivec + 12, (u32) next_ctr);
+ if (ctr_reset) {
+ ctr_reset = 0;
+ in += len;
+ out += len;
+ }
+ memcpy(ivec, ctr_saved, 12);
+ ndone += s;
+ }
+
+ return ndone;
+}
+#endif
+
static int aes_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
const unsigned char *in, size_t len)
{
@@ -3325,6 +3300,20 @@ static int aes_gcm_cipher(EVP_CIPHER_CTX
out + res, len - res,
gctx->gcm.key, gctx->gcm.Yi.c,
gctx->gcm.Xi.u);
+
+ gctx->gcm.len.u[1] += bulk;
+ bulk += res;
+ }
+#elif defined(AES_GCM_ASM_PPC) && defined(OPENSSL_CPUID_OBJ) && (defined(__powerpc__) || defined(__ppc__) || defined(_ARCH_PPC))
+ if (PPC_AES_GCM_CAPABLE && len >= AES_GCM_ENC_BYTES && AES_GCM_ASM_PPC(gctx)) {
+ size_t res = (16 - gctx->gcm.mres) % 16;
+
+ if (CRYPTO_gcm128_encrypt(&gctx->gcm, in, out, res))
+ return -1;
+
+ bulk = ppc_aes_gcm_crypt(in + res, out + res, len - res,
+ gctx->gcm.key,
+ gctx->gcm.Yi.c, gctx->gcm.Xi.u, 1);
gctx->gcm.len.u[1] += bulk;
bulk += res;
}
@@ -3372,6 +3361,19 @@ static int aes_gcm_cipher(EVP_CIPHER_CTX
gctx->gcm.len.u[1] += bulk;
bulk += res;
}
+#elif defined(AES_GCM_ASM_PPC) && defined(OPENSSL_CPUID_OBJ) && (defined(__powerpc__) || defined(__ppc__) || defined(_ARCH_PPC))
+ if (PPC_AES_GCM_CAPABLE && len >= AES_GCM_DEC_BYTES && AES_GCM_ASM_PPC(gctx)) {
+ size_t res = (16 - gctx->gcm.mres) % 16;
+
+ if (CRYPTO_gcm128_decrypt(&gctx->gcm, in, out, res))
+ return -1;
+
+ bulk = ppc_aes_gcm_crypt(in + res, out + res, len - res,
+ gctx->gcm.key,
+ gctx->gcm.Yi.c, gctx->gcm.Xi.u, 0);
+ gctx->gcm.len.u[1] += bulk;
+ bulk += res;
+ }
#endif
if (CRYPTO_gcm128_decrypt_ctr32(&gctx->gcm,
in + bulk,
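For context on the guard this patch restores: PPC_AES_GCM_CAPABLE is a run-time test of OPENSSL_ppccap_P, and the stitched assembler path must only be taken when both the capability bit and the context's function pointers match, with every other case falling through to the generic CRYPTO_gcm128_* code. A minimal, self-contained C sketch of that dispatch pattern follows; every name in it (cpu_caps, CAP_MADD300, bulk_power10, and so on) is invented for illustration and is not OpenSSL's.

/* dispatch_sketch.c: illustration only, hypothetical names, not OpenSSL code. */
#include <stddef.h>
#include <stdio.h>

#define CAP_MADD300 (1u << 0)            /* stand-in capability bit */

static unsigned int cpu_caps;            /* would be set by a startup CPU probe */

typedef size_t (*bulk_fn)(const unsigned char *, unsigned char *, size_t);

/* Stand-in for the portable CRYPTO_gcm128_* fallback. */
static size_t bulk_generic(const unsigned char *in, unsigned char *out,
                           size_t len)
{
    for (size_t i = 0; i < len; i++)
        out[i] = in[i];
    return len;
}

/* Stand-in for the stitched POWER assembler routine. */
static size_t bulk_power10(const unsigned char *in, unsigned char *out,
                           size_t len)
{
    for (size_t i = 0; i < len; i++)
        out[i] = in[i];
    return len;
}

static size_t gcm_crypt(const unsigned char *in, unsigned char *out,
                        size_t len, bulk_fn ctx_bulk)
{
    /* Fast path only when the CPU advertises the instructions AND the
     * context was wired up with the matching routine; anything else
     * falls back, which is what a Power 8 machine needs. */
    if ((cpu_caps & CAP_MADD300) && ctx_bulk == bulk_power10)
        return bulk_power10(in, out, len);
    return bulk_generic(in, out, len);
}

int main(void)
{
    unsigned char in[16] = {0}, out[16];

    cpu_caps = 0;   /* e.g. a Power 8: the capability bit is absent */
    printf("%zu bytes handled via the fallback\n",
           gcm_crypt(in, out, sizeof(in), bulk_power10));
    return 0;
}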

openssl-1_1-Fixed-conditional-statement-testing-64-and-256-bytes.patch

@@ -0,0 +1,103 @@
From 7e1f3ffcc5bc15fb9a12b9e3bb202f544c6ed5aa Mon Sep 17 00:00:00 2001
From: Danny Tsen <dtsen@us.ibm.com>
Date: Wed, 23 Feb 2022 13:18:35 -0600
Subject: [PATCH] Fixed conditional statement testing 64 and 256 bytes

Reviewed-by: Paul Dale <pauli@openssl.org>
Reviewed-by: Tomas Mraz <tomas@openssl.org>
(Merged from https://github.com/openssl/openssl/pull/17760)
---
crypto/chacha/asm/chachap10-ppc.pl | 68 -------------------------------------
1 file changed, 1 insertion(+), 67 deletions(-)
--- a/crypto/chacha/asm/chachap10-ppc.pl
+++ b/crypto/chacha/asm/chachap10-ppc.pl
@@ -101,29 +101,6 @@ my ($x00,$x10,$x20,$x30) = (0, map("r$_"
my $FRAME=$LOCALS+64+7*16; # 7*16 is for v26-v31 offload
-sub VSX_lane_ROUND_1x {
-my $a=@_[0];
-my $b=@_[1];
-my $c=@_[2];
-my $d=@_[3];
-my $odd=@_[4];
- vadduwm ($a,$a,$b);
- vxor ($d,$d,$a);
- vrlw ($d,$d,$sixteen);
- vadduwm ($c,$c,$d);
- vxor ($b,$b,$c);
- vrlw ($b,$b,$twelve);
- vadduwm ($a,$a,$b);
- vxor ($d,$d,$a);
- vrlw ($d,$d,$eight);
- vadduwm ($c,$c,$d);
- vxor ($b,$b,$c);
- vrlw ($b,$b,$seven);
- xxsldwi ($c,$c,$c,2);
- xxsldwi ($b,$b,$b,$odd?3:1);
- xxsldwi ($d,$d,$d,$odd?1:3);
-}
-
sub VSX_lane_ROUND_4x {
my ($a0,$b0,$c0,$d0)=@_;
@@ -192,7 +169,7 @@ $code.=<<___;
.globl .ChaCha20_ctr32_vsx_p10
.align 5
.ChaCha20_ctr32_vsx_p10:
- ${UCMP}i $len,256
+ ${UCMP}i $len,255
bgt ChaCha20_ctr32_vsx_8x
$STU $sp,-$FRAME($sp)
mflr r0
@@ -268,49 +245,6 @@ Loop_outer_vsx:
vspltisw $eight,8
vspltisw $seven,7
- ${UCMP}i $len,64
- bgt Loop_vsx_4x
-
- vmr $xa0,@K[0]
- vmr $xb0,@K[1]
- vmr $xc0,@K[2]
- vmr $xd0,@K[3]
-
-Loop_vsx_1x:
-___
- VSX_lane_ROUND_1x($xa0, $xb0, $xc0,$xd0,0);
- VSX_lane_ROUND_1x($xa0, $xb0, $xc0,$xd0,1);
-
-$code.=<<___;
-
- bdnz Loop_vsx_1x
-
- vadduwm $xa0, $xa0, @K[0]
- vadduwm $xb0, $xb0, @K[1]
- vadduwm $xc0, $xc0, @K[2]
- vadduwm $xd0, $xd0, @K[3]
- ${UCMP}i $len,0x40
- blt Ltail_vsx
-
- lvx_4w $xt0,$x00, $inp
- lvx_4w $xt1,$x10, $inp
- lvx_4w $xt2,$x20, $inp
- lvx_4w $xt3,$x30, $inp
-
- vxor $xa0,$xa0,$xt0
- vxor $xb0,$xb0,$xt1
- vxor $xc0,$xc0,$xt2
- vxor $xd0,$xd0,$xt3
-
- stvx_4w $xa0,$x00,$out
- stvx_4w $xb0,$x10,$out
- addi $inp,$inp,0x40
- stvx_4w $xc0,$x20,$out
- subi $len,$len,0x40
- stvx_4w $xd0,$x30,$out
- addi $out,$out,0x40
- beq Ldone_vsx
-
Loop_vsx_4x:
___
foreach (&VSX_lane_ROUND_4x(0, 4, 8,12)) { eval; }
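The functional change above is a single constant: the length comparison drops from 256 to 255. ${UCMP}i followed by bgt branches only when len is strictly greater than the constant, so comparing against 256 left an exactly 256-byte input on the narrower path, while comparing against 255 sends every len >= 256 to the 8x routine; inputs behind the removed 64-byte test now go through the 4x loop as well. A hypothetical C rendering of the before/after condition, for illustration only:

/* boundary_sketch.c: the real code is Perl-generated POWER assembly
 * ("cmpldi len,N; bgt"); this only mirrors the comparison logic. */
#include <stdio.h>

/* Before the fix: "greater than 256" misses an exactly 256-byte input. */
static const char *pick_path_old(unsigned long len)
{
    return (len > 256) ? "8x path" : "smaller path";
}

/* After the fix: "greater than 255" routes len >= 256 to the 8x code. */
static const char *pick_path_new(unsigned long len)
{
    return (len > 255) ? "8x path" : "4x path";
}

int main(void)
{
    unsigned long len = 256;   /* the boundary case the patch corrects */
    printf("len=%lu: old=%s, new=%s\n",
           len, pick_path_old(len), pick_path_new(len));
    return 0;
}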

openssl-1_1-Fixed-counter-overflow.patch

@@ -0,0 +1,136 @@
From 345c99b6654b8313c792d54f829943068911ddbd Mon Sep 17 00:00:00 2001
From: Danny Tsen <dtsen@us.ibm.com>
Date: Thu, 27 Jan 2022 18:49:59 -0600
Subject: [PATCH] Fixed counter overflow

Reviewed-by: Tomas Mraz <tomas@openssl.org>
Reviewed-by: Paul Dale <pauli@openssl.org>
(Merged from https://github.com/openssl/openssl/pull/17607)
---
crypto/evp/e_aes.c | 101 +++++++++++++++++++++++++++++++++++++---
crypto/modes/asm/aes-gcm-ppc.pl | 1
2 files changed, 94 insertions(+), 8 deletions(-)
--- a/crypto/evp/e_aes.c
+++ b/crypto/evp/e_aes.c
@@ -181,16 +181,103 @@ static void ctr64_inc(unsigned char *cou
# define PPC_AES_GCM_CAPABLE (OPENSSL_ppccap_P & PPC_MADD300)
# define AES_GCM_ENC_BYTES 128
# define AES_GCM_DEC_BYTES 128
-size_t ppc_aes_gcm_encrypt(const unsigned char *in, unsigned char *out, size_t len,
- const void *key, unsigned char ivec[16], u64 *Xi);
-size_t ppc_aes_gcm_decrypt(const unsigned char *in, unsigned char *out, size_t len,
- const void *key, unsigned char ivec[16], u64 *Xi);
-void gcm_ghash_p8(u64 Xi[2],const u128 Htable[16],const u8 *inp, size_t len);
# if PPC_AES_GCM_CAPABLE
-# define AES_gcm_encrypt ppc_aes_gcm_encrypt
-# define AES_gcm_decrypt ppc_aes_gcm_decrypt
+size_t ppc_aes_gcm_encrypt(const unsigned char *in, unsigned char *out,
+ size_t len, const void *key, unsigned char ivec[16],
+ u64 *Xi);
+size_t ppc_aes_gcm_decrypt(const unsigned char *in, unsigned char *out,
+ size_t len, const void *key, unsigned char ivec[16],
+ u64 *Xi);
+size_t ppc_aes_gcm_encrypt_wrap(const unsigned char *in, unsigned char *out,
+ size_t len, const void *key,
+ unsigned char ivec[16], u64 *Xi);
+size_t ppc_aes_gcm_decrypt_wrap(const unsigned char *in, unsigned char *out,
+ size_t len, const void *key,
+ unsigned char ivec[16], u64 *Xi);
+# define AES_gcm_encrypt ppc_aes_gcm_encrypt_wrap
+# define AES_gcm_decrypt ppc_aes_gcm_decrypt_wrap
# define AES_GCM_ASM(gctx) ((gctx)->ctr==aes_p8_ctr32_encrypt_blocks && \
(gctx)->gcm.ghash==gcm_ghash_p8)
+void gcm_ghash_p8(u64 Xi[2],const u128 Htable[16],const u8 *inp, size_t len);
+
+extern size_t ppc_aes_gcm_encrypt(const unsigned char *in, unsigned char *out, size_t len,
+ const void *key, unsigned char ivec[16], u64 *Xi);
+extern size_t ppc_aes_gcm_decrypt(const unsigned char *in, unsigned char *out, size_t len,
+ const void *key, unsigned char ivec[16], u64 *Xi);
+
+static inline u32 UTO32(unsigned char *buf)
+{
+ return ((u32) buf[0] << 24) | ((u32) buf[1] << 16) | ((u32) buf[2] << 8) | ((u32) buf[3]);
+}
+
+static inline u32 add32TOU(unsigned char buf[4], u32 n)
+{
+ u32 r;
+
+ r = UTO32(buf);
+ r += n;
+ buf[0] = (unsigned char) (r >> 24) & 0xFF;
+ buf[1] = (unsigned char) (r >> 16) & 0xFF;
+ buf[2] = (unsigned char) (r >> 8) & 0xFF;
+ buf[3] = (unsigned char) r & 0xFF;
+ return r;
+}
+
+static size_t aes_p10_gcm_crypt(const unsigned char *in, unsigned char *out, size_t len,
+ const void *key, unsigned char ivec[16], u64 *Xi, int encrypt)
+{
+ int s = 0;
+ int ndone = 0;
+ int ctr_reset = 0;
+ u64 blocks_unused;
+ u64 nb = len / 16;
+ u64 next_ctr = 0;
+ unsigned char ctr_saved[12];
+
+ memcpy(ctr_saved, ivec, 12);
+
+ while (nb) {
+ blocks_unused = (u64) 0xffffffffU + 1 - (u64) UTO32 (ivec + 12);
+ if (nb > blocks_unused) {
+ len = blocks_unused * 16;
+ nb -= blocks_unused;
+ next_ctr = blocks_unused;
+ ctr_reset = 1;
+ } else {
+ len = nb * 16;
+ next_ctr = nb;
+ nb = 0;
+ }
+
+ s = encrypt ? ppc_aes_gcm_encrypt(in, out, len, key, ivec, Xi)
+ : ppc_aes_gcm_decrypt(in, out, len, key, ivec, Xi);
+
+ /* add counter to ivec */
+ add32TOU(ivec + 12, (u32) next_ctr);
+ if (ctr_reset) {
+ ctr_reset = 0;
+ in += len;
+ out += len;
+ }
+ memcpy(ivec, ctr_saved, 12);
+ ndone += s;
+ }
+
+ return ndone;
+}
+
+size_t ppc_aes_gcm_encrypt_wrap(const unsigned char *in, unsigned char *out, size_t len,
+ const void *key, unsigned char ivec[16], u64 *Xi)
+{
+ return aes_p10_gcm_crypt(in, out, len, key, ivec, Xi, 1);
+}
+
+size_t ppc_aes_gcm_decrypt_wrap(const unsigned char *in, unsigned char *out, size_t len,
+ const void *key, unsigned char ivec[16], u64 *Xi)
+{
+ return aes_p10_gcm_crypt(in, out, len, key, ivec, Xi, 0);
+}
+
# endif
#endif
--- a/crypto/modes/asm/aes-gcm-ppc.pl
+++ b/crypto/modes/asm/aes-gcm-ppc.pl
@@ -81,7 +81,6 @@ open STDOUT,"| $^X $xlate $flavour \"$ou
$code=<<___;
.machine "any"
-.abiversion 2
.text
# 4x loops
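The overflow being fixed: GCM increments only the low 32 bits of the counter block, so one assembler call spanning the 2^32-block boundary would wrap mid-buffer. The aes_p10_gcm_crypt wrapper above therefore caps each call at blocks_unused, the number of blocks left before the wrap, advances the counter word itself with add32TOU, and restores the saved 12-byte IV prefix on every iteration. The standalone C example below walks the same split arithmetic with made-up values; it is an illustration, not OpenSSL code.

/* ctr_split_sketch.c: same split arithmetic as aes_p10_gcm_crypt,
 * with invented inputs. */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uint64_t nb  = 10;              /* 16-byte blocks still to process */
    uint32_t ctr = 0xfffffffdU;     /* low 32-bit word of the GCM counter */

    while (nb) {
        /* Blocks that fit before the 32-bit counter word wraps to zero. */
        uint64_t blocks_unused = (uint64_t)0xffffffffU + 1 - ctr;
        uint64_t chunk = nb > blocks_unused ? blocks_unused : nb;

        printf("assembler call: %" PRIu64 " blocks at ctr=0x%08" PRIx32 "\n",
               chunk, ctr);

        ctr += (uint32_t)chunk;     /* wraps to 0 exactly at the boundary */
        nb  -= chunk;
        /* The real wrapper also restores the 12-byte IV prefix here. */
    }
    /* Prints: 3 blocks at 0xfffffffd, then 7 blocks at 0x00000000. */
    return 0;
}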

openssl-1_1-chacha20-performance-optimizations-for-ppc64le-with-.patch: file diff suppressed because it is too large.

openssl-1_1.changes

@@ -1,3 +1,13 @@
-------------------------------------------------------------------
Wed Dec 14 09:04:40 UTC 2022 - Otto Hollmann <otto.hollmann@suse.com>

- POWER10 performance enhancements for cryptography [jsc#PED-512]
* openssl-1_1-AES-GCM-performance-optimzation-with-stitched-method.patch
* openssl-1_1-Fixed-counter-overflow.patch
* openssl-1_1-chacha20-performance-optimizations-for-ppc64le-with-.patch
* openssl-1_1-Fixed-conditional-statement-testing-64-and-256-bytes.patch
* openssl-1_1-Fix-AES-GCM-on-Power-8-CPUs.patch

-------------------------------------------------------------------
Wed Nov 2 12:00:40 UTC 2022 - Otto Hollmann <otto.hollmann@suse.com>

openssl-1_1.spec

@@ -123,6 +123,14 @@ Patch72: openssl-1_1-Optimize-AES-GCM-uarchs.patch
Patch73: openssl-1_1-FIPS-fix-error-reason-codes.patch
#PATCH-FIX-SUSE bsc#1180995 Default to RFC7919 groups in FIPS mode
Patch74: openssl-1_1-paramgen-default_to_rfc7919.patch
# PATCH-FIX-UPSTREAM jsc#PED-512
# POWER10 performance enhancements for cryptography
Patch75: openssl-1_1-AES-GCM-performance-optimzation-with-stitched-method.patch
Patch76: openssl-1_1-Fixed-counter-overflow.patch
Patch77: openssl-1_1-chacha20-performance-optimizations-for-ppc64le-with-.patch
Patch78: openssl-1_1-Fixed-conditional-statement-testing-64-and-256-bytes.patch
Patch79: openssl-1_1-Fix-AES-GCM-on-Power-8-CPUs.patch
Requires: libopenssl1_1 = %{version}-%{release}
BuildRequires: pkgconfig
BuildRequires: pkgconfig(zlib)