diff --git a/openssl-3.changes b/openssl-3.changes
index 4eed028..a1607cb 100644
--- a/openssl-3.changes
+++ b/openssl-3.changes
@@ -1,3 +1,10 @@
+-------------------------------------------------------------------
+Thu Jan 11 08:07:48 UTC 2024 - Otto Hollmann
+
+- Security fix: [bsc#1218690, CVE-2023-6129]
+  * POLY1305: Fix vector register clobbering on PowerPC
+  * Add openssl-CVE-2023-6129.patch
+
 -------------------------------------------------------------------
 Thu Dec  7 09:54:17 UTC 2023 - Guillaume GARDET
 
diff --git a/openssl-3.spec b/openssl-3.spec
index 7126567..c15319a 100644
--- a/openssl-3.spec
+++ b/openssl-3.spec
@@ -1,7 +1,7 @@
 #
 # spec file for package openssl-3
 #
-# Copyright (c) 2023 SUSE LLC
+# Copyright (c) 2024 SUSE LLC
 #
 # All modifications and additions to the file contributed by third parties
 # remain the property of their copyright owners, unless otherwise agreed
@@ -55,6 +55,8 @@ Patch10: openssl-Add-Kernel-FIPS-mode-flag-support.patch
 Patch11: openssl-Fix_test_symbol_presence.patch
 # PATCH-FIX-UPSTREAM https://github.com/openssl/openssl/pull/22971
 Patch12: openssl-Enable-BTI-feature-for-md5-on-aarch64.patch
+# PATCH-FIX-UPSTREAM: bsc#1218690 CVE-2023-6129 - POLY1305 MAC implementation corrupts vector registers on PowerPC
+Patch13: openssl-CVE-2023-6129.patch
 BuildRequires: pkgconfig
 BuildRequires: pkgconfig(zlib)
 Requires: libopenssl3 = %{version}-%{release}
@@ -145,6 +147,7 @@ export MACHINE=armv6l
         enable-ec_nistp_64_gcc_128 \
 %endif
         enable-fips \
+        enable-ktls \
         zlib \
         --prefix=%{_prefix} \
         --libdir=%{_lib} \
diff --git a/openssl-CVE-2023-6129.patch b/openssl-CVE-2023-6129.patch
new file mode 100644
index 0000000..c988737
--- /dev/null
+++ b/openssl-CVE-2023-6129.patch
@@ -0,0 +1,109 @@
+From 5b139f95c9a47a55a0c54100f3837b1eee942b04 Mon Sep 17 00:00:00 2001
+From: Rohan McLure
+Date: Thu, 4 Jan 2024 10:25:50 +0100
+Subject: [PATCH] poly1305-ppc.pl: Fix vector register clobbering
+
+Fixes CVE-2023-6129
+
+The POLY1305 MAC (message authentication code) implementation in OpenSSL for
+PowerPC CPUs saves the contents of vector registers in a different order
+than they are restored. Thus the contents of some of these vector registers
+are corrupted when returning to the caller. The vulnerable code is used only
+on newer PowerPC processors supporting the PowerISA 2.07 instructions.
+
+Reviewed-by: Matt Caswell
+Reviewed-by: Richard Levitte
+Reviewed-by: Tomas Mraz
+(Merged from https://github.com/openssl/openssl/pull/23200)
+
+(cherry picked from commit 8d847a3ffd4f0b17ee33962cf69c36224925b34f)
+---
+ crypto/poly1305/asm/poly1305-ppc.pl | 42 ++++++++++++++---------------
+ 1 file changed, 21 insertions(+), 21 deletions(-)
+
+diff --git a/crypto/poly1305/asm/poly1305-ppc.pl b/crypto/poly1305/asm/poly1305-ppc.pl
+index 9f86134d923fb..2e601bb9c24be 100755
+--- a/crypto/poly1305/asm/poly1305-ppc.pl
++++ b/crypto/poly1305/asm/poly1305-ppc.pl
+@@ -744,7 +744,7 @@
+ my $LOCALS= 6*$SIZE_T;
+ my $VSXFRAME = $LOCALS + 6*$SIZE_T;
+ $VSXFRAME += 128;	# local variables
+-$VSXFRAME += 13*16;	# v20-v31 offload
++$VSXFRAME += 12*16;	# v20-v31 offload
+ 
+ my $BIG_ENDIAN = ($flavour !~ /le/) ? 4 : 0;
+ 
+@@ -919,12 +919,12 @@
+ 	addi	r11,r11,32
+ 	stvx	v22,r10,$sp
+ 	addi	r10,r10,32
+-	stvx	v23,r10,$sp
+-	addi	r10,r10,32
+-	stvx	v24,r11,$sp
++	stvx	v23,r11,$sp
+ 	addi	r11,r11,32
+-	stvx	v25,r10,$sp
++	stvx	v24,r10,$sp
+ 	addi	r10,r10,32
++	stvx	v25,r11,$sp
++	addi	r11,r11,32
+ 	stvx	v26,r10,$sp
+ 	addi	r10,r10,32
+ 	stvx	v27,r11,$sp
+@@ -1153,12 +1153,12 @@
+ 	addi	r11,r11,32
+ 	stvx	v22,r10,$sp
+ 	addi	r10,r10,32
+-	stvx	v23,r10,$sp
+-	addi	r10,r10,32
+-	stvx	v24,r11,$sp
++	stvx	v23,r11,$sp
+ 	addi	r11,r11,32
+-	stvx	v25,r10,$sp
++	stvx	v24,r10,$sp
+ 	addi	r10,r10,32
++	stvx	v25,r11,$sp
++	addi	r11,r11,32
+ 	stvx	v26,r10,$sp
+ 	addi	r10,r10,32
+ 	stvx	v27,r11,$sp
+@@ -1899,26 +1899,26 @@
+ 	mtspr	256,r12		# restore vrsave
+ 	lvx	v20,r10,$sp
+ 	addi	r10,r10,32
+-	lvx	v21,r10,$sp
+-	addi	r10,r10,32
+-	lvx	v22,r11,$sp
++	lvx	v21,r11,$sp
+ 	addi	r11,r11,32
+-	lvx	v23,r10,$sp
++	lvx	v22,r10,$sp
+ 	addi	r10,r10,32
+-	lvx	v24,r11,$sp
++	lvx	v23,r11,$sp
+ 	addi	r11,r11,32
+-	lvx	v25,r10,$sp
++	lvx	v24,r10,$sp
+ 	addi	r10,r10,32
+-	lvx	v26,r11,$sp
++	lvx	v25,r11,$sp
+ 	addi	r11,r11,32
+-	lvx	v27,r10,$sp
++	lvx	v26,r10,$sp
+ 	addi	r10,r10,32
+-	lvx	v28,r11,$sp
++	lvx	v27,r11,$sp
+ 	addi	r11,r11,32
+-	lvx	v29,r10,$sp
++	lvx	v28,r10,$sp
+ 	addi	r10,r10,32
+-	lvx	v30,r11,$sp
+-	lvx	v31,r10,$sp
++	lvx	v29,r11,$sp
++	addi	r11,r11,32
++	lvx	v30,r10,$sp
++	lvx	v31,r11,$sp
+ 	$POP	r27,`$VSXFRAME-$SIZE_T*5`($sp)
+ 	$POP	r28,`$VSXFRAME-$SIZE_T*4`($sp)
+ 	$POP	r29,`$VSXFRAME-$SIZE_T*3`($sp)
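
Why the reordering matters: the VSX path offloads v20-v31 through two cursors (r10 and r11, starting 16 bytes apart and each advancing in 32-byte strides), so the epilogue must walk the save area with exactly the same alternation as the prologue. Before the fix the two sequences fell out of step and several vectors were reloaded from neighbouring slots; the frame also reserved 13*16 bytes for what is a 12-register (12*16 = 192 byte) save area. The following C program is only a toy model of that invariant, not OpenSSL code; save_regs, restore_regs and the reg arrays are illustrative names invented here.

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Toy model of the poly1305-ppc.pl register offload: twelve "vector
 * registers" (v20-v31, 16 bytes each) are spilled through two cursors
 * that start 16 bytes apart and leapfrog in 32-byte strides, like r10
 * and r11 in the assembly. */
enum { NREGS = 12, SLOT = 16 };

/* Prologue: even-indexed registers go through the r10-like cursor,
 * odd-indexed ones through the r11-like cursor. */
static void save_regs(uint8_t frame[NREGS * SLOT], uint8_t reg[NREGS][SLOT])
{
    uint8_t *r10 = frame;        /* offsets 0, 32, 64, ...  */
    uint8_t *r11 = frame + SLOT; /* offsets 16, 48, 80, ... */
    for (int i = 0; i < NREGS; i += 2) {
        memcpy(r10, reg[i],     SLOT); r10 += 2 * SLOT;
        memcpy(r11, reg[i + 1], SLOT); r11 += 2 * SLOT;
    }
}

/* Epilogue: the identical alternation, so reg[i] is reloaded from the
 * slot it was stored to.  The CVE-2023-6129 bug was a store sequence
 * whose alternation drifted out of step with the load sequence, so
 * some registers came back holding a neighbour's contents. */
static void restore_regs(uint8_t reg[NREGS][SLOT], uint8_t frame[NREGS * SLOT])
{
    uint8_t *r10 = frame;
    uint8_t *r11 = frame + SLOT;
    for (int i = 0; i < NREGS; i += 2) {
        memcpy(reg[i],     r10, SLOT); r10 += 2 * SLOT;
        memcpy(reg[i + 1], r11, SLOT); r11 += 2 * SLOT;
    }
}

int main(void)
{
    uint8_t before[NREGS][SLOT], after[NREGS][SLOT];
    uint8_t frame[NREGS * SLOT]; /* 12*16 bytes, matching the corrected $VSXFRAME reservation */

    for (int i = 0; i < NREGS; i++)
        memset(before[i], 20 + i, SLOT); /* tag each "register" v20..v31 */

    save_regs(frame, before);
    restore_regs(after, frame);

    /* The round trip must be the identity; anything else silently
     * corrupts the caller's registers, which is the CVE's failure mode. */
    assert(memcmp(before, after, sizeof before) == 0);
    return 0;
}

Compiled and run, the program exits silently; swapping r10 and r11 in just one of the two functions makes the assert fire, a miniature of the mismatch the hunks above repair.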