From 050d26383d4e264966fb83428e72d5d48f402d35 Mon Sep 17 00:00:00 2001
From: Rohan McLure <rmclure@linux.ibm.com>
Date: Thu, 4 Jan 2024 10:25:50 +0100
Subject: [PATCH] poly1305-ppc.pl: Fix vector register clobbering

Fixes CVE-2023-6129

The POLY1305 MAC (message authentication code) implementation in OpenSSL for
PowerPC CPUs saves the contents of vector registers in a different order
than they are restored. Thus the contents of some of these vector registers
are corrupted when returning to the caller. The vulnerable code is used only
on newer PowerPC processors supporting the PowerISA 2.07 instructions.

Reviewed-by: Matt Caswell <matt@openssl.org>
Reviewed-by: Richard Levitte <levitte@openssl.org>
Reviewed-by: Tomas Mraz <tomas@openssl.org>
(Merged from https://github.com/openssl/openssl/pull/23200)

(cherry picked from commit 8d847a3ffd4f0b17ee33962cf69c36224925b34f)
---
 crypto/poly1305/asm/poly1305-ppc.pl | 42 ++++++++++++++---------------
1 file changed, 21 insertions(+), 21 deletions(-)
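
The clobbering mechanism is worth spelling out. The VSX path spills the
twelve non-volatile vector registers v20-v31 through two index registers,
r10 and r11, which leapfrog each other in 32-byte strides so that each
vector register owns one 16-byte slot; the lvx walk on exit must use the
two pointers in exactly the same sequence as the stvx walk on entry, or a
register is reloaded from a neighbour's slot. The first hunk below also
shrinks the offload area to 12*16 bytes, since twelve registers need only
twelve slots. The C sketch that follows is an illustration, not OpenSSL
code: it replays the pre-fix lvx sequence (the "-" lines of the last hunk)
against a strictly alternating slot layout in which v(20+i) lives at
offset 16*i, assuming r11 starts 16 bytes above r10, and reports which
registers would come back from the wrong slot. In the pre-fix code the
stvx walk deviated from strict alternation as well, so the registers
actually corrupted were those where the two walks disagreed.

#include <stdio.h>

int main(void)
{
    /* Pointer used by each pre-fix lvx, read off the "-" lines of the
     * last hunk: 0 = r10, 1 = r11.  Each pointer advances by 32 bytes
     * after every use, mirroring the addi instructions. */
    static const int old_walk[12] = {0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0};
    int r10 = 0, r11 = 16;      /* assumed starting offsets, 16 apart */

    for (int i = 0; i < 12; i++) {
        int saved = 16 * i;     /* strictly alternating save layout */
        int loaded;
        if (old_walk[i] == 0) { loaded = r10; r10 += 32; }
        else                  { loaded = r11; r11 += 32; }
        printf("v%d: saved at +%3d, pre-fix restore reads +%3d%s\n",
               20 + i, saved, loaded,
               saved == loaded ? "" : "  <- wrong slot");
    }
    return 0;
}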
diff --git a/crypto/poly1305/asm/poly1305-ppc.pl b/crypto/poly1305/asm/poly1305-ppc.pl
index 9f86134d923fb..2e601bb9c24be 100755
--- a/crypto/poly1305/asm/poly1305-ppc.pl
+++ b/crypto/poly1305/asm/poly1305-ppc.pl
@@ -744,7 +744,7 @@
my $LOCALS= 6*$SIZE_T;
my $VSXFRAME = $LOCALS + 6*$SIZE_T;
$VSXFRAME += 128; # local variables
- $VSXFRAME += 13*16; # v20-v31 offload
+ $VSXFRAME += 12*16; # v20-v31 offload

my $BIG_ENDIAN = ($flavour !~ /le/) ? 4 : 0;

@@ -919,12 +919,12 @@
addi r11,r11,32
stvx v22,r10,$sp
addi r10,r10,32
- stvx v23,r10,$sp
- addi r10,r10,32
- stvx v24,r11,$sp
+ stvx v23,r11,$sp
addi r11,r11,32
- stvx v25,r10,$sp
+ stvx v24,r10,$sp
addi r10,r10,32
+ stvx v25,r11,$sp
+ addi r11,r11,32
stvx v26,r10,$sp
addi r10,r10,32
stvx v27,r11,$sp
@@ -1153,12 +1153,12 @@
addi r11,r11,32
stvx v22,r10,$sp
addi r10,r10,32
- stvx v23,r10,$sp
- addi r10,r10,32
- stvx v24,r11,$sp
+ stvx v23,r11,$sp
addi r11,r11,32
- stvx v25,r10,$sp
+ stvx v24,r10,$sp
addi r10,r10,32
+ stvx v25,r11,$sp
+ addi r11,r11,32
stvx v26,r10,$sp
addi r10,r10,32
stvx v27,r11,$sp
@@ -1899,26 +1899,26 @@
mtspr 256,r12 # restore vrsave
lvx v20,r10,$sp
addi r10,r10,32
- lvx v21,r10,$sp
- addi r10,r10,32
- lvx v22,r11,$sp
+ lvx v21,r11,$sp
addi r11,r11,32
- lvx v23,r10,$sp
+ lvx v22,r10,$sp
addi r10,r10,32
- lvx v24,r11,$sp
+ lvx v23,r11,$sp
addi r11,r11,32
- lvx v25,r10,$sp
+ lvx v24,r10,$sp
addi r10,r10,32
- lvx v26,r11,$sp
+ lvx v25,r11,$sp
addi r11,r11,32
- lvx v27,r10,$sp
+ lvx v26,r10,$sp
addi r10,r10,32
- lvx v28,r11,$sp
+ lvx v27,r11,$sp
addi r11,r11,32
- lvx v29,r10,$sp
+ lvx v28,r10,$sp
addi r10,r10,32
- lvx v30,r11,$sp
- lvx v31,r10,$sp
+ lvx v29,r11,$sp
+ addi r11,r11,32
+ lvx v30,r10,$sp
+ lvx v31,r11,$sp
$POP r27,`$VSXFRAME-$SIZE_T*5`($sp)
$POP r28,`$VSXFRAME-$SIZE_T*4`($sp)
$POP r29,`$VSXFRAME-$SIZE_T*3`($sp)