SHA256
1
0
forked from pool/grub2
grub2/grub2-fix-x86_64-efi-callwrap-stack-alignment.patch

147 lines
2.9 KiB
Diff
Raw Normal View History

From 4e42521d8c9232b6ee9eac7d8b4945a7479de781 Mon Sep 17 00:00:00 2001
From: Vladimir Serbinenko <phcoder@gmail.com>
Date: Fri, 22 Nov 2013 05:40:32 +0100
Subject: * grub-core/kern/x86_64/efi/callwrap.S: Fix stack
alignment. Previously we misaligned stack by 8 in startup.S and compensated
for it in callwrap.S. According to ABI docs (EFI and sysv amd64) right
behaviour is to align stack in startup.S and keep it aligned in callwrap.S.
The startup.S part was committed a few commits before. This takes care of
callwrap.S. Reported by: Gary Lin.
References: bnc#841426
Patch-Mainline: yes
Signed-off-by: Gary Ching-Pang Lin <glin@suse.com>
---
grub-core/kern/x86_64/efi/callwrap.S | 52 ++++++++++++++++++------------------
1 file changed, 26 insertions(+), 26 deletions(-)
diff --git a/grub-core/kern/x86_64/efi/callwrap.S b/grub-core/kern/x86_64/efi/callwrap.S
index 2df95dd..1337fd9 100644
--- a/grub-core/kern/x86_64/efi/callwrap.S
+++ b/grub-core/kern/x86_64/efi/callwrap.S
@@ -36,94 +36,94 @@
.text
FUNCTION(efi_wrap_0)
- subq $48, %rsp
+ subq $40, %rsp
call *%rdi
- addq $48, %rsp
+ addq $40, %rsp
ret
FUNCTION(efi_wrap_1)
- subq $48, %rsp
+ subq $40, %rsp
mov %rsi, %rcx
call *%rdi
- addq $48, %rsp
+ addq $40, %rsp
ret
FUNCTION(efi_wrap_2)
- subq $48, %rsp
+ subq $40, %rsp
mov %rsi, %rcx
call *%rdi
- addq $48, %rsp
+ addq $40, %rsp
ret
FUNCTION(efi_wrap_3)
- subq $48, %rsp
+ subq $40, %rsp
mov %rcx, %r8
mov %rsi, %rcx
call *%rdi
- addq $48, %rsp
+ addq $40, %rsp
ret
FUNCTION(efi_wrap_4)
- subq $48, %rsp
+ subq $40, %rsp
mov %r8, %r9
mov %rcx, %r8
mov %rsi, %rcx
call *%rdi
- addq $48, %rsp
+ addq $40, %rsp
ret
FUNCTION(efi_wrap_5)
- subq $48, %rsp
+ subq $40, %rsp
mov %r9, 32(%rsp)
mov %r8, %r9
mov %rcx, %r8
mov %rsi, %rcx
call *%rdi
- addq $48, %rsp
+ addq $40, %rsp
ret
FUNCTION(efi_wrap_6)
- subq $64, %rsp
- mov 64+8(%rsp), %rax
+ subq $56, %rsp
+ mov 56+8(%rsp), %rax
mov %rax, 40(%rsp)
mov %r9, 32(%rsp)
mov %r8, %r9
mov %rcx, %r8
mov %rsi, %rcx
call *%rdi
- addq $64, %rsp
+ addq $56, %rsp
ret
FUNCTION(efi_wrap_7)
- subq $96, %rsp
- mov 96+16(%rsp), %rax
+ subq $88, %rsp
+ mov 88+16(%rsp), %rax
mov %rax, 48(%rsp)
- mov 96+8(%rsp), %rax
+ mov 88+8(%rsp), %rax
mov %rax, 40(%rsp)
mov %r9, 32(%rsp)
mov %r8, %r9
mov %rcx, %r8
mov %rsi, %rcx
call *%rdi
- addq $96, %rsp
+ addq $88, %rsp
ret
FUNCTION(efi_wrap_10)
- subq $96, %rsp
- mov 96+40(%rsp), %rax
+ subq $88, %rsp
+ mov 88+40(%rsp), %rax
mov %rax, 72(%rsp)
- mov 96+32(%rsp), %rax
+ mov 88+32(%rsp), %rax
mov %rax, 64(%rsp)
- mov 96+24(%rsp), %rax
+ mov 88+24(%rsp), %rax
mov %rax, 56(%rsp)
- mov 96+16(%rsp), %rax
+ mov 88+16(%rsp), %rax
mov %rax, 48(%rsp)
- mov 96+8(%rsp), %rax
+ mov 88+8(%rsp), %rax
mov %rax, 40(%rsp)
mov %r9, 32(%rsp)
mov %r8, %r9
mov %rcx, %r8
mov %rsi, %rcx
call *%rdi
- addq $96, %rsp
+ addq $88, %rsp
ret
--
1.8.1.4