forked from pool/glibc

Accepting request 592314 from home:Andreas_Schwab:Factory

- i386-memmove-sse2-unaligned.patch: Fix SSE2 memmove issue when crossing
  2GB boundary (BZ #22644)

OBS-URL: https://build.opensuse.org/request/show/592314
OBS-URL: https://build.opensuse.org/package/show/Base:System/glibc?expand=0&rev=502
Andreas Schwab 2018-03-29 13:28:21 +00:00 committed by Git OBS Bridge
parent 151fbc5a4d
commit 2f9bf639f9
3 changed files with 158 additions and 0 deletions
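Background for the diffs below: on 32-bit x86, jg/jle are signed comparisons while ja/jbe are unsigned. An address just past the 2 GB mark (0x80000000) turns negative when treated as a signed 32-bit value, so signed branches order it before an address that sits below the mark. A minimal stand-alone C sketch of that effect (the constants are illustrative, not taken from the patch; two's-complement conversion assumed):

#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uint32_t below = 0x7fffffc0;  /* just below the 2 GB boundary */
  uint32_t above = 0x80000040;  /* just above the 2 GB boundary */

  /* Unsigned comparison, as done by ja/jbe: above really is above.  */
  printf ("unsigned: above > below? %d\n", above > below);

  /* Signed comparison, as done by jg/jle: 0x80000040 reads as a
     negative number, so the ordering flips.  */
  printf ("signed:   above > below? %d\n", (int32_t) above > (int32_t) below);
  return 0;
}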

glibc.changes

@@ -1,3 +1,9 @@
-------------------------------------------------------------------
Thu Mar 29 09:53:52 UTC 2018 - schwab@suse.de
- i386-memmove-sse2-unaligned.patch: Fix SSE2 memmove issue when crossing
  2GB boundary (BZ #22644)
-------------------------------------------------------------------
Tue Mar 27 11:30:49 UTC 2018 - schwab@suse.de

glibc.spec

@@ -302,6 +302,8 @@ Patch1008: riscv-tls-init.patch
Patch1009: riscv-fmax-fmin-nan.patch
# PATCH-FIX-UPSTREAM Fix crash in resolver on memory allocation failure (BZ #23005)
Patch1010: res-send-enomem.patch
# PATCH-FIX-UPSTREAM memmove-sse2-unaligned on 32bit x86 produces garbage when crossing 2GB threshold (BZ #22644)
Patch1011: i386-memmove-sse2-unaligned.patch
###
# Patches awaiting upstream approval
@@ -522,6 +524,7 @@ mv crypt_blowfish-%crypt_bf_version/*.[chS] crypt/
%patch1008 -p1
%patch1009 -p1
%patch1010 -p1
%patch1011 -p1
%patch2000 -p1
%patch2004 -p1

i386-memmove-sse2-unaligned.patch

@@ -0,0 +1,149 @@
2018-03-23  Andrew Senkevich  <andrew.senkevich@intel.com>
            Max Horn  <max@quendi.de>
        [BZ #22644]
        * sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S: Fixed
        branch conditions.
        * string/test-memmove.c (do_test2): New testcase.
Index: glibc-2.27/string/test-memmove.c
===================================================================
--- glibc-2.27.orig/string/test-memmove.c
+++ glibc-2.27/string/test-memmove.c
@@ -24,6 +24,7 @@
# define TEST_NAME "memmove"
#endif
#include "test-string.h"
+#include <support/test-driver.h>
char *simple_memmove (char *, const char *, size_t);
@@ -245,6 +246,60 @@ do_random_tests (void)
}
}
+static void
+do_test2 (void)
+{
+  size_t size = 0x20000000;
+  uint32_t * large_buf;
+
+  large_buf = mmap ((void*) 0x70000000, size, PROT_READ | PROT_WRITE,
+                    MAP_PRIVATE | MAP_ANON, -1, 0);
+
+  if (large_buf == MAP_FAILED)
+    error (EXIT_UNSUPPORTED, errno, "Large mmap failed");
+
+  if ((uintptr_t) large_buf > 0x80000000 - 128
+      || 0x80000000 - (uintptr_t) large_buf > 0x20000000)
+    {
+      error (0, 0, "Large mmap allocated improperly");
+      ret = EXIT_UNSUPPORTED;
+      munmap ((void *) large_buf, size);
+      return;
+    }
+
+  size_t bytes_move = 0x80000000 - (uintptr_t) large_buf;
+  size_t arr_size = bytes_move / sizeof (uint32_t);
+  size_t i;
+
+  FOR_EACH_IMPL (impl, 0)
+    {
+      for (i = 0; i < arr_size; i++)
+        large_buf[i] = (uint32_t) i;
+
+      uint32_t * dst = &large_buf[33];
+
+#ifdef TEST_BCOPY
+      CALL (impl, (char *) large_buf, (char *) dst, bytes_move);
+#else
+      CALL (impl, (char *) dst, (char *) large_buf, bytes_move);
+#endif
+
+      for (i = 0; i < arr_size; i++)
+        {
+          if (dst[i] != (uint32_t) i)
+            {
+              error (0, 0,
+                     "Wrong result in function %s dst \"%p\" src \"%p\" offset \"%zd\"",
+                     impl->name, dst, large_buf, i);
+              ret = 1;
+              break;
+            }
+        }
+    }
+
+  munmap ((void *) large_buf, size);
+}
+
int
test_main (void)
{
@@ -284,6 +339,9 @@ test_main (void)
}
do_random_tests ();
+
+  do_test2 ();
+
return ret;
}
Index: glibc-2.27/sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S
===================================================================
--- glibc-2.27.orig/sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S
+++ glibc-2.27/sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S
@@ -72,7 +72,7 @@ ENTRY (MEMCPY)
cmp %edx, %eax
# ifdef USE_AS_MEMMOVE
- jg L(check_forward)
+ ja L(check_forward)
L(mm_len_0_or_more_backward):
/* Now do checks for lengths. We do [0..16], [16..32], [32..64], [64..128]
@@ -81,7 +81,7 @@ L(mm_len_0_or_more_backward):
jbe L(mm_len_0_16_bytes_backward)
cmpl $32, %ecx
- jg L(mm_len_32_or_more_backward)
+ ja L(mm_len_32_or_more_backward)
/* Copy [0..32] and return. */
movdqu (%eax), %xmm0
@@ -92,7 +92,7 @@ L(mm_len_0_or_more_backward):
L(mm_len_32_or_more_backward):
cmpl $64, %ecx
- jg L(mm_len_64_or_more_backward)
+ ja L(mm_len_64_or_more_backward)
/* Copy [0..64] and return. */
movdqu (%eax), %xmm0
@@ -107,7 +107,7 @@ L(mm_len_32_or_more_backward):
L(mm_len_64_or_more_backward):
cmpl $128, %ecx
- jg L(mm_len_128_or_more_backward)
+ ja L(mm_len_128_or_more_backward)
/* Copy [0..128] and return. */
movdqu (%eax), %xmm0
@@ -132,7 +132,7 @@ L(mm_len_128_or_more_backward):
add %ecx, %eax
cmp %edx, %eax
movl SRC(%esp), %eax
- jle L(forward)
+ jbe L(forward)
PUSH (%esi)
PUSH (%edi)
PUSH (%ebx)
@@ -269,7 +269,7 @@ L(check_forward):
add %edx, %ecx
cmp %eax, %ecx
movl LEN(%esp), %ecx
- jle L(forward)
+ jbe L(forward)
/* Now do checks for lengths. We do [0..16], [0..32], [0..64], [0..128]
separately. */
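
The hunks above replace signed conditions (jg, jle) with their unsigned counterparts (ja, jbe) in the address and length checks of the SSE2 memmove path. A rough sketch of how an overlap check of this shape picks the wrong copy direction under signed arithmetic; the addresses are made up, they merely straddle 0x80000000 the way the do_test2 buffer does:

#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uint32_t src = 0x7ffffde0;    /* source starts just below 2 GB */
  uint32_t dst = src + 33 * 4;  /* overlapping destination, as in do_test2 */
  uint32_t len = 0x1000;        /* source end lands above 2 GB */

  /* Unsigned check (jbe): the source end overlaps the destination,
     so a forward copy is not safe and a backward copy is needed.  */
  int forward_safe_unsigned = (src + len) <= dst;

  /* Signed check (jle): src + len reads as a negative value, the
     overlap goes unnoticed, and a forward copy corrupts the data.  */
  int forward_safe_signed = (int32_t) (src + len) <= (int32_t) dst;

  printf ("unsigned says a forward copy is safe: %d\n", forward_safe_unsigned);
  printf ("signed   says a forward copy is safe: %d\n", forward_safe_signed);
  return 0;
}

With the patch applied, the new do_test2 in string/test-memmove.c exercises exactly this situation: it maps a large buffer below 2 GB and moves an overlapping block that runs right up to the 2 GB mark.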