diff --git a/glibc-2.11.3-b72646ad0c41.tar.bz2 b/glibc-2.11.3-b72646ad0c41.tar.bz2 new file mode 100644 index 0000000..cc05708 --- /dev/null +++ b/glibc-2.11.3-b72646ad0c41.tar.bz2 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe7395ed702de089ef4f719d3ca0ae0477dbe9c3c0a049aa4bd89f441a00b72e +size 15666419 diff --git a/glibc-2.11.3-bnc658509.diff b/glibc-2.11.3-bnc658509.diff deleted file mode 100644 index bdafe1f..0000000 --- a/glibc-2.11.3-bnc658509.diff +++ /dev/null @@ -1,20 +0,0 @@ ---- ./string/bits/string3.h.orig 2010-12-09 13:29:45.000000000 +0100 -+++ ./string/bits/string3.h 2010-12-09 13:30:15.000000000 +0100 -@@ -53,7 +53,7 @@ - } - - __extern_always_inline void * --__NTH (memmove (void *__restrict __dest, __const void *__restrict __src, -+__NTH (memmove (void * __dest, __const void * __src, - size_t __len)) - { - return __builtin___memmove_chk (__dest, __src, __len, __bos0 (__dest)); -@@ -88,7 +88,7 @@ - - #ifdef __USE_BSD - __extern_always_inline void --__NTH (bcopy (__const void *__restrict __src, void *__restrict __dest, -+__NTH (bcopy (__const void * __src, void * __dest, - size_t __len)) - { - (void) __builtin___memmove_chk (__dest, __src, __len, __bos0 (__dest)); diff --git a/glibc-2.11.3-bso12397.diff b/glibc-2.11.3-bso12397.diff deleted file mode 100644 index 0b54d63..0000000 --- a/glibc-2.11.3-bso12397.diff +++ /dev/null @@ -1,11 +0,0 @@ ---- sysdeps/unix/sysv/linux/mkdirat.c.orig 2011-01-28 15:50:00.000000000 +0100 -+++ sysdeps/unix/sysv/linux/mkdirat.c 2011-01-28 15:51:42.000000000 +0100 -@@ -43,7 +43,7 @@ - { - res = INLINE_SYSCALL (mkdirat, 3, fd, file, mode); - # ifndef __ASSUME_ATFCTS -- if (res == -1 && res == ENOSYS) -+ if (res == -1 && errno == ENOSYS) - __have_atfcts = -1; - else - # endif diff --git a/glibc-2.11.3.tar.bz2 b/glibc-2.11.3.tar.bz2 deleted file mode 100644 index d1dbb03..0000000 --- a/glibc-2.11.3.tar.bz2 +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:3e3402f050984b78d0784efbd2115d90a4bb4376803fd3f00c25aee28cf7fb92 -size 15669411 diff --git a/glibc-2.3.3-amd64-string.diff b/glibc-2.3.3-amd64-string.diff deleted file mode 100644 index d9d0cba..0000000 --- a/glibc-2.3.3-amd64-string.diff +++ /dev/null @@ -1,4434 +0,0 @@ -Index: sysdeps/x86_64/dl-machine.h -=================================================================== ---- sysdeps/x86_64/dl-machine.h.orig -+++ sysdeps/x86_64/dl-machine.h -@@ -226,6 +226,40 @@ dl_platform_init (void) - if (GLRO(dl_platform) != NULL && *GLRO(dl_platform) == '\0') - /* Avoid an empty string which would disturb us. 
*/ - GLRO(dl_platform) = NULL; -+ -+ long int t1, t2; -+ t1 = 0; -+ t2 = 0; -+ -+ asm ( -+ "mov $0x80000000, %%eax # get highest level of support\n\t" -+ "cpuid\n\t" -+ "cmp $0x80000006, %%eax # check for support of cache info\n\t" -+ "jb 1f\n\t" -+ "mov $0x80000005, %%eax # get L1 info\n\t" -+ "cpuid\n\t" -+ "shr $24, %%ecx\n\t" -+ "shl $10, %%ecx\n\t" -+ "mov %%rcx, %0\n\t" -+ "mov $0x80000006, %%eax # get L2 info\n\t" -+ "cpuid\n\t" -+ "shr $16, %%ecx\n\t" -+ "shl $10, %%ecx\n\t" -+ "mov %%rcx, %1\n\t" -+ "1:\n\t" -+ :"=r" (t1), "=r" (t2) :: "%rbx", "%rax", "%rcx", "%rdx" -+ ); -+ -+ if (t1) -+ { -+ GLRO(dl_cache1size) = t1; -+ GLRO(dl_cache1sizehalf) = t1 / 2; -+ } -+ if (t2) -+ { -+ GLRO(dl_cache2size) = t2; -+ GLRO(dl_cache2sizehalf) = t2 / 2; -+ } - } - - static inline Elf64_Addr -Index: sysdeps/x86_64/Makefile -=================================================================== ---- sysdeps/x86_64/Makefile.orig -+++ sysdeps/x86_64/Makefile -@@ -4,7 +4,8 @@ long-double-fcts = yes - ifeq ($(subdir),csu) - sysdep_routines += hp-timing - elide-routines.os += hp-timing --gen-as-const-headers += link-defines.sym -+# get offset to rtld_global._dl_* -+gen-as-const-headers += link-defines.sym rtld-global-offsets.sym - endif - - ifeq ($(subdir),gmon) -Index: sysdeps/x86_64/strcpy.S -=================================================================== ---- sysdeps/x86_64/strcpy.S.orig -+++ sysdeps/x86_64/strcpy.S -@@ -1,159 +1,833 @@ --/* strcpy/stpcpy implementation for x86-64. -- Copyright (C) 2002 Free Software Foundation, Inc. -- This file is part of the GNU C Library. -- Contributed by Andreas Jaeger , 2002. -- -- The GNU C Library is free software; you can redistribute it and/or -- modify it under the terms of the GNU Lesser General Public -- License as published by the Free Software Foundation; either -- version 2.1 of the License, or (at your option) any later version. -- -- The GNU C Library is distributed in the hope that it will be useful, -- but WITHOUT ANY WARRANTY; without even the implied warranty of -- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -- Lesser General Public License for more details. -- -- You should have received a copy of the GNU Lesser General Public -- License along with the GNU C Library; if not, write to the Free -- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA -- 02111-1307 USA. */ -- --#include --#include "asm-syntax.h" --#include "bp-sym.h" --#include "bp-asm.h" -+# $Header: /K8_Projects/Glibc/amd64strcpy.S 7 2/12/04 19:06 Emenezes $ - --#ifndef USE_AS_STPCPY -+# (c) 2002 Advanced Micro Devices, Inc. -+# YOUR USE OF THIS CODE IS SUBJECT TO THE TERMS -+# AND CONDITIONS OF THE GNU LESSER GENERAL PUBLIC -+# LICENSE FOUND IN THE "README" FILE THAT IS -+# INCLUDED WITH THIS FILE -+ -+#include "sysdep.h" -+#include -+ -+ /* XXX: strncpy is broken, just use this for strcpy for now. 
*/ -+#ifdef PIC -+ .globl _rtld_local_ro -+ .hidden _rtld_local_ro -+ .set _rtld_local_ro,_rtld_global_ro -+#endif -+#ifndef STRCPY - # define STRCPY strcpy - #endif -+#define LABEL(s) L(strcpy##s) -+ -+ .text -+ -+ENTRY (STRCPY) # (char *, const char *) -+ -+#ifdef USE_AS_STRNCPY // (char *, const char *, size_t) -+ test %rdx, %rdx # (char *, const char *, size_t) -+ mov %rdx, %r11 -+ jz LABEL(exitn) # early exit -+#endif -+ -+ xor %edx, %edx -+ -+LABEL(aligntry): -+ mov %rsi, %r8 # align by source -+ and $7, %r8 -+ jz LABEL(alignafter) -+ -+LABEL(align): # 8-byte align -+ sub $8, %r8 - -- .text --ENTRY (BP_SYM (STRCPY)) -- movq %rsi, %rcx /* Source register. */ -- andl $7, %ecx /* mask alignment bits */ -- movq %rdi, %rdx /* Duplicate destination pointer. */ -- -- jz 5f /* aligned => start loop */ -- -- neg %ecx /* We need to align to 8 bytes. */ -- addl $8,%ecx -- /* Search the first bytes directly. */ --0: -- movb (%rsi), %al /* Fetch a byte */ -- testb %al, %al /* Is it NUL? */ -- movb %al, (%rdx) /* Store it */ -- jz 4f /* If it was NUL, done! */ -- incq %rsi -- incq %rdx -- decl %ecx -- jnz 0b -- --5: -- movq $0xfefefefefefefeff,%r8 -- -- /* Now the sources is aligned. Unfortunatly we cannot force -- to have both source and destination aligned, so ignore the -- alignment of the destination. */ - .p2align 4 --1: -- /* 1st unroll. */ -- movq (%rsi), %rax /* Read double word (8 bytes). */ -- addq $8, %rsi /* Adjust pointer for next word. */ -- movq %rax, %r9 /* Save a copy for NUL finding. */ -- addq %r8, %r9 /* add the magic value to the word. We get -- carry bits reported for each byte which -- is *not* 0 */ -- jnc 3f /* highest byte is NUL => return pointer */ -- xorq %rax, %r9 /* (word+magic)^word */ -- orq %r8, %r9 /* set all non-carry bits */ -- incq %r9 /* add 1: if one carry bit was *not* set -- the addition will not result in 0. */ -- -- jnz 3f /* found NUL => return pointer */ -- -- movq %rax, (%rdx) /* Write value to destination. */ -- addq $8, %rdx /* Adjust pointer. */ -- -- /* 2nd unroll. */ -- movq (%rsi), %rax /* Read double word (8 bytes). */ -- addq $8, %rsi /* Adjust pointer for next word. */ -- movq %rax, %r9 /* Save a copy for NUL finding. */ -- addq %r8, %r9 /* add the magic value to the word. We get -- carry bits reported for each byte which -- is *not* 0 */ -- jnc 3f /* highest byte is NUL => return pointer */ -- xorq %rax, %r9 /* (word+magic)^word */ -- orq %r8, %r9 /* set all non-carry bits */ -- incq %r9 /* add 1: if one carry bit was *not* set -- the addition will not result in 0. */ -- -- jnz 3f /* found NUL => return pointer */ -- -- movq %rax, (%rdx) /* Write value to destination. */ -- addq $8, %rdx /* Adjust pointer. */ -- -- /* 3rd unroll. */ -- movq (%rsi), %rax /* Read double word (8 bytes). */ -- addq $8, %rsi /* Adjust pointer for next word. */ -- movq %rax, %r9 /* Save a copy for NUL finding. */ -- addq %r8, %r9 /* add the magic value to the word. We get -- carry bits reported for each byte which -- is *not* 0 */ -- jnc 3f /* highest byte is NUL => return pointer */ -- xorq %rax, %r9 /* (word+magic)^word */ -- orq %r8, %r9 /* set all non-carry bits */ -- incq %r9 /* add 1: if one carry bit was *not* set -- the addition will not result in 0. */ -- -- jnz 3f /* found NUL => return pointer */ -- -- movq %rax, (%rdx) /* Write value to destination. */ -- addq $8, %rdx /* Adjust pointer. */ -- -- /* 4th unroll. */ -- movq (%rsi), %rax /* Read double word (8 bytes). */ -- addq $8, %rsi /* Adjust pointer for next word. 
*/ -- movq %rax, %r9 /* Save a copy for NUL finding. */ -- addq %r8, %r9 /* add the magic value to the word. We get -- carry bits reported for each byte which -- is *not* 0 */ -- jnc 3f /* highest byte is NUL => return pointer */ -- xorq %rax, %r9 /* (word+magic)^word */ -- orq %r8, %r9 /* set all non-carry bits */ -- incq %r9 /* add 1: if one carry bit was *not* set -- the addition will not result in 0. */ -- -- jnz 3f /* found NUL => return pointer */ -- -- movq %rax, (%rdx) /* Write value to destination. */ -- addq $8, %rdx /* Adjust pointer. */ -- jmp 1b /* Next iteration. */ - -- /* Do the last few bytes. %rax contains the value to write. -- The loop is unrolled twice. */ -+LABEL(alignloop): -+#ifdef USE_AS_STRNCPY -+ dec %r11 -+ jl LABEL(exitn) -+#endif -+ -+ mov (%rsi, %rdx), %al # check if same character -+ test %al, %al # check if character a NUL -+ mov %al, (%rdi, %rdx) -+ jz LABEL(exit) -+ -+ inc %edx -+ inc %r8 -+ jnz LABEL(alignloop) -+ - .p2align 4 -+ -+LABEL(alignafter): -+ -+LABEL(8try): -+ mov $0xfefefefefefefeff, %rcx -+ -+LABEL(8): # 8-byte -+ mov (%rsi, %rdx), %rax -+ -+LABEL(8loop): -+#ifdef USE_AS_STRNCPY -+ sub $8, %r11 -+ jl LABEL(tail) -+#endif -+ -+ mov %rcx, %r8 -+ add %rax, %r8 -+ sbb %r10, %r10 -+ -+ xor %rax, %r8 -+ or %rcx, %r8 -+ sub %r10, %r8 -+ jnz LABEL(tail) -+ -+ mov %rax, (%rdi, %rdx) -+ mov 8 (%rsi, %rdx), %rax -+ add $8, %edx -+ -+#ifdef USE_AS_STRNCPY -+ sub $8, %r11 -+ jl LABEL(tail) -+#endif -+ -+ mov %rcx, %r8 -+ add %rax, %r8 -+ sbb %r10, %r10 -+ -+ xor %rax, %r8 -+ or %rcx, %r8 -+ sub %r10, %r8 -+ jnz LABEL(tail) -+ -+ mov %rax, (%rdi, %rdx) -+ mov 8 (%rsi, %rdx), %rax -+ add $8, %edx -+ -+#ifdef USE_AS_STRNCPY -+ sub $8, %r11 -+ jl LABEL(tail) -+#endif -+ -+ mov %rcx, %r8 -+ add %rax, %r8 -+ sbb %r10, %r10 -+ -+ xor %rax, %r8 -+ or %rcx, %r8 -+ sub %r10, %r8 -+ jnz LABEL(tail) -+ -+ mov %rax, (%rdi, %rdx) -+ mov 8 (%rsi, %rdx), %rax -+ add $8, %edx -+ -+#ifdef USE_AS_STRNCPY -+ sub $8, %r11 -+ jl LABEL(tail) -+#endif -+ -+ mov %rcx, %r8 -+ add %rax, %r8 -+ sbb %r10, %r10 -+ -+ xor %rax, %r8 -+ or %rcx, %r8 -+ sub %r10, %r8 -+ jnz LABEL(tail) -+ -+ mov %rax, (%rdi, %rdx) -+ mov 8 (%rsi, %rdx), %rax -+ add $8, %edx -+ -+#ifdef USE_AS_STRNCPY -+ sub $8, %r11 -+ jl LABEL(tail) -+#endif -+ -+ mov %rcx, %r8 -+ add %rax, %r8 -+ sbb %r10, %r10 -+ -+ xor %rax, %r8 -+ or %rcx, %r8 -+ sub %r10, %r8 -+ jnz LABEL(tail) -+ -+ mov %rax, (%rdi, %rdx) -+ mov 8 (%rsi, %rdx), %rax -+ add $8, %edx -+ -+#ifdef USE_AS_STRNCPY -+ sub $8, %r11 -+ jl LABEL(tail) -+#endif -+ -+ mov %rcx, %r8 -+ add %rax, %r8 -+ sbb %r10, %r10 -+ -+ xor %rax, %r8 -+ or %rcx, %r8 -+ sub %r10, %r8 -+ jnz LABEL(tail) -+ -+ mov %rax, (%rdi, %rdx) -+ mov 8 (%rsi, %rdx), %rax -+ add $8, %edx -+ -+#ifdef USE_AS_STRNCPY -+ sub $8, %r11 -+ jl LABEL(tail) -+#endif -+ -+ mov %rcx, %r8 -+ add %rax, %r8 -+ sbb %r10, %r10 -+ -+ xor %rax, %r8 -+ or %rcx, %r8 -+ sub %r10, %r8 -+ jnz LABEL(tail) -+ -+ mov %rax, (%rdi, %rdx) -+ mov 8 (%rsi, %rdx), %rax -+ add $8, %edx -+ -+#ifdef USE_AS_STRNCPY -+ sub $8, %r11 -+ jl LABEL(tail) -+#endif -+ -+ mov %rcx, %r8 -+ add %rax, %r8 -+ sbb %r10, %r10 -+ -+ xor %rax, %r8 -+ or %rcx, %r8 -+ sub %r10, %r8 -+ jnz LABEL(tail) -+ -+ mov %rax, (%rdi, %rdx) -+ mov 8 (%rsi, %rdx), %rax -+ add $8, %edx -+ -+LABEL(8after): -+ -+LABEL(64try): -+#ifdef PIC -+ mov _rtld_local_ro@GOTPCREL(%rip), %r8 -+ mov RTLD_GLOBAL_DL_CACHE1SIZEHALF(%r8), %r9 -+#else -+ mov _dl_cache1sizehalf, %r9 -+#endif -+ -+ -+LABEL(64): # 64-byte -+ -+ .p2align 4 -+ -+LABEL(64loop): -+#ifdef 
USE_AS_STRNCPY -+ sub $8, %r11 -+ jl LABEL(tail) -+#endif -+ -+ mov %rcx, %r8 -+ add %rax, %r8 -+ sbb %r10, %r10 -+ -+ xor %rax, %r8 -+ or %rcx, %r8 -+ sub %r10, %r8 -+ jnz LABEL(tail) -+ -+ mov %rax, (%rdi, %rdx) -+ mov 8 (%rsi, %rdx), %rax -+ add $8, %edx -+ -+#ifdef USE_AS_STRNCPY -+ sub $8, %r11 -+ jl LABEL(tail) -+#endif -+ -+ mov %rcx, %r8 -+ add %rax, %r8 -+ sbb %r10, %r10 -+ -+ xor %rax, %r8 -+ or %rcx, %r8 -+ sub %r10, %r8 -+ jnz LABEL(tail) -+ -+ mov %rax, (%rdi, %rdx) -+ mov 8 (%rsi, %rdx), %rax -+ add $8, %edx -+ -+#ifdef USE_AS_STRNCPY -+ sub $8, %r11 -+ jl LABEL(tail) -+#endif -+ -+ mov %rcx, %r8 -+ add %rax, %r8 -+ sbb %r10, %r10 -+ -+ xor %rax, %r8 -+ or %rcx, %r8 -+ sub %r10, %r8 -+ jnz LABEL(tail) -+ -+ mov %rax, (%rdi, %rdx) -+ mov 8 (%rsi, %rdx), %rax -+ add $8, %edx -+ -+#ifdef USE_AS_STRNCPY -+ sub $8, %r11 -+ jl LABEL(tail) -+#endif -+ -+ mov %rcx, %r8 -+ add %rax, %r8 -+ sbb %r10, %r10 -+ -+ xor %rax, %r8 -+ or %rcx, %r8 -+ sub %r10, %r8 -+ jnz LABEL(tail) -+ -+ mov %rax, (%rdi, %rdx) -+ mov 8 (%rsi, %rdx), %rax -+ add $8, %edx -+ -+#ifdef USE_AS_STRNCPY -+ sub $8, %r11 -+ jl LABEL(tail) -+#endif -+ -+ mov %rcx, %r8 -+ add %rax, %r8 -+ sbb %r10, %r10 -+ -+ xor %rax, %r8 -+ or %rcx, %r8 -+ sub %r10, %r8 -+ jnz LABEL(tail) -+ -+ mov %rax, (%rdi, %rdx) -+ mov 8 (%rsi, %rdx), %rax -+ add $8, %edx -+ -+#ifdef USE_AS_STRNCPY -+ sub $8, %r11 -+ jl LABEL(tail) -+#endif -+ -+ mov %rcx, %r8 -+ add %rax, %r8 -+ sbb %r10, %r10 -+ -+ xor %rax, %r8 -+ or %rcx, %r8 -+ sub %r10, %r8 -+ jnz LABEL(tail) -+ -+ mov %rax, (%rdi, %rdx) -+ mov 8 (%rsi, %rdx), %rax -+ add $8, %edx -+ -+#ifdef USE_AS_STRNCPY -+ sub $8, %r11 -+ jl LABEL(tail) -+#endif -+ -+ mov %rcx, %r8 -+ add %rax, %r8 -+ sbb %r10, %r10 -+ -+ xor %rax, %r8 -+ or %rcx, %r8 -+ sub %r10, %r8 -+ jnz LABEL(tail) -+ -+ mov %rax, (%rdi, %rdx) -+ mov 8 (%rsi, %rdx), %rax -+ add $8, %edx -+ -+#ifdef USE_AS_STRNCPY -+ sub $8, %r11 -+ jl LABEL(tail) -+#endif -+ -+ mov %rcx, %r8 -+ add %rax, %r8 -+ sbb %r10, %r10 -+ -+ xor %rax, %r8 -+ or %rcx, %r8 -+ sub %r10, %r8 -+ jnz LABEL(tail) -+ -+ cmp %r9, %rdx -+ -+ mov %rax, (%rdi, %rdx) -+ mov 8 (%rsi, %rdx), %rax -+ lea 8 (%rdx), %rdx -+ -+ jbe LABEL(64loop) -+ -+LABEL(64after): -+ -+LABEL(pretry): -+#ifdef PIC -+ mov _rtld_local_ro@GOTPCREL(%rip), %r8 -+ mov RTLD_GLOBAL_DL_CACHE2SIZEHALF(%r8), %r9 -+#else -+ mov _dl_cache2sizehalf, %r9 -+#endif -+ -+LABEL(pre): # 64-byte prefetch -+ -+ .p2align 4 -+ -+LABEL(preloop): -+#ifdef USE_AS_STRNCPY -+ sub $8, %r11 -+ jl LABEL(tail) -+#endif -+ -+ mov %rcx, %r8 -+ add %rax, %r8 -+ sbb %r10, %r10 -+ -+ xor %rax, %r8 -+ or %rcx, %r8 -+ sub %r10, %r8 -+ jnz LABEL(tail) -+ -+ mov %rax, (%rdi, %rdx) -+ mov 8 (%rsi, %rdx), %rax -+ add $8, %edx -+ -+#ifdef USE_AS_STRNCPY -+ sub $8, %r11 -+ jl LABEL(tail) -+#endif -+ -+ mov %rcx, %r8 -+ add %rax, %r8 -+ sbb %r10, %r10 -+ -+ xor %rax, %r8 -+ or %rcx, %r8 -+ sub %r10, %r8 -+ jnz LABEL(tail) -+ -+ mov %rax, (%rdi, %rdx) -+ mov 8 (%rsi, %rdx), %rax -+ add $8, %edx -+ -+#ifdef USE_AS_STRNCPY -+ sub $8, %r11 -+ jl LABEL(tail) -+#endif -+ -+ mov %rcx, %r8 -+ add %rax, %r8 -+ sbb %r10, %r10 -+ -+ xor %rax, %r8 -+ or %rcx, %r8 -+ sub %r10, %r8 -+ jnz LABEL(tail) -+ -+ mov %rax, (%rdi, %rdx) -+ mov 8 (%rsi, %rdx), %rax -+ add $8, %edx -+ -+#ifdef USE_AS_STRNCPY -+ sub $8, %r11 -+ jl LABEL(tail) -+#endif -+ -+ mov %rcx, %r8 -+ add %rax, %r8 -+ sbb %r10, %r10 -+ -+ xor %rax, %r8 -+ or %rcx, %r8 -+ sub %r10, %r8 -+ jnz LABEL(tail) -+ -+ mov %rax, (%rdi, %rdx) -+ mov 8 (%rsi, %rdx), %rax -+ add $8, %edx -+ -+#ifdef 
USE_AS_STRNCPY -+ sub $8, %r11 -+ jl LABEL(tail) -+#endif -+ -+ mov %rcx, %r8 -+ add %rax, %r8 -+ sbb %r10, %r10 -+ -+ xor %rax, %r8 -+ or %rcx, %r8 -+ sub %r10, %r8 -+ jnz LABEL(tail) -+ -+ mov %rax, (%rdi, %rdx) -+ mov 8 (%rsi, %rdx), %rax -+ add $8, %edx -+ -+#ifdef USE_AS_STRNCPY -+ sub $8, %r11 -+ jl LABEL(tail) -+#endif -+ -+ mov %rcx, %r8 -+ add %rax, %r8 -+ sbb %r10, %r10 -+ -+ xor %rax, %r8 -+ or %rcx, %r8 -+ sub %r10, %r8 -+ jnz LABEL(tail) -+ -+ mov %rax, (%rdi, %rdx) -+ mov 8 (%rsi, %rdx), %rax -+ add $8, %edx -+ -+#ifdef USE_AS_STRNCPY -+ sub $8, %r11 -+ jl LABEL(tail) -+#endif -+ -+ mov %rcx, %r8 -+ add %rax, %r8 -+ sbb %r10, %r10 -+ -+ xor %rax, %r8 -+ or %rcx, %r8 -+ sub %r10, %r8 -+ jnz LABEL(tail) -+ -+ mov %rax, (%rdi, %rdx) -+ mov 8 (%rsi, %rdx), %rax -+ add $8, %edx -+ -+#ifdef USE_AS_STRNCPY -+ sub $8, %r11 -+ jl LABEL(tail) -+#endif -+ -+ mov %rcx, %r8 -+ add %rax, %r8 -+ sbb %r10, %r10 -+ -+ xor %rax, %r8 -+ or %rcx, %r8 -+ sub %r10, %r8 -+ jnz LABEL(tail) -+ -+ cmp %r9, %rdx -+ -+ mov %rax, (%rdi, %rdx) -+ prefetcht0 512 + 8 (%rdi, %rdx) -+ mov 8 (%rsi, %rdx), %rax -+ prefetcht0 512 + 8 (%rsi, %rdx) -+ lea 8 (%rdx), %rdx -+ -+ jb LABEL(preloop) -+ -+ .p2align 4 -+ -+LABEL(preafter): -+ -+LABEL(NTtry): -+ sfence -+ -+LABEL(NT): # 64-byte NT -+ -+ .p2align 4 -+ -+LABEL(NTloop): -+#ifdef USE_AS_STRNCPY -+ sub $8, %r11 -+ jl LABEL(tail) -+#endif -+ -+ mov %rcx, %r8 -+ add %rax, %r8 -+ sbb %r10, %r10 -+ -+ xor %rax, %r8 -+ or %rcx, %r8 -+ sub %r10, %r8 -+ jnz LABEL(NTtail) -+ -+ movnti %rax, (%rdi, %rdx) -+ mov 8 (%rsi, %rdx), %rax -+ add $8, %rdx -+ -+#ifdef USE_AS_STRNCPY -+ sub $8, %r11 -+ jl LABEL(tail) -+#endif -+ -+ mov %rcx, %r8 -+ add %rax, %r8 -+ sbb %r10, %r10 -+ -+ xor %rax, %r8 -+ or %rcx, %r8 -+ sub %r10, %r8 -+ jnz LABEL(NTtail) -+ -+ movnti %rax, (%rdi, %rdx) -+ mov 8 (%rsi, %rdx), %rax -+ add $8, %rdx -+ -+#ifdef USE_AS_STRNCPY -+ sub $8, %r11 -+ jl LABEL(tail) -+#endif -+ -+ mov %rcx, %r8 -+ add %rax, %r8 -+ sbb %r10, %r10 -+ -+ xor %rax, %r8 -+ or %rcx, %r8 -+ sub %r10, %r8 -+ jnz LABEL(NTtail) -+ -+ movnti %rax, (%rdi, %rdx) -+ mov 8 (%rsi, %rdx), %rax -+ add $8, %rdx -+ -+#ifdef USE_AS_STRNCPY -+ sub $8, %r11 -+ jl LABEL(tail) -+#endif -+ -+ mov %rcx, %r8 -+ add %rax, %r8 -+ sbb %r10, %r10 -+ -+ xor %rax, %r8 -+ or %rcx, %r8 -+ sub %r10, %r8 -+ jnz LABEL(NTtail) -+ -+ movnti %rax, (%rdi, %rdx) -+ mov 8 (%rsi, %rdx), %rax -+ add $8, %rdx -+ -+#ifdef USE_AS_STRNCPY -+ sub $8, %r11 -+ jl LABEL(tail) -+#endif -+ -+ mov %rcx, %r8 -+ add %rax, %r8 -+ sbb %r10, %r10 -+ -+ xor %rax, %r8 -+ or %rcx, %r8 -+ sub %r10, %r8 -+ jnz LABEL(NTtail) -+ -+ movnti %rax, (%rdi, %rdx) -+ mov 8 (%rsi, %rdx), %rax -+ add $8, %rdx -+ -+#ifdef USE_AS_STRNCPY -+ sub $8, %r11 -+ jl LABEL(tail) -+#endif -+ -+ mov %rcx, %r8 -+ add %rax, %r8 -+ sbb %r10, %r10 -+ -+ xor %rax, %r8 -+ or %rcx, %r8 -+ sub %r10, %r8 -+ jnz LABEL(NTtail) -+ -+ movnti %rax, (%rdi, %rdx) -+ mov 8 (%rsi, %rdx), %rax -+ add $8, %rdx -+ -+#ifdef USE_AS_STRNCPY -+ sub $8, %r11 -+ jl LABEL(tail) -+#endif -+ -+ mov %rcx, %r8 -+ add %rax, %r8 -+ sbb %r10, %r10 -+ -+ xor %rax, %r8 -+ or %rcx, %r8 -+ sub %r10, %r8 -+ jnz LABEL(NTtail) -+ -+ movnti %rax, (%rdi, %rdx) -+ mov 8 (%rsi, %rdx), %rax -+ add $8, %rdx -+ -+#ifdef USE_AS_STRNCPY -+ sub $8, %r11 -+ jl LABEL(tail) -+#endif -+ -+ mov %rcx, %r8 -+ add %rax, %r8 -+ sbb %r10, %r10 -+ -+ xor %rax, %r8 -+ or %rcx, %r8 -+ sub %r10, %r8 -+ jnz LABEL(NTtail) -+ -+ movnti %rax, (%rdi, %rdx) -+ mov 8 (%rsi, %rdx), %rax -+ prefetchnta 768 + 8 (%rsi, %rdx) -+ add $8, %rdx 
-+ -+ jmp LABEL(NTloop) -+ -+ .p2align 4 -+ -+LABEL(NTtail): -+ sfence -+ -+ .p2align 4 -+ -+LABEL(NTafter): -+ -+LABEL(tailtry): -+ -+LABEL(tail): # 1-byte tail -+#ifdef USE_AS_STRNCPY -+ add $8, %r11 -+#endif -+ -+ .p2align 4 -+ -+LABEL(tailloop): -+#ifdef USE_AS_STRNCPY -+ dec %r11 -+ jl LABEL(exitn) -+#endif -+ -+ test %al, %al -+ mov %al, (%rdi, %rdx) -+ jz LABEL(exit) -+ -+ inc %rdx -+ -+#ifdef USE_AS_STRNCPY -+ dec %r11 -+ jl LABEL(exitn) -+ -+ mov %ah, %al -+#endif -+ -+ test %ah, %ah -+ mov %ah, (%rdi, %rdx) -+ jz LABEL(exit) -+ -+ inc %rdx -+ -+#ifdef USE_AS_STRNCPY -+ dec %r11 -+ jl LABEL(exitn) -+#endif -+ -+ shr $16, %rax -+ -+ test %al, %al -+ mov %al, (%rdi, %rdx) -+ jz LABEL(exit) -+ -+ inc %rdx -+ -+#ifdef USE_AS_STRNCPY -+ dec %r11 -+ jl LABEL(exitn) -+ -+ mov %ah, %al -+#endif -+ -+ test %ah, %ah -+ mov %ah, (%rdi, %rdx) -+ jz LABEL(exit) -+ -+ shr $16, %rax -+ inc %rdx -+ -+ jmp LABEL(tailloop) -+ -+ .p2align 4 -+ -+LABEL(tailafter): -+ -+LABEL(exit): -+#ifdef USE_AS_STRNCPY -+ test %r11, %r11 -+ mov %r11, %rcx -+ -+#ifdef USE_AS_STPCPY -+ lea (%rdi, %rdx), %r8 -+#else -+ mov %rdi, %r8 -+#endif -+ -+ jz 2f -+ -+ xor %eax, %eax # bzero () would do too, but usually there are only a handfull of bytes left -+ shr $3, %rcx -+ lea 1 (%rdi, %rdx), %rdi -+ jz 1f -+ -+ rep stosq -+ -+1: -+ mov %r11d, %ecx -+ and $7, %ecx -+ jz 2f -+ -+ .p2align 4,, 3 -+ - 3: -- /* Note that stpcpy needs to return with the value of the NUL -- byte. */ -- movb %al, (%rdx) /* 1st byte. */ -- testb %al, %al /* Is it NUL. */ -- jz 4f /* yes, finish. */ -- incq %rdx /* Increment destination. */ -- movb %ah, (%rdx) /* 2nd byte. */ -- testb %ah, %ah /* Is it NUL?. */ -- jz 4f /* yes, finish. */ -- incq %rdx /* Increment destination. */ -- shrq $16, %rax /* Shift... */ -- jmp 3b /* and look at next two bytes in %rax. */ -+ dec %ecx -+ mov %al, (%rdi, %rcx) -+ jnz 3b -+ -+ .p2align 4,, 3 -+ -+2: -+ mov %r8, %rax -+ ret -+ -+#endif -+ -+ .p2align 4 - --4: -+LABEL(exitn): - #ifdef USE_AS_STPCPY -- movq %rdx, %rax /* Destination is return value. */ -+ lea (%rdi, %rdx), %rax - #else -- movq %rdi, %rax /* Source is return value. */ -+ mov %rdi, %rax - #endif -- retq --END (BP_SYM (STRCPY)) --#ifndef USE_AS_STPCPY --libc_hidden_builtin_def (strcpy) -+ -+ ret -+ -+END (STRCPY) -+#if !defined USE_AS_STPCPY && !defined USE_AS_STRNCPY -+libc_hidden_builtin_def (STRCPY) - #endif -Index: sysdeps/unix/sysv/linux/x86_64/dl-procinfo.c -=================================================================== ---- sysdeps/unix/sysv/linux/x86_64/dl-procinfo.c.orig -+++ sysdeps/unix/sysv/linux/x86_64/dl-procinfo.c -@@ -1,5 +1,5 @@ - #ifdef IS_IN_ldconfig - # include - #else --# include -+# include - #endif -Index: sysdeps/x86_64/dl-procinfo.c -=================================================================== ---- /dev/null -+++ sysdeps/x86_64/dl-procinfo.c -@@ -0,0 +1,108 @@ -+/* Data for x86-64 version of processor capability information. -+ Copyright (C) 2004 Free Software Foundation, Inc. -+ This file is part of the GNU C Library. -+ Contributed by Andreas Jaeger , 2004. -+ -+ The GNU C Library is free software; you can redistribute it and/or -+ modify it under the terms of the GNU Lesser General Public -+ License as published by the Free Software Foundation; either -+ version 2.1 of the License, or (at your option) any later version. 
-+ -+ The GNU C Library is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ Lesser General Public License for more details. -+ -+ You should have received a copy of the GNU Lesser General Public -+ License along with the GNU C Library; if not, write to the Free -+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA -+ 02111-1307 USA. */ -+ -+/* This information must be kept in sync with the _DL_HWCAP_COUNT and -+ _DL_PLATFORM_COUNT definitions in procinfo.h. -+ -+ If anything should be added here check whether the size of each string -+ is still ok with the given array size. -+ -+ All the #ifdefs in the definitions ar equite irritating but -+ necessary if we want to avoid duplicating the information. There -+ are three different modes: -+ -+ - PROCINFO_DECL is defined. This means we are only interested in -+ declarations. -+ -+ - PROCINFO_DECL is not defined: -+ -+ + if SHARED is defined the file is included in an array -+ initializer. The .element = { ... } syntax is needed. -+ -+ + if SHARED is not defined a normal array initialization is -+ needed. -+ */ -+ -+#ifndef PROCINFO_CLASS -+#define PROCINFO_CLASS -+#endif -+ -+ /* _dl_cache1size: size of L1 cache */ -+#if !defined PROCINFO_DECL && defined SHARED -+ ._dl_cache1size -+#else -+PROCINFO_CLASS long int _dl_cache1size -+#endif -+#ifndef PROCINFO_DECL -+= 1024 * 64 -+#endif -+#if !defined SHARED || defined PROCINFO_DECL -+; -+#else -+, -+#endif -+ -+ /* _dl_cache1sizehalf: 1/2 size of L1 cache */ -+#if !defined PROCINFO_DECL && defined SHARED -+ ._dl_cache1sizehalf -+#else -+PROCINFO_CLASS long int _dl_cache1sizehalf -+#endif -+#ifndef PROCINFO_DECL -+= 1024 * 64 / 2 -+#endif -+#if !defined SHARED || defined PROCINFO_DECL -+; -+#else -+, -+#endif -+ -+ /* _dl_cache2size: size of L2 cache */ -+#if !defined PROCINFO_DECL && defined SHARED -+ ._dl_cache2size -+#else -+PROCINFO_CLASS long int _dl_cache2size -+#endif -+#ifndef PROCINFO_DECL -+= 1024 * 1024 -+#endif -+#if !defined SHARED || defined PROCINFO_DECL -+; -+#else -+, -+#endif -+ -+ /* _dl_cache2size: 1/2 size of L2 cache */ -+#if !defined PROCINFO_DECL && defined SHARED -+ ._dl_cache2sizehalf -+#else -+PROCINFO_CLASS long int _dl_cache2sizehalf -+#endif -+#ifndef PROCINFO_DECL -+= 1024 * 1024 / 2 -+#endif -+#if !defined SHARED || defined PROCINFO_DECL -+; -+#else -+, -+#endif -+ -+#undef PROCINFO_DECL -+#undef PROCINFO_CLASS -Index: sysdeps/x86_64/elf/rtld-global-offsets.sym -=================================================================== ---- /dev/null -+++ sysdeps/x86_64/elf/rtld-global-offsets.sym -@@ -0,0 +1,10 @@ -+#define SHARED 1 -+ -+#include -+ -+#define rtdl_global_offsetof(mem) offsetof (struct rtld_global_ro, mem) -+ -+RTLD_GLOBAL_DL_CACHE1SIZE rtdl_global_offsetof (_dl_cache1size) -+RTLD_GLOBAL_DL_CACHE1SIZEHALF rtdl_global_offsetof (_dl_cache1sizehalf) -+RTLD_GLOBAL_DL_CACHE2SIZE rtdl_global_offsetof (_dl_cache2size) -+RTLD_GLOBAL_DL_CACHE2SIZEHALF rtdl_global_offsetof (_dl_cache2sizehalf) -Index: sysdeps/x86_64/memcmp.S -=================================================================== ---- sysdeps/x86_64/memcmp.S.orig -+++ sysdeps/x86_64/memcmp.S -@@ -1,358 +1,442 @@ --/* memcmp with SSE2 -- Copyright (C) 2009 Free Software Foundation, Inc. -- Contributed by Intel Corporation. -- This file is part of the GNU C Library. 
-- -- The GNU C Library is free software; you can redistribute it and/or -- modify it under the terms of the GNU Lesser General Public -- License as published by the Free Software Foundation; either -- version 2.1 of the License, or (at your option) any later version. -- -- The GNU C Library is distributed in the hope that it will be useful, -- but WITHOUT ANY WARRANTY; without even the implied warranty of -- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -- Lesser General Public License for more details. -- -- You should have received a copy of the GNU Lesser General Public -- License along with the GNU C Library; if not, write to the Free -- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA -- 02111-1307 USA. */ -- --#include -- -- .text --ENTRY (memcmp) -- test %rdx, %rdx -- jz L(finz) -- cmpq $1, %rdx -- jle L(finr1b) -- subq %rdi, %rsi -- movq %rdx, %r10 -- cmpq $32, %r10 -- jge L(gt32) -- /* Handle small chunks and last block of less than 32 bytes. */ --L(small): -- testq $1, %r10 -- jz L(s2b) -- movzbl (%rdi), %eax -- movzbl (%rdi, %rsi), %edx -- subq $1, %r10 -- je L(finz1) -- addq $1, %rdi -- subl %edx, %eax -- jnz L(exit) --L(s2b): -- testq $2, %r10 -- jz L(s4b) -- movzwl (%rdi), %eax -- movzwl (%rdi, %rsi), %edx -- subq $2, %r10 -- je L(fin2_7) -- addq $2, %rdi -- cmpl %edx, %eax -- jnz L(fin2_7) --L(s4b): -- testq $4, %r10 -- jz L(s8b) -- movl (%rdi), %eax -- movl (%rdi, %rsi), %edx -- subq $4, %r10 -- je L(fin2_7) -- addq $4, %rdi -- cmpl %edx, %eax -- jnz L(fin2_7) --L(s8b): -- testq $8, %r10 -- jz L(s16b) -- movq (%rdi), %rax -- movq (%rdi, %rsi), %rdx -- subq $8, %r10 -- je L(fin2_7) -- addq $8, %rdi -- cmpq %rdx, %rax -- jnz L(fin2_7) --L(s16b): -- movdqu (%rdi), %xmm1 -- movdqu (%rdi, %rsi), %xmm0 -- pcmpeqb %xmm0, %xmm1 -- pmovmskb %xmm1, %edx -- xorl %eax, %eax -- subl $0xffff, %edx -- jz L(finz) -- bsfl %edx, %ecx -- leaq (%rdi, %rcx), %rcx -- movzbl (%rcx), %eax -- movzbl (%rsi, %rcx), %edx -- jmp L(finz1) -- -- .p2align 4,, 4 --L(finr1b): -- movzbl (%rdi), %eax -- movzbl (%rsi), %edx --L(finz1): -- subl %edx, %eax --L(exit): -- ret -- -- .p2align 4,, 4 --L(fin2_7): -- cmpq %rdx, %rax -- jz L(finz) -- movq %rax, %r11 -- subq %rdx, %r11 -- bsfq %r11, %rcx -- sarq $3, %rcx -- salq $3, %rcx -- sarq %cl, %rax -- movzbl %al, %eax -- sarq %cl, %rdx -- movzbl %dl, %edx -- subl %edx, %eax -- ret -- -- .p2align 4,, 4 --L(finz): -- xorl %eax, %eax -- ret -- -- /* For blocks bigger than 32 bytes -- 1. Advance one of the addr pointer to be 16B aligned. -- 2. Treat the case of both addr pointers aligned to 16B -- separately to avoid movdqu. -- 3. Handle any blocks of greater than 64 consecutive bytes with -- unrolling to reduce branches. -- 4. At least one addr pointer is 16B aligned, use memory version -- of pcmbeqb. -- */ -- .p2align 4,, 4 --L(gt32): -- movq %rdx, %r11 -- addq %rdi, %r11 -- movq %rdi, %r8 -- -- andq $15, %r8 -- jz L(16am) -- /* Both pointers may be misaligned. */ -- movdqu (%rdi), %xmm1 -- movdqu (%rdi, %rsi), %xmm0 -- pcmpeqb %xmm0, %xmm1 -- pmovmskb %xmm1, %edx -- subl $0xffff, %edx -- jnz L(neq) -- neg %r8 -- leaq 16(%rdi, %r8), %rdi --L(16am): -- /* Handle two 16B aligned pointers separately. */ -- testq $15, %rsi -- jz L(ATR) -- testq $16, %rdi -- jz L(A32) -- movdqu (%rdi, %rsi), %xmm0 -- pcmpeqb (%rdi), %xmm0 -- pmovmskb %xmm0, %edx -- subl $0xffff, %edx -- jnz L(neq) -- addq $16, %rdi --L(A32): -- movq %r11, %r10 -- andq $-32, %r10 -- cmpq %r10, %rdi -- jge L(mt16) -- /* Pre-unroll to be ready for unrolled 64B loop. 
*/ -- testq $32, %rdi -- jz L(A64) -- movdqu (%rdi,%rsi), %xmm0 -- pcmpeqb (%rdi), %xmm0 -- pmovmskb %xmm0, %edx -- subl $0xffff, %edx -- jnz L(neq) -- addq $16, %rdi -- -- movdqu (%rdi,%rsi), %xmm0 -- pcmpeqb (%rdi), %xmm0 -- pmovmskb %xmm0, %edx -- subl $0xffff, %edx -- jnz L(neq) -- addq $16, %rdi -- --L(A64): -- movq %r11, %r10 -- andq $-64, %r10 -- cmpq %r10, %rdi -- jge L(mt32) -- --L(A64main): -- movdqu (%rdi,%rsi), %xmm0 -- pcmpeqb (%rdi), %xmm0 -- pmovmskb %xmm0, %edx -- subl $0xffff, %edx -- jnz L(neq) -- addq $16, %rdi -- -- movdqu (%rdi,%rsi), %xmm0 -- pcmpeqb (%rdi), %xmm0 -- pmovmskb %xmm0, %edx -- subl $0xffff, %edx -- jnz L(neq) -- addq $16, %rdi -- -- movdqu (%rdi,%rsi), %xmm0 -- pcmpeqb (%rdi), %xmm0 -- pmovmskb %xmm0, %edx -- subl $0xffff, %edx -- jnz L(neq) -- addq $16, %rdi -- -- movdqu (%rdi,%rsi), %xmm0 -- pcmpeqb (%rdi), %xmm0 -- pmovmskb %xmm0, %edx -- subl $0xffff, %edx -- jnz L(neq) -- addq $16, %rdi -- -- cmpq %rdi, %r10 -- jne L(A64main) -- --L(mt32): -- movq %r11, %r10 -- andq $-32, %r10 -- cmpq %r10, %rdi -- jge L(mt16) -- --L(A32main): -- movdqu (%rdi,%rsi), %xmm0 -- pcmpeqb (%rdi), %xmm0 -- pmovmskb %xmm0, %edx -- subl $0xffff, %edx -- jnz L(neq) -- addq $16, %rdi -- -- movdqu (%rdi,%rsi), %xmm0 -- pcmpeqb (%rdi), %xmm0 -- pmovmskb %xmm0, %edx -- subl $0xffff, %edx -- jnz L(neq) -- addq $16, %rdi -- -- cmpq %rdi, %r10 -- jne L(A32main) --L(mt16): -- subq %rdi, %r11 -- je L(finz) -- movq %r11, %r10 -- jmp L(small) -- -- .p2align 4,, 4 --L(neq): -- bsfl %edx, %ecx -- movzbl (%rdi, %rcx), %eax -- addq %rdi, %rsi -- movzbl (%rsi,%rcx), %edx -- jmp L(finz1) -- -- .p2align 4,, 4 --L(ATR): -- movq %r11, %r10 -- andq $-32, %r10 -- cmpq %r10, %rdi -- jge L(mt16) -- testq $16, %rdi -- jz L(ATR32) -- -- movdqa (%rdi,%rsi), %xmm0 -- pcmpeqb (%rdi), %xmm0 -- pmovmskb %xmm0, %edx -- subl $0xffff, %edx -- jnz L(neq) -- addq $16, %rdi -- cmpq %rdi, %r10 -- je L(mt16) -- --L(ATR32): -- movq %r11, %r10 -- andq $-64, %r10 -- testq $32, %rdi -- jz L(ATR64) -- -- movdqa (%rdi,%rsi), %xmm0 -- pcmpeqb (%rdi), %xmm0 -- pmovmskb %xmm0, %edx -- subl $0xffff, %edx -- jnz L(neq) -- addq $16, %rdi -- -- movdqa (%rdi,%rsi), %xmm0 -- pcmpeqb (%rdi), %xmm0 -- pmovmskb %xmm0, %edx -- subl $0xffff, %edx -- jnz L(neq) -- addq $16, %rdi -- --L(ATR64): -- cmpq %rdi, %r10 -- je L(mt32) -- --L(ATR64main): -- movdqa (%rdi,%rsi), %xmm0 -- pcmpeqb (%rdi), %xmm0 -- pmovmskb %xmm0, %edx -- subl $0xffff, %edx -- jnz L(neq) -- addq $16, %rdi -- -- movdqa (%rdi,%rsi), %xmm0 -- pcmpeqb (%rdi), %xmm0 -- pmovmskb %xmm0, %edx -- subl $0xffff, %edx -- jnz L(neq) -- addq $16, %rdi -- -- movdqa (%rdi,%rsi), %xmm0 -- pcmpeqb (%rdi), %xmm0 -- pmovmskb %xmm0, %edx -- subl $0xffff, %edx -- jnz L(neq) -- addq $16, %rdi -- -- movdqa (%rdi,%rsi), %xmm0 -- pcmpeqb (%rdi), %xmm0 -- pmovmskb %xmm0, %edx -- subl $0xffff, %edx -- jnz L(neq) -- addq $16, %rdi -- cmpq %rdi, %r10 -- jne L(ATR64main) -- -- movq %r11, %r10 -- andq $-32, %r10 -- cmpq %r10, %rdi -- jge L(mt16) -- --L(ATR32res): -- movdqa (%rdi,%rsi), %xmm0 -- pcmpeqb (%rdi), %xmm0 -- pmovmskb %xmm0, %edx -- subl $0xffff, %edx -- jnz L(neq) -- addq $16, %rdi -- -- movdqa (%rdi,%rsi), %xmm0 -- pcmpeqb (%rdi), %xmm0 -- pmovmskb %xmm0, %edx -- subl $0xffff, %edx -- jnz L(neq) -- addq $16, %rdi -- -- cmpq %r10, %rdi -- jne L(ATR32res) -- -- subq %rdi, %r11 -- je L(finz) -- movq %r11, %r10 -- jmp L(small) -- /* Align to 16byte to improve instruction fetch. 
*/ -- .p2align 4,, 4 --END(memcmp) -+# $Header: /K8_Projects/Glibc/amd64memcmp.S 4 10/06/03 10:57 Emenezes $ -+ -+# (c) 2002 Advanced Micro Devices, Inc. -+# YOUR USE OF THIS CODE IS SUBJECT TO THE TERMS -+# AND CONDITIONS OF THE GNU LESSER GENERAL PUBLIC -+# LICENSE FOUND IN THE "README" FILE THAT IS -+# INCLUDED WITH THIS FILE -+ -+#include "sysdep.h" -+#include -+ -+#ifdef PIC -+ .globl _rtld_local_ro -+ .hidden _rtld_local_ro -+ .set _rtld_local_ro,_rtld_global_ro -+#endif -+ -+ .text -+ -+ENTRY (memcmp) # (const void *, const void*, size_t) -+ -+L(memcmptry1): -+ cmp $8, %rdx -+ jae L(memcmp1after) -+ -+L(memcmp1): # 1-byte -+ test %rdx, %rdx -+ mov $0, %eax -+ jz L(memcmpexit) -+ -+L(memcmp1loop): -+ movzbl (%rdi), %eax -+ movzbl (%rsi), %ecx -+ sub %ecx, %eax -+ jnz L(memcmpexit) -+ -+ dec %rdx -+ -+ lea 1 (%rdi), %rdi -+ lea 1 (%rsi), %rsi -+ -+ jnz L(memcmp1loop) -+ -+L(memcmpexit): -+ rep -+ ret -+ -+ .p2align 4 -+ -+L(memcmp1after): -+ -+L(memcmp8try): -+ cmp $32, %rdx -+ jae L(memcmp8after) -+ -+L(memcmp8): # 8-byte -+ mov %edx, %ecx -+ shr $3, %ecx -+ jz L(memcmp1) -+ -+ .p2align 4 -+ -+L(memcmp8loop): -+ mov (%rsi), %rax -+ cmp (%rdi), %rax -+ jne L(memcmp1) -+ -+ sub $8, %rdx -+ dec %ecx -+ -+ lea 8 (%rsi), %rsi -+ lea 8 (%rdi), %rdi -+ -+ jnz L(memcmp8loop) -+ -+L(memcmp8skip): -+ and $7, %edx -+ jnz L(memcmp1) -+ -+ xor %eax, %eax -+ ret -+ -+ .p2align 4 -+ -+L(memcmp8after): -+ -+L(memcmp32try): -+ cmp $2048, %rdx -+ ja L(memcmp32after) -+ -+L(memcmp32): # 32-byte -+ mov %edx, %ecx -+ shr $5, %ecx -+ jz L(memcmp8) -+ -+ .p2align 4 -+ -+L(memcmp32loop): -+ mov (%rsi), %rax -+ mov 8 (%rsi), %r8 -+ mov 16 (%rsi), %r9 -+ mov 24 (%rsi), %r10 -+ sub (%rdi), %rax -+ sub 8 (%rdi), %r8 -+ sub 16 (%rdi), %r9 -+ sub 24 (%rdi), %r10 -+ -+ or %rax, %r8 -+ or %r9, %r10 -+ or %r8, %r10 -+ jnz L(memcmp8) -+ -+ sub $32, %rdx -+ dec %ecx -+ -+ lea 32 (%rsi), %rsi -+ lea 32 (%rdi), %rdi -+ -+ jnz L(memcmp32loop) -+ -+L(memcmp32skip): -+ and $31, %edx -+ jnz L(memcmp8) -+ -+ xor %eax, %eax -+ ret -+ -+ .p2align 4 -+ -+L(memcmp32after): -+ -+#ifdef PIC -+ mov _rtld_local_ro@GOTPCREL(%rip), %r8 -+ mov RTLD_GLOBAL_DL_CACHE1SIZEHALF(%r8), %r9 -+#else -+ mov _dl_cache1sizehalf, %r9 -+#endif -+ prefetcht0 (%r9) -+ -+ -+.alignsrctry: -+ mov %esi, %r8d # align by source -+ -+ and $7, %r8d -+ jz .alignsrcafter # not unaligned -+ -+.alignsrc: # align -+ lea -8 (%r8, %rdx), %rdx -+ sub $8, %r8d -+ -+# .p2align 4 -+ -+.alignsrcloop: -+ movzbl (%rdi), %eax -+ movzbl (%rsi), %ecx -+ sub %ecx, %eax -+ jnz L(memcmpexit) -+ -+ inc %r8d -+ -+ lea 1 (%rdi), %rdi -+ lea 1 (%rsi), %rsi -+ -+ jnz .alignsrcloop -+ -+ .p2align 4 -+ -+.alignsrcafter: -+ -+ -+L(memcmp64try): -+#ifdef PIC -+ mov _rtld_local_ro@GOTPCREL(%rip), %r8 -+ mov RTLD_GLOBAL_DL_CACHE1SIZEHALF(%r8), %rcx -+#else -+ mov _dl_cache1sizehalf, %rcx -+#endif -+ cmp %rdx, %rcx -+ cmova %rdx, %rcx -+ -+L(memcmp64): # 64-byte -+ shr $6, %rcx -+ jz L(memcmp32) -+ -+ .p2align 4 -+ -+L(memcmp64loop): -+ mov (%rsi), %rax -+ mov 8 (%rsi), %r8 -+ sub (%rdi), %rax -+ sub 8 (%rdi), %r8 -+ or %r8, %rax -+ -+ mov 16 (%rsi), %r9 -+ mov 24 (%rsi), %r10 -+ sub 16 (%rdi), %r9 -+ sub 24 (%rdi), %r10 -+ or %r10, %r9 -+ -+ or %r9, %rax -+ jnz L(memcmp32) -+ -+ mov 32 (%rsi), %rax -+ mov 40 (%rsi), %r8 -+ sub 32 (%rdi), %rax -+ sub 40 (%rdi), %r8 -+ or %r8, %rax -+ -+ mov 48 (%rsi), %r9 -+ mov 56 (%rsi), %r10 -+ sub 48 (%rdi), %r9 -+ sub 56 (%rdi), %r10 -+ or %r10, %r9 -+ -+ or %r9, %rax -+ jnz L(memcmp32) -+ -+ lea 64 (%rsi), %rsi -+ lea 64 (%rdi), %rdi -+ -+ sub $64, 
%rdx -+ dec %rcx -+ jnz L(memcmp64loop) -+ -+# .p2align 4 -+ -+L(memcmp64skip): -+ cmp $2048, %rdx -+ ja L(memcmp64after) -+ -+ test %edx, %edx -+ jnz L(memcmp32) -+ -+ xor %eax, %eax -+ ret -+ -+ .p2align 4 -+ -+L(memcmp64after): -+ -+L(memcmppretry): -+ -+L(memcmppre): # 64-byte prefetching -+#ifdef PIC -+ mov _rtld_local_ro@GOTPCREL(%rip), %r8 -+ mov RTLD_GLOBAL_DL_CACHE2SIZEHALF(%r8), %rcx -+#else -+ mov _dl_cache2sizehalf, %rcx -+#endif -+ cmp %rdx, %rcx -+ cmova %rdx, %rcx -+ -+ shr $6, %rcx -+ jz L(memcmppreskip) -+ -+ prefetcht0 512 (%rsi) -+ prefetcht0 512 (%rdi) -+ -+ mov (%rsi), %rax -+ mov 8 (%rsi), %r9 -+ mov 16 (%rsi), %r10 -+ mov 24 (%rsi), %r11 -+ sub (%rdi), %rax -+ sub 8 (%rdi), %r9 -+ sub 16 (%rdi), %r10 -+ sub 24 (%rdi), %r11 -+ -+ or %r9, %rax -+ or %r11, %r10 -+ or %r10, %rax -+ jnz L(memcmp32) -+ -+ mov 32 (%rsi), %rax -+ mov 40 (%rsi), %r9 -+ mov 48 (%rsi), %r10 -+ mov 56 (%rsi), %r11 -+ sub 32 (%rdi), %rax -+ sub 40 (%rdi), %r9 -+ sub 48 (%rdi), %r10 -+ sub 56 (%rdi), %r11 -+ -+ or %r9, %rax -+ or %r11, %r10 -+ or %r10, %rax -+ jnz L(memcmp32) -+ -+ lea 64 (%rsi), %rsi -+ lea 64 (%rdi), %rdi -+ -+ sub $64, %rdx -+ dec %rcx -+ -+ .p2align 4 -+ -+L(memcmppreloop): -+ prefetcht0 512 (%rsi) -+ prefetcht0 512 (%rdi) -+ -+ mov (%rsi), %rax -+ mov 8 (%rsi), %r9 -+ mov 16 (%rsi), %r10 -+ mov 24 (%rsi), %r11 -+ sub (%rdi), %rax -+ sub 8 (%rdi), %r9 -+ sub 16 (%rdi), %r10 -+ sub 24 (%rdi), %r11 -+ -+ or %r9, %rax -+ or %r11, %r10 -+ or %r10, %rax -+ jnz L(memcmp32) -+ -+ mov 32 (%rsi), %rax -+ mov 40 (%rsi), %r9 -+ mov 48 (%rsi), %r10 -+ mov 56 (%rsi), %r11 -+ sub 32 (%rdi), %rax -+ sub 40 (%rdi), %r9 -+ sub 48 (%rdi), %r10 -+ sub 56 (%rdi), %r11 -+ -+ or %r9, %rax -+ or %r11, %r10 -+ or %r10, %rax -+ jnz L(memcmp32) -+ -+ lea 64 (%rsi), %rsi -+ lea 64 (%rdi), %rdi -+ -+ sub $64, %rdx -+ dec %rcx -+ jnz L(memcmppreloop) -+ -+# .p2align 4 -+ -+L(memcmppreskip): -+ cmp $2048, %rdx -+ ja L(memcmppreafter) -+ -+ test %edx, %edx -+ jnz L(memcmp32) -+ -+ xor %eax, %eax -+ ret -+ -+ .p2align 4 -+ -+L(memcmppreafter): -+ -+L(memcmp128try): -+ -+L(memcmp128): # 128-byte -+ mov %rdx, %rcx -+ shr $7, %rcx -+ jz L(memcmp128skip) -+ -+ .p2align 4 -+ -+L(memcmp128loop): -+ prefetcht0 512 (%rsi) -+ prefetcht0 512 (%rdi) -+ -+ mov (%rsi), %rax -+ mov 8 (%rsi), %r8 -+ sub (%rdi), %rax -+ sub 8 (%rdi), %r8 -+ mov 16 (%rsi), %r9 -+ mov 24 (%rsi), %r10 -+ sub 16 (%rdi), %r9 -+ sub 24 (%rdi), %r10 -+ -+ or %r8, %rax -+ or %r9, %r10 -+ or %r10, %rax -+ -+ mov 32 (%rsi), %r8 -+ mov 40 (%rsi), %r9 -+ sub 32 (%rdi), %r8 -+ sub 40 (%rdi), %r9 -+ mov 48 (%rsi), %r10 -+ mov 56 (%rsi), %r11 -+ sub 48 (%rdi), %r10 -+ sub 56 (%rdi), %r11 -+ -+ or %r9, %r8 -+ or %r11, %r10 -+ or %r10, %r8 -+ -+ or %r8, %rax -+ jnz L(memcmp32) -+ -+ prefetcht0 576 (%rsi) -+ prefetcht0 576 (%rdi) -+ -+ mov 64 (%rsi), %rax -+ mov 72 (%rsi), %r8 -+ sub 64 (%rdi), %rax -+ sub 72 (%rdi), %r8 -+ mov 80 (%rsi), %r9 -+ mov 88 (%rsi), %r10 -+ sub 80 (%rdi), %r9 -+ sub 88 (%rdi), %r10 -+ -+ or %r8, %rax -+ or %r9, %r10 -+ or %r10, %rax -+ -+ mov 96 (%rsi), %r8 -+ mov 104 (%rsi), %r9 -+ sub 96 (%rdi), %r8 -+ sub 104 (%rdi), %r9 -+ mov 112 (%rsi), %r10 -+ mov 120 (%rsi), %r11 -+ sub 112 (%rdi), %r10 -+ sub 120 (%rdi), %r11 -+ -+ or %r9, %r8 -+ or %r11, %r10 -+ or %r10, %r8 -+ -+ or %r8, %rax -+ jnz L(memcmp32) -+ -+ sub $128, %rdx -+ dec %rcx -+ -+ lea 128 (%rsi), %rsi -+ lea 128 (%rdi), %rdi -+ -+ jnz L(memcmp128loop) -+ -+L(memcmp128skip): -+ and $127, %edx -+ jnz L(memcmp32) -+ -+ xor %eax, %eax -+ ret -+ -+END (memcmp) - - #undef 
bcmp - weak_alias (memcmp, bcmp) -Index: sysdeps/x86_64/strcmp.S -=================================================================== ---- sysdeps/x86_64/strcmp.S.orig -+++ sysdeps/x86_64/strcmp.S -@@ -1,2108 +1,490 @@ --/* Highly optimized version for x86-64. -- Copyright (C) 1999, 2000, 2002, 2003, 2005, 2009 -- Free Software Foundation, Inc. -- This file is part of the GNU C Library. -- Based on i686 version contributed by Ulrich Drepper -- , 1999. -- Updated with SSE2 support contributed by Intel Corporation. -- -- The GNU C Library is free software; you can redistribute it and/or -- modify it under the terms of the GNU Lesser General Public -- License as published by the Free Software Foundation; either -- version 2.1 of the License, or (at your option) any later version. -- -- The GNU C Library is distributed in the hope that it will be useful, -- but WITHOUT ANY WARRANTY; without even the implied warranty of -- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -- Lesser General Public License for more details. -- -- You should have received a copy of the GNU Lesser General Public -- License along with the GNU C Library; if not, write to the Free -- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA -- 02111-1307 USA. */ - - #include - #include "asm-syntax.h" - #include "bp-sym.h" - #include "bp-asm.h" - --#undef UPDATE_STRNCMP_COUNTER -- - #ifndef LABEL - #define LABEL(l) L(l) - #endif - --#ifdef USE_AS_STRNCMP --/* Since the counter, %r11, is unsigned, we branch to strcmp_exitz -- if the new counter > the old one or is 0. */ --# define UPDATE_STRNCMP_COUNTER \ -- /* calculate left number to compare */ \ -- lea -16(%rcx, %r11), %r9; \ -- cmp %r9, %r11; \ -- jb LABEL(strcmp_exitz); \ -- test %r9, %r9; \ -- je LABEL(strcmp_exitz); \ -- mov %r9, %r11 -- --#else --# define UPDATE_STRNCMP_COUNTER -+#ifndef USE_AS_STRNCMP - # ifndef STRCMP - # define STRCMP strcmp - # endif - #endif -- --#ifndef USE_SSSE3 - .text --#else -- .section .text.ssse3,"ax",@progbits --#endif - --ENTRY (BP_SYM (STRCMP)) --#ifdef NOT_IN_libc --/* Simple version since we can't use SSE registers in ld.so. */ --L(oop): movb (%rdi), %al -- cmpb (%rsi), %al -- jne L(neq) -- incq %rdi -- incq %rsi -- testb %al, %al -- jnz L(oop) -- -- xorl %eax, %eax -- ret -- --L(neq): movl $1, %eax -- movl $-1, %ecx -- cmovbl %ecx, %eax -- ret --END (BP_SYM (STRCMP)) --#else /* NOT_IN_libc */ --/* -- * This implementation uses SSE to compare up to 16 bytes at a time. -- */ --#ifdef USE_AS_STRNCMP -- test %rdx, %rdx -- je LABEL(strcmp_exitz) -- cmp $1, %rdx -- je LABEL(Byte0) -- mov %rdx, %r11 --#endif -- mov %esi, %ecx -- mov %edi, %eax --/* Use 64bit AND here to avoid long NOP padding. */ -- and $0x3f, %rcx /* rsi alignment in cache line */ -- and $0x3f, %rax /* rdi alignment in cache line */ -- cmp $0x30, %ecx -- ja LABEL(crosscache) /* rsi: 16-byte load will cross cache line */ -- cmp $0x30, %eax -- ja LABEL(crosscache) /* rdi: 16-byte load will cross cache line */ -- movlpd (%rdi), %xmm1 -- movlpd (%rsi), %xmm2 -- movhpd 8(%rdi), %xmm1 -- movhpd 8(%rsi), %xmm2 -- pxor %xmm0, %xmm0 /* clear %xmm0 for null char checks */ -- pcmpeqb %xmm1, %xmm0 /* Any null chars? 
*/ -- pcmpeqb %xmm2, %xmm1 /* compare first 16 bytes for equality */ -- psubb %xmm0, %xmm1 /* packed sub of comparison results*/ -- pmovmskb %xmm1, %edx -- sub $0xffff, %edx /* if first 16 bytes are same, edx == 0xffff */ -- jnz LABEL(less16bytes) /* If not, find different value or null char */ --#ifdef USE_AS_STRNCMP -- sub $16, %r11 -- jbe LABEL(strcmp_exitz) /* finish comparision */ --#endif -- add $16, %rsi /* prepare to search next 16 bytes */ -- add $16, %rdi /* prepare to search next 16 bytes */ -+ENTRY (STRCMP) # (const char *, const char *) - -- /* -- * Determine source and destination string offsets from 16-byte alignment. -- * Use relative offset difference between the two to determine which case -- * below to use. -- */ -- .p2align 4 --LABEL(crosscache): -- and $0xfffffffffffffff0, %rsi /* force %rsi is 16 byte aligned */ -- and $0xfffffffffffffff0, %rdi /* force %rdi is 16 byte aligned */ -- mov $0xffff, %edx /* for equivalent offset */ -- xor %r8d, %r8d -- and $0xf, %ecx /* offset of rsi */ -- and $0xf, %eax /* offset of rdi */ -- cmp %eax, %ecx -- je LABEL(ashr_0) /* rsi and rdi relative offset same */ -- ja LABEL(bigger) -- mov %edx, %r8d /* r8d is offset flag for exit tail */ -- xchg %ecx, %eax -- xchg %rsi, %rdi --LABEL(bigger): -- lea 15(%rax), %r9 -- sub %rcx, %r9 -- lea LABEL(unaligned_table)(%rip), %r10 -- movslq (%r10, %r9,4), %r9 -- lea (%r10, %r9), %r10 -- jmp *%r10 /* jump to corresponding case */ -- --/* -- * The following cases will be handled by ashr_0 -- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case -- * n(0~15) n(0~15) 15(15+ n-n) ashr_0 -- */ -- .p2align 4 --LABEL(ashr_0): -- -- movdqa (%rsi), %xmm1 -- pxor %xmm0, %xmm0 /* clear %xmm0 for null char check */ -- pcmpeqb %xmm1, %xmm0 /* Any null chars? */ -- pcmpeqb (%rdi), %xmm1 /* compare 16 bytes for equality */ -- psubb %xmm0, %xmm1 /* packed sub of comparison results*/ -- pmovmskb %xmm1, %r9d -- shr %cl, %edx /* adjust 0xffff for offset */ -- shr %cl, %r9d /* adjust for 16-byte offset */ -- sub %r9d, %edx -- /* -- * edx must be the same with r9d if in left byte (16-rcx) is equal to -- * the start from (16-rax) and no null char was seen. -- */ -- jne LABEL(less32bytes) /* mismatch or null char */ -- UPDATE_STRNCMP_COUNTER -- mov $16, %rcx -- mov $16, %r9 -- pxor %xmm0, %xmm0 /* clear xmm0, may have changed above */ -- -- /* -- * Now both strings are aligned at 16-byte boundary. Loop over strings -- * checking 32-bytes per iteration. 
-- */ -- .p2align 4 --LABEL(loop_ashr_0): -- movdqa (%rsi, %rcx), %xmm1 -- movdqa (%rdi, %rcx), %xmm2 -- -- pcmpeqb %xmm1, %xmm0 -- pcmpeqb %xmm2, %xmm1 -- psubb %xmm0, %xmm1 -- pmovmskb %xmm1, %edx -- sub $0xffff, %edx -- jnz LABEL(exit) /* mismatch or null char seen */ -+ xor %ecx, %ecx - --#ifdef USE_AS_STRNCMP -- sub $16, %r11 -- jbe LABEL(strcmp_exitz) --#endif -- add $16, %rcx -- movdqa (%rsi, %rcx), %xmm1 -- movdqa (%rdi, %rcx), %xmm2 -- -- pcmpeqb %xmm1, %xmm0 -- pcmpeqb %xmm2, %xmm1 -- psubb %xmm0, %xmm1 -- pmovmskb %xmm1, %edx -- sub $0xffff, %edx -- jnz LABEL(exit) --#ifdef USE_AS_STRNCMP -- sub $16, %r11 -- jbe LABEL(strcmp_exitz) --#endif -- add $16, %rcx -- jmp LABEL(loop_ashr_0) -+#ifdef USE_AS_STRNCMP // (const char *, const char *, size_t) -+ mov %r14, -8 (%rsp) -+ mov %rdx, %r14 - --/* -- * The following cases will be handled by ashr_1 -- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case -- * n(15) n -15 0(15 +(n-15) - n) ashr_1 -- */ -- .p2align 4 --LABEL(ashr_1): -- pxor %xmm0, %xmm0 -- movdqa (%rdi), %xmm2 -- movdqa (%rsi), %xmm1 -- pcmpeqb %xmm1, %xmm0 /* Any null chars? */ -- pslldq $15, %xmm2 /* shift first string to align with second */ -- pcmpeqb %xmm1, %xmm2 /* compare 16 bytes for equality */ -- psubb %xmm0, %xmm2 /* packed sub of comparison results*/ -- pmovmskb %xmm2, %r9d -- shr %cl, %edx /* adjust 0xffff for offset */ -- shr %cl, %r9d /* adjust for 16-byte offset */ -- sub %r9d, %edx -- jnz LABEL(less32bytes) /* mismatch or null char seen */ -- movdqa (%rdi), %xmm3 -- UPDATE_STRNCMP_COUNTER -- -- pxor %xmm0, %xmm0 -- mov $16, %rcx /* index for loads*/ -- mov $1, %r9d /* byte position left over from less32bytes case */ -- /* -- * Setup %r10 value allows us to detect crossing a page boundary. -- * When %r10 goes positive we have crossed a page boundary and -- * need to do a nibble. 
-- */ -- lea 1(%rdi), %r10 -- and $0xfff, %r10 /* offset into 4K page */ -- sub $0x1000, %r10 /* subtract 4K pagesize */ -- -- .p2align 4 --LABEL(loop_ashr_1): -- add $16, %r10 -- jg LABEL(nibble_ashr_1) /* cross page boundary */ -- --LABEL(gobble_ashr_1): -- movdqa (%rsi, %rcx), %xmm1 -- movdqa (%rdi, %rcx), %xmm2 -- movdqa %xmm2, %xmm4 /* store for next cycle */ -- --#ifndef USE_SSSE3 -- psrldq $1, %xmm3 -- pslldq $15, %xmm2 -- por %xmm3, %xmm2 /* merge into one 16byte value */ --#else -- palignr $1, %xmm3, %xmm2 /* merge into one 16byte value */ --#endif -- -- pcmpeqb %xmm1, %xmm0 -- pcmpeqb %xmm2, %xmm1 -- psubb %xmm0, %xmm1 -- pmovmskb %xmm1, %edx -- sub $0xffff, %edx -- jnz LABEL(exit) -- --#ifdef USE_AS_STRNCMP -- sub $16, %r11 -- jbe LABEL(strcmp_exitz) -+ test %rdx, %rdx -+ mov %edx, %eax -+ jz LABEL(exitz) - #endif -- add $16, %rcx -- movdqa %xmm4, %xmm3 - -- add $16, %r10 -- jg LABEL(nibble_ashr_1) /* cross page boundary */ -+LABEL(aligntry): -+ mov %rsi, %r8 # align by "source" -+ and $8 - 1, %r8 # between 0 and 8 characters compared -+ jz LABEL(alignafter) - -- movdqa (%rsi, %rcx), %xmm1 -- movdqa (%rdi, %rcx), %xmm2 -- movdqa %xmm2, %xmm4 /* store for next cycle */ -- --#ifndef USE_SSSE3 -- psrldq $1, %xmm3 -- pslldq $15, %xmm2 -- por %xmm3, %xmm2 /* merge into one 16byte value */ --#else -- palignr $1, %xmm3, %xmm2 /* merge into one 16byte value */ --#endif -- -- pcmpeqb %xmm1, %xmm0 -- pcmpeqb %xmm2, %xmm1 -- psubb %xmm0, %xmm1 -- pmovmskb %xmm1, %edx -- sub $0xffff, %edx -- jnz LABEL(exit) -+LABEL(align): -+ sub $8, %r8 - --#ifdef USE_AS_STRNCMP -- sub $16, %r11 -- jbe LABEL(strcmp_exitz) --#endif -- add $16, %rcx -- movdqa %xmm4, %xmm3 -- jmp LABEL(loop_ashr_1) -- -- /* -- * Nibble avoids loads across page boundary. This is to avoid a potential -- * access into unmapped memory. -- */ -- .p2align 4 --LABEL(nibble_ashr_1): -- pcmpeqb %xmm3, %xmm0 /* check nibble for null char*/ -- pmovmskb %xmm0, %edx -- test $0xfffe, %edx -- jnz LABEL(ashr_1_exittail) /* find null char*/ -+ .p2align 4 -+ -+LABEL(alignloop): -+ mov (%rsi, %rcx), %al -+ mov (%rdi, %rcx), %dl - - #ifdef USE_AS_STRNCMP -- cmp $14, %r11 -- jbe LABEL(ashr_1_exittail) -+ dec %r14 -+ jl LABEL(exitafter) - #endif - -- pxor %xmm0, %xmm0 -- sub $0x1000, %r10 /* substract 4K from %r10 */ -- jmp LABEL(gobble_ashr_1) -- -- /* -- * Once find null char, determine if there is a string mismatch -- * before the null char. -- */ -- .p2align 4 --LABEL(ashr_1_exittail): -- movdqa (%rsi, %rcx), %xmm1 -- psrldq $1, %xmm0 -- psrldq $1, %xmm3 -- jmp LABEL(aftertail) -- --/* -- * The following cases will be handled by ashr_2 -- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case -- * n(14~15) n -14 1(15 +(n-14) - n) ashr_2 -- */ -- .p2align 4 --LABEL(ashr_2): -- pxor %xmm0, %xmm0 -- movdqa (%rdi), %xmm2 -- movdqa (%rsi), %xmm1 -- pcmpeqb %xmm1, %xmm0 -- pslldq $14, %xmm2 -- pcmpeqb %xmm1, %xmm2 -- psubb %xmm0, %xmm2 -- pmovmskb %xmm2, %r9d -- shr %cl, %edx -- shr %cl, %r9d -- sub %r9d, %edx -- jnz LABEL(less32bytes) -- movdqa (%rdi), %xmm3 -- UPDATE_STRNCMP_COUNTER -- -- pxor %xmm0, %xmm0 -- mov $16, %rcx /* index for loads */ -- mov $2, %r9d /* byte position left over from less32bytes case */ -- /* -- * Setup %r10 value allows us to detect crossing a page boundary. -- * When %r10 goes positive we have crossed a page boundary and -- * need to do a nibble. 
-- */ -- lea 2(%rdi), %r10 -- and $0xfff, %r10 /* offset into 4K page */ -- sub $0x1000, %r10 /* subtract 4K pagesize */ -- -- .p2align 4 --LABEL(loop_ashr_2): -- add $16, %r10 -- jg LABEL(nibble_ashr_2) -- --LABEL(gobble_ashr_2): -- movdqa (%rsi, %rcx), %xmm1 -- movdqa (%rdi, %rcx), %xmm2 -- movdqa %xmm2, %xmm4 -- --#ifndef USE_SSSE3 -- psrldq $2, %xmm3 -- pslldq $14, %xmm2 -- por %xmm3, %xmm2 /* merge into one 16byte value */ --#else -- palignr $2, %xmm3, %xmm2 /* merge into one 16byte value */ --#endif -- -- pcmpeqb %xmm1, %xmm0 -- pcmpeqb %xmm2, %xmm1 -- psubb %xmm0, %xmm1 -- pmovmskb %xmm1, %edx -- sub $0xffff, %edx -- jnz LABEL(exit) -+ cmp %dl, %al # check if same character -+ jne LABEL(exitafter) -+ test %al, %al # check if character a NUL -+ jz LABEL(exitafter) - --#ifdef USE_AS_STRNCMP -- sub $16, %r11 -- jbe LABEL(strcmp_exitz) --#endif -+ inc %ecx - -- add $16, %rcx -- movdqa %xmm4, %xmm3 -+ inc %r8 -+ jnz LABEL(alignloop) - -- add $16, %r10 -- jg LABEL(nibble_ashr_2) /* cross page boundary */ -- -- movdqa (%rsi, %rcx), %xmm1 -- movdqa (%rdi, %rcx), %xmm2 -- movdqa %xmm2, %xmm4 -- --#ifndef USE_SSSE3 -- psrldq $2, %xmm3 -- pslldq $14, %xmm2 -- por %xmm3, %xmm2 /* merge into one 16byte value */ --#else -- palignr $2, %xmm3, %xmm2 /* merge into one 16byte value */ --#endif -- -- pcmpeqb %xmm1, %xmm0 -- pcmpeqb %xmm2, %xmm1 -- psubb %xmm0, %xmm1 -- pmovmskb %xmm1, %edx -- sub $0xffff, %edx -- jnz LABEL(exit) -+ .p2align 4 - --#ifdef USE_AS_STRNCMP -- sub $16, %r11 -- jbe LABEL(strcmp_exitz) --#endif -+LABEL(alignafter): - -- add $16, %rcx -- movdqa %xmm4, %xmm3 -- jmp LABEL(loop_ashr_2) -- -- .p2align 4 --LABEL(nibble_ashr_2): -- pcmpeqb %xmm3, %xmm0 /* check nibble for null char */ -- pmovmskb %xmm0, %edx -- test $0xfffc, %edx -- jnz LABEL(ashr_2_exittail) -+ mov %r15, -32 (%rsp) -+ mov %rbp, -24 (%rsp) -+ mov %rbx, -16 (%rsp) - --#ifdef USE_AS_STRNCMP -- cmp $13, %r11 -- jbe LABEL(ashr_2_exittail) --#endif -+LABEL(pagealigntry): # page align by "destination" -+ mov $4096, %r15d # page size is 4096 -+ lea (%rdi, %rcx), %ebp -+ and $4095, %ebp # page mask -+ sub %r15d, %ebp - -- pxor %xmm0, %xmm0 -- sub $0x1000, %r10 -- jmp LABEL(gobble_ashr_2) -- -- .p2align 4 --LABEL(ashr_2_exittail): -- movdqa (%rsi, %rcx), %xmm1 -- psrldq $2, %xmm0 -- psrldq $2, %xmm3 -- jmp LABEL(aftertail) -- --/* -- * The following cases will be handled by ashr_3 -- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case -- * n(13~15) n -13 2(15 +(n-13) - n) ashr_3 -- */ -- .p2align 4 --LABEL(ashr_3): -- pxor %xmm0, %xmm0 -- movdqa (%rdi), %xmm2 -- movdqa (%rsi), %xmm1 -- pcmpeqb %xmm1, %xmm0 -- pslldq $13, %xmm2 -- pcmpeqb %xmm1, %xmm2 -- psubb %xmm0, %xmm2 -- pmovmskb %xmm2, %r9d -- shr %cl, %edx -- shr %cl, %r9d -- sub %r9d, %edx -- jnz LABEL(less32bytes) -- movdqa (%rdi), %xmm3 -- -- UPDATE_STRNCMP_COUNTER -- -- pxor %xmm0, %xmm0 -- mov $16, %rcx /* index for loads */ -- mov $3, %r9d /* byte position left over from less32bytes case */ -- /* -- * Setup %r10 value allows us to detect crossing a page boundary. -- * When %r10 goes positive we have crossed a page boundary and -- * need to do a nibble. 
-- */ -- lea 3(%rdi), %r10 -- and $0xfff, %r10 /* offset into 4K page */ -- sub $0x1000, %r10 /* subtract 4K pagesize */ -- -- .p2align 4 --LABEL(loop_ashr_3): -- add $16, %r10 -- jg LABEL(nibble_ashr_3) -- --LABEL(gobble_ashr_3): -- movdqa (%rsi, %rcx), %xmm1 -- movdqa (%rdi, %rcx), %xmm2 -- movdqa %xmm2, %xmm4 -- --#ifndef USE_SSSE3 -- psrldq $3, %xmm3 -- pslldq $13, %xmm2 -- por %xmm3, %xmm2 /* merge into one 16byte value */ --#else -- palignr $3, %xmm3, %xmm2 /* merge into one 16byte value */ --#endif -- -- pcmpeqb %xmm1, %xmm0 -- pcmpeqb %xmm2, %xmm1 -- psubb %xmm0, %xmm1 -- pmovmskb %xmm1, %edx -- sub $0xffff, %edx -- jnz LABEL(exit) -+LABEL(64): # 64-byte -+ mov $0xfefefefefefefeff, %rbx # magic number - --#ifdef USE_AS_STRNCMP -- sub $16, %r11 -- jbe LABEL(strcmp_exitz) --#endif -+ .p2align 4 - -- add $16, %rcx -- movdqa %xmm4, %xmm3 -+LABEL(64loop): -+ add $64, %ebp # check if "destination" crosses a page unevenly -+ jle LABEL(64gobble) - -- add $16, %r10 -- jg LABEL(nibble_ashr_3) /* cross page boundary */ -- -- movdqa (%rsi, %rcx), %xmm1 -- movdqa (%rdi, %rcx), %xmm2 -- movdqa %xmm2, %xmm4 -- --#ifndef USE_SSSE3 -- psrldq $3, %xmm3 -- pslldq $13, %xmm2 -- por %xmm3, %xmm2 /* merge into one 16byte value */ --#else -- palignr $3, %xmm3, %xmm2 /* merge into one 16byte value */ --#endif -- -- pcmpeqb %xmm1, %xmm0 -- pcmpeqb %xmm2, %xmm1 -- psubb %xmm0, %xmm1 -- pmovmskb %xmm1, %edx -- sub $0xffff, %edx -- jnz LABEL(exit) -+ sub %r15d, %ebp -+ lea 64 (%rcx), %r8 - --#ifdef USE_AS_STRNCMP -- sub $16, %r11 -- jbe LABEL(strcmp_exitz) --#endif -+ .p2align 4 - -- add $16, %rcx -- movdqa %xmm4, %xmm3 -- jmp LABEL(loop_ashr_3) -- -- .p2align 4 --LABEL(nibble_ashr_3): -- pcmpeqb %xmm3, %xmm0 /* check nibble for null char */ -- pmovmskb %xmm0, %edx -- test $0xfff8, %edx -- jnz LABEL(ashr_3_exittail) -+LABEL(64nibble): -+ mov (%rsi, %rcx), %al -+ mov (%rdi, %rcx), %dl - - #ifdef USE_AS_STRNCMP -- cmp $12, %r11 -- jbe LABEL(ashr_3_exittail) -+ dec %r14 -+ jl .exit - #endif - -- pxor %xmm0, %xmm0 -- sub $0x1000, %r10 -- jmp LABEL(gobble_ashr_3) -- -- .p2align 4 --LABEL(ashr_3_exittail): -- movdqa (%rsi, %rcx), %xmm1 -- psrldq $3, %xmm0 -- psrldq $3, %xmm3 -- jmp LABEL(aftertail) -- --/* -- * The following cases will be handled by ashr_4 -- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case -- * n(12~15) n -12 3(15 +(n-12) - n) ashr_4 -- */ -- .p2align 4 --LABEL(ashr_4): -- pxor %xmm0, %xmm0 -- movdqa (%rdi), %xmm2 -- movdqa (%rsi), %xmm1 -- pcmpeqb %xmm1, %xmm0 -- pslldq $12, %xmm2 -- pcmpeqb %xmm1, %xmm2 -- psubb %xmm0, %xmm2 -- pmovmskb %xmm2, %r9d -- shr %cl, %edx -- shr %cl, %r9d -- sub %r9d, %edx -- jnz LABEL(less32bytes) -- movdqa (%rdi), %xmm3 -- -- UPDATE_STRNCMP_COUNTER -- -- pxor %xmm0, %xmm0 -- mov $16, %rcx /* index for loads */ -- mov $4, %r9d /* byte position left over from less32bytes case */ -- /* -- * Setup %r10 value allows us to detect crossing a page boundary. -- * When %r10 goes positive we have crossed a page boundary and -- * need to do a nibble. 
-- */ -- lea 4(%rdi), %r10 -- and $0xfff, %r10 /* offset into 4K page */ -- sub $0x1000, %r10 /* subtract 4K pagesize */ -- -- .p2align 4 --LABEL(loop_ashr_4): -- add $16, %r10 -- jg LABEL(nibble_ashr_4) -- --LABEL(gobble_ashr_4): -- movdqa (%rsi, %rcx), %xmm1 -- movdqa (%rdi, %rcx), %xmm2 -- movdqa %xmm2, %xmm4 -- --#ifndef USE_SSSE3 -- psrldq $4, %xmm3 -- pslldq $12, %xmm2 -- por %xmm3, %xmm2 /* merge into one 16byte value */ --#else -- palignr $4, %xmm3, %xmm2 /* merge into one 16byte value */ --#endif -- -- pcmpeqb %xmm1, %xmm0 -- pcmpeqb %xmm2, %xmm1 -- psubb %xmm0, %xmm1 -- pmovmskb %xmm1, %edx -- sub $0xffff, %edx -- jnz LABEL(exit) -+ cmp %dl, %al # check if same character -+ jne .exit -+ test %al, %al # check if character a NUL -+ jz .exit - --#ifdef USE_AS_STRNCMP -- sub $16, %r11 -- jbe LABEL(strcmp_exitz) --#endif -+ inc %ecx -+ -+ cmp %ecx, %r8d -+ ja LABEL(64nibble) - -- add $16, %rcx -- movdqa %xmm4, %xmm3 -+ .p2align 4 - -- add $16, %r10 -- jg LABEL(nibble_ashr_4) /* cross page boundary */ -- -- movdqa (%rsi, %rcx), %xmm1 -- movdqa (%rdi, %rcx), %xmm2 -- movdqa %xmm2, %xmm4 -- --#ifndef USE_SSSE3 -- psrldq $4, %xmm3 -- pslldq $12, %xmm2 -- por %xmm3, %xmm2 /* merge into one 16byte value */ --#else -- palignr $4, %xmm3, %xmm2 /* merge into one 16byte value */ --#endif -- -- pcmpeqb %xmm1, %xmm0 -- pcmpeqb %xmm2, %xmm1 -- psubb %xmm0, %xmm1 -- pmovmskb %xmm1, %edx -- sub $0xffff, %edx -- jnz LABEL(exit) -+LABEL(64gobble): -+ mov (%rsi, %rcx), %rax -+ mov (%rdi, %rcx), %rdx - - #ifdef USE_AS_STRNCMP -- sub $16, %r11 -- jbe LABEL(strcmp_exitz) -+ sub $8, %r14 -+ jl LABEL(tail) - #endif - -- add $16, %rcx -- movdqa %xmm4, %xmm3 -- jmp LABEL(loop_ashr_4) -- -- .p2align 4 --LABEL(nibble_ashr_4): -- pcmpeqb %xmm3, %xmm0 /* check nibble for null char */ -- pmovmskb %xmm0, %edx -- test $0xfff0, %edx -- jnz LABEL(ashr_4_exittail) -+ mov %rbx, %r8 -+ add %rax, %r8 -+ sbb %r10, %r10 - --#ifdef USE_AS_STRNCMP -- cmp $11, %r11 -- jbe LABEL(ashr_4_exittail) --#endif -+ mov %rbx, %r9 -+ add %rdx, %r9 -+ sbb %r11, %r11 - -- pxor %xmm0, %xmm0 -- sub $0x1000, %r10 -- jmp LABEL(gobble_ashr_4) -- -- .p2align 4 --LABEL(ashr_4_exittail): -- movdqa (%rsi, %rcx), %xmm1 -- psrldq $4, %xmm0 -- psrldq $4, %xmm3 -- jmp LABEL(aftertail) -- --/* -- * The following cases will be handled by ashr_5 -- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case -- * n(11~15) n - 11 4(15 +(n-11) - n) ashr_5 -- */ -- .p2align 4 --LABEL(ashr_5): -- pxor %xmm0, %xmm0 -- movdqa (%rdi), %xmm2 -- movdqa (%rsi), %xmm1 -- pcmpeqb %xmm1, %xmm0 -- pslldq $11, %xmm2 -- pcmpeqb %xmm1, %xmm2 -- psubb %xmm0, %xmm2 -- pmovmskb %xmm2, %r9d -- shr %cl, %edx -- shr %cl, %r9d -- sub %r9d, %edx -- jnz LABEL(less32bytes) -- movdqa (%rdi), %xmm3 -- -- UPDATE_STRNCMP_COUNTER -- -- pxor %xmm0, %xmm0 -- mov $16, %rcx /* index for loads */ -- mov $5, %r9d /* byte position left over from less32bytes case */ -- /* -- * Setup %r10 value allows us to detect crossing a page boundary. -- * When %r10 goes positive we have crossed a page boundary and -- * need to do a nibble. 
-- */ -- lea 5(%rdi), %r10 -- and $0xfff, %r10 /* offset into 4K page */ -- sub $0x1000, %r10 /* subtract 4K pagesize */ -- -- .p2align 4 --LABEL(loop_ashr_5): -- add $16, %r10 -- jg LABEL(nibble_ashr_5) -- --LABEL(gobble_ashr_5): -- movdqa (%rsi, %rcx), %xmm1 -- movdqa (%rdi, %rcx), %xmm2 -- movdqa %xmm2, %xmm4 -- --#ifndef USE_SSSE3 -- psrldq $5, %xmm3 -- pslldq $11, %xmm2 -- por %xmm3, %xmm2 /* merge into one 16byte value */ --#else -- palignr $5, %xmm3, %xmm2 /* merge into one 16byte value */ --#endif -- -- pcmpeqb %xmm1, %xmm0 -- pcmpeqb %xmm2, %xmm1 -- psubb %xmm0, %xmm1 -- pmovmskb %xmm1, %edx -- sub $0xffff, %edx -- jnz LABEL(exit) -+ xor %rax, %r8 -+ or %rbx, %r8 -+ sub %r10, %r8 -+ jnz LABEL(tail) - --#ifdef USE_AS_STRNCMP -- sub $16, %r11 -- jbe LABEL(strcmp_exitz) --#endif -+ xor %rdx, %r9 -+ or %rbx, %r9 -+ sub %r11, %r9 -+ jnz LABEL(tail) - -- add $16, %rcx -- movdqa %xmm4, %xmm3 -+ cmp %rdx, %rax -+ jne LABEL(tail) - -- add $16, %r10 -- jg LABEL(nibble_ashr_5) /* cross page boundary */ -- -- movdqa (%rsi, %rcx), %xmm1 -- movdqa (%rdi, %rcx), %xmm2 -- movdqa %xmm2, %xmm4 -- --#ifndef USE_SSSE3 -- psrldq $5, %xmm3 -- pslldq $11, %xmm2 -- por %xmm3, %xmm2 /* merge into one 16byte value */ --#else -- palignr $5, %xmm3, %xmm2 /* merge into one 16byte value */ --#endif -- -- pcmpeqb %xmm1, %xmm0 -- pcmpeqb %xmm2, %xmm1 -- psubb %xmm0, %xmm1 -- pmovmskb %xmm1, %edx -- sub $0xffff, %edx -- jnz LABEL(exit) -+ mov 8 (%rsi, %rcx), %rax -+ mov 8 (%rdi, %rcx), %rdx -+ add $8, %ecx - - #ifdef USE_AS_STRNCMP -- sub $16, %r11 -- jbe LABEL(strcmp_exitz) -+ sub $8, %r14 -+ jl LABEL(tail) - #endif - -- add $16, %rcx -- movdqa %xmm4, %xmm3 -- jmp LABEL(loop_ashr_5) -- -- .p2align 4 --LABEL(nibble_ashr_5): -- pcmpeqb %xmm3, %xmm0 /* check nibble for null char */ -- pmovmskb %xmm0, %edx -- test $0xffe0, %edx -- jnz LABEL(ashr_5_exittail) -+ mov %rbx, %r8 -+ add %rax, %r8 -+ sbb %r10, %r10 - --#ifdef USE_AS_STRNCMP -- cmp $10, %r11 -- jbe LABEL(ashr_5_exittail) --#endif -+ mov %rbx, %r9 -+ add %rdx, %r9 -+ sbb %r11, %r11 - -- pxor %xmm0, %xmm0 -- sub $0x1000, %r10 -- jmp LABEL(gobble_ashr_5) -- -- .p2align 4 --LABEL(ashr_5_exittail): -- movdqa (%rsi, %rcx), %xmm1 -- psrldq $5, %xmm0 -- psrldq $5, %xmm3 -- jmp LABEL(aftertail) -- --/* -- * The following cases will be handled by ashr_6 -- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case -- * n(10~15) n - 10 5(15 +(n-10) - n) ashr_6 -- */ -- .p2align 4 --LABEL(ashr_6): -- pxor %xmm0, %xmm0 -- movdqa (%rdi), %xmm2 -- movdqa (%rsi), %xmm1 -- pcmpeqb %xmm1, %xmm0 -- pslldq $10, %xmm2 -- pcmpeqb %xmm1, %xmm2 -- psubb %xmm0, %xmm2 -- pmovmskb %xmm2, %r9d -- shr %cl, %edx -- shr %cl, %r9d -- sub %r9d, %edx -- jnz LABEL(less32bytes) -- movdqa (%rdi), %xmm3 -- -- UPDATE_STRNCMP_COUNTER -- -- pxor %xmm0, %xmm0 -- mov $16, %rcx /* index for loads */ -- mov $6, %r9d /* byte position left over from less32bytes case */ -- /* -- * Setup %r10 value allows us to detect crossing a page boundary. -- * When %r10 goes positive we have crossed a page boundary and -- * need to do a nibble. 
-- */ -- lea 6(%rdi), %r10 -- and $0xfff, %r10 /* offset into 4K page */ -- sub $0x1000, %r10 /* subtract 4K pagesize */ -- -- .p2align 4 --LABEL(loop_ashr_6): -- add $16, %r10 -- jg LABEL(nibble_ashr_6) -- --LABEL(gobble_ashr_6): -- movdqa (%rsi, %rcx), %xmm1 -- movdqa (%rdi, %rcx), %xmm2 -- movdqa %xmm2, %xmm4 -- --#ifndef USE_SSSE3 -- psrldq $6, %xmm3 -- pslldq $10, %xmm2 -- por %xmm3, %xmm2 /* merge into one 16byte value */ --#else -- palignr $6, %xmm3, %xmm2 /* merge into one 16byte value */ --#endif -- -- pcmpeqb %xmm1, %xmm0 -- pcmpeqb %xmm2, %xmm1 -- psubb %xmm0, %xmm1 -- pmovmskb %xmm1, %edx -- sub $0xffff, %edx -- jnz LABEL(exit) -+ xor %rax, %r8 -+ or %rbx, %r8 -+ sub %r10, %r8 -+ jnz LABEL(tail) - --#ifdef USE_AS_STRNCMP -- sub $16, %r11 -- jbe LABEL(strcmp_exitz) --#endif -+ xor %rdx, %r9 -+ or %rbx, %r9 -+ sub %r11, %r9 -+ jnz LABEL(tail) - -- add $16, %rcx -- movdqa %xmm4, %xmm3 -+ cmp %rdx, %rax -+ jne LABEL(tail) - -- add $16, %r10 -- jg LABEL(nibble_ashr_6) /* cross page boundary */ -- -- movdqa (%rsi, %rcx), %xmm1 -- movdqa (%rdi, %rcx), %xmm2 -- movdqa %xmm2, %xmm4 -- --#ifndef USE_SSSE3 -- psrldq $6, %xmm3 -- pslldq $10, %xmm2 -- por %xmm3, %xmm2 /* merge into one 16byte value */ --#else -- palignr $6, %xmm3, %xmm2 /* merge into one 16byte value */ --#endif -- -- pcmpeqb %xmm1, %xmm0 -- pcmpeqb %xmm2, %xmm1 -- psubb %xmm0, %xmm1 -- pmovmskb %xmm1, %edx -- sub $0xffff, %edx -- jnz LABEL(exit) -+ mov 8 (%rsi, %rcx), %rax -+ mov 8 (%rdi, %rcx), %rdx -+ add $8, %ecx - - #ifdef USE_AS_STRNCMP -- sub $16, %r11 -- jbe LABEL(strcmp_exitz) -+ sub $8, %r14 -+ jl LABEL(tail) - #endif - -- add $16, %rcx -- movdqa %xmm4, %xmm3 -- jmp LABEL(loop_ashr_6) -- -- .p2align 4 --LABEL(nibble_ashr_6): -- pcmpeqb %xmm3, %xmm0 /* check nibble for null char */ -- pmovmskb %xmm0, %edx -- test $0xffc0, %edx -- jnz LABEL(ashr_6_exittail) -+ mov %rbx, %r8 -+ add %rax, %r8 -+ sbb %r10, %r10 - --#ifdef USE_AS_STRNCMP -- cmp $9, %r11 -- jbe LABEL(ashr_6_exittail) --#endif -+ mov %rbx, %r9 -+ add %rdx, %r9 -+ sbb %r11, %r11 - -- pxor %xmm0, %xmm0 -- sub $0x1000, %r10 -- jmp LABEL(gobble_ashr_6) -- -- .p2align 4 --LABEL(ashr_6_exittail): -- movdqa (%rsi, %rcx), %xmm1 -- psrldq $6, %xmm0 -- psrldq $6, %xmm3 -- jmp LABEL(aftertail) -- --/* -- * The following cases will be handled by ashr_7 -- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case -- * n(9~15) n - 9 6(15 +(n - 9) - n) ashr_7 -- */ -- .p2align 4 --LABEL(ashr_7): -- pxor %xmm0, %xmm0 -- movdqa (%rdi), %xmm2 -- movdqa (%rsi), %xmm1 -- pcmpeqb %xmm1, %xmm0 -- pslldq $9, %xmm2 -- pcmpeqb %xmm1, %xmm2 -- psubb %xmm0, %xmm2 -- pmovmskb %xmm2, %r9d -- shr %cl, %edx -- shr %cl, %r9d -- sub %r9d, %edx -- jnz LABEL(less32bytes) -- movdqa (%rdi), %xmm3 -- -- UPDATE_STRNCMP_COUNTER -- -- pxor %xmm0, %xmm0 -- mov $16, %rcx /* index for loads */ -- mov $7, %r9d /* byte position left over from less32bytes case */ -- /* -- * Setup %r10 value allows us to detect crossing a page boundary. -- * When %r10 goes positive we have crossed a page boundary and -- * need to do a nibble. 
-- */ -- lea 7(%rdi), %r10 -- and $0xfff, %r10 /* offset into 4K page */ -- sub $0x1000, %r10 /* subtract 4K pagesize */ -- -- .p2align 4 --LABEL(loop_ashr_7): -- add $16, %r10 -- jg LABEL(nibble_ashr_7) -- --LABEL(gobble_ashr_7): -- movdqa (%rsi, %rcx), %xmm1 -- movdqa (%rdi, %rcx), %xmm2 -- movdqa %xmm2, %xmm4 -- --#ifndef USE_SSSE3 -- psrldq $7, %xmm3 -- pslldq $9, %xmm2 -- por %xmm3, %xmm2 /* merge into one 16byte value */ --#else -- palignr $7, %xmm3, %xmm2 /* merge into one 16byte value */ --#endif -- -- pcmpeqb %xmm1, %xmm0 -- pcmpeqb %xmm2, %xmm1 -- psubb %xmm0, %xmm1 -- pmovmskb %xmm1, %edx -- sub $0xffff, %edx -- jnz LABEL(exit) -+ xor %rax, %r8 -+ or %rbx, %r8 -+ sub %r10, %r8 -+ jnz LABEL(tail) - --#ifdef USE_AS_STRNCMP -- sub $16, %r11 -- jbe LABEL(strcmp_exitz) --#endif -+ xor %rdx, %r9 -+ or %rbx, %r9 -+ sub %r11, %r9 -+ jnz LABEL(tail) - -- add $16, %rcx -- movdqa %xmm4, %xmm3 -+ cmp %rdx, %rax -+ jne LABEL(tail) - -- add $16, %r10 -- jg LABEL(nibble_ashr_7) /* cross page boundary */ -- -- movdqa (%rsi, %rcx), %xmm1 -- movdqa (%rdi, %rcx), %xmm2 -- movdqa %xmm2, %xmm4 -- --#ifndef USE_SSSE3 -- psrldq $7, %xmm3 -- pslldq $9, %xmm2 -- por %xmm3, %xmm2 /* merge into one 16byte value */ --#else -- palignr $7, %xmm3, %xmm2 /* merge into one 16byte value */ --#endif -- -- pcmpeqb %xmm1, %xmm0 -- pcmpeqb %xmm2, %xmm1 -- psubb %xmm0, %xmm1 -- pmovmskb %xmm1, %edx -- sub $0xffff, %edx -- jnz LABEL(exit) -+ mov 8 (%rsi, %rcx), %rax -+ mov 8 (%rdi, %rcx), %rdx -+ add $8, %ecx - - #ifdef USE_AS_STRNCMP -- sub $16, %r11 -- jbe LABEL(strcmp_exitz) -+ sub $8, %r14 -+ jl LABEL(tail) - #endif - -- add $16, %rcx -- movdqa %xmm4, %xmm3 -- jmp LABEL(loop_ashr_7) -- -- .p2align 4 --LABEL(nibble_ashr_7): -- pcmpeqb %xmm3, %xmm0 /* check nibble for null char */ -- pmovmskb %xmm0, %edx -- test $0xff80, %edx -- jnz LABEL(ashr_7_exittail) -+ mov %rbx, %r8 -+ add %rax, %r8 -+ sbb %r10, %r10 - --#ifdef USE_AS_STRNCMP -- cmp $8, %r11 -- jbe LABEL(ashr_7_exittail) --#endif -+ mov %rbx, %r9 -+ add %rdx, %r9 -+ sbb %r11, %r11 - -- pxor %xmm0, %xmm0 -- sub $0x1000, %r10 -- jmp LABEL(gobble_ashr_7) -- -- .p2align 4 --LABEL(ashr_7_exittail): -- movdqa (%rsi, %rcx), %xmm1 -- psrldq $7, %xmm0 -- psrldq $7, %xmm3 -- jmp LABEL(aftertail) -- --/* -- * The following cases will be handled by ashr_8 -- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case -- * n(8~15) n - 8 7(15 +(n - 8) - n) ashr_8 -- */ -- .p2align 4 --LABEL(ashr_8): -- pxor %xmm0, %xmm0 -- movdqa (%rdi), %xmm2 -- movdqa (%rsi), %xmm1 -- pcmpeqb %xmm1, %xmm0 -- pslldq $8, %xmm2 -- pcmpeqb %xmm1, %xmm2 -- psubb %xmm0, %xmm2 -- pmovmskb %xmm2, %r9d -- shr %cl, %edx -- shr %cl, %r9d -- sub %r9d, %edx -- jnz LABEL(less32bytes) -- movdqa (%rdi), %xmm3 -- -- UPDATE_STRNCMP_COUNTER -- -- pxor %xmm0, %xmm0 -- mov $16, %rcx /* index for loads */ -- mov $8, %r9d /* byte position left over from less32bytes case */ -- /* -- * Setup %r10 value allows us to detect crossing a page boundary. -- * When %r10 goes positive we have crossed a page boundary and -- * need to do a nibble. 
-- */ -- lea 8(%rdi), %r10 -- and $0xfff, %r10 /* offset into 4K page */ -- sub $0x1000, %r10 /* subtract 4K pagesize */ -- -- .p2align 4 --LABEL(loop_ashr_8): -- add $16, %r10 -- jg LABEL(nibble_ashr_8) -- --LABEL(gobble_ashr_8): -- movdqa (%rsi, %rcx), %xmm1 -- movdqa (%rdi, %rcx), %xmm2 -- movdqa %xmm2, %xmm4 -- --#ifndef USE_SSSE3 -- psrldq $8, %xmm3 -- pslldq $8, %xmm2 -- por %xmm3, %xmm2 /* merge into one 16byte value */ --#else -- palignr $8, %xmm3, %xmm2 /* merge into one 16byte value */ --#endif -- -- pcmpeqb %xmm1, %xmm0 -- pcmpeqb %xmm2, %xmm1 -- psubb %xmm0, %xmm1 -- pmovmskb %xmm1, %edx -- sub $0xffff, %edx -- jnz LABEL(exit) -+ xor %rax, %r8 -+ or %rbx, %r8 -+ sub %r10, %r8 -+ jnz LABEL(tail) - --#ifdef USE_AS_STRNCMP -- sub $16, %r11 -- jbe LABEL(strcmp_exitz) --#endif -+ xor %rdx, %r9 -+ or %rbx, %r9 -+ sub %r11, %r9 -+ jnz LABEL(tail) - -- add $16, %rcx -- movdqa %xmm4, %xmm3 -+ cmp %rdx, %rax -+ jne LABEL(tail) - -- add $16, %r10 -- jg LABEL(nibble_ashr_8) /* cross page boundary */ -- -- movdqa (%rsi, %rcx), %xmm1 -- movdqa (%rdi, %rcx), %xmm2 -- movdqa %xmm2, %xmm4 -- --#ifndef USE_SSSE3 -- psrldq $8, %xmm3 -- pslldq $8, %xmm2 -- por %xmm3, %xmm2 /* merge into one 16byte value */ --#else -- palignr $8, %xmm3, %xmm2 /* merge into one 16byte value */ --#endif -- -- pcmpeqb %xmm1, %xmm0 -- pcmpeqb %xmm2, %xmm1 -- psubb %xmm0, %xmm1 -- pmovmskb %xmm1, %edx -- sub $0xffff, %edx -- jnz LABEL(exit) -+ mov 8 (%rsi, %rcx), %rax -+ mov 8 (%rdi, %rcx), %rdx -+ add $8, %ecx - - #ifdef USE_AS_STRNCMP -- sub $16, %r11 -- jbe LABEL(strcmp_exitz) -+ sub $8, %r14 -+ jl LABEL(tail) - #endif - -- add $16, %rcx -- movdqa %xmm4, %xmm3 -- jmp LABEL(loop_ashr_8) -- -- .p2align 4 --LABEL(nibble_ashr_8): -- pcmpeqb %xmm3, %xmm0 /* check nibble for null char */ -- pmovmskb %xmm0, %edx -- test $0xff00, %edx -- jnz LABEL(ashr_8_exittail) -+ mov %rbx, %r8 -+ add %rax, %r8 -+ sbb %r10, %r10 - --#ifdef USE_AS_STRNCMP -- cmp $7, %r11 -- jbe LABEL(ashr_8_exittail) --#endif -+ mov %rbx, %r9 -+ add %rdx, %r9 -+ sbb %r11, %r11 - -- pxor %xmm0, %xmm0 -- sub $0x1000, %r10 -- jmp LABEL(gobble_ashr_8) -- -- .p2align 4 --LABEL(ashr_8_exittail): -- movdqa (%rsi, %rcx), %xmm1 -- psrldq $8, %xmm0 -- psrldq $8, %xmm3 -- jmp LABEL(aftertail) -- --/* -- * The following cases will be handled by ashr_9 -- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case -- * n(7~15) n - 7 8(15 +(n - 7) - n) ashr_9 -- */ -- .p2align 4 --LABEL(ashr_9): -- pxor %xmm0, %xmm0 -- movdqa (%rdi), %xmm2 -- movdqa (%rsi), %xmm1 -- pcmpeqb %xmm1, %xmm0 -- pslldq $7, %xmm2 -- pcmpeqb %xmm1, %xmm2 -- psubb %xmm0, %xmm2 -- pmovmskb %xmm2, %r9d -- shr %cl, %edx -- shr %cl, %r9d -- sub %r9d, %edx -- jnz LABEL(less32bytes) -- movdqa (%rdi), %xmm3 -- -- UPDATE_STRNCMP_COUNTER -- -- pxor %xmm0, %xmm0 -- mov $16, %rcx /* index for loads */ -- mov $9, %r9d /* byte position left over from less32bytes case */ -- /* -- * Setup %r10 value allows us to detect crossing a page boundary. -- * When %r10 goes positive we have crossed a page boundary and -- * need to do a nibble. 
-- */ -- lea 9(%rdi), %r10 -- and $0xfff, %r10 /* offset into 4K page */ -- sub $0x1000, %r10 /* subtract 4K pagesize */ -- -- .p2align 4 --LABEL(loop_ashr_9): -- add $16, %r10 -- jg LABEL(nibble_ashr_9) -- --LABEL(gobble_ashr_9): -- movdqa (%rsi, %rcx), %xmm1 -- movdqa (%rdi, %rcx), %xmm2 -- movdqa %xmm2, %xmm4 -- --#ifndef USE_SSSE3 -- psrldq $9, %xmm3 -- pslldq $7, %xmm2 -- por %xmm3, %xmm2 /* merge into one 16byte value */ --#else -- palignr $9, %xmm3, %xmm2 /* merge into one 16byte value */ --#endif -- -- pcmpeqb %xmm1, %xmm0 -- pcmpeqb %xmm2, %xmm1 -- psubb %xmm0, %xmm1 -- pmovmskb %xmm1, %edx -- sub $0xffff, %edx -- jnz LABEL(exit) -+ xor %rax, %r8 -+ or %rbx, %r8 -+ sub %r10, %r8 -+ jnz LABEL(tail) - --#ifdef USE_AS_STRNCMP -- sub $16, %r11 -- jbe LABEL(strcmp_exitz) --#endif -+ xor %rdx, %r9 -+ or %rbx, %r9 -+ sub %r11, %r9 -+ jnz LABEL(tail) - -- add $16, %rcx -- movdqa %xmm4, %xmm3 -+ cmp %rdx, %rax -+ jne LABEL(tail) - -- add $16, %r10 -- jg LABEL(nibble_ashr_9) /* cross page boundary */ -- -- movdqa (%rsi, %rcx), %xmm1 -- movdqa (%rdi, %rcx), %xmm2 -- movdqa %xmm2, %xmm4 -- --#ifndef USE_SSSE3 -- psrldq $9, %xmm3 -- pslldq $7, %xmm2 -- por %xmm3, %xmm2 /* merge into one 16byte value */ --#else -- palignr $9, %xmm3, %xmm2 /* merge into one 16byte value */ --#endif -- -- pcmpeqb %xmm1, %xmm0 -- pcmpeqb %xmm2, %xmm1 -- psubb %xmm0, %xmm1 -- pmovmskb %xmm1, %edx -- sub $0xffff, %edx -- jnz LABEL(exit) -+ mov 8 (%rsi, %rcx), %rax -+ mov 8 (%rdi, %rcx), %rdx -+ add $8, %ecx - - #ifdef USE_AS_STRNCMP -- sub $16, %r11 -- jbe LABEL(strcmp_exitz) -+ sub $8, %r14 -+ jl LABEL(tail) - #endif - -- add $16, %rcx -- movdqa %xmm4, %xmm3 /* store for next cycle */ -- jmp LABEL(loop_ashr_9) -- -- .p2align 4 --LABEL(nibble_ashr_9): -- pcmpeqb %xmm3, %xmm0 /* check nibble for null char */ -- pmovmskb %xmm0, %edx -- test $0xfe00, %edx -- jnz LABEL(ashr_9_exittail) -+ mov %rbx, %r8 -+ add %rax, %r8 -+ sbb %r10, %r10 - --#ifdef USE_AS_STRNCMP -- cmp $6, %r11 -- jbe LABEL(ashr_9_exittail) --#endif -+ mov %rbx, %r9 -+ add %rdx, %r9 -+ sbb %r11, %r11 - -- pxor %xmm0, %xmm0 -- sub $0x1000, %r10 -- jmp LABEL(gobble_ashr_9) -- -- .p2align 4 --LABEL(ashr_9_exittail): -- movdqa (%rsi, %rcx), %xmm1 -- psrldq $9, %xmm0 -- psrldq $9, %xmm3 -- jmp LABEL(aftertail) -- --/* -- * The following cases will be handled by ashr_10 -- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case -- * n(6~15) n - 6 9(15 +(n - 6) - n) ashr_10 -- */ -- .p2align 4 --LABEL(ashr_10): -- pxor %xmm0, %xmm0 -- movdqa (%rdi), %xmm2 -- movdqa (%rsi), %xmm1 -- pcmpeqb %xmm1, %xmm0 -- pslldq $6, %xmm2 -- pcmpeqb %xmm1, %xmm2 -- psubb %xmm0, %xmm2 -- pmovmskb %xmm2, %r9d -- shr %cl, %edx -- shr %cl, %r9d -- sub %r9d, %edx -- jnz LABEL(less32bytes) -- movdqa (%rdi), %xmm3 -- -- UPDATE_STRNCMP_COUNTER -- -- pxor %xmm0, %xmm0 -- mov $16, %rcx /* index for loads */ -- mov $10, %r9d /* byte position left over from less32bytes case */ -- /* -- * Setup %r10 value allows us to detect crossing a page boundary. -- * When %r10 goes positive we have crossed a page boundary and -- * need to do a nibble. 
-- */ -- lea 10(%rdi), %r10 -- and $0xfff, %r10 /* offset into 4K page */ -- sub $0x1000, %r10 /* subtract 4K pagesize */ -- -- .p2align 4 --LABEL(loop_ashr_10): -- add $16, %r10 -- jg LABEL(nibble_ashr_10) -- --LABEL(gobble_ashr_10): -- movdqa (%rsi, %rcx), %xmm1 -- movdqa (%rdi, %rcx), %xmm2 -- movdqa %xmm2, %xmm4 -- --#ifndef USE_SSSE3 -- psrldq $10, %xmm3 -- pslldq $6, %xmm2 -- por %xmm3, %xmm2 /* merge into one 16byte value */ --#else -- palignr $10, %xmm3, %xmm2 /* merge into one 16byte value */ --#endif -- -- pcmpeqb %xmm1, %xmm0 -- pcmpeqb %xmm2, %xmm1 -- psubb %xmm0, %xmm1 -- pmovmskb %xmm1, %edx -- sub $0xffff, %edx -- jnz LABEL(exit) -+ xor %rax, %r8 -+ or %rbx, %r8 -+ sub %r10, %r8 -+ jnz LABEL(tail) - --#ifdef USE_AS_STRNCMP -- sub $16, %r11 -- jbe LABEL(strcmp_exitz) --#endif -+ xor %rdx, %r9 -+ or %rbx, %r9 -+ sub %r11, %r9 -+ jnz LABEL(tail) - -- add $16, %rcx -- movdqa %xmm4, %xmm3 -+ cmp %rdx, %rax -+ jne LABEL(tail) - -- add $16, %r10 -- jg LABEL(nibble_ashr_10) /* cross page boundary */ -- -- movdqa (%rsi, %rcx), %xmm1 -- movdqa (%rdi, %rcx), %xmm2 -- movdqa %xmm2, %xmm4 -- --#ifndef USE_SSSE3 -- psrldq $10, %xmm3 -- pslldq $6, %xmm2 -- por %xmm3, %xmm2 /* merge into one 16byte value */ --#else -- palignr $10, %xmm3, %xmm2 /* merge into one 16byte value */ --#endif -- -- pcmpeqb %xmm1, %xmm0 -- pcmpeqb %xmm2, %xmm1 -- psubb %xmm0, %xmm1 -- pmovmskb %xmm1, %edx -- sub $0xffff, %edx -- jnz LABEL(exit) -+ mov 8 (%rsi, %rcx), %rax -+ mov 8 (%rdi, %rcx), %rdx -+ add $8, %ecx - - #ifdef USE_AS_STRNCMP -- sub $16, %r11 -- jbe LABEL(strcmp_exitz) -+ sub $8, %r14 -+ jl LABEL(tail) - #endif - -- add $16, %rcx -- movdqa %xmm4, %xmm3 -- jmp LABEL(loop_ashr_10) -- -- .p2align 4 --LABEL(nibble_ashr_10): -- pcmpeqb %xmm3, %xmm0 /* check nibble for null char */ -- pmovmskb %xmm0, %edx -- test $0xfc00, %edx -- jnz LABEL(ashr_10_exittail) -+ mov %rbx, %r8 -+ add %rax, %r8 -+ sbb %r10, %r10 - --#ifdef USE_AS_STRNCMP -- cmp $5, %r11 -- jbe LABEL(ashr_10_exittail) --#endif -+ mov %rbx, %r9 -+ add %rdx, %r9 -+ sbb %r11, %r11 - -- pxor %xmm0, %xmm0 -- sub $0x1000, %r10 -- jmp LABEL(gobble_ashr_10) -- -- .p2align 4 --LABEL(ashr_10_exittail): -- movdqa (%rsi, %rcx), %xmm1 -- psrldq $10, %xmm0 -- psrldq $10, %xmm3 -- jmp LABEL(aftertail) -- --/* -- * The following cases will be handled by ashr_11 -- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case -- * n(5~15) n - 5 10(15 +(n - 5) - n) ashr_11 -- */ -- .p2align 4 --LABEL(ashr_11): -- pxor %xmm0, %xmm0 -- movdqa (%rdi), %xmm2 -- movdqa (%rsi), %xmm1 -- pcmpeqb %xmm1, %xmm0 -- pslldq $5, %xmm2 -- pcmpeqb %xmm1, %xmm2 -- psubb %xmm0, %xmm2 -- pmovmskb %xmm2, %r9d -- shr %cl, %edx -- shr %cl, %r9d -- sub %r9d, %edx -- jnz LABEL(less32bytes) -- movdqa (%rdi), %xmm3 -- -- UPDATE_STRNCMP_COUNTER -- -- pxor %xmm0, %xmm0 -- mov $16, %rcx /* index for loads */ -- mov $11, %r9d /* byte position left over from less32bytes case */ -- /* -- * Setup %r10 value allows us to detect crossing a page boundary. -- * When %r10 goes positive we have crossed a page boundary and -- * need to do a nibble. 
-- */ -- lea 11(%rdi), %r10 -- and $0xfff, %r10 /* offset into 4K page */ -- sub $0x1000, %r10 /* subtract 4K pagesize */ -- -- .p2align 4 --LABEL(loop_ashr_11): -- add $16, %r10 -- jg LABEL(nibble_ashr_11) -- --LABEL(gobble_ashr_11): -- movdqa (%rsi, %rcx), %xmm1 -- movdqa (%rdi, %rcx), %xmm2 -- movdqa %xmm2, %xmm4 -- --#ifndef USE_SSSE3 -- psrldq $11, %xmm3 -- pslldq $5, %xmm2 -- por %xmm3, %xmm2 /* merge into one 16byte value */ --#else -- palignr $11, %xmm3, %xmm2 /* merge into one 16byte value */ --#endif -- -- pcmpeqb %xmm1, %xmm0 -- pcmpeqb %xmm2, %xmm1 -- psubb %xmm0, %xmm1 -- pmovmskb %xmm1, %edx -- sub $0xffff, %edx -- jnz LABEL(exit) -+ xor %rax, %r8 -+ or %rbx, %r8 -+ sub %r10, %r8 -+ jnz LABEL(tail) - --#ifdef USE_AS_STRNCMP -- sub $16, %r11 -- jbe LABEL(strcmp_exitz) --#endif -+ xor %rdx, %r9 -+ or %rbx, %r9 -+ sub %r11, %r9 -+ jnz LABEL(tail) - -- add $16, %rcx -- movdqa %xmm4, %xmm3 -+ cmp %rdx, %rax -+ jne LABEL(tail) - -- add $16, %r10 -- jg LABEL(nibble_ashr_11) /* cross page boundary */ -- -- movdqa (%rsi, %rcx), %xmm1 -- movdqa (%rdi, %rcx), %xmm2 -- movdqa %xmm2, %xmm4 -- --#ifndef USE_SSSE3 -- psrldq $11, %xmm3 -- pslldq $5, %xmm2 -- por %xmm3, %xmm2 /* merge into one 16byte value */ --#else -- palignr $11, %xmm3, %xmm2 /* merge into one 16byte value */ --#endif -- -- pcmpeqb %xmm1, %xmm0 -- pcmpeqb %xmm2, %xmm1 -- psubb %xmm0, %xmm1 -- pmovmskb %xmm1, %edx -- sub $0xffff, %edx -- jnz LABEL(exit) -+ mov 8 (%rsi, %rcx), %rax -+ mov 8 (%rdi, %rcx), %rdx -+ add $8, %ecx - - #ifdef USE_AS_STRNCMP -- sub $16, %r11 -- jbe LABEL(strcmp_exitz) -+ sub $8, %r14 -+ jl LABEL(tail) - #endif - -- add $16, %rcx -- movdqa %xmm4, %xmm3 -- jmp LABEL(loop_ashr_11) -- -- .p2align 4 --LABEL(nibble_ashr_11): -- pcmpeqb %xmm3, %xmm0 /* check nibble for null char */ -- pmovmskb %xmm0, %edx -- test $0xf800, %edx -- jnz LABEL(ashr_11_exittail) -+ mov %rbx, %r8 -+ add %rax, %r8 -+ sbb %r10, %r10 - --#ifdef USE_AS_STRNCMP -- cmp $4, %r11 -- jbe LABEL(ashr_11_exittail) --#endif -+ mov %rbx, %r9 -+ add %rdx, %r9 -+ sbb %r11, %r11 - -- pxor %xmm0, %xmm0 -- sub $0x1000, %r10 -- jmp LABEL(gobble_ashr_11) -- -- .p2align 4 --LABEL(ashr_11_exittail): -- movdqa (%rsi, %rcx), %xmm1 -- psrldq $11, %xmm0 -- psrldq $11, %xmm3 -- jmp LABEL(aftertail) -- --/* -- * The following cases will be handled by ashr_12 -- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case -- * n(4~15) n - 4 11(15 +(n - 4) - n) ashr_12 -- */ -- .p2align 4 --LABEL(ashr_12): -- pxor %xmm0, %xmm0 -- movdqa (%rdi), %xmm2 -- movdqa (%rsi), %xmm1 -- pcmpeqb %xmm1, %xmm0 -- pslldq $4, %xmm2 -- pcmpeqb %xmm1, %xmm2 -- psubb %xmm0, %xmm2 -- pmovmskb %xmm2, %r9d -- shr %cl, %edx -- shr %cl, %r9d -- sub %r9d, %edx -- jnz LABEL(less32bytes) -- movdqa (%rdi), %xmm3 -- -- UPDATE_STRNCMP_COUNTER -- -- pxor %xmm0, %xmm0 -- mov $16, %rcx /* index for loads */ -- mov $12, %r9d /* byte position left over from less32bytes case */ -- /* -- * Setup %r10 value allows us to detect crossing a page boundary. -- * When %r10 goes positive we have crossed a page boundary and -- * need to do a nibble. 
-- */ -- lea 12(%rdi), %r10 -- and $0xfff, %r10 /* offset into 4K page */ -- sub $0x1000, %r10 /* subtract 4K pagesize */ -- -- .p2align 4 --LABEL(loop_ashr_12): -- add $16, %r10 -- jg LABEL(nibble_ashr_12) -- --LABEL(gobble_ashr_12): -- movdqa (%rsi, %rcx), %xmm1 -- movdqa (%rdi, %rcx), %xmm2 -- movdqa %xmm2, %xmm4 -- --#ifndef USE_SSSE3 -- psrldq $12, %xmm3 -- pslldq $4, %xmm2 -- por %xmm3, %xmm2 /* merge into one 16byte value */ --#else -- palignr $12, %xmm3, %xmm2 /* merge into one 16byte value */ --#endif -- -- pcmpeqb %xmm1, %xmm0 -- pcmpeqb %xmm2, %xmm1 -- psubb %xmm0, %xmm1 -- pmovmskb %xmm1, %edx -- sub $0xffff, %edx -- jnz LABEL(exit) -+ xor %rax, %r8 -+ or %rbx, %r8 -+ sub %r10, %r8 -+ jnz LABEL(tail) - --#ifdef USE_AS_STRNCMP -- sub $16, %r11 -- jbe LABEL(strcmp_exitz) --#endif -+ xor %rdx, %r9 -+ or %rbx, %r9 -+ sub %r11, %r9 -+ jnz LABEL(tail) - -- add $16, %rcx -- movdqa %xmm4, %xmm3 -+ cmp %rdx, %rax -+ jne LABEL(tail) - -- add $16, %r10 -- jg LABEL(nibble_ashr_12) /* cross page boundary */ -- -- movdqa (%rsi, %rcx), %xmm1 -- movdqa (%rdi, %rcx), %xmm2 -- movdqa %xmm2, %xmm4 -- --#ifndef USE_SSSE3 -- psrldq $12, %xmm3 -- pslldq $4, %xmm2 -- por %xmm3, %xmm2 /* merge into one 16byte value */ --#else -- palignr $12, %xmm3, %xmm2 /* merge into one 16byte value */ --#endif -- -- pcmpeqb %xmm1, %xmm0 -- pcmpeqb %xmm2, %xmm1 -- psubb %xmm0, %xmm1 -- pmovmskb %xmm1, %edx -- sub $0xffff, %edx -- jnz LABEL(exit) -+ add $8, %ecx - --#ifdef USE_AS_STRNCMP -- sub $16, %r11 -- jbe LABEL(strcmp_exitz) --#endif -+ jmp LABEL(64loop) -+ -+LABEL(64after): - -- add $16, %rcx -- movdqa %xmm4, %xmm3 -- jmp LABEL(loop_ashr_12) -- -- .p2align 4 --LABEL(nibble_ashr_12): -- pcmpeqb %xmm3, %xmm0 /* check nibble for null char */ -- pmovmskb %xmm0, %edx -- test $0xf000, %edx -- jnz LABEL(ashr_12_exittail) -+LABEL(tailtry): -+# mov (%rsi, %rcx), %rax -+# mov (%rdi, %rcx), %rdx -+# add $8, %rcx - -+LABEL(tail): # byte tail - #ifdef USE_AS_STRNCMP -- cmp $3, %r11 -- jbe LABEL(ashr_12_exittail) -+ add $7, %r14 - #endif - -- pxor %xmm0, %xmm0 -- sub $0x1000, %r10 -- jmp LABEL(gobble_ashr_12) -- -- .p2align 4 --LABEL(ashr_12_exittail): -- movdqa (%rsi, %rcx), %xmm1 -- psrldq $12, %xmm0 -- psrldq $12, %xmm3 -- jmp LABEL(aftertail) -- --/* -- * The following cases will be handled by ashr_13 -- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case -- * n(3~15) n - 3 12(15 +(n - 3) - n) ashr_13 -- */ -- .p2align 4 --LABEL(ashr_13): -- pxor %xmm0, %xmm0 -- movdqa (%rdi), %xmm2 -- movdqa (%rsi), %xmm1 -- pcmpeqb %xmm1, %xmm0 -- pslldq $3, %xmm2 -- pcmpeqb %xmm1, %xmm2 -- psubb %xmm0, %xmm2 -- pmovmskb %xmm2, %r9d -- shr %cl, %edx -- shr %cl, %r9d -- sub %r9d, %edx -- jnz LABEL(less32bytes) -- movdqa (%rdi), %xmm3 -- -- UPDATE_STRNCMP_COUNTER -- -- pxor %xmm0, %xmm0 -- mov $16, %rcx /* index for loads */ -- mov $13, %r9d /* byte position left over from less32bytes case */ -- /* -- * Setup %r10 value allows us to detect crossing a page boundary. -- * When %r10 goes positive we have crossed a page boundary and -- * need to do a nibble. 
-- */ -- lea 13(%rdi), %r10 -- and $0xfff, %r10 /* offset into 4K page */ -- sub $0x1000, %r10 /* subtract 4K pagesize */ -- -- .p2align 4 --LABEL(loop_ashr_13): -- add $16, %r10 -- jg LABEL(nibble_ashr_13) -- --LABEL(gobble_ashr_13): -- movdqa (%rsi, %rcx), %xmm1 -- movdqa (%rdi, %rcx), %xmm2 -- movdqa %xmm2, %xmm4 -- --#ifndef USE_SSSE3 -- psrldq $13, %xmm3 -- pslldq $3, %xmm2 -- por %xmm3, %xmm2 /* merge into one 16byte value */ --#else -- palignr $13, %xmm3, %xmm2 /* merge into one 16byte value */ --#endif -- -- pcmpeqb %xmm1, %xmm0 -- pcmpeqb %xmm2, %xmm1 -- psubb %xmm0, %xmm1 -- pmovmskb %xmm1, %edx -- sub $0xffff, %edx -- jnz LABEL(exit) -+ cmp %dl, %al # check if same character -+ jne .exit -+ test %al, %al # check if character a NUL -+ jz .exit -+ -+ shr $8, %rax -+ shr $8, %rdx - - #ifdef USE_AS_STRNCMP -- sub $16, %r11 -- jbe LABEL(strcmp_exitz) -+ dec %r14 -+ jl .exit - #endif - -- add $16, %rcx -- movdqa %xmm4, %xmm3 -+ cmp %dl, %al -+ jne .exit -+ test %al, %al -+ jz .exit - -- add $16, %r10 -- jg LABEL(nibble_ashr_13) /* cross page boundary */ -- -- movdqa (%rsi, %rcx), %xmm1 -- movdqa (%rdi, %rcx), %xmm2 -- movdqa %xmm2, %xmm4 -- --#ifndef USE_SSSE3 -- psrldq $13, %xmm3 -- pslldq $3, %xmm2 -- por %xmm3, %xmm2 /* merge into one 16byte value */ --#else -- palignr $13, %xmm3, %xmm2 /* merge into one 16byte value */ --#endif -- -- pcmpeqb %xmm1, %xmm0 -- pcmpeqb %xmm2, %xmm1 -- psubb %xmm0, %xmm1 -- pmovmskb %xmm1, %edx -- sub $0xffff, %edx -- jnz LABEL(exit) -+ shr $8, %rax -+ shr $8, %rdx - - #ifdef USE_AS_STRNCMP -- sub $16, %r11 -- jbe LABEL(strcmp_exitz) -+ dec %r14 -+ jl .exit - #endif - -- add $16, %rcx -- movdqa %xmm4, %xmm3 -- jmp LABEL(loop_ashr_13) -- -- .p2align 4 --LABEL(nibble_ashr_13): -- pcmpeqb %xmm3, %xmm0 /* check nibble for null char */ -- pmovmskb %xmm0, %edx -- test $0xe000, %edx -- jnz LABEL(ashr_13_exittail) -+ cmp %dl, %al -+ jne .exit -+ test %al, %al -+ jz .exit -+ -+ shr $8, %rax -+ shr $8, %rdx - - #ifdef USE_AS_STRNCMP -- cmp $2, %r11 -- jbe LABEL(ashr_13_exittail) -+ dec %r14 -+ jl .exit - #endif - -- pxor %xmm0, %xmm0 -- sub $0x1000, %r10 -- jmp LABEL(gobble_ashr_13) -- -- .p2align 4 --LABEL(ashr_13_exittail): -- movdqa (%rsi, %rcx), %xmm1 -- psrldq $13, %xmm0 -- psrldq $13, %xmm3 -- jmp LABEL(aftertail) -- --/* -- * The following cases will be handled by ashr_14 -- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case -- * n(2~15) n - 2 13(15 +(n - 2) - n) ashr_14 -- */ -- .p2align 4 --LABEL(ashr_14): -- pxor %xmm0, %xmm0 -- movdqa (%rdi), %xmm2 -- movdqa (%rsi), %xmm1 -- pcmpeqb %xmm1, %xmm0 -- pslldq $2, %xmm2 -- pcmpeqb %xmm1, %xmm2 -- psubb %xmm0, %xmm2 -- pmovmskb %xmm2, %r9d -- shr %cl, %edx -- shr %cl, %r9d -- sub %r9d, %edx -- jnz LABEL(less32bytes) -- movdqa (%rdi), %xmm3 -- -- UPDATE_STRNCMP_COUNTER -- -- pxor %xmm0, %xmm0 -- mov $16, %rcx /* index for loads */ -- mov $14, %r9d /* byte position left over from less32bytes case */ -- /* -- * Setup %r10 value allows us to detect crossing a page boundary. -- * When %r10 goes positive we have crossed a page boundary and -- * need to do a nibble. 
-- */ -- lea 14(%rdi), %r10 -- and $0xfff, %r10 /* offset into 4K page */ -- sub $0x1000, %r10 /* subtract 4K pagesize */ -- -- .p2align 4 --LABEL(loop_ashr_14): -- add $16, %r10 -- jg LABEL(nibble_ashr_14) -- --LABEL(gobble_ashr_14): -- movdqa (%rsi, %rcx), %xmm1 -- movdqa (%rdi, %rcx), %xmm2 -- movdqa %xmm2, %xmm4 -- --#ifndef USE_SSSE3 -- psrldq $14, %xmm3 -- pslldq $2, %xmm2 -- por %xmm3, %xmm2 /* merge into one 16byte value */ --#else -- palignr $14, %xmm3, %xmm2 /* merge into one 16byte value */ --#endif -- -- pcmpeqb %xmm1, %xmm0 -- pcmpeqb %xmm2, %xmm1 -- psubb %xmm0, %xmm1 -- pmovmskb %xmm1, %edx -- sub $0xffff, %edx -- jnz LABEL(exit) -+ cmp %dl, %al -+ jne .exit -+ test %al, %al -+ jz .exit -+ -+ shr $8, %rax -+ shr $8, %rdx - - #ifdef USE_AS_STRNCMP -- sub $16, %r11 -- jbe LABEL(strcmp_exitz) -+ dec %r14 -+ jl .exit - #endif - -- add $16, %rcx -- movdqa %xmm4, %xmm3 -+ cmp %dl, %al -+ jne .exit -+ test %al, %al -+ jz .exit - -- add $16, %r10 -- jg LABEL(nibble_ashr_14) /* cross page boundary */ -- -- movdqa (%rsi, %rcx), %xmm1 -- movdqa (%rdi, %rcx), %xmm2 -- movdqa %xmm2, %xmm4 -- --#ifndef USE_SSSE3 -- psrldq $14, %xmm3 -- pslldq $2, %xmm2 -- por %xmm3, %xmm2 /* merge into one 16byte value */ --#else -- palignr $14, %xmm3, %xmm2 /* merge into one 16byte value */ --#endif -- -- pcmpeqb %xmm1, %xmm0 -- pcmpeqb %xmm2, %xmm1 -- psubb %xmm0, %xmm1 -- pmovmskb %xmm1, %edx -- sub $0xffff, %edx -- jnz LABEL(exit) -+ shr $8, %eax -+ shr $8, %edx - - #ifdef USE_AS_STRNCMP -- sub $16, %r11 -- jbe LABEL(strcmp_exitz) -+ dec %r14 -+ jl .exit - #endif - -- add $16, %rcx -- movdqa %xmm4, %xmm3 -- jmp LABEL(loop_ashr_14) -- -- .p2align 4 --LABEL(nibble_ashr_14): -- pcmpeqb %xmm3, %xmm0 /* check nibble for null char */ -- pmovmskb %xmm0, %edx -- test $0xc000, %edx -- jnz LABEL(ashr_14_exittail) -+ cmp %dl, %al -+ jne .exit -+ test %al, %al -+ jz .exit -+ -+ shr $8, %eax -+ shr $8, %edx - - #ifdef USE_AS_STRNCMP -- cmp $1, %r11 -- jbe LABEL(ashr_14_exittail) -+ dec %r14 -+ jl .exit - #endif - -- pxor %xmm0, %xmm0 -- sub $0x1000, %r10 -- jmp LABEL(gobble_ashr_14) -- -- .p2align 4 --LABEL(ashr_14_exittail): -- movdqa (%rsi, %rcx), %xmm1 -- psrldq $14, %xmm0 -- psrldq $14, %xmm3 -- jmp LABEL(aftertail) -- --/* -- * The following cases will be handled by ashr_15 -- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case -- * n(1~15) n - 1 14(15 +(n - 1) - n) ashr_15 -- */ -- .p2align 4 --LABEL(ashr_15): -- pxor %xmm0, %xmm0 -- movdqa (%rdi), %xmm2 -- movdqa (%rsi), %xmm1 -- pcmpeqb %xmm1, %xmm0 -- pslldq $1, %xmm2 -- pcmpeqb %xmm1, %xmm2 -- psubb %xmm0, %xmm2 -- pmovmskb %xmm2, %r9d -- shr %cl, %edx -- shr %cl, %r9d -- sub %r9d, %edx -- jnz LABEL(less32bytes) -- -- movdqa (%rdi), %xmm3 -- -- UPDATE_STRNCMP_COUNTER -- -- pxor %xmm0, %xmm0 -- mov $16, %rcx /* index for loads */ -- mov $15, %r9d /* byte position left over from less32bytes case */ -- /* -- * Setup %r10 value allows us to detect crossing a page boundary. -- * When %r10 goes positive we have crossed a page boundary and -- * need to do a nibble. 
-- */ -- lea 15(%rdi), %r10 -- and $0xfff, %r10 /* offset into 4K page */ -- -- sub $0x1000, %r10 /* subtract 4K pagesize */ -- -- .p2align 4 --LABEL(loop_ashr_15): -- add $16, %r10 -- jg LABEL(nibble_ashr_15) -- --LABEL(gobble_ashr_15): -- movdqa (%rsi, %rcx), %xmm1 -- movdqa (%rdi, %rcx), %xmm2 -- movdqa %xmm2, %xmm4 -- --#ifndef USE_SSSE3 -- psrldq $15, %xmm3 -- pslldq $1, %xmm2 -- por %xmm3, %xmm2 /* merge into one 16byte value */ --#else -- palignr $15, %xmm3, %xmm2 /* merge into one 16byte value */ --#endif -- -- pcmpeqb %xmm1, %xmm0 -- pcmpeqb %xmm2, %xmm1 -- psubb %xmm0, %xmm1 -- pmovmskb %xmm1, %edx -- sub $0xffff, %edx -- jnz LABEL(exit) -+ cmp %dl, %al -+ jne .exit -+ test %al, %al -+ jz .exit -+ -+ shr $8, %eax -+ shr $8, %edx - - #ifdef USE_AS_STRNCMP -- sub $16, %r11 -- jbe LABEL(strcmp_exitz) -+ dec %r14 -+ jl .exit - #endif - -- add $16, %rcx -- movdqa %xmm4, %xmm3 -+ cmp %dl, %al -+ jne .exit -+# test %al, %al -+# jz .exit - -- add $16, %r10 -- jg LABEL(nibble_ashr_15) /* cross page boundary */ -- -- movdqa (%rsi, %rcx), %xmm1 -- movdqa (%rdi, %rcx), %xmm2 -- movdqa %xmm2, %xmm4 -- --#ifndef USE_SSSE3 -- psrldq $15, %xmm3 -- pslldq $1, %xmm2 -- por %xmm3, %xmm2 /* merge into one 16byte value */ --#else -- palignr $15, %xmm3, %xmm2 /* merge into one 16byte value */ --#endif -- -- pcmpeqb %xmm1, %xmm0 -- pcmpeqb %xmm2, %xmm1 -- psubb %xmm0, %xmm1 -- pmovmskb %xmm1, %edx -- sub $0xffff, %edx -- jnz LABEL(exit) -+ .p2align 4,, 15 - --#ifdef USE_AS_STRNCMP -- sub $16, %r11 -- jbe LABEL(strcmp_exitz) --#endif -+LABEL(tailafter): - -- add $16, %rcx -- movdqa %xmm4, %xmm3 -- jmp LABEL(loop_ashr_15) -- -- .p2align 4 --LABEL(nibble_ashr_15): -- pcmpeqb %xmm3, %xmm0 /* check nibble for null char */ -- pmovmskb %xmm0, %edx -- test $0x8000, %edx -- jnz LABEL(ashr_15_exittail) -+.exit: -+ mov -32 (%rsp), %r15 -+ mov -24 (%rsp), %rbp -+ mov -16 (%rsp), %rbx - -+ .p2align 4,, 3 -+ -+LABEL(exitafter): - #ifdef USE_AS_STRNCMP -- test %r11, %r11 -- je LABEL(ashr_15_exittail) -+ test %r14, %r14 -+ cmovl %edx, %eax - #endif - -- pxor %xmm0, %xmm0 -- sub $0x1000, %r10 -- jmp LABEL(gobble_ashr_15) -- -- .p2align 4 --LABEL(ashr_15_exittail): -- movdqa (%rsi, %rcx), %xmm1 -- psrldq $15, %xmm3 -- psrldq $15, %xmm0 -- -- .p2align 4 --LABEL(aftertail): -- pcmpeqb %xmm3, %xmm1 -- psubb %xmm0, %xmm1 -- pmovmskb %xmm1, %edx -- not %edx -- -- .p2align 4 --LABEL(exit): -- lea -16(%r9, %rcx), %rax /* locate the exact offset for rdi */ --LABEL(less32bytes): -- lea (%rdi, %rax), %rdi /* locate the exact address for first operand(rdi) */ -- lea (%rsi, %rcx), %rsi /* locate the exact address for second operand(rsi) */ -- test %r8d, %r8d -- jz LABEL(ret) -- xchg %rsi, %rdi /* recover original order according to flag(%r8d) */ -- -- .p2align 4 --LABEL(ret): --LABEL(less16bytes): -- bsf %rdx, %rdx /* find and store bit index in %rdx */ -+ movzx %al, %eax -+ movzx %dl, %edx -+ sub %eax, %edx -+ xchg %edx, %eax - - #ifdef USE_AS_STRNCMP -- sub %rdx, %r11 -- jbe LABEL(strcmp_exitz) -+LABEL(exitz): -+ mov -8 (%rsp), %r14 - #endif -- movzbl (%rsi, %rdx), %ecx -- movzbl (%rdi, %rdx), %eax -- -- sub %ecx, %eax -- ret -+ ret - --LABEL(strcmp_exitz): -- xor %eax, %eax -- ret -- -- .p2align 4 --LABEL(Byte0): -- movzx (%rsi), %ecx -- movzx (%rdi), %eax -- -- sub %ecx, %eax -- ret --END (BP_SYM (STRCMP)) -- -- .section .rodata,"a",@progbits -- .p2align 3 --LABEL(unaligned_table): -- .int LABEL(ashr_1) - LABEL(unaligned_table) -- .int LABEL(ashr_2) - LABEL(unaligned_table) -- .int LABEL(ashr_3) - LABEL(unaligned_table) -- .int 
LABEL(ashr_4) - LABEL(unaligned_table) -- .int LABEL(ashr_5) - LABEL(unaligned_table) -- .int LABEL(ashr_6) - LABEL(unaligned_table) -- .int LABEL(ashr_7) - LABEL(unaligned_table) -- .int LABEL(ashr_8) - LABEL(unaligned_table) -- .int LABEL(ashr_9) - LABEL(unaligned_table) -- .int LABEL(ashr_10) - LABEL(unaligned_table) -- .int LABEL(ashr_11) - LABEL(unaligned_table) -- .int LABEL(ashr_12) - LABEL(unaligned_table) -- .int LABEL(ashr_13) - LABEL(unaligned_table) -- .int LABEL(ashr_14) - LABEL(unaligned_table) -- .int LABEL(ashr_15) - LABEL(unaligned_table) -- .int LABEL(ashr_0) - LABEL(unaligned_table) --#endif /* NOT_IN_libc */ -+END (strcmp) - libc_hidden_builtin_def (STRCMP) -Index: sysdeps/x86_64/memcpy.S -=================================================================== ---- sysdeps/x86_64/memcpy.S.orig -+++ sysdeps/x86_64/memcpy.S -@@ -39,7 +39,7 @@ - - .text - --#if defined PIC && !defined NOT_IN_libc -+#if defined PIC && !defined NOT_IN_libc && !defined USE_AS_BCOPY - ENTRY (__memcpy_chk) - - cmpq %rdx, %rcx diff --git a/glibc-fini-unwind.diff b/glibc-fini-unwind.diff new file mode 100644 index 0000000..e2da629 --- /dev/null +++ b/glibc-fini-unwind.diff @@ -0,0 +1,60 @@ +Index: sysdeps/x86_64/elf/initfini.c +=================================================================== +--- sysdeps/x86_64/elf/initfini.c.orig 2004-08-16 06:50:55.000000000 +0200 ++++ sysdeps/x86_64/elf/initfini.c 2010-04-16 16:41:11.000000000 +0200 +@@ -44,6 +44,25 @@ + * crtn.s puts the corresponding function epilogues + in the .init and .fini sections. */ + ++/* The unwind annotation for _fini is peculiar for good reasons: ++ (a) We need a real function that isn't constructed separately ++ (i.e. one which has a .size directive) in order to attach unwind ++ info to it. Hence _fini is a wrapper around _real_fini, the ++ former being a normal function, the latter being the first ++ instruction of the traditional _fini. ++ (b) We must not fiddle with the stack pointer in _real_fini, ++ as we wouldn't be able to describe the effects in unwind info ++ (c) some versions of GCC have no correct unwind info for ++ __do_global_dtors_aux, meaning they can't properly restore %rbp ++ (unwinding through it is possible but later up when we next ++ need %rbp we can't access it anymore) ++ Therefore we save/restore it in _fini for uses later up the call chain. ++ But we don't make the CFA use that register (that would lead to ++ the above problem) ++ (d) We want an 16-aligned stack pointer at _real_fini. Because of (a) ++ we can't align it in _real_fini, hence we do it in the caller by ++ subtracting 8, making in 8mod16 which the call then make 0mod16 ++ again. 
*/ + __asm__ ("\n\ + #include \"defs.h\"\n\ + \n\ +@@ -88,16 +107,28 @@ _init:\n\ + .globl _fini\n\ + .type _fini,@function\n\ + _fini:\n\ ++ .cfi_startproc\n\ ++ push %rbp\n\ ++ .cfi_def_cfa_offset 16\n\ ++ .cfi_offset 6,-16\n\ + subq $8, %rsp\n\ ++ .cfi_def_cfa_offset 24\n\ ++ call _real_fini\n\ ++ addq $8, %rsp\n\ ++ .cfi_def_cfa_offset 16\n\ ++ pop %rbp\n\ ++ ret\n\ ++ .cfi_endproc\n\ + ALIGN\n\ + END_FINI\n\ ++.size _fini, .-_fini\n\ ++_real_fini:\n\ + \n\ + /*@_fini_PROLOG_ENDS*/\n\ + call i_am_not_a_leaf@PLT\n\ + \n\ + /*@_fini_EPILOG_BEGINS*/\n\ + .section .fini\n\ +- addq $8, %rsp\n\ + ret\n\ + END_FINI\n\ + \n\ diff --git a/glibc-gconvcache-s390.diff b/glibc-gconvcache-s390.diff new file mode 100644 index 0000000..93d58c7 --- /dev/null +++ b/glibc-gconvcache-s390.diff @@ -0,0 +1,22 @@ +Index: glibc/sysdeps/s390/s390-64/Makefile +=================================================================== +--- sysdeps/s390/s390-64/Makefile 2009-08-03 10:18:31.000000000 +0200 ++++ sysdeps/s390/s390-64/Makefile 2010-04-07 10:01:35.000000000 +0200 +@@ -74,5 +74,17 @@ $(objpfx)gconv-modules-s390: gconv-modul + + $(inst_gconvdir)/gconv-modules: $(objpfx)gconv-modules-s390 $(+force) + $(do-install) ++ifeq (no,$(cross-compiling)) ++# Update the $(prefix)/lib/gconv/gconv-modules.cache file. This is necessary ++# if this libc has more gconv modules than the previously installed one. ++ if test -f "$(inst_gconvdir)/gconv-modules.cache"; then \ ++ LC_ALL=C LANGUAGE=C \ ++ $(common-objpfx)elf/ld.so --library-path $(rpath-link) \ ++ $(common-objpfx)iconv/iconvconfig \ ++ $(addprefix --prefix=,$(install_root)); \ ++ fi ++else ++ @echo '*@*@*@ You should recreate $(inst_gconvdir)/gconv-modules.cache' ++endif + + endif diff --git a/glibc-malloc-arena-max.diff b/glibc-malloc-arena-max.diff new file mode 100644 index 0000000..24b91dc --- /dev/null +++ b/glibc-malloc-arena-max.diff @@ -0,0 +1,235 @@ +Allow M_ARENA_MAX / MALLOC_ARENA_MAX limit even with PER_THREAD disabled + +With the new PER_THREAD compile-time option, the allocator also offers +a way to limit the total number of arenas using MALLOC_ARENA_MAX +environment variable or mallopt(M_ARENA_MAX). + +In principle, this feature is not tied to the PER_THREAD option. This +patch makes it possible to use it even with the default compilation +settings. + +One motivation to limit the number of arenas may be libhugetlbfs users +that rely on its __morecore hook providing hugetlbfs-backed memory for +the allocator - this can work only with a single arena and multi-threaded +programs wishing to use this feature need a way to limit the allocator +to a single arena. Another motivation is avoiding pathological behavior +in extremely thread-intensive applications. + + +2011-02-04 Petr Baudis + + * malloc/arena.c: Define and manage narenas even ifndef + PER_THREAD. + * malloc/arena.c (ptmalloc_init_minimal): Likewise. + * malloc/arena.c (_int_new_arena): Likewise. + * malloc/arena.c (ptmalloc_init): Implement MALLOC_ARENA_MAX + even ifndef PER_THREAD. + * malloc/arena.c (reused_arena): Split off get_narenas_limit(), + define even ifndef PER_THREAD. + * malloc/arena.c (arena_get2): Adjust for get_narenas_limit() + split, call reused_arena even ifndef PER_THREAD. + * malloc/hooks.c (public_gET_STATe): Set arena_max, narenas + even ifndef PER_THREAD. + * malloc/hooks.c (public_sET_STATe): Likewise. + * malloc/malloc.c (malloc_par): Define arena_max even ifndef + PER_THREAD. + * malloc/malloc.c (mALLOPt): Implement M_ARENA_MAX even ifndef + PER_THREAD. 
+ * malloc/malloc.c: Remove redundant M_* defines. + +diff --git a/malloc/arena.c b/malloc/arena.c +index 4d0deef..ea80724 100644 +--- a/malloc/arena.c ++++ b/malloc/arena.c +@@ -78,8 +78,8 @@ extern int sanity_check_heap_info_alignment[(sizeof (heap_info) + + static tsd_key_t arena_key; + static mutex_t list_lock; +-#ifdef PER_THREAD + static size_t narenas; ++#ifdef PER_THREAD + static mstate free_list; + #endif + +@@ -416,8 +416,8 @@ ptmalloc_init_minimal (void) + #ifdef PER_THREAD + # define NARENAS_FROM_NCORES(n) ((n) * (sizeof(long) == 4 ? 2 : 8)) + mp_.arena_test = NARENAS_FROM_NCORES (1); +- narenas = 1; + #endif ++ narenas = 1; + } + + +@@ -574,10 +574,8 @@ ptmalloc_init (void) + { + if (memcmp (envline, "MMAP_MAX_", 9) == 0) + mALLOPt(M_MMAP_MAX, atoi(&envline[10])); +-#ifdef PER_THREAD + else if (memcmp (envline, "ARENA_MAX", 9) == 0) + mALLOPt(M_ARENA_MAX, atoi(&envline[10])); +-#endif + } + break; + #ifdef PER_THREAD +@@ -946,9 +944,9 @@ _int_new_arena(size_t size) + atomic_write_barrier (); + main_arena.next = a; + +-#ifdef PER_THREAD + ++narenas; + ++#ifdef PER_THREAD + (void)mutex_unlock(&list_lock); + #endif + +@@ -982,13 +980,9 @@ get_free_list (void) + return result; + } + +- +-static mstate +-reused_arena (void) ++static int ++get_narenas_limit (void) __attribute__((pure)) + { +- if (narenas <= mp_.arena_test) +- return NULL; +- + static int narenas_limit; + if (narenas_limit == 0) + { +@@ -1006,10 +1000,16 @@ reused_arena (void) + narenas_limit = NARENAS_FROM_NCORES (2); + } + } ++ return narenas_limit; ++} ++#endif + +- if (narenas < narenas_limit) +- return NULL; + ++/* Reuse and return one of the existing arenas; if all arenas are busy, ++ * pick one in a round-robin fashion and block until it becomes available. */ ++static mstate ++reused_arena (void) ++{ + mstate result; + static mstate next_to_use; + if (next_to_use == NULL) +@@ -1035,7 +1035,6 @@ reused_arena (void) + + return result; + } +-#endif + + static mstate + internal_function +@@ -1048,10 +1047,15 @@ arena_get2(a_tsd, size) mstate a_tsd; size_t size; + mstate a; + + #ifdef PER_THREAD +- if ((a = get_free_list ()) == NULL +- && (a = reused_arena ()) == NULL) +- /* Nothing immediately available, so generate a new arena. */ +- a = _int_new_arena(size); ++ if ((a = get_free_list ()) == NULL) ++ { ++ if (narenas > mp_.arena_test && narenas >= get_narenas_limit()) ++ a = reused_arena (); ++ else ++ /* Nothing immediately available, but we can still generate more ++ * arenas, so get a new one. */ ++ a = _int_new_arena(size); ++ } + #else + if(!a_tsd) + a = a_tsd = &main_arena; +@@ -1093,8 +1097,14 @@ arena_get2(a_tsd, size) mstate a_tsd; size_t size; + goto repeat; + } + +- /* Nothing immediately available, so generate a new arena. */ +- a = _int_new_arena(size); ++ if (__builtin_expect(mp_.arena_max > 0, 0) && narenas >= mp_.arena_max) ++ /* Try again, this time blocking in case we are still unable to find ++ * a free arena. */ ++ a = reused_arena(); ++ else ++ /* Nothing immediately available, so generate a new arena. 
*/ ++ a = _int_new_arena(size); ++ + (void)mutex_unlock(&list_lock); + #endif + +diff --git a/malloc/hooks.c b/malloc/hooks.c +index 28845ee..e938492 100644 +--- a/malloc/hooks.c ++++ b/malloc/hooks.c +@@ -579,9 +579,9 @@ public_gET_STATe(void) + ms->max_fast = get_max_fast(); + #ifdef PER_THREAD + ms->arena_test = mp_.arena_test; ++#endif + ms->arena_max = mp_.arena_max; + ms->narenas = narenas; +-#endif + (void)mutex_unlock(&main_arena.mutex); + return (Void_t*)ms; + } +@@ -683,9 +683,9 @@ public_sET_STATe(Void_t* msptr) + if (ms->version >= 4) { + #ifdef PER_THREAD + mp_.arena_test = ms->arena_test; ++#endif + mp_.arena_max = ms->arena_max; + narenas = ms->narenas; +-#endif + } + check_malloc_state(&main_arena); + +diff --git a/malloc/malloc.c b/malloc/malloc.c +index b1d43c6..8dbadfa 100644 +--- a/malloc/malloc.c ++++ b/malloc/malloc.c +@@ -2406,9 +2406,10 @@ struct malloc_par { + INTERNAL_SIZE_T top_pad; + INTERNAL_SIZE_T mmap_threshold; + #ifdef PER_THREAD ++ /* Lower bound for arena_max. */ + INTERNAL_SIZE_T arena_test; +- INTERNAL_SIZE_T arena_max; + #endif ++ INTERNAL_SIZE_T arena_max; + + /* Memory map support */ + int n_mmaps; +@@ -2446,13 +2447,6 @@ static struct malloc_state main_arena; + static struct malloc_par mp_; + + +-#ifdef PER_THREAD +-/* Non public mallopt parameters. */ +-#define M_ARENA_TEST -7 +-#define M_ARENA_MAX -8 +-#endif +- +- + /* Maximum size of memory handled in fastbins. */ + static INTERNAL_SIZE_T global_max_fast; + +@@ -6095,12 +6089,12 @@ int mALLOPt(param_number, value) int param_number; int value; + if (value > 0) + mp_.arena_test = value; + break; ++#endif + + case M_ARENA_MAX: + if (value > 0) + mp_.arena_max = value; + break; +-#endif + } + (void)mutex_unlock(&av->mutex); + return res; diff --git a/glibc-nss-deepbind.diff b/glibc-nss-deepbind.diff deleted file mode 100644 index 671f52f..0000000 --- a/glibc-nss-deepbind.diff +++ /dev/null @@ -1,26 +0,0 @@ -Use DEEPBIND to load the nss modules. Helps thunderbird (linked against its -own version of the ldap libs) when using nss_ldap (linked against system -libldap) leading to crashes due to incompatibilities. - -This has a downside: Linking against libraries overriding malloc() and free() -will break (unless the malloc()'d pointers by glibc are free()able by these). -This is fixable in principle, just needs some work. - -See https://bugzilla.novell.com/show_bug.cgi?id=157078 and -http://sourceware.org/bugzilla/show_bug.cgi?id=6610 - -Index: nss/nsswitch.c -=================================================================== ---- nss/nsswitch.c.orig -+++ nss/nsswitch.c -@@ -361,7 +361,9 @@ __nss_lookup_function (service_user *ni, - ".so"), - __nss_shlib_revision); - -- ni->library->lib_handle = __libc_dlopen (shlib_name); -+ ni->library->lib_handle -+ = __libc_dlopen_mode (shlib_name, -+ RTLD_LAZY | __RTLD_DLOPEN | RTLD_DEEPBIND); - if (ni->library->lib_handle == NULL) - { - /* Failed to load the library. 
*/
diff --git a/glibc-sparc64-fxstat.diff b/glibc-sparc64-fxstat.diff
deleted file mode 100644
index fa06e5a..0000000
--- a/glibc-sparc64-fxstat.diff
+++ /dev/null
@@ -1,7 +0,0 @@
-Index: glibc-2.10.1/sysdeps/unix/sysv/linux/sparc/sparc64/fxstat.c
-===================================================================
---- glibc-2.10.1.orig/sysdeps/unix/sysv/linux/sparc/sparc64/fxstat.c
-+++ glibc-2.10.1/sysdeps/unix/sysv/linux/sparc/sparc64/fxstat.c
-@@ -1 +1 @@
--#include "../../fxstat.c"
-+#include "../../i386/fxstat.c"
diff --git a/glibc-vfprintf-positional.diff b/glibc-vfprintf-positional.diff
new file mode 100644
index 0000000..9947188
--- /dev/null
+++ b/glibc-vfprintf-positional.diff
@@ -0,0 +1,20 @@
+2011-01-27 Petr Baudis
+
+ * stdio-common/vfprintf.c (vfprintf): Pass correct newlen
+ to extend_alloca().
+
+diff --git a/stdio-common/vfprintf.c b/stdio-common/vfprintf.c
+index fc370e8..ecf5dfa 100644
+--- a/stdio-common/vfprintf.c
++++ b/stdio-common/vfprintf.c
+@@ -1682,7 +1682,9 @@ do_positional:
+ {
+ /* Extend the array of format specifiers. */
+ struct printf_spec *old = specs;
+- specs = extend_alloca (specs, nspecs_max, 2 * nspecs_max);
++ specs = extend_alloca (specs, nspecs_max,
++ 2 * nspecs_max
++ * sizeof (struct printf_spec));
+
+ /* Copy the old array's elements to the new space. */
+ memmove (specs, old, nspecs * sizeof (struct printf_spec));
diff --git a/glibc.changes b/glibc.changes
index adc4b56..0fa266b 100644
--- a/glibc.changes
+++ b/glibc.changes
@@ -1,3 +1,31 @@
+-------------------------------------------------------------------
+Fri Feb 4 00:46:40 CET 2011 - pbaudis@suse.cz
+
+- Upgrade to latest release/2.11/master - glibc-2.11.3-b72646ad0c41
+ - Random assortment of bugfixes, some #defines for new kernels
+- Retired patches:
+ - glibc-2.11.3-bnc658509.diff
+ - glibc-2.11.3-bso12397.diff
+ - glibc-sparc64-fxstat.diff
+- Remove the NSS hack of opening modules using RTLD_DEEPBIND.
+ This was useful for nss_ldap, since some applications used a different
+ LDAP library with clashing symbol names. However, it also created
+ many headaches, especially with the NSS modules not respecting
+ malloc() overrides. Now, sssd is used by default for LDAP resolutions
+ and we can therefore safely get rid of the hack. [bnc#477061]
+- Remove the currently disabled AMD string function overrides.
+ Benchmarking did not unearth any differences that would make a
+ convincing case for keeping the functions with all the associated
+ maintenance headaches; AMD does not recommend keeping their custom
+ versions of the functions either.
+- Introduce MALLOC_ARENA_MAX and M_ARENA_MAX support [bnc#659090] +- Fixed stack unwinding past glibc _fini function (proper showing + of destructor backtraces) [bnc#585879] +- Fix gconv cache generation on s390 [bnc#592944] +- Add missing iconvconfig for refreshing gconv.cache to glibc-locale %post +- Fixed stack corruption in *printf() with large number of positional + specifiers [bnc#666179] + ------------------------------------------------------------------- Fri Jan 28 14:53:35 UTC 2011 - rguenther@novell.com diff --git a/glibc.spec b/glibc.spec index a7373d4..5b40911 100644 --- a/glibc.spec +++ b/glibc.spec @@ -68,7 +68,7 @@ Release: 3 Url: http://www.gnu.org/software/libc/libc.html PreReq: filesystem BuildRoot: %{_tmppath}/%{name}-%{version}-build -Source: glibc-%{version}.tar.bz2 +Source: glibc-%{version}-b72646ad0c41.tar.bz2 Source2: glibc-ports-2.10.1-2b2b217196.tar.bz2 Source3: noversion.tar.bz2 Source4: manpages.tar.bz2 @@ -98,51 +98,49 @@ Patch9: glibc-2.3-regcomp.diff Patch10: glibc-2.3.2-revert_tcsetattr.diff Patch11: glibc-2.3.1.localedef.diff Patch12: glibc-2.3.2.no_archive.diff -Patch13: glibc-2.3.3-amd64-string.diff -Patch14: libm-x86-64.diff.bz2 -Patch15: glibc-2.3.90-bindresvport.blacklist.diff -Patch16: glibc-suse-note.diff -Patch17: glibc-2.4.90-no_NO.diff -Patch18: glibc-2.3.90-ld.so-madvise.diff -Patch19: glibc-2.3.3-amd64-s_ceil.diff -Patch20: glibc-2.3.3-execstack.diff -Patch21: glibc-2.4-china.diff -Patch22: glibc-2.3.4-gb18030-big5hkscs.diff.bz2 -Patch23: glibc-2.4.90-nscd.diff -Patch24: glibc-2.3.3-nscd-db-path.diff -Patch25: glibc-2.3.5-nscd-zeronegtimeout.diff -Patch26: glibc-2.3.90-langpackdir.diff -Patch27: glibc-nptl-2.4-nofixsyscallnr.diff -Patch30: glibc-2.6-configure.diff -Patch31: glibc-2.2-sunrpc.diff -Patch32: glibc-2.8-getconf.diff -Patch33: getaddrinfo-ipv6-sanity.diff -Patch35: ppc-atomic.diff -Patch36: glibc-2.8-clone.diff -Patch37: glibc-nss-deepbind.diff -Patch39: glibc-compiled-binaries.diff -Patch40: glibc-selinux.diff -Patch41: glibc-check-native-missing-include.diff -Patch42: glibc-no-unwind-tables.diff -Patch43: glibc-2.10-nscd-nostack.diff -Patch44: glibc-cpusetsize.diff -Patch46: glibc-2.10.99-ia64-include.diff -Patch47: libm-x86-64-exceptions.diff -Patch48: glibc-uio-cell.diff -Patch54: glibc-statfs64-ia64.diff -Patch60: ld-prelink-unique.diff -Patch61: glibc-ppc64-vdso-time.diff -Patch64: glibc-gai-private4.diff -Patch65: glibc-resolv-mdnshint.diff -Patch69: glibc-nscd-hconf.diff +Patch13: libm-x86-64.diff.bz2 +Patch14: glibc-2.3.90-bindresvport.blacklist.diff +Patch15: glibc-suse-note.diff +Patch16: glibc-2.4.90-no_NO.diff +Patch17: glibc-2.3.90-ld.so-madvise.diff +Patch18: glibc-2.3.3-amd64-s_ceil.diff +Patch19: glibc-2.3.3-execstack.diff +Patch20: glibc-2.4-china.diff +Patch21: glibc-2.3.4-gb18030-big5hkscs.diff.bz2 +Patch22: glibc-2.4.90-nscd.diff +Patch23: glibc-2.3.3-nscd-db-path.diff +Patch24: glibc-2.3.5-nscd-zeronegtimeout.diff +Patch25: glibc-2.3.90-langpackdir.diff +Patch26: glibc-nptl-2.4-nofixsyscallnr.diff +Patch27: glibc-2.6-configure.diff +Patch28: glibc-2.2-sunrpc.diff +Patch29: glibc-2.8-getconf.diff +Patch30: getaddrinfo-ipv6-sanity.diff +Patch31: ppc-atomic.diff +Patch32: glibc-2.8-clone.diff +Patch33: glibc-compiled-binaries.diff +Patch34: glibc-selinux.diff +Patch35: glibc-check-native-missing-include.diff +Patch36: glibc-no-unwind-tables.diff +Patch37: glibc-2.10-nscd-nostack.diff +Patch38: glibc-cpusetsize.diff +Patch39: glibc-2.10.99-ia64-include.diff +Patch40: libm-x86-64-exceptions.diff +Patch41: 
glibc-uio-cell.diff +Patch42: glibc-statfs64-ia64.diff +Patch43: ld-prelink-unique.diff +Patch44: glibc-ppc64-vdso-time.diff +Patch45: glibc-gai-private4.diff +Patch46: glibc-resolv-mdnshint.diff +Patch47: glibc-nscd-hconf.diff +Patch48: glibc-malloc-arena-max.diff +Patch49: glibc-fini-unwind.diff +Patch50: glibc-gconvcache-s390.diff +Patch51: glibc-vfprintf-positional.diff Patch500: ARM_glibc-2.10.1-local-eabi-wchar.diff Patch501: ARM_glibc-2.10.1-local-hwcap-updates.diff Patch502: ARM_glibc-2.10.1-local-lowlevellock.diff Patch503: ARM_glibc-2.10.1-local-no-hwcap.diff -# http://sources.redhat.com/bugzilla/show_bug.cgi?id=11155 -Patch600: glibc-sparc64-fxstat.diff -Patch601: glibc-2.11.3-bnc658509.diff -Patch602: glibc-2.11.3-bso12397.diff %description The GNU C Library provides the most important standard libraries used @@ -311,12 +309,10 @@ versions of your software. %patch10 %patch11 %patch12 -#%patch13 -# strncmp.S triggers amd64 assembler bug [bnc#540647] -#rm sysdeps/x86_64/strncmp.S -%patch14 -E +%patch13 -E # We have s_sincos.c in patch13, remove duplicate rm sysdeps/x86_64/fpu/s_sincos.S +%patch14 %patch15 %patch16 %patch17 @@ -324,48 +320,47 @@ rm sysdeps/x86_64/fpu/s_sincos.S %patch19 %patch20 %patch21 -%patch22 # avoid changing nscd_stat.c mtime to avoid code generation # differences on each rebuild touch -r nscd/nscd_stat.c nscd/s-stamp +%patch22 %patch23 %patch24 -%patch25 touch -r nscd/s-stamp nscd/nscd_stat.c rm nscd/s-stamp +%patch25 %patch26 %patch27 +%patch28 +%patch29 %patch30 %patch31 %patch32 %patch33 +%patch34 %patch35 %patch36 %patch37 +%patch38 %patch39 %patch40 -%patch41 +%patch41 -p1 %patch42 -%patch43 -%patch44 -%patch46 -%patch47 +%patch43 -p1 +%patch44 -p1 +%patch45 +%patch46 -p1 +%patch47 -p1 %patch48 -p1 -%patch54 -%patch60 -p1 -%patch61 -p1 -%patch64 -%patch65 -p1 -%patch69 -p1 +%patch49 +%patch50 +%patch51 -p1 %ifarch %arm armv5tel armv7l %patch500 %patch501 %patch502 %patch503 %endif -%patch600 -p1 -%patch601 -%patch602 # # Inconsistency detected by ld.so: dl-close.c: 719: _dl_close: Assertion `map->l_init_called' failed! # @@ -782,6 +777,7 @@ for l in /usr/share/locale/locale.alias %{_libdir}/gconv/gconv-modules; do echo "###X# The following is autogenerated from extra files in the .d directory:" >>"$l" cat "$l.d"/* >>"$l" done +/usr/sbin/iconvconfig %post info %install_info --info-dir=%{_infodir} %{_infodir}/libc.info.gz diff --git a/ready b/ready new file mode 100644 index 0000000..473a0f4