forked from pool/glibc
glibc/glibc-2.3.3-amd64-string.diff

Index: sysdeps/x86_64/dl-machine.h
===================================================================
--- sysdeps/x86_64/dl-machine.h.orig
+++ sysdeps/x86_64/dl-machine.h
@@ -226,6 +226,40 @@ dl_platform_init (void)
if (GLRO(dl_platform) != NULL && *GLRO(dl_platform) == '\0')
/* Avoid an empty string which would disturb us. */
GLRO(dl_platform) = NULL;
+
+ long int t1, t2;
+ t1 = 0;
+ t2 = 0;
+
+ asm (
+ "mov $0x80000000, %%eax # get highest level of support\n\t"
+ "cpuid\n\t"
+ "cmp $0x80000006, %%eax # check for support of cache info\n\t"
+ "jb 1f\n\t"
+ "mov $0x80000005, %%eax # get L1 info\n\t"
+ "cpuid\n\t"
+ "shr $24, %%ecx\n\t"
+ "shl $10, %%ecx\n\t"
+ "mov %%rcx, %0\n\t"
+ "mov $0x80000006, %%eax # get L2 info\n\t"
+ "cpuid\n\t"
+ "shr $16, %%ecx\n\t"
+ "shl $10, %%ecx\n\t"
+ "mov %%rcx, %1\n\t"
+ "1:\n\t"
+ :"=r" (t1), "=r" (t2) :: "%rbx", "%rax", "%rcx", "%rdx"
+ );
+
+ if (t1)
+ {
+ GLRO(dl_cache1size) = t1;
+ GLRO(dl_cache1sizehalf) = t1 / 2;
+ }
+ if (t2)
+ {
+ GLRO(dl_cache2size) = t2;
+ GLRO(dl_cache2sizehalf) = t2 / 2;
+ }
}
static inline Elf64_Addr
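
The hunk above probes the AMD extended CPUID leaves at dynamic-linker startup: leaf 0x80000000 returns the highest supported extended leaf, leaf 0x80000005 reports the L1 data cache size in KB in ECX[31:24], and leaf 0x80000006 reports the L2 size in KB in ECX[31:16]; the shr/shl pairs convert KB to bytes. The following standalone C sketch performs the same queries through GCC's <cpuid.h> instead of hand-written inline asm (a hedged illustration, not part of the patch):

    #include <cpuid.h>
    #include <stdio.h>

    int
    main (void)
    {
      unsigned int eax, ebx, ecx, edx;
      long l1 = 0, l2 = 0;

      /* Leaf 0x80000000: EAX = highest supported extended leaf.  */
      if (__get_cpuid (0x80000000, &eax, &ebx, &ecx, &edx)
          && eax >= 0x80000006)
        {
          __get_cpuid (0x80000005, &eax, &ebx, &ecx, &edx);
          l1 = (long) (ecx >> 24) << 10;  /* ECX[31:24] = L1D size in KB */
          __get_cpuid (0x80000006, &eax, &ebx, &ecx, &edx);
          l2 = (long) (ecx >> 16) << 10;  /* ECX[31:16] = L2 size in KB */
        }
      printf ("L1d: %ld bytes, L2: %ld bytes\n", l1, l2);
      return 0;
    }
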
Index: sysdeps/x86_64/Makefile
===================================================================
--- sysdeps/x86_64/Makefile.orig
+++ sysdeps/x86_64/Makefile
@@ -4,7 +4,8 @@ long-double-fcts = yes
ifeq ($(subdir),csu)
sysdep_routines += hp-timing
elide-routines.os += hp-timing
-gen-as-const-headers += link-defines.sym
+# get offset to rtld_global._dl_*
+gen-as-const-headers += link-defines.sym rtld-global-offsets.sym
endif
ifeq ($(subdir),gmon)
Index: sysdeps/x86_64/strcpy.S
===================================================================
--- sysdeps/x86_64/strcpy.S.orig
+++ sysdeps/x86_64/strcpy.S
@@ -1,159 +1,833 @@
-/* strcpy/stpcpy implementation for x86-64.
- Copyright (C) 2002 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
- Contributed by Andreas Jaeger <aj@suse.de>, 2002.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#include <sysdep.h>
-#include "asm-syntax.h"
-#include "bp-sym.h"
-#include "bp-asm.h"
+# $Header: /K8_Projects/Glibc/amd64strcpy.S 7 2/12/04 19:06 Emenezes $
-#ifndef USE_AS_STPCPY
+# (c) 2002 Advanced Micro Devices, Inc.
+# YOUR USE OF THIS CODE IS SUBJECT TO THE TERMS
+# AND CONDITIONS OF THE GNU LESSER GENERAL PUBLIC
+# LICENSE FOUND IN THE "README" FILE THAT IS
+# INCLUDED WITH THIS FILE
+
+#include "sysdep.h"
+#include <rtld-global-offsets.h>
+
+ /* XXX: strncpy is broken, just use this for strcpy for now. */
+#ifdef PIC
+ .globl _rtld_local_ro
+ .hidden _rtld_local_ro
+ .set _rtld_local_ro,_rtld_global_ro
+#endif
+#ifndef STRCPY
# define STRCPY strcpy
#endif
+#define LABEL(s) L(strcpy##s)
+
+ .text
+
+ENTRY (STRCPY) # (char *, const char *)
+
+#ifdef USE_AS_STRNCPY // (char *, const char *, size_t)
+ test %rdx, %rdx # (char *, const char *, size_t)
+ mov %rdx, %r11
+ jz LABEL(exitn) # early exit
+#endif
+
+ xor %edx, %edx
+
+LABEL(aligntry):
+ mov %rsi, %r8 # align by source
+ and $7, %r8
+ jz LABEL(alignafter)
+
+LABEL(align): # 8-byte align
+ sub $8, %r8
- .text
-ENTRY (BP_SYM (STRCPY))
- movq %rsi, %rcx /* Source register. */
- andl $7, %ecx /* mask alignment bits */
- movq %rdi, %rdx /* Duplicate destination pointer. */
-
- jz 5f /* aligned => start loop */
-
- neg %ecx /* We need to align to 8 bytes. */
- addl $8,%ecx
- /* Search the first bytes directly. */
-0:
- movb (%rsi), %al /* Fetch a byte */
- testb %al, %al /* Is it NUL? */
- movb %al, (%rdx) /* Store it */
- jz 4f /* If it was NUL, done! */
- incq %rsi
- incq %rdx
- decl %ecx
- jnz 0b
-
-5:
- movq $0xfefefefefefefeff,%r8
-
- /* Now the sources is aligned. Unfortunatly we cannot force
- to have both source and destination aligned, so ignore the
- alignment of the destination. */
.p2align 4
-1:
- /* 1st unroll. */
- movq (%rsi), %rax /* Read double word (8 bytes). */
- addq $8, %rsi /* Adjust pointer for next word. */
- movq %rax, %r9 /* Save a copy for NUL finding. */
- addq %r8, %r9 /* add the magic value to the word. We get
- carry bits reported for each byte which
- is *not* 0 */
- jnc 3f /* highest byte is NUL => return pointer */
- xorq %rax, %r9 /* (word+magic)^word */
- orq %r8, %r9 /* set all non-carry bits */
- incq %r9 /* add 1: if one carry bit was *not* set
- the addition will not result in 0. */
-
- jnz 3f /* found NUL => return pointer */
-
- movq %rax, (%rdx) /* Write value to destination. */
- addq $8, %rdx /* Adjust pointer. */
-
- /* 2nd unroll. */
- movq (%rsi), %rax /* Read double word (8 bytes). */
- addq $8, %rsi /* Adjust pointer for next word. */
- movq %rax, %r9 /* Save a copy for NUL finding. */
- addq %r8, %r9 /* add the magic value to the word. We get
- carry bits reported for each byte which
- is *not* 0 */
- jnc 3f /* highest byte is NUL => return pointer */
- xorq %rax, %r9 /* (word+magic)^word */
- orq %r8, %r9 /* set all non-carry bits */
- incq %r9 /* add 1: if one carry bit was *not* set
- the addition will not result in 0. */
-
- jnz 3f /* found NUL => return pointer */
-
- movq %rax, (%rdx) /* Write value to destination. */
- addq $8, %rdx /* Adjust pointer. */
-
- /* 3rd unroll. */
- movq (%rsi), %rax /* Read double word (8 bytes). */
- addq $8, %rsi /* Adjust pointer for next word. */
- movq %rax, %r9 /* Save a copy for NUL finding. */
- addq %r8, %r9 /* add the magic value to the word. We get
- carry bits reported for each byte which
- is *not* 0 */
- jnc 3f /* highest byte is NUL => return pointer */
- xorq %rax, %r9 /* (word+magic)^word */
- orq %r8, %r9 /* set all non-carry bits */
- incq %r9 /* add 1: if one carry bit was *not* set
- the addition will not result in 0. */
-
- jnz 3f /* found NUL => return pointer */
-
- movq %rax, (%rdx) /* Write value to destination. */
- addq $8, %rdx /* Adjust pointer. */
-
- /* 4th unroll. */
- movq (%rsi), %rax /* Read double word (8 bytes). */
- addq $8, %rsi /* Adjust pointer for next word. */
- movq %rax, %r9 /* Save a copy for NUL finding. */
- addq %r8, %r9 /* add the magic value to the word. We get
- carry bits reported for each byte which
- is *not* 0 */
- jnc 3f /* highest byte is NUL => return pointer */
- xorq %rax, %r9 /* (word+magic)^word */
- orq %r8, %r9 /* set all non-carry bits */
- incq %r9 /* add 1: if one carry bit was *not* set
- the addition will not result in 0. */
-
- jnz 3f /* found NUL => return pointer */
-
- movq %rax, (%rdx) /* Write value to destination. */
- addq $8, %rdx /* Adjust pointer. */
- jmp 1b /* Next iteration. */
- /* Do the last few bytes. %rax contains the value to write.
- The loop is unrolled twice. */
+LABEL(alignloop):
+#ifdef USE_AS_STRNCPY
+ dec %r11
+ jl LABEL(exitn)
+#endif
+
+ mov (%rsi, %rdx), %al # check if same character
+ test %al, %al # check if character a NUL
+ mov %al, (%rdi, %rdx)
+ jz LABEL(exit)
+
+ inc %edx
+ inc %r8
+ jnz LABEL(alignloop)
+
.p2align 4
+
+LABEL(alignafter):
+
+LABEL(8try):
+ mov $0xfefefefefefefeff, %rcx
+
+LABEL(8): # 8-byte
+ mov (%rsi, %rdx), %rax
+
+LABEL(8loop):
+#ifdef USE_AS_STRNCPY
+ sub $8, %r11
+ jl LABEL(tail)
+#endif
+
+ mov %rcx, %r8
+ add %rax, %r8
+ sbb %r10, %r10
+
+ xor %rax, %r8
+ or %rcx, %r8
+ sub %r10, %r8
+ jnz LABEL(tail)
+
+ mov %rax, (%rdi, %rdx)
+ mov 8 (%rsi, %rdx), %rax
+ add $8, %edx
+
+#ifdef USE_AS_STRNCPY
+ sub $8, %r11
+ jl LABEL(tail)
+#endif
+
+ mov %rcx, %r8
+ add %rax, %r8
+ sbb %r10, %r10
+
+ xor %rax, %r8
+ or %rcx, %r8
+ sub %r10, %r8
+ jnz LABEL(tail)
+
+ mov %rax, (%rdi, %rdx)
+ mov 8 (%rsi, %rdx), %rax
+ add $8, %edx
+
+#ifdef USE_AS_STRNCPY
+ sub $8, %r11
+ jl LABEL(tail)
+#endif
+
+ mov %rcx, %r8
+ add %rax, %r8
+ sbb %r10, %r10
+
+ xor %rax, %r8
+ or %rcx, %r8
+ sub %r10, %r8
+ jnz LABEL(tail)
+
+ mov %rax, (%rdi, %rdx)
+ mov 8 (%rsi, %rdx), %rax
+ add $8, %edx
+
+#ifdef USE_AS_STRNCPY
+ sub $8, %r11
+ jl LABEL(tail)
+#endif
+
+ mov %rcx, %r8
+ add %rax, %r8
+ sbb %r10, %r10
+
+ xor %rax, %r8
+ or %rcx, %r8
+ sub %r10, %r8
+ jnz LABEL(tail)
+
+ mov %rax, (%rdi, %rdx)
+ mov 8 (%rsi, %rdx), %rax
+ add $8, %edx
+
+#ifdef USE_AS_STRNCPY
+ sub $8, %r11
+ jl LABEL(tail)
+#endif
+
+ mov %rcx, %r8
+ add %rax, %r8
+ sbb %r10, %r10
+
+ xor %rax, %r8
+ or %rcx, %r8
+ sub %r10, %r8
+ jnz LABEL(tail)
+
+ mov %rax, (%rdi, %rdx)
+ mov 8 (%rsi, %rdx), %rax
+ add $8, %edx
+
+#ifdef USE_AS_STRNCPY
+ sub $8, %r11
+ jl LABEL(tail)
+#endif
+
+ mov %rcx, %r8
+ add %rax, %r8
+ sbb %r10, %r10
+
+ xor %rax, %r8
+ or %rcx, %r8
+ sub %r10, %r8
+ jnz LABEL(tail)
+
+ mov %rax, (%rdi, %rdx)
+ mov 8 (%rsi, %rdx), %rax
+ add $8, %edx
+
+#ifdef USE_AS_STRNCPY
+ sub $8, %r11
+ jl LABEL(tail)
+#endif
+
+ mov %rcx, %r8
+ add %rax, %r8
+ sbb %r10, %r10
+
+ xor %rax, %r8
+ or %rcx, %r8
+ sub %r10, %r8
+ jnz LABEL(tail)
+
+ mov %rax, (%rdi, %rdx)
+ mov 8 (%rsi, %rdx), %rax
+ add $8, %edx
+
+#ifdef USE_AS_STRNCPY
+ sub $8, %r11
+ jl LABEL(tail)
+#endif
+
+ mov %rcx, %r8
+ add %rax, %r8
+ sbb %r10, %r10
+
+ xor %rax, %r8
+ or %rcx, %r8
+ sub %r10, %r8
+ jnz LABEL(tail)
+
+ mov %rax, (%rdi, %rdx)
+ mov 8 (%rsi, %rdx), %rax
+ add $8, %edx
+
+LABEL(8after):
+
+LABEL(64try):
+#ifdef PIC
+ mov _rtld_local_ro@GOTPCREL(%rip), %r8
+ mov RTLD_GLOBAL_DL_CACHE1SIZEHALF(%r8), %r9
+#else
+ mov _dl_cache1sizehalf, %r9
+#endif
+
+
+LABEL(64): # 64-byte
+
+ .p2align 4
+
+LABEL(64loop):
+#ifdef USE_AS_STRNCPY
+ sub $8, %r11
+ jl LABEL(tail)
+#endif
+
+ mov %rcx, %r8
+ add %rax, %r8
+ sbb %r10, %r10
+
+ xor %rax, %r8
+ or %rcx, %r8
+ sub %r10, %r8
+ jnz LABEL(tail)
+
+ mov %rax, (%rdi, %rdx)
+ mov 8 (%rsi, %rdx), %rax
+ add $8, %edx
+
+#ifdef USE_AS_STRNCPY
+ sub $8, %r11
+ jl LABEL(tail)
+#endif
+
+ mov %rcx, %r8
+ add %rax, %r8
+ sbb %r10, %r10
+
+ xor %rax, %r8
+ or %rcx, %r8
+ sub %r10, %r8
+ jnz LABEL(tail)
+
+ mov %rax, (%rdi, %rdx)
+ mov 8 (%rsi, %rdx), %rax
+ add $8, %edx
+
+#ifdef USE_AS_STRNCPY
+ sub $8, %r11
+ jl LABEL(tail)
+#endif
+
+ mov %rcx, %r8
+ add %rax, %r8
+ sbb %r10, %r10
+
+ xor %rax, %r8
+ or %rcx, %r8
+ sub %r10, %r8
+ jnz LABEL(tail)
+
+ mov %rax, (%rdi, %rdx)
+ mov 8 (%rsi, %rdx), %rax
+ add $8, %edx
+
+#ifdef USE_AS_STRNCPY
+ sub $8, %r11
+ jl LABEL(tail)
+#endif
+
+ mov %rcx, %r8
+ add %rax, %r8
+ sbb %r10, %r10
+
+ xor %rax, %r8
+ or %rcx, %r8
+ sub %r10, %r8
+ jnz LABEL(tail)
+
+ mov %rax, (%rdi, %rdx)
+ mov 8 (%rsi, %rdx), %rax
+ add $8, %edx
+
+#ifdef USE_AS_STRNCPY
+ sub $8, %r11
+ jl LABEL(tail)
+#endif
+
+ mov %rcx, %r8
+ add %rax, %r8
+ sbb %r10, %r10
+
+ xor %rax, %r8
+ or %rcx, %r8
+ sub %r10, %r8
+ jnz LABEL(tail)
+
+ mov %rax, (%rdi, %rdx)
+ mov 8 (%rsi, %rdx), %rax
+ add $8, %edx
+
+#ifdef USE_AS_STRNCPY
+ sub $8, %r11
+ jl LABEL(tail)
+#endif
+
+ mov %rcx, %r8
+ add %rax, %r8
+ sbb %r10, %r10
+
+ xor %rax, %r8
+ or %rcx, %r8
+ sub %r10, %r8
+ jnz LABEL(tail)
+
+ mov %rax, (%rdi, %rdx)
+ mov 8 (%rsi, %rdx), %rax
+ add $8, %edx
+
+#ifdef USE_AS_STRNCPY
+ sub $8, %r11
+ jl LABEL(tail)
+#endif
+
+ mov %rcx, %r8
+ add %rax, %r8
+ sbb %r10, %r10
+
+ xor %rax, %r8
+ or %rcx, %r8
+ sub %r10, %r8
+ jnz LABEL(tail)
+
+ mov %rax, (%rdi, %rdx)
+ mov 8 (%rsi, %rdx), %rax
+ add $8, %edx
+
+#ifdef USE_AS_STRNCPY
+ sub $8, %r11
+ jl LABEL(tail)
+#endif
+
+ mov %rcx, %r8
+ add %rax, %r8
+ sbb %r10, %r10
+
+ xor %rax, %r8
+ or %rcx, %r8
+ sub %r10, %r8
+ jnz LABEL(tail)
+
+ cmp %r9, %rdx
+
+ mov %rax, (%rdi, %rdx)
+ mov 8 (%rsi, %rdx), %rax
+ lea 8 (%rdx), %rdx
+
+ jbe LABEL(64loop)
+
+LABEL(64after):
+
+LABEL(pretry):
+#ifdef PIC
+ mov _rtld_local_ro@GOTPCREL(%rip), %r8
+ mov RTLD_GLOBAL_DL_CACHE2SIZEHALF(%r8), %r9
+#else
+ mov _dl_cache2sizehalf, %r9
+#endif
+
+LABEL(pre): # 64-byte prefetch
+
+ .p2align 4
+
+LABEL(preloop):
+#ifdef USE_AS_STRNCPY
+ sub $8, %r11
+ jl LABEL(tail)
+#endif
+
+ mov %rcx, %r8
+ add %rax, %r8
+ sbb %r10, %r10
+
+ xor %rax, %r8
+ or %rcx, %r8
+ sub %r10, %r8
+ jnz LABEL(tail)
+
+ mov %rax, (%rdi, %rdx)
+ mov 8 (%rsi, %rdx), %rax
+ add $8, %edx
+
+#ifdef USE_AS_STRNCPY
+ sub $8, %r11
+ jl LABEL(tail)
+#endif
+
+ mov %rcx, %r8
+ add %rax, %r8
+ sbb %r10, %r10
+
+ xor %rax, %r8
+ or %rcx, %r8
+ sub %r10, %r8
+ jnz LABEL(tail)
+
+ mov %rax, (%rdi, %rdx)
+ mov 8 (%rsi, %rdx), %rax
+ add $8, %edx
+
+#ifdef USE_AS_STRNCPY
+ sub $8, %r11
+ jl LABEL(tail)
+#endif
+
+ mov %rcx, %r8
+ add %rax, %r8
+ sbb %r10, %r10
+
+ xor %rax, %r8
+ or %rcx, %r8
+ sub %r10, %r8
+ jnz LABEL(tail)
+
+ mov %rax, (%rdi, %rdx)
+ mov 8 (%rsi, %rdx), %rax
+ add $8, %edx
+
+#ifdef USE_AS_STRNCPY
+ sub $8, %r11
+ jl LABEL(tail)
+#endif
+
+ mov %rcx, %r8
+ add %rax, %r8
+ sbb %r10, %r10
+
+ xor %rax, %r8
+ or %rcx, %r8
+ sub %r10, %r8
+ jnz LABEL(tail)
+
+ mov %rax, (%rdi, %rdx)
+ mov 8 (%rsi, %rdx), %rax
+ add $8, %edx
+
+#ifdef USE_AS_STRNCPY
+ sub $8, %r11
+ jl LABEL(tail)
+#endif
+
+ mov %rcx, %r8
+ add %rax, %r8
+ sbb %r10, %r10
+
+ xor %rax, %r8
+ or %rcx, %r8
+ sub %r10, %r8
+ jnz LABEL(tail)
+
+ mov %rax, (%rdi, %rdx)
+ mov 8 (%rsi, %rdx), %rax
+ add $8, %edx
+
+#ifdef USE_AS_STRNCPY
+ sub $8, %r11
+ jl LABEL(tail)
+#endif
+
+ mov %rcx, %r8
+ add %rax, %r8
+ sbb %r10, %r10
+
+ xor %rax, %r8
+ or %rcx, %r8
+ sub %r10, %r8
+ jnz LABEL(tail)
+
+ mov %rax, (%rdi, %rdx)
+ mov 8 (%rsi, %rdx), %rax
+ add $8, %edx
+
+#ifdef USE_AS_STRNCPY
+ sub $8, %r11
+ jl LABEL(tail)
+#endif
+
+ mov %rcx, %r8
+ add %rax, %r8
+ sbb %r10, %r10
+
+ xor %rax, %r8
+ or %rcx, %r8
+ sub %r10, %r8
+ jnz LABEL(tail)
+
+ mov %rax, (%rdi, %rdx)
+ mov 8 (%rsi, %rdx), %rax
+ add $8, %edx
+
+#ifdef USE_AS_STRNCPY
+ sub $8, %r11
+ jl LABEL(tail)
+#endif
+
+ mov %rcx, %r8
+ add %rax, %r8
+ sbb %r10, %r10
+
+ xor %rax, %r8
+ or %rcx, %r8
+ sub %r10, %r8
+ jnz LABEL(tail)
+
+ cmp %r9, %rdx
+
+ mov %rax, (%rdi, %rdx)
+ prefetcht0 512 + 8 (%rdi, %rdx)
+ mov 8 (%rsi, %rdx), %rax
+ prefetcht0 512 + 8 (%rsi, %rdx)
+ lea 8 (%rdx), %rdx
+
+ jb LABEL(preloop)
+
+ .p2align 4
+
+LABEL(preafter):
+
+LABEL(NTtry):
+ sfence
+
+LABEL(NT): # 64-byte NT
+
+ .p2align 4
+
+LABEL(NTloop):
+#ifdef USE_AS_STRNCPY
+ sub $8, %r11
+ jl LABEL(tail)
+#endif
+
+ mov %rcx, %r8
+ add %rax, %r8
+ sbb %r10, %r10
+
+ xor %rax, %r8
+ or %rcx, %r8
+ sub %r10, %r8
+ jnz LABEL(NTtail)
+
+ movnti %rax, (%rdi, %rdx)
+ mov 8 (%rsi, %rdx), %rax
+ add $8, %rdx
+
+#ifdef USE_AS_STRNCPY
+ sub $8, %r11
+ jl LABEL(tail)
+#endif
+
+ mov %rcx, %r8
+ add %rax, %r8
+ sbb %r10, %r10
+
+ xor %rax, %r8
+ or %rcx, %r8
+ sub %r10, %r8
+ jnz LABEL(NTtail)
+
+ movnti %rax, (%rdi, %rdx)
+ mov 8 (%rsi, %rdx), %rax
+ add $8, %rdx
+
+#ifdef USE_AS_STRNCPY
+ sub $8, %r11
+ jl LABEL(tail)
+#endif
+
+ mov %rcx, %r8
+ add %rax, %r8
+ sbb %r10, %r10
+
+ xor %rax, %r8
+ or %rcx, %r8
+ sub %r10, %r8
+ jnz LABEL(NTtail)
+
+ movnti %rax, (%rdi, %rdx)
+ mov 8 (%rsi, %rdx), %rax
+ add $8, %rdx
+
+#ifdef USE_AS_STRNCPY
+ sub $8, %r11
+ jl LABEL(tail)
+#endif
+
+ mov %rcx, %r8
+ add %rax, %r8
+ sbb %r10, %r10
+
+ xor %rax, %r8
+ or %rcx, %r8
+ sub %r10, %r8
+ jnz LABEL(NTtail)
+
+ movnti %rax, (%rdi, %rdx)
+ mov 8 (%rsi, %rdx), %rax
+ add $8, %rdx
+
+#ifdef USE_AS_STRNCPY
+ sub $8, %r11
+ jl LABEL(tail)
+#endif
+
+ mov %rcx, %r8
+ add %rax, %r8
+ sbb %r10, %r10
+
+ xor %rax, %r8
+ or %rcx, %r8
+ sub %r10, %r8
+ jnz LABEL(NTtail)
+
+ movnti %rax, (%rdi, %rdx)
+ mov 8 (%rsi, %rdx), %rax
+ add $8, %rdx
+
+#ifdef USE_AS_STRNCPY
+ sub $8, %r11
+ jl LABEL(tail)
+#endif
+
+ mov %rcx, %r8
+ add %rax, %r8
+ sbb %r10, %r10
+
+ xor %rax, %r8
+ or %rcx, %r8
+ sub %r10, %r8
+ jnz LABEL(NTtail)
+
+ movnti %rax, (%rdi, %rdx)
+ mov 8 (%rsi, %rdx), %rax
+ add $8, %rdx
+
+#ifdef USE_AS_STRNCPY
+ sub $8, %r11
+ jl LABEL(tail)
+#endif
+
+ mov %rcx, %r8
+ add %rax, %r8
+ sbb %r10, %r10
+
+ xor %rax, %r8
+ or %rcx, %r8
+ sub %r10, %r8
+ jnz LABEL(NTtail)
+
+ movnti %rax, (%rdi, %rdx)
+ mov 8 (%rsi, %rdx), %rax
+ add $8, %rdx
+
+#ifdef USE_AS_STRNCPY
+ sub $8, %r11
+ jl LABEL(tail)
+#endif
+
+ mov %rcx, %r8
+ add %rax, %r8
+ sbb %r10, %r10
+
+ xor %rax, %r8
+ or %rcx, %r8
+ sub %r10, %r8
+ jnz LABEL(NTtail)
+
+ movnti %rax, (%rdi, %rdx)
+ mov 8 (%rsi, %rdx), %rax
+ prefetchnta 768 + 8 (%rsi, %rdx)
+ add $8, %rdx
+
+ jmp LABEL(NTloop)
+
+ .p2align 4
+
+LABEL(NTtail):
+ sfence
+
+ .p2align 4
+
+LABEL(NTafter):
+
+LABEL(tailtry):
+
+LABEL(tail): # 1-byte tail
+#ifdef USE_AS_STRNCPY
+ add $8, %r11
+#endif
+
+ .p2align 4
+
+LABEL(tailloop):
+#ifdef USE_AS_STRNCPY
+ dec %r11
+ jl LABEL(exitn)
+#endif
+
+ test %al, %al
+ mov %al, (%rdi, %rdx)
+ jz LABEL(exit)
+
+ inc %rdx
+
+#ifdef USE_AS_STRNCPY
+ dec %r11
+ jl LABEL(exitn)
+
+ mov %ah, %al
+#endif
+
+ test %ah, %ah
+ mov %ah, (%rdi, %rdx)
+ jz LABEL(exit)
+
+ inc %rdx
+
+#ifdef USE_AS_STRNCPY
+ dec %r11
+ jl LABEL(exitn)
+#endif
+
+ shr $16, %rax
+
+ test %al, %al
+ mov %al, (%rdi, %rdx)
+ jz LABEL(exit)
+
+ inc %rdx
+
+#ifdef USE_AS_STRNCPY
+ dec %r11
+ jl LABEL(exitn)
+
+ mov %ah, %al
+#endif
+
+ test %ah, %ah
+ mov %ah, (%rdi, %rdx)
+ jz LABEL(exit)
+
+ shr $16, %rax
+ inc %rdx
+
+ jmp LABEL(tailloop)
+
+ .p2align 4
+
+LABEL(tailafter):
+
+LABEL(exit):
+#ifdef USE_AS_STRNCPY
+ test %r11, %r11
+ mov %r11, %rcx
+
+#ifdef USE_AS_STPCPY
+ lea (%rdi, %rdx), %r8
+#else
+ mov %rdi, %r8
+#endif
+
+ jz 2f
+
+ xor %eax, %eax # bzero () would do too, but usually there are only a handful of bytes left
+ shr $3, %rcx
+ lea 1 (%rdi, %rdx), %rdi
+ jz 1f
+
+ rep stosq
+
+1:
+ mov %r11d, %ecx
+ and $7, %ecx
+ jz 2f
+
+ .p2align 4,, 3
+
3:
- /* Note that stpcpy needs to return with the value of the NUL
- byte. */
- movb %al, (%rdx) /* 1st byte. */
- testb %al, %al /* Is it NUL. */
- jz 4f /* yes, finish. */
- incq %rdx /* Increment destination. */
- movb %ah, (%rdx) /* 2nd byte. */
- testb %ah, %ah /* Is it NUL?. */
- jz 4f /* yes, finish. */
- incq %rdx /* Increment destination. */
- shrq $16, %rax /* Shift... */
- jmp 3b /* and look at next two bytes in %rax. */
+ dec %ecx
+ mov %al, (%rdi, %rcx)
+ jnz 3b
+
+ .p2align 4,, 3
+
+2:
+ mov %r8, %rax
+ ret
+
+#endif
+
+ .p2align 4
-4:
+LABEL(exitn):
#ifdef USE_AS_STPCPY
- movq %rdx, %rax /* Destination is return value. */
+ lea (%rdi, %rdx), %rax
#else
- movq %rdi, %rax /* Source is return value. */
+ mov %rdi, %rax
#endif
- retq
-END (BP_SYM (STRCPY))
-#ifndef USE_AS_STPCPY
-libc_hidden_builtin_def (strcpy)
+
+ ret
+
+END (STRCPY)
+#if !defined USE_AS_STPCPY && !defined USE_AS_STRNCPY
+libc_hidden_builtin_def (STRCPY)
#endif
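
Every unrolled iteration above repeats one five-instruction idiom (mov/add/sbb, then xor/or/sub) built around the magic constant 0xfefefefefefefeff: adding the magic to an 8-byte word produces a carry out of every byte that is non-zero, so the combined result is zero exactly when the word contains no NUL byte. A hedged C sketch of that test (word_has_nul is an illustrative name, not part of the patch):

    #include <stdint.h>

    /* Nonzero iff the 8-byte word W contains a 0x00 byte; mirrors the
       add/sbb/xor/or/sub sequence used in the loops above.  */
    static int
    word_has_nul (uint64_t w)
    {
      const uint64_t magic = 0xfefefefefefefeffULL;
      uint64_t sum = w + magic;   /* per-byte carries for non-zero bytes */
      uint64_t carry = sum < w;   /* carry out of the 64-bit add (the sbb) */
      return (((sum ^ w) | magic) + carry) != 0;
    }

Only the word that fails this test falls through to the byte-wise tail loop; every other iteration stores a full 8 bytes at once.
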
Index: sysdeps/unix/sysv/linux/x86_64/dl-procinfo.c
===================================================================
--- sysdeps/unix/sysv/linux/x86_64/dl-procinfo.c.orig
+++ sysdeps/unix/sysv/linux/x86_64/dl-procinfo.c
@@ -1,5 +1,5 @@
#ifdef IS_IN_ldconfig
# include <sysdeps/i386/dl-procinfo.c>
#else
-# include <sysdeps/generic/dl-procinfo.c>
+# include <sysdeps/x86_64/dl-procinfo.c>
#endif
Index: sysdeps/x86_64/dl-procinfo.c
===================================================================
--- /dev/null
+++ sysdeps/x86_64/dl-procinfo.c
@@ -0,0 +1,108 @@
+/* Data for x86-64 version of processor capability information.
+ Copyright (C) 2004 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Andreas Jaeger <aj@suse.de>, 2004.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+/* This information must be kept in sync with the _DL_HWCAP_COUNT and
+ _DL_PLATFORM_COUNT definitions in procinfo.h.
+
+ If anything should be added here check whether the size of each string
+ is still ok with the given array size.
+
+ All the #ifdefs in the definitions are quite irritating but
+ necessary if we want to avoid duplicating the information. There
+ are three different modes:
+
+ - PROCINFO_DECL is defined. This means we are only interested in
+ declarations.
+
+ - PROCINFO_DECL is not defined:
+
+ + if SHARED is defined the file is included in an array
+ initializer. The .element = { ... } syntax is needed.
+
+ + if SHARED is not defined a normal array initialization is
+ needed.
+ */
+
+#ifndef PROCINFO_CLASS
+#define PROCINFO_CLASS
+#endif
+
+ /* _dl_cache1size: size of L1 cache */
+#if !defined PROCINFO_DECL && defined SHARED
+ ._dl_cache1size
+#else
+PROCINFO_CLASS long int _dl_cache1size
+#endif
+#ifndef PROCINFO_DECL
+= 1024 * 64
+#endif
+#if !defined SHARED || defined PROCINFO_DECL
+;
+#else
+,
+#endif
+
+ /* _dl_cache1sizehalf: 1/2 size of L1 cache */
+#if !defined PROCINFO_DECL && defined SHARED
+ ._dl_cache1sizehalf
+#else
+PROCINFO_CLASS long int _dl_cache1sizehalf
+#endif
+#ifndef PROCINFO_DECL
+= 1024 * 64 / 2
+#endif
+#if !defined SHARED || defined PROCINFO_DECL
+;
+#else
+,
+#endif
+
+ /* _dl_cache2size: size of L2 cache */
+#if !defined PROCINFO_DECL && defined SHARED
+ ._dl_cache2size
+#else
+PROCINFO_CLASS long int _dl_cache2size
+#endif
+#ifndef PROCINFO_DECL
+= 1024 * 1024
+#endif
+#if !defined SHARED || defined PROCINFO_DECL
+;
+#else
+,
+#endif
+
+ /* _dl_cache2sizehalf: 1/2 size of L2 cache */
+#if !defined PROCINFO_DECL && defined SHARED
+ ._dl_cache2sizehalf
+#else
+PROCINFO_CLASS long int _dl_cache2sizehalf
+#endif
+#ifndef PROCINFO_DECL
+= 1024 * 1024 / 2
+#endif
+#if !defined SHARED || defined PROCINFO_DECL
+;
+#else
+,
+#endif
+
+#undef PROCINFO_DECL
+#undef PROCINFO_CLASS
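
The #if/#else ladder above is compact but opaque; written out, the first fragment expands in the three modes like this (a sketch derived from the conditions above; PROCINFO_CLASS defaults to empty and is typically set to EXTERN by ldsodefs.h in declaration mode):

    /* 1. PROCINFO_DECL defined -- declaration only:  */
    PROCINFO_CLASS long int _dl_cache1size;

    /* 2. PROCINFO_DECL undefined, SHARED defined -- designated
          initializer inside the rtld_global_ro aggregate:  */
    ._dl_cache1size = 1024 * 64,

    /* 3. PROCINFO_DECL undefined, SHARED undefined -- plain definition:  */
    long int _dl_cache1size = 1024 * 64;
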
Index: sysdeps/x86_64/elf/rtld-global-offsets.sym
===================================================================
--- /dev/null
+++ sysdeps/x86_64/elf/rtld-global-offsets.sym
@@ -0,0 +1,10 @@
+#define SHARED 1
+
+#include <ldsodefs.h>
+
+#define rtdl_global_offsetof(mem) offsetof (struct rtld_global_ro, mem)
+
+RTLD_GLOBAL_DL_CACHE1SIZE rtdl_global_offsetof (_dl_cache1size)
+RTLD_GLOBAL_DL_CACHE1SIZEHALF rtdl_global_offsetof (_dl_cache1sizehalf)
+RTLD_GLOBAL_DL_CACHE2SIZE rtdl_global_offsetof (_dl_cache2size)
+RTLD_GLOBAL_DL_CACHE2SIZEHALF rtdl_global_offsetof (_dl_cache2sizehalf)
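
Each "NAME expression" line in this .sym file is fed through glibc's gen-as-const machinery (enabled by the Makefile change above), which evaluates the expression with the C compiler and emits a header of plain #define constants that assembly can include. Roughly, and with placeholder values — the real numbers depend on the layout of struct rtld_global_ro — the generated rtld-global-offsets.h would look like:

    /* Illustrative only; the actual values come from the offsetof
       computations in the .sym file above.  */
    #define RTLD_GLOBAL_DL_CACHE1SIZE     0x80
    #define RTLD_GLOBAL_DL_CACHE1SIZEHALF 0x88
    #define RTLD_GLOBAL_DL_CACHE2SIZE     0x90
    #define RTLD_GLOBAL_DL_CACHE2SIZEHALF 0x98

This is what lets the PIC paths in strcpy.S and memcmp.S load the address of _rtld_local_ro from the GOT and index straight to the cache-size fields, e.g. "mov RTLD_GLOBAL_DL_CACHE1SIZEHALF(%r8), %r9".
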
Index: sysdeps/x86_64/memcmp.S
===================================================================
--- sysdeps/x86_64/memcmp.S.orig
+++ sysdeps/x86_64/memcmp.S
@@ -1,358 +1,442 @@
-/* memcmp with SSE2
- Copyright (C) 2009 Free Software Foundation, Inc.
- Contributed by Intel Corporation.
- This file is part of the GNU C Library.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#include <sysdep.h>
-
- .text
-ENTRY (memcmp)
- test %rdx, %rdx
- jz L(finz)
- cmpq $1, %rdx
- jle L(finr1b)
- subq %rdi, %rsi
- movq %rdx, %r10
- cmpq $32, %r10
- jge L(gt32)
- /* Handle small chunks and last block of less than 32 bytes. */
-L(small):
- testq $1, %r10
- jz L(s2b)
- movzbl (%rdi), %eax
- movzbl (%rdi, %rsi), %edx
- subq $1, %r10
- je L(finz1)
- addq $1, %rdi
- subl %edx, %eax
- jnz L(exit)
-L(s2b):
- testq $2, %r10
- jz L(s4b)
- movzwl (%rdi), %eax
- movzwl (%rdi, %rsi), %edx
- subq $2, %r10
- je L(fin2_7)
- addq $2, %rdi
- cmpl %edx, %eax
- jnz L(fin2_7)
-L(s4b):
- testq $4, %r10
- jz L(s8b)
- movl (%rdi), %eax
- movl (%rdi, %rsi), %edx
- subq $4, %r10
- je L(fin2_7)
- addq $4, %rdi
- cmpl %edx, %eax
- jnz L(fin2_7)
-L(s8b):
- testq $8, %r10
- jz L(s16b)
- movq (%rdi), %rax
- movq (%rdi, %rsi), %rdx
- subq $8, %r10
- je L(fin2_7)
- addq $8, %rdi
- cmpq %rdx, %rax
- jnz L(fin2_7)
-L(s16b):
- movdqu (%rdi), %xmm1
- movdqu (%rdi, %rsi), %xmm0
- pcmpeqb %xmm0, %xmm1
- pmovmskb %xmm1, %edx
- xorl %eax, %eax
- subl $0xffff, %edx
- jz L(finz)
- bsfl %edx, %ecx
- leaq (%rdi, %rcx), %rcx
- movzbl (%rcx), %eax
- movzbl (%rsi, %rcx), %edx
- jmp L(finz1)
-
- .p2align 4,, 4
-L(finr1b):
- movzbl (%rdi), %eax
- movzbl (%rsi), %edx
-L(finz1):
- subl %edx, %eax
-L(exit):
- ret
-
- .p2align 4,, 4
-L(fin2_7):
- cmpq %rdx, %rax
- jz L(finz)
- movq %rax, %r11
- subq %rdx, %r11
- bsfq %r11, %rcx
- sarq $3, %rcx
- salq $3, %rcx
- sarq %cl, %rax
- movzbl %al, %eax
- sarq %cl, %rdx
- movzbl %dl, %edx
- subl %edx, %eax
- ret
-
- .p2align 4,, 4
-L(finz):
- xorl %eax, %eax
- ret
-
- /* For blocks bigger than 32 bytes
- 1. Advance one of the addr pointer to be 16B aligned.
- 2. Treat the case of both addr pointers aligned to 16B
- separately to avoid movdqu.
- 3. Handle any blocks of greater than 64 consecutive bytes with
- unrolling to reduce branches.
- 4. At least one addr pointer is 16B aligned, use memory version
- of pcmbeqb.
- */
- .p2align 4,, 4
-L(gt32):
- movq %rdx, %r11
- addq %rdi, %r11
- movq %rdi, %r8
-
- andq $15, %r8
- jz L(16am)
- /* Both pointers may be misaligned. */
- movdqu (%rdi), %xmm1
- movdqu (%rdi, %rsi), %xmm0
- pcmpeqb %xmm0, %xmm1
- pmovmskb %xmm1, %edx
- subl $0xffff, %edx
- jnz L(neq)
- neg %r8
- leaq 16(%rdi, %r8), %rdi
-L(16am):
- /* Handle two 16B aligned pointers separately. */
- testq $15, %rsi
- jz L(ATR)
- testq $16, %rdi
- jz L(A32)
- movdqu (%rdi, %rsi), %xmm0
- pcmpeqb (%rdi), %xmm0
- pmovmskb %xmm0, %edx
- subl $0xffff, %edx
- jnz L(neq)
- addq $16, %rdi
-L(A32):
- movq %r11, %r10
- andq $-32, %r10
- cmpq %r10, %rdi
- jge L(mt16)
- /* Pre-unroll to be ready for unrolled 64B loop. */
- testq $32, %rdi
- jz L(A64)
- movdqu (%rdi,%rsi), %xmm0
- pcmpeqb (%rdi), %xmm0
- pmovmskb %xmm0, %edx
- subl $0xffff, %edx
- jnz L(neq)
- addq $16, %rdi
-
- movdqu (%rdi,%rsi), %xmm0
- pcmpeqb (%rdi), %xmm0
- pmovmskb %xmm0, %edx
- subl $0xffff, %edx
- jnz L(neq)
- addq $16, %rdi
-
-L(A64):
- movq %r11, %r10
- andq $-64, %r10
- cmpq %r10, %rdi
- jge L(mt32)
-
-L(A64main):
- movdqu (%rdi,%rsi), %xmm0
- pcmpeqb (%rdi), %xmm0
- pmovmskb %xmm0, %edx
- subl $0xffff, %edx
- jnz L(neq)
- addq $16, %rdi
-
- movdqu (%rdi,%rsi), %xmm0
- pcmpeqb (%rdi), %xmm0
- pmovmskb %xmm0, %edx
- subl $0xffff, %edx
- jnz L(neq)
- addq $16, %rdi
-
- movdqu (%rdi,%rsi), %xmm0
- pcmpeqb (%rdi), %xmm0
- pmovmskb %xmm0, %edx
- subl $0xffff, %edx
- jnz L(neq)
- addq $16, %rdi
-
- movdqu (%rdi,%rsi), %xmm0
- pcmpeqb (%rdi), %xmm0
- pmovmskb %xmm0, %edx
- subl $0xffff, %edx
- jnz L(neq)
- addq $16, %rdi
-
- cmpq %rdi, %r10
- jne L(A64main)
-
-L(mt32):
- movq %r11, %r10
- andq $-32, %r10
- cmpq %r10, %rdi
- jge L(mt16)
-
-L(A32main):
- movdqu (%rdi,%rsi), %xmm0
- pcmpeqb (%rdi), %xmm0
- pmovmskb %xmm0, %edx
- subl $0xffff, %edx
- jnz L(neq)
- addq $16, %rdi
-
- movdqu (%rdi,%rsi), %xmm0
- pcmpeqb (%rdi), %xmm0
- pmovmskb %xmm0, %edx
- subl $0xffff, %edx
- jnz L(neq)
- addq $16, %rdi
-
- cmpq %rdi, %r10
- jne L(A32main)
-L(mt16):
- subq %rdi, %r11
- je L(finz)
- movq %r11, %r10
- jmp L(small)
-
- .p2align 4,, 4
-L(neq):
- bsfl %edx, %ecx
- movzbl (%rdi, %rcx), %eax
- addq %rdi, %rsi
- movzbl (%rsi,%rcx), %edx
- jmp L(finz1)
-
- .p2align 4,, 4
-L(ATR):
- movq %r11, %r10
- andq $-32, %r10
- cmpq %r10, %rdi
- jge L(mt16)
- testq $16, %rdi
- jz L(ATR32)
-
- movdqa (%rdi,%rsi), %xmm0
- pcmpeqb (%rdi), %xmm0
- pmovmskb %xmm0, %edx
- subl $0xffff, %edx
- jnz L(neq)
- addq $16, %rdi
- cmpq %rdi, %r10
- je L(mt16)
-
-L(ATR32):
- movq %r11, %r10
- andq $-64, %r10
- testq $32, %rdi
- jz L(ATR64)
-
- movdqa (%rdi,%rsi), %xmm0
- pcmpeqb (%rdi), %xmm0
- pmovmskb %xmm0, %edx
- subl $0xffff, %edx
- jnz L(neq)
- addq $16, %rdi
-
- movdqa (%rdi,%rsi), %xmm0
- pcmpeqb (%rdi), %xmm0
- pmovmskb %xmm0, %edx
- subl $0xffff, %edx
- jnz L(neq)
- addq $16, %rdi
-
-L(ATR64):
- cmpq %rdi, %r10
- je L(mt32)
-
-L(ATR64main):
- movdqa (%rdi,%rsi), %xmm0
- pcmpeqb (%rdi), %xmm0
- pmovmskb %xmm0, %edx
- subl $0xffff, %edx
- jnz L(neq)
- addq $16, %rdi
-
- movdqa (%rdi,%rsi), %xmm0
- pcmpeqb (%rdi), %xmm0
- pmovmskb %xmm0, %edx
- subl $0xffff, %edx
- jnz L(neq)
- addq $16, %rdi
-
- movdqa (%rdi,%rsi), %xmm0
- pcmpeqb (%rdi), %xmm0
- pmovmskb %xmm0, %edx
- subl $0xffff, %edx
- jnz L(neq)
- addq $16, %rdi
-
- movdqa (%rdi,%rsi), %xmm0
- pcmpeqb (%rdi), %xmm0
- pmovmskb %xmm0, %edx
- subl $0xffff, %edx
- jnz L(neq)
- addq $16, %rdi
- cmpq %rdi, %r10
- jne L(ATR64main)
-
- movq %r11, %r10
- andq $-32, %r10
- cmpq %r10, %rdi
- jge L(mt16)
-
-L(ATR32res):
- movdqa (%rdi,%rsi), %xmm0
- pcmpeqb (%rdi), %xmm0
- pmovmskb %xmm0, %edx
- subl $0xffff, %edx
- jnz L(neq)
- addq $16, %rdi
-
- movdqa (%rdi,%rsi), %xmm0
- pcmpeqb (%rdi), %xmm0
- pmovmskb %xmm0, %edx
- subl $0xffff, %edx
- jnz L(neq)
- addq $16, %rdi
-
- cmpq %r10, %rdi
- jne L(ATR32res)
-
- subq %rdi, %r11
- je L(finz)
- movq %r11, %r10
- jmp L(small)
- /* Align to 16byte to improve instruction fetch. */
- .p2align 4,, 4
-END(memcmp)
+# $Header: /K8_Projects/Glibc/amd64memcmp.S 4 10/06/03 10:57 Emenezes $
+
+# (c) 2002 Advanced Micro Devices, Inc.
+# YOUR USE OF THIS CODE IS SUBJECT TO THE TERMS
+# AND CONDITIONS OF THE GNU LESSER GENERAL PUBLIC
+# LICENSE FOUND IN THE "README" FILE THAT IS
+# INCLUDED WITH THIS FILE
+
+#include "sysdep.h"
+#include <rtld-global-offsets.h>
+
+#ifdef PIC
+ .globl _rtld_local_ro
+ .hidden _rtld_local_ro
+ .set _rtld_local_ro,_rtld_global_ro
+#endif
+
+ .text
+
+ENTRY (memcmp) # (const void *, const void*, size_t)
+
+L(memcmptry1):
+ cmp $8, %rdx
+ jae L(memcmp1after)
+
+L(memcmp1): # 1-byte
+ test %rdx, %rdx
+ mov $0, %eax
+ jz L(memcmpexit)
+
+L(memcmp1loop):
+ movzbl (%rdi), %eax
+ movzbl (%rsi), %ecx
+ sub %ecx, %eax
+ jnz L(memcmpexit)
+
+ dec %rdx
+
+ lea 1 (%rdi), %rdi
+ lea 1 (%rsi), %rsi
+
+ jnz L(memcmp1loop)
+
+L(memcmpexit):
+ rep
+ ret
+
+ .p2align 4
+
+L(memcmp1after):
+
+L(memcmp8try):
+ cmp $32, %rdx
+ jae L(memcmp8after)
+
+L(memcmp8): # 8-byte
+ mov %edx, %ecx
+ shr $3, %ecx
+ jz L(memcmp1)
+
+ .p2align 4
+
+L(memcmp8loop):
+ mov (%rsi), %rax
+ cmp (%rdi), %rax
+ jne L(memcmp1)
+
+ sub $8, %rdx
+ dec %ecx
+
+ lea 8 (%rsi), %rsi
+ lea 8 (%rdi), %rdi
+
+ jnz L(memcmp8loop)
+
+L(memcmp8skip):
+ and $7, %edx
+ jnz L(memcmp1)
+
+ xor %eax, %eax
+ ret
+
+ .p2align 4
+
+L(memcmp8after):
+
+L(memcmp32try):
+ cmp $2048, %rdx
+ ja L(memcmp32after)
+
+L(memcmp32): # 32-byte
+ mov %edx, %ecx
+ shr $5, %ecx
+ jz L(memcmp8)
+
+ .p2align 4
+
+L(memcmp32loop):
+ mov (%rsi), %rax
+ mov 8 (%rsi), %r8
+ mov 16 (%rsi), %r9
+ mov 24 (%rsi), %r10
+ sub (%rdi), %rax
+ sub 8 (%rdi), %r8
+ sub 16 (%rdi), %r9
+ sub 24 (%rdi), %r10
+
+ or %rax, %r8
+ or %r9, %r10
+ or %r8, %r10
+ jnz L(memcmp8)
+
+ sub $32, %rdx
+ dec %ecx
+
+ lea 32 (%rsi), %rsi
+ lea 32 (%rdi), %rdi
+
+ jnz L(memcmp32loop)
+
+L(memcmp32skip):
+ and $31, %edx
+ jnz L(memcmp8)
+
+ xor %eax, %eax
+ ret
+
+ .p2align 4
+
+L(memcmp32after):
+
+#ifdef PIC
+ mov _rtld_local_ro@GOTPCREL(%rip), %r8
+ mov RTLD_GLOBAL_DL_CACHE1SIZEHALF(%r8), %r9
+#else
+ mov _dl_cache1sizehalf, %r9
+#endif
+ prefetcht0 (%r9)
+
+
+.alignsrctry:
+ mov %esi, %r8d # align by source
+
+ and $7, %r8d
+ jz .alignsrcafter # not unaligned
+
+.alignsrc: # align
+ lea -8 (%r8, %rdx), %rdx
+ sub $8, %r8d
+
+# .p2align 4
+
+.alignsrcloop:
+ movzbl (%rdi), %eax
+ movzbl (%rsi), %ecx
+ sub %ecx, %eax
+ jnz L(memcmpexit)
+
+ inc %r8d
+
+ lea 1 (%rdi), %rdi
+ lea 1 (%rsi), %rsi
+
+ jnz .alignsrcloop
+
+ .p2align 4
+
+.alignsrcafter:
+
+
+L(memcmp64try):
+#ifdef PIC
+ mov _rtld_local_ro@GOTPCREL(%rip), %r8
+ mov RTLD_GLOBAL_DL_CACHE1SIZEHALF(%r8), %rcx
+#else
+ mov _dl_cache1sizehalf, %rcx
+#endif
+ cmp %rdx, %rcx
+ cmova %rdx, %rcx
+
+L(memcmp64): # 64-byte
+ shr $6, %rcx
+ jz L(memcmp32)
+
+ .p2align 4
+
+L(memcmp64loop):
+ mov (%rsi), %rax
+ mov 8 (%rsi), %r8
+ sub (%rdi), %rax
+ sub 8 (%rdi), %r8
+ or %r8, %rax
+
+ mov 16 (%rsi), %r9
+ mov 24 (%rsi), %r10
+ sub 16 (%rdi), %r9
+ sub 24 (%rdi), %r10
+ or %r10, %r9
+
+ or %r9, %rax
+ jnz L(memcmp32)
+
+ mov 32 (%rsi), %rax
+ mov 40 (%rsi), %r8
+ sub 32 (%rdi), %rax
+ sub 40 (%rdi), %r8
+ or %r8, %rax
+
+ mov 48 (%rsi), %r9
+ mov 56 (%rsi), %r10
+ sub 48 (%rdi), %r9
+ sub 56 (%rdi), %r10
+ or %r10, %r9
+
+ or %r9, %rax
+ jnz L(memcmp32)
+
+ lea 64 (%rsi), %rsi
+ lea 64 (%rdi), %rdi
+
+ sub $64, %rdx
+ dec %rcx
+ jnz L(memcmp64loop)
+
+# .p2align 4
+
+L(memcmp64skip):
+ cmp $2048, %rdx
+ ja L(memcmp64after)
+
+ test %edx, %edx
+ jnz L(memcmp32)
+
+ xor %eax, %eax
+ ret
+
+ .p2align 4
+
+L(memcmp64after):
+
+L(memcmppretry):
+
+L(memcmppre): # 64-byte prefetching
+#ifdef PIC
+ mov _rtld_local_ro@GOTPCREL(%rip), %r8
+ mov RTLD_GLOBAL_DL_CACHE2SIZEHALF(%r8), %rcx
+#else
+ mov _dl_cache2sizehalf, %rcx
+#endif
+ cmp %rdx, %rcx
+ cmova %rdx, %rcx
+
+ shr $6, %rcx
+ jz L(memcmppreskip)
+
+ prefetcht0 512 (%rsi)
+ prefetcht0 512 (%rdi)
+
+ mov (%rsi), %rax
+ mov 8 (%rsi), %r9
+ mov 16 (%rsi), %r10
+ mov 24 (%rsi), %r11
+ sub (%rdi), %rax
+ sub 8 (%rdi), %r9
+ sub 16 (%rdi), %r10
+ sub 24 (%rdi), %r11
+
+ or %r9, %rax
+ or %r11, %r10
+ or %r10, %rax
+ jnz L(memcmp32)
+
+ mov 32 (%rsi), %rax
+ mov 40 (%rsi), %r9
+ mov 48 (%rsi), %r10
+ mov 56 (%rsi), %r11
+ sub 32 (%rdi), %rax
+ sub 40 (%rdi), %r9
+ sub 48 (%rdi), %r10
+ sub 56 (%rdi), %r11
+
+ or %r9, %rax
+ or %r11, %r10
+ or %r10, %rax
+ jnz L(memcmp32)
+
+ lea 64 (%rsi), %rsi
+ lea 64 (%rdi), %rdi
+
+ sub $64, %rdx
+ dec %rcx
+
+ .p2align 4
+
+L(memcmppreloop):
+ prefetcht0 512 (%rsi)
+ prefetcht0 512 (%rdi)
+
+ mov (%rsi), %rax
+ mov 8 (%rsi), %r9
+ mov 16 (%rsi), %r10
+ mov 24 (%rsi), %r11
+ sub (%rdi), %rax
+ sub 8 (%rdi), %r9
+ sub 16 (%rdi), %r10
+ sub 24 (%rdi), %r11
+
+ or %r9, %rax
+ or %r11, %r10
+ or %r10, %rax
+ jnz L(memcmp32)
+
+ mov 32 (%rsi), %rax
+ mov 40 (%rsi), %r9
+ mov 48 (%rsi), %r10
+ mov 56 (%rsi), %r11
+ sub 32 (%rdi), %rax
+ sub 40 (%rdi), %r9
+ sub 48 (%rdi), %r10
+ sub 56 (%rdi), %r11
+
+ or %r9, %rax
+ or %r11, %r10
+ or %r10, %rax
+ jnz L(memcmp32)
+
+ lea 64 (%rsi), %rsi
+ lea 64 (%rdi), %rdi
+
+ sub $64, %rdx
+ dec %rcx
+ jnz L(memcmppreloop)
+
+# .p2align 4
+
+L(memcmppreskip):
+ cmp $2048, %rdx
+ ja L(memcmppreafter)
+
+ test %edx, %edx
+ jnz L(memcmp32)
+
+ xor %eax, %eax
+ ret
+
+ .p2align 4
+
+L(memcmppreafter):
+
+L(memcmp128try):
+
+L(memcmp128): # 128-byte
+ mov %rdx, %rcx
+ shr $7, %rcx
+ jz L(memcmp128skip)
+
+ .p2align 4
+
+L(memcmp128loop):
+ prefetcht0 512 (%rsi)
+ prefetcht0 512 (%rdi)
+
+ mov (%rsi), %rax
+ mov 8 (%rsi), %r8
+ sub (%rdi), %rax
+ sub 8 (%rdi), %r8
+ mov 16 (%rsi), %r9
+ mov 24 (%rsi), %r10
+ sub 16 (%rdi), %r9
+ sub 24 (%rdi), %r10
+
+ or %r8, %rax
+ or %r9, %r10
+ or %r10, %rax
+
+ mov 32 (%rsi), %r8
+ mov 40 (%rsi), %r9
+ sub 32 (%rdi), %r8
+ sub 40 (%rdi), %r9
+ mov 48 (%rsi), %r10
+ mov 56 (%rsi), %r11
+ sub 48 (%rdi), %r10
+ sub 56 (%rdi), %r11
+
+ or %r9, %r8
+ or %r11, %r10
+ or %r10, %r8
+
+ or %r8, %rax
+ jnz L(memcmp32)
+
+ prefetcht0 576 (%rsi)
+ prefetcht0 576 (%rdi)
+
+ mov 64 (%rsi), %rax
+ mov 72 (%rsi), %r8
+ sub 64 (%rdi), %rax
+ sub 72 (%rdi), %r8
+ mov 80 (%rsi), %r9
+ mov 88 (%rsi), %r10
+ sub 80 (%rdi), %r9
+ sub 88 (%rdi), %r10
+
+ or %r8, %rax
+ or %r9, %r10
+ or %r10, %rax
+
+ mov 96 (%rsi), %r8
+ mov 104 (%rsi), %r9
+ sub 96 (%rdi), %r8
+ sub 104 (%rdi), %r9
+ mov 112 (%rsi), %r10
+ mov 120 (%rsi), %r11
+ sub 112 (%rdi), %r10
+ sub 120 (%rdi), %r11
+
+ or %r9, %r8
+ or %r11, %r10
+ or %r10, %r8
+
+ or %r8, %rax
+ jnz L(memcmp32)
+
+ sub $128, %rdx
+ dec %rcx
+
+ lea 128 (%rsi), %rsi
+ lea 128 (%rdi), %rdi
+
+ jnz L(memcmp128loop)
+
+L(memcmp128skip):
+ and $127, %edx
+ jnz L(memcmp32)
+
+ xor %eax, %eax
+ ret
+
+END (memcmp)
#undef bcmp
weak_alias (memcmp, bcmp)
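
The replacement memcmp chooses its block size from the length and the CPUID-derived cache limits, but each block test is the same trick: subtract corresponding 64-bit words, OR the differences together, and only drop back to a byte-wise scan once some block differs. A hedged C sketch of the 32-byte variant (memcmp_sketch is an illustrative name; memcpy stands in for the unaligned loads):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    static int
    memcmp_sketch (const void *s1, const void *s2, size_t n)
    {
      const unsigned char *a = s1, *b = s2;

      while (n >= 32)
        {
          uint64_t d = 0, w1, w2;
          for (int i = 0; i < 32; i += 8)
            {
              memcpy (&w1, a + i, 8);
              memcpy (&w2, b + i, 8);
              d |= w1 - w2;         /* nonzero iff the two words differ */
            }
          if (d)
            break;                  /* locate the exact byte below */
          a += 32, b += 32, n -= 32;
        }

      while (n--)                   /* byte tail; also resolves a mismatch */
        {
          if (*a != *b)
            return (int) *a - (int) *b;
          a++, b++;
        }
      return 0;
    }
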
Index: sysdeps/x86_64/strcmp.S
===================================================================
--- sysdeps/x86_64/strcmp.S.orig
+++ sysdeps/x86_64/strcmp.S
@@ -1,2108 +1,490 @@
-/* Highly optimized version for x86-64.
- Copyright (C) 1999, 2000, 2002, 2003, 2005, 2009
- Free Software Foundation, Inc.
- This file is part of the GNU C Library.
- Based on i686 version contributed by Ulrich Drepper
- <drepper@cygnus.com>, 1999.
- Updated with SSE2 support contributed by Intel Corporation.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
#include <sysdep.h>
#include "asm-syntax.h"
#include "bp-sym.h"
#include "bp-asm.h"
-#undef UPDATE_STRNCMP_COUNTER
-
#ifndef LABEL
#define LABEL(l) L(l)
#endif
-#ifdef USE_AS_STRNCMP
-/* Since the counter, %r11, is unsigned, we branch to strcmp_exitz
- if the new counter > the old one or is 0. */
-# define UPDATE_STRNCMP_COUNTER \
- /* calculate left number to compare */ \
- lea -16(%rcx, %r11), %r9; \
- cmp %r9, %r11; \
- jb LABEL(strcmp_exitz); \
- test %r9, %r9; \
- je LABEL(strcmp_exitz); \
- mov %r9, %r11
-
-#else
-# define UPDATE_STRNCMP_COUNTER
+#ifndef USE_AS_STRNCMP
# ifndef STRCMP
# define STRCMP strcmp
# endif
#endif
-
-#ifndef USE_SSSE3
.text
-#else
- .section .text.ssse3,"ax",@progbits
-#endif
-ENTRY (BP_SYM (STRCMP))
-#ifdef NOT_IN_libc
-/* Simple version since we can't use SSE registers in ld.so. */
-L(oop): movb (%rdi), %al
- cmpb (%rsi), %al
- jne L(neq)
- incq %rdi
- incq %rsi
- testb %al, %al
- jnz L(oop)
-
- xorl %eax, %eax
- ret
-
-L(neq): movl $1, %eax
- movl $-1, %ecx
- cmovbl %ecx, %eax
- ret
-END (BP_SYM (STRCMP))
-#else /* NOT_IN_libc */
-/*
- * This implementation uses SSE to compare up to 16 bytes at a time.
- */
-#ifdef USE_AS_STRNCMP
- test %rdx, %rdx
- je LABEL(strcmp_exitz)
- cmp $1, %rdx
- je LABEL(Byte0)
- mov %rdx, %r11
-#endif
- mov %esi, %ecx
- mov %edi, %eax
-/* Use 64bit AND here to avoid long NOP padding. */
- and $0x3f, %rcx /* rsi alignment in cache line */
- and $0x3f, %rax /* rdi alignment in cache line */
- cmp $0x30, %ecx
- ja LABEL(crosscache) /* rsi: 16-byte load will cross cache line */
- cmp $0x30, %eax
- ja LABEL(crosscache) /* rdi: 16-byte load will cross cache line */
- movlpd (%rdi), %xmm1
- movlpd (%rsi), %xmm2
- movhpd 8(%rdi), %xmm1
- movhpd 8(%rsi), %xmm2
- pxor %xmm0, %xmm0 /* clear %xmm0 for null char checks */
- pcmpeqb %xmm1, %xmm0 /* Any null chars? */
- pcmpeqb %xmm2, %xmm1 /* compare first 16 bytes for equality */
- psubb %xmm0, %xmm1 /* packed sub of comparison results*/
- pmovmskb %xmm1, %edx
- sub $0xffff, %edx /* if first 16 bytes are same, edx == 0xffff */
- jnz LABEL(less16bytes) /* If not, find different value or null char */
-#ifdef USE_AS_STRNCMP
- sub $16, %r11
- jbe LABEL(strcmp_exitz) /* finish comparision */
-#endif
- add $16, %rsi /* prepare to search next 16 bytes */
- add $16, %rdi /* prepare to search next 16 bytes */
+ENTRY (STRCMP) # (const char *, const char *)
- /*
- * Determine source and destination string offsets from 16-byte alignment.
- * Use relative offset difference between the two to determine which case
- * below to use.
- */
- .p2align 4
-LABEL(crosscache):
- and $0xfffffffffffffff0, %rsi /* force %rsi is 16 byte aligned */
- and $0xfffffffffffffff0, %rdi /* force %rdi is 16 byte aligned */
- mov $0xffff, %edx /* for equivalent offset */
- xor %r8d, %r8d
- and $0xf, %ecx /* offset of rsi */
- and $0xf, %eax /* offset of rdi */
- cmp %eax, %ecx
- je LABEL(ashr_0) /* rsi and rdi relative offset same */
- ja LABEL(bigger)
- mov %edx, %r8d /* r8d is offset flag for exit tail */
- xchg %ecx, %eax
- xchg %rsi, %rdi
-LABEL(bigger):
- lea 15(%rax), %r9
- sub %rcx, %r9
- lea LABEL(unaligned_table)(%rip), %r10
- movslq (%r10, %r9,4), %r9
- lea (%r10, %r9), %r10
- jmp *%r10 /* jump to corresponding case */
-
-/*
- * The following cases will be handled by ashr_0
- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
- * n(0~15) n(0~15) 15(15+ n-n) ashr_0
- */
- .p2align 4
-LABEL(ashr_0):
-
- movdqa (%rsi), %xmm1
- pxor %xmm0, %xmm0 /* clear %xmm0 for null char check */
- pcmpeqb %xmm1, %xmm0 /* Any null chars? */
- pcmpeqb (%rdi), %xmm1 /* compare 16 bytes for equality */
- psubb %xmm0, %xmm1 /* packed sub of comparison results*/
- pmovmskb %xmm1, %r9d
- shr %cl, %edx /* adjust 0xffff for offset */
- shr %cl, %r9d /* adjust for 16-byte offset */
- sub %r9d, %edx
- /*
- * edx must be the same with r9d if in left byte (16-rcx) is equal to
- * the start from (16-rax) and no null char was seen.
- */
- jne LABEL(less32bytes) /* mismatch or null char */
- UPDATE_STRNCMP_COUNTER
- mov $16, %rcx
- mov $16, %r9
- pxor %xmm0, %xmm0 /* clear xmm0, may have changed above */
-
- /*
- * Now both strings are aligned at 16-byte boundary. Loop over strings
- * checking 32-bytes per iteration.
- */
- .p2align 4
-LABEL(loop_ashr_0):
- movdqa (%rsi, %rcx), %xmm1
- movdqa (%rdi, %rcx), %xmm2
-
- pcmpeqb %xmm1, %xmm0
- pcmpeqb %xmm2, %xmm1
- psubb %xmm0, %xmm1
- pmovmskb %xmm1, %edx
- sub $0xffff, %edx
- jnz LABEL(exit) /* mismatch or null char seen */
+ xor %ecx, %ecx
-#ifdef USE_AS_STRNCMP
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
- add $16, %rcx
- movdqa (%rsi, %rcx), %xmm1
- movdqa (%rdi, %rcx), %xmm2
-
- pcmpeqb %xmm1, %xmm0
- pcmpeqb %xmm2, %xmm1
- psubb %xmm0, %xmm1
- pmovmskb %xmm1, %edx
- sub $0xffff, %edx
- jnz LABEL(exit)
-#ifdef USE_AS_STRNCMP
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
- add $16, %rcx
- jmp LABEL(loop_ashr_0)
+#ifdef USE_AS_STRNCMP // (const char *, const char *, size_t)
+ mov %r14, -8 (%rsp)
+ mov %rdx, %r14
-/*
- * The following cases will be handled by ashr_1
- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
- * n(15) n -15 0(15 +(n-15) - n) ashr_1
- */
- .p2align 4
-LABEL(ashr_1):
- pxor %xmm0, %xmm0
- movdqa (%rdi), %xmm2
- movdqa (%rsi), %xmm1
- pcmpeqb %xmm1, %xmm0 /* Any null chars? */
- pslldq $15, %xmm2 /* shift first string to align with second */
- pcmpeqb %xmm1, %xmm2 /* compare 16 bytes for equality */
- psubb %xmm0, %xmm2 /* packed sub of comparison results*/
- pmovmskb %xmm2, %r9d
- shr %cl, %edx /* adjust 0xffff for offset */
- shr %cl, %r9d /* adjust for 16-byte offset */
- sub %r9d, %edx
- jnz LABEL(less32bytes) /* mismatch or null char seen */
- movdqa (%rdi), %xmm3
- UPDATE_STRNCMP_COUNTER
-
- pxor %xmm0, %xmm0
- mov $16, %rcx /* index for loads*/
- mov $1, %r9d /* byte position left over from less32bytes case */
- /*
- * Setup %r10 value allows us to detect crossing a page boundary.
- * When %r10 goes positive we have crossed a page boundary and
- * need to do a nibble.
- */
- lea 1(%rdi), %r10
- and $0xfff, %r10 /* offset into 4K page */
- sub $0x1000, %r10 /* subtract 4K pagesize */
-
- .p2align 4
-LABEL(loop_ashr_1):
- add $16, %r10
- jg LABEL(nibble_ashr_1) /* cross page boundary */
-
-LABEL(gobble_ashr_1):
- movdqa (%rsi, %rcx), %xmm1
- movdqa (%rdi, %rcx), %xmm2
- movdqa %xmm2, %xmm4 /* store for next cycle */
-
-#ifndef USE_SSSE3
- psrldq $1, %xmm3
- pslldq $15, %xmm2
- por %xmm3, %xmm2 /* merge into one 16byte value */
-#else
- palignr $1, %xmm3, %xmm2 /* merge into one 16byte value */
-#endif
-
- pcmpeqb %xmm1, %xmm0
- pcmpeqb %xmm2, %xmm1
- psubb %xmm0, %xmm1
- pmovmskb %xmm1, %edx
- sub $0xffff, %edx
- jnz LABEL(exit)
-
-#ifdef USE_AS_STRNCMP
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
+ test %rdx, %rdx
+ mov %edx, %eax
+ jz LABEL(exitz)
#endif
- add $16, %rcx
- movdqa %xmm4, %xmm3
- add $16, %r10
- jg LABEL(nibble_ashr_1) /* cross page boundary */
+LABEL(aligntry):
+ mov %rsi, %r8 # align by "source"
+ and $8 - 1, %r8 # between 0 and 8 characters compared
+ jz LABEL(alignafter)
- movdqa (%rsi, %rcx), %xmm1
- movdqa (%rdi, %rcx), %xmm2
- movdqa %xmm2, %xmm4 /* store for next cycle */
-
-#ifndef USE_SSSE3
- psrldq $1, %xmm3
- pslldq $15, %xmm2
- por %xmm3, %xmm2 /* merge into one 16byte value */
-#else
- palignr $1, %xmm3, %xmm2 /* merge into one 16byte value */
-#endif
-
- pcmpeqb %xmm1, %xmm0
- pcmpeqb %xmm2, %xmm1
- psubb %xmm0, %xmm1
- pmovmskb %xmm1, %edx
- sub $0xffff, %edx
- jnz LABEL(exit)
+LABEL(align):
+ sub $8, %r8
-#ifdef USE_AS_STRNCMP
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
- add $16, %rcx
- movdqa %xmm4, %xmm3
- jmp LABEL(loop_ashr_1)
-
- /*
- * Nibble avoids loads across page boundary. This is to avoid a potential
- * access into unmapped memory.
- */
- .p2align 4
-LABEL(nibble_ashr_1):
- pcmpeqb %xmm3, %xmm0 /* check nibble for null char*/
- pmovmskb %xmm0, %edx
- test $0xfffe, %edx
- jnz LABEL(ashr_1_exittail) /* find null char*/
+ .p2align 4
+
+LABEL(alignloop):
+ mov (%rsi, %rcx), %al
+ mov (%rdi, %rcx), %dl
#ifdef USE_AS_STRNCMP
- cmp $14, %r11
- jbe LABEL(ashr_1_exittail)
+ dec %r14
+ jl LABEL(exitafter)
#endif
- pxor %xmm0, %xmm0
- sub $0x1000, %r10 /* substract 4K from %r10 */
- jmp LABEL(gobble_ashr_1)
-
- /*
- * Once find null char, determine if there is a string mismatch
- * before the null char.
- */
- .p2align 4
-LABEL(ashr_1_exittail):
- movdqa (%rsi, %rcx), %xmm1
- psrldq $1, %xmm0
- psrldq $1, %xmm3
- jmp LABEL(aftertail)
-
-/*
- * The following cases will be handled by ashr_2
- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
- * n(14~15) n -14 1(15 +(n-14) - n) ashr_2
- */
- .p2align 4
-LABEL(ashr_2):
- pxor %xmm0, %xmm0
- movdqa (%rdi), %xmm2
- movdqa (%rsi), %xmm1
- pcmpeqb %xmm1, %xmm0
- pslldq $14, %xmm2
- pcmpeqb %xmm1, %xmm2
- psubb %xmm0, %xmm2
- pmovmskb %xmm2, %r9d
- shr %cl, %edx
- shr %cl, %r9d
- sub %r9d, %edx
- jnz LABEL(less32bytes)
- movdqa (%rdi), %xmm3
- UPDATE_STRNCMP_COUNTER
-
- pxor %xmm0, %xmm0
- mov $16, %rcx /* index for loads */
- mov $2, %r9d /* byte position left over from less32bytes case */
- /*
- * Setup %r10 value allows us to detect crossing a page boundary.
- * When %r10 goes positive we have crossed a page boundary and
- * need to do a nibble.
- */
- lea 2(%rdi), %r10
- and $0xfff, %r10 /* offset into 4K page */
- sub $0x1000, %r10 /* subtract 4K pagesize */
-
- .p2align 4
-LABEL(loop_ashr_2):
- add $16, %r10
- jg LABEL(nibble_ashr_2)
-
-LABEL(gobble_ashr_2):
- movdqa (%rsi, %rcx), %xmm1
- movdqa (%rdi, %rcx), %xmm2
- movdqa %xmm2, %xmm4
-
-#ifndef USE_SSSE3
- psrldq $2, %xmm3
- pslldq $14, %xmm2
- por %xmm3, %xmm2 /* merge into one 16byte value */
-#else
- palignr $2, %xmm3, %xmm2 /* merge into one 16byte value */
-#endif
-
- pcmpeqb %xmm1, %xmm0
- pcmpeqb %xmm2, %xmm1
- psubb %xmm0, %xmm1
- pmovmskb %xmm1, %edx
- sub $0xffff, %edx
- jnz LABEL(exit)
+ cmp %dl, %al # check if same character
+ jne LABEL(exitafter)
+ test %al, %al # check if character a NUL
+ jz LABEL(exitafter)
-#ifdef USE_AS_STRNCMP
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
+ inc %ecx
- add $16, %rcx
- movdqa %xmm4, %xmm3
+ inc %r8
+ jnz LABEL(alignloop)
- add $16, %r10
- jg LABEL(nibble_ashr_2) /* cross page boundary */
-
- movdqa (%rsi, %rcx), %xmm1
- movdqa (%rdi, %rcx), %xmm2
- movdqa %xmm2, %xmm4
-
-#ifndef USE_SSSE3
- psrldq $2, %xmm3
- pslldq $14, %xmm2
- por %xmm3, %xmm2 /* merge into one 16byte value */
-#else
- palignr $2, %xmm3, %xmm2 /* merge into one 16byte value */
-#endif
-
- pcmpeqb %xmm1, %xmm0
- pcmpeqb %xmm2, %xmm1
- psubb %xmm0, %xmm1
- pmovmskb %xmm1, %edx
- sub $0xffff, %edx
- jnz LABEL(exit)
+ .p2align 4
-#ifdef USE_AS_STRNCMP
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
+LABEL(alignafter):
- add $16, %rcx
- movdqa %xmm4, %xmm3
- jmp LABEL(loop_ashr_2)
-
- .p2align 4
-LABEL(nibble_ashr_2):
- pcmpeqb %xmm3, %xmm0 /* check nibble for null char */
- pmovmskb %xmm0, %edx
- test $0xfffc, %edx
- jnz LABEL(ashr_2_exittail)
+ mov %r15, -32 (%rsp)
+ mov %rbp, -24 (%rsp)
+ mov %rbx, -16 (%rsp)
-#ifdef USE_AS_STRNCMP
- cmp $13, %r11
- jbe LABEL(ashr_2_exittail)
-#endif
+LABEL(pagealigntry): # page align by "destination"
+ mov $4096, %r15d # page size is 4096
+ lea (%rdi, %rcx), %ebp
+ and $4095, %ebp # page mask
+ sub %r15d, %ebp
- pxor %xmm0, %xmm0
- sub $0x1000, %r10
- jmp LABEL(gobble_ashr_2)
-
- .p2align 4
-LABEL(ashr_2_exittail):
- movdqa (%rsi, %rcx), %xmm1
- psrldq $2, %xmm0
- psrldq $2, %xmm3
- jmp LABEL(aftertail)
-
-/*
- * The following cases will be handled by ashr_3
- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
- * n(13~15) n -13 2(15 +(n-13) - n) ashr_3
- */
- .p2align 4
-LABEL(ashr_3):
- pxor %xmm0, %xmm0
- movdqa (%rdi), %xmm2
- movdqa (%rsi), %xmm1
- pcmpeqb %xmm1, %xmm0
- pslldq $13, %xmm2
- pcmpeqb %xmm1, %xmm2
- psubb %xmm0, %xmm2
- pmovmskb %xmm2, %r9d
- shr %cl, %edx
- shr %cl, %r9d
- sub %r9d, %edx
- jnz LABEL(less32bytes)
- movdqa (%rdi), %xmm3
-
- UPDATE_STRNCMP_COUNTER
-
- pxor %xmm0, %xmm0
- mov $16, %rcx /* index for loads */
- mov $3, %r9d /* byte position left over from less32bytes case */
- /*
- * Setup %r10 value allows us to detect crossing a page boundary.
- * When %r10 goes positive we have crossed a page boundary and
- * need to do a nibble.
- */
- lea 3(%rdi), %r10
- and $0xfff, %r10 /* offset into 4K page */
- sub $0x1000, %r10 /* subtract 4K pagesize */
-
- .p2align 4
-LABEL(loop_ashr_3):
- add $16, %r10
- jg LABEL(nibble_ashr_3)
-
-LABEL(gobble_ashr_3):
- movdqa (%rsi, %rcx), %xmm1
- movdqa (%rdi, %rcx), %xmm2
- movdqa %xmm2, %xmm4
-
-#ifndef USE_SSSE3
- psrldq $3, %xmm3
- pslldq $13, %xmm2
- por %xmm3, %xmm2 /* merge into one 16byte value */
-#else
- palignr $3, %xmm3, %xmm2 /* merge into one 16byte value */
-#endif
-
- pcmpeqb %xmm1, %xmm0
- pcmpeqb %xmm2, %xmm1
- psubb %xmm0, %xmm1
- pmovmskb %xmm1, %edx
- sub $0xffff, %edx
- jnz LABEL(exit)
+LABEL(64): # 64-byte
+ mov $0xfefefefefefefeff, %rbx # magic number
-#ifdef USE_AS_STRNCMP
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
+ .p2align 4
- add $16, %rcx
- movdqa %xmm4, %xmm3
+LABEL(64loop):
+ add $64, %ebp # check if "destination" crosses a page unevenly
+ jle LABEL(64gobble)
- add $16, %r10
- jg LABEL(nibble_ashr_3) /* cross page boundary */
-
- movdqa (%rsi, %rcx), %xmm1
- movdqa (%rdi, %rcx), %xmm2
- movdqa %xmm2, %xmm4
-
-#ifndef USE_SSSE3
- psrldq $3, %xmm3
- pslldq $13, %xmm2
- por %xmm3, %xmm2 /* merge into one 16byte value */
-#else
- palignr $3, %xmm3, %xmm2 /* merge into one 16byte value */
-#endif
-
- pcmpeqb %xmm1, %xmm0
- pcmpeqb %xmm2, %xmm1
- psubb %xmm0, %xmm1
- pmovmskb %xmm1, %edx
- sub $0xffff, %edx
- jnz LABEL(exit)
+ sub %r15d, %ebp
+ lea 64 (%rcx), %r8
-#ifdef USE_AS_STRNCMP
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
+ .p2align 4
- add $16, %rcx
- movdqa %xmm4, %xmm3
- jmp LABEL(loop_ashr_3)
-
- .p2align 4
-LABEL(nibble_ashr_3):
- pcmpeqb %xmm3, %xmm0 /* check nibble for null char */
- pmovmskb %xmm0, %edx
- test $0xfff8, %edx
- jnz LABEL(ashr_3_exittail)
+LABEL(64nibble):
+ mov (%rsi, %rcx), %al
+ mov (%rdi, %rcx), %dl
#ifdef USE_AS_STRNCMP
- cmp $12, %r11
- jbe LABEL(ashr_3_exittail)
+ dec %r14
+ jl .exit
#endif
- pxor %xmm0, %xmm0
- sub $0x1000, %r10
- jmp LABEL(gobble_ashr_3)
-
- .p2align 4
-LABEL(ashr_3_exittail):
- movdqa (%rsi, %rcx), %xmm1
- psrldq $3, %xmm0
- psrldq $3, %xmm3
- jmp LABEL(aftertail)
-
-/*
- * The following cases will be handled by ashr_4
- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
- * n(12~15) n -12 3(15 +(n-12) - n) ashr_4
- */
- .p2align 4
-LABEL(ashr_4):
- pxor %xmm0, %xmm0
- movdqa (%rdi), %xmm2
- movdqa (%rsi), %xmm1
- pcmpeqb %xmm1, %xmm0
- pslldq $12, %xmm2
- pcmpeqb %xmm1, %xmm2
- psubb %xmm0, %xmm2
- pmovmskb %xmm2, %r9d
- shr %cl, %edx
- shr %cl, %r9d
- sub %r9d, %edx
- jnz LABEL(less32bytes)
- movdqa (%rdi), %xmm3
-
- UPDATE_STRNCMP_COUNTER
-
- pxor %xmm0, %xmm0
- mov $16, %rcx /* index for loads */
- mov $4, %r9d /* byte position left over from less32bytes case */
- /*
- * Setup %r10 value allows us to detect crossing a page boundary.
- * When %r10 goes positive we have crossed a page boundary and
- * need to do a nibble.
- */
- lea 4(%rdi), %r10
- and $0xfff, %r10 /* offset into 4K page */
- sub $0x1000, %r10 /* subtract 4K pagesize */
-
- .p2align 4
-LABEL(loop_ashr_4):
- add $16, %r10
- jg LABEL(nibble_ashr_4)
-
-LABEL(gobble_ashr_4):
- movdqa (%rsi, %rcx), %xmm1
- movdqa (%rdi, %rcx), %xmm2
- movdqa %xmm2, %xmm4
-
-#ifndef USE_SSSE3
- psrldq $4, %xmm3
- pslldq $12, %xmm2
- por %xmm3, %xmm2 /* merge into one 16byte value */
-#else
- palignr $4, %xmm3, %xmm2 /* merge into one 16byte value */
-#endif
-
- pcmpeqb %xmm1, %xmm0
- pcmpeqb %xmm2, %xmm1
- psubb %xmm0, %xmm1
- pmovmskb %xmm1, %edx
- sub $0xffff, %edx
- jnz LABEL(exit)
+ cmp %dl, %al # check if same character
+ jne .exit
+ test %al, %al # check if character a NUL
+ jz .exit
-#ifdef USE_AS_STRNCMP
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
+ inc %ecx
+
+ cmp %ecx, %r8d
+ ja LABEL(64nibble)
- add $16, %rcx
- movdqa %xmm4, %xmm3
+ .p2align 4
- add $16, %r10
- jg LABEL(nibble_ashr_4) /* cross page boundary */
-
- movdqa (%rsi, %rcx), %xmm1
- movdqa (%rdi, %rcx), %xmm2
- movdqa %xmm2, %xmm4
-
-#ifndef USE_SSSE3
- psrldq $4, %xmm3
- pslldq $12, %xmm2
- por %xmm3, %xmm2 /* merge into one 16byte value */
-#else
- palignr $4, %xmm3, %xmm2 /* merge into one 16byte value */
-#endif
-
- pcmpeqb %xmm1, %xmm0
- pcmpeqb %xmm2, %xmm1
- psubb %xmm0, %xmm1
- pmovmskb %xmm1, %edx
- sub $0xffff, %edx
- jnz LABEL(exit)
+LABEL(64gobble):
+ mov (%rsi, %rcx), %rax
+ mov (%rdi, %rcx), %rdx
#ifdef USE_AS_STRNCMP
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
+ sub $8, %r14
+ jl LABEL(tail)
#endif
- add $16, %rcx
- movdqa %xmm4, %xmm3
- jmp LABEL(loop_ashr_4)
-
- .p2align 4
-LABEL(nibble_ashr_4):
- pcmpeqb %xmm3, %xmm0 /* check nibble for null char */
- pmovmskb %xmm0, %edx
- test $0xfff0, %edx
- jnz LABEL(ashr_4_exittail)
+ mov %rbx, %r8
+ add %rax, %r8
+ sbb %r10, %r10
-#ifdef USE_AS_STRNCMP
- cmp $11, %r11
- jbe LABEL(ashr_4_exittail)
-#endif
+ mov %rbx, %r9
+ add %rdx, %r9
+ sbb %r11, %r11
- pxor %xmm0, %xmm0
- sub $0x1000, %r10
- jmp LABEL(gobble_ashr_4)
-
- .p2align 4
-LABEL(ashr_4_exittail):
- movdqa (%rsi, %rcx), %xmm1
- psrldq $4, %xmm0
- psrldq $4, %xmm3
- jmp LABEL(aftertail)
-
-/*
- * The following cases will be handled by ashr_5
- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
- * n(11~15) n - 11 4(15 +(n-11) - n) ashr_5
- */
- .p2align 4
-LABEL(ashr_5):
- pxor %xmm0, %xmm0
- movdqa (%rdi), %xmm2
- movdqa (%rsi), %xmm1
- pcmpeqb %xmm1, %xmm0
- pslldq $11, %xmm2
- pcmpeqb %xmm1, %xmm2
- psubb %xmm0, %xmm2
- pmovmskb %xmm2, %r9d
- shr %cl, %edx
- shr %cl, %r9d
- sub %r9d, %edx
- jnz LABEL(less32bytes)
- movdqa (%rdi), %xmm3
-
- UPDATE_STRNCMP_COUNTER
-
- pxor %xmm0, %xmm0
- mov $16, %rcx /* index for loads */
- mov $5, %r9d /* byte position left over from less32bytes case */
- /*
- * Setup %r10 value allows us to detect crossing a page boundary.
- * When %r10 goes positive we have crossed a page boundary and
- * need to do a nibble.
- */
- lea 5(%rdi), %r10
- and $0xfff, %r10 /* offset into 4K page */
- sub $0x1000, %r10 /* subtract 4K pagesize */
-
- .p2align 4
-LABEL(loop_ashr_5):
- add $16, %r10
- jg LABEL(nibble_ashr_5)
-
-LABEL(gobble_ashr_5):
- movdqa (%rsi, %rcx), %xmm1
- movdqa (%rdi, %rcx), %xmm2
- movdqa %xmm2, %xmm4
-
-#ifndef USE_SSSE3
- psrldq $5, %xmm3
- pslldq $11, %xmm2
- por %xmm3, %xmm2 /* merge into one 16byte value */
-#else
- palignr $5, %xmm3, %xmm2 /* merge into one 16byte value */
-#endif
-
- pcmpeqb %xmm1, %xmm0
- pcmpeqb %xmm2, %xmm1
- psubb %xmm0, %xmm1
- pmovmskb %xmm1, %edx
- sub $0xffff, %edx
- jnz LABEL(exit)
+ xor %rax, %r8
+ or %rbx, %r8
+ sub %r10, %r8
+ jnz LABEL(tail)
-#ifdef USE_AS_STRNCMP
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
+ xor %rdx, %r9
+ or %rbx, %r9
+ sub %r11, %r9
+ jnz LABEL(tail)
- add $16, %rcx
- movdqa %xmm4, %xmm3
+ cmp %rdx, %rax
+ jne LABEL(tail)
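+ # quadwords differ: let the tail locate the first mismatching
+ # byte and form the return value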
- add $16, %r10
- jg LABEL(nibble_ashr_5) /* cross page boundary */
-
- movdqa (%rsi, %rcx), %xmm1
- movdqa (%rdi, %rcx), %xmm2
- movdqa %xmm2, %xmm4
-
-#ifndef USE_SSSE3
- psrldq $5, %xmm3
- pslldq $11, %xmm2
- por %xmm3, %xmm2 /* merge into one 16byte value */
-#else
- palignr $5, %xmm3, %xmm2 /* merge into one 16byte value */
-#endif
-
- pcmpeqb %xmm1, %xmm0
- pcmpeqb %xmm2, %xmm1
- psubb %xmm0, %xmm1
- pmovmskb %xmm1, %edx
- sub $0xffff, %edx
- jnz LABEL(exit)
+ mov 8 (%rsi, %rcx), %rax
+ mov 8 (%rdi, %rcx), %rdx
+ add $8, %ecx
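+ # from here the gobble loop is unrolled: the same load/NUL-test/
+ # compare sequence repeats for each further 8-byte chunk before
+ # jumping back to LABEL(64loop)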
#ifdef USE_AS_STRNCMP
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
+ sub $8, %r14
+ jl LABEL(tail)
#endif
- add $16, %rcx
- movdqa %xmm4, %xmm3
- jmp LABEL(loop_ashr_5)
-
- .p2align 4
-LABEL(nibble_ashr_5):
- pcmpeqb %xmm3, %xmm0 /* check nibble for null char */
- pmovmskb %xmm0, %edx
- test $0xffe0, %edx
- jnz LABEL(ashr_5_exittail)
+ mov %rbx, %r8
+ add %rax, %r8
+ sbb %r10, %r10
-#ifdef USE_AS_STRNCMP
- cmp $10, %r11
- jbe LABEL(ashr_5_exittail)
-#endif
+ mov %rbx, %r9
+ add %rdx, %r9
+ sbb %r11, %r11
- pxor %xmm0, %xmm0
- sub $0x1000, %r10
- jmp LABEL(gobble_ashr_5)
-
- .p2align 4
-LABEL(ashr_5_exittail):
- movdqa (%rsi, %rcx), %xmm1
- psrldq $5, %xmm0
- psrldq $5, %xmm3
- jmp LABEL(aftertail)
-
-/*
- * The following cases will be handled by ashr_6
- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
- * n(10~15) n - 10 5(15 +(n-10) - n) ashr_6
- */
- .p2align 4
-LABEL(ashr_6):
- pxor %xmm0, %xmm0
- movdqa (%rdi), %xmm2
- movdqa (%rsi), %xmm1
- pcmpeqb %xmm1, %xmm0
- pslldq $10, %xmm2
- pcmpeqb %xmm1, %xmm2
- psubb %xmm0, %xmm2
- pmovmskb %xmm2, %r9d
- shr %cl, %edx
- shr %cl, %r9d
- sub %r9d, %edx
- jnz LABEL(less32bytes)
- movdqa (%rdi), %xmm3
-
- UPDATE_STRNCMP_COUNTER
-
- pxor %xmm0, %xmm0
- mov $16, %rcx /* index for loads */
- mov $6, %r9d /* byte position left over from less32bytes case */
- /*
- * Setup %r10 value allows us to detect crossing a page boundary.
- * When %r10 goes positive we have crossed a page boundary and
- * need to do a nibble.
- */
- lea 6(%rdi), %r10
- and $0xfff, %r10 /* offset into 4K page */
- sub $0x1000, %r10 /* subtract 4K pagesize */
-
- .p2align 4
-LABEL(loop_ashr_6):
- add $16, %r10
- jg LABEL(nibble_ashr_6)
-
-LABEL(gobble_ashr_6):
- movdqa (%rsi, %rcx), %xmm1
- movdqa (%rdi, %rcx), %xmm2
- movdqa %xmm2, %xmm4
-
-#ifndef USE_SSSE3
- psrldq $6, %xmm3
- pslldq $10, %xmm2
- por %xmm3, %xmm2 /* merge into one 16byte value */
-#else
- palignr $6, %xmm3, %xmm2 /* merge into one 16byte value */
-#endif
-
- pcmpeqb %xmm1, %xmm0
- pcmpeqb %xmm2, %xmm1
- psubb %xmm0, %xmm1
- pmovmskb %xmm1, %edx
- sub $0xffff, %edx
- jnz LABEL(exit)
+ xor %rax, %r8
+ or %rbx, %r8
+ sub %r10, %r8
+ jnz LABEL(tail)
-#ifdef USE_AS_STRNCMP
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
+ xor %rdx, %r9
+ or %rbx, %r9
+ sub %r11, %r9
+ jnz LABEL(tail)
- add $16, %rcx
- movdqa %xmm4, %xmm3
+ cmp %rdx, %rax
+ jne LABEL(tail)
- add $16, %r10
- jg LABEL(nibble_ashr_6) /* cross page boundary */
-
- movdqa (%rsi, %rcx), %xmm1
- movdqa (%rdi, %rcx), %xmm2
- movdqa %xmm2, %xmm4
-
-#ifndef USE_SSSE3
- psrldq $6, %xmm3
- pslldq $10, %xmm2
- por %xmm3, %xmm2 /* merge into one 16byte value */
-#else
- palignr $6, %xmm3, %xmm2 /* merge into one 16byte value */
-#endif
-
- pcmpeqb %xmm1, %xmm0
- pcmpeqb %xmm2, %xmm1
- psubb %xmm0, %xmm1
- pmovmskb %xmm1, %edx
- sub $0xffff, %edx
- jnz LABEL(exit)
+ mov 8 (%rsi, %rcx), %rax
+ mov 8 (%rdi, %rcx), %rdx
+ add $8, %ecx
#ifdef USE_AS_STRNCMP
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
+ sub $8, %r14
+ jl LABEL(tail)
#endif
- add $16, %rcx
- movdqa %xmm4, %xmm3
- jmp LABEL(loop_ashr_6)
-
- .p2align 4
-LABEL(nibble_ashr_6):
- pcmpeqb %xmm3, %xmm0 /* check nibble for null char */
- pmovmskb %xmm0, %edx
- test $0xffc0, %edx
- jnz LABEL(ashr_6_exittail)
+ mov %rbx, %r8
+ add %rax, %r8
+ sbb %r10, %r10
-#ifdef USE_AS_STRNCMP
- cmp $9, %r11
- jbe LABEL(ashr_6_exittail)
-#endif
+ mov %rbx, %r9
+ add %rdx, %r9
+ sbb %r11, %r11
- pxor %xmm0, %xmm0
- sub $0x1000, %r10
- jmp LABEL(gobble_ashr_6)
-
- .p2align 4
-LABEL(ashr_6_exittail):
- movdqa (%rsi, %rcx), %xmm1
- psrldq $6, %xmm0
- psrldq $6, %xmm3
- jmp LABEL(aftertail)
-
-/*
- * The following cases will be handled by ashr_7
- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
- * n(9~15) n - 9 6(15 +(n - 9) - n) ashr_7
- */
- .p2align 4
-LABEL(ashr_7):
- pxor %xmm0, %xmm0
- movdqa (%rdi), %xmm2
- movdqa (%rsi), %xmm1
- pcmpeqb %xmm1, %xmm0
- pslldq $9, %xmm2
- pcmpeqb %xmm1, %xmm2
- psubb %xmm0, %xmm2
- pmovmskb %xmm2, %r9d
- shr %cl, %edx
- shr %cl, %r9d
- sub %r9d, %edx
- jnz LABEL(less32bytes)
- movdqa (%rdi), %xmm3
-
- UPDATE_STRNCMP_COUNTER
-
- pxor %xmm0, %xmm0
- mov $16, %rcx /* index for loads */
- mov $7, %r9d /* byte position left over from less32bytes case */
- /*
- * Setup %r10 value allows us to detect crossing a page boundary.
- * When %r10 goes positive we have crossed a page boundary and
- * need to do a nibble.
- */
- lea 7(%rdi), %r10
- and $0xfff, %r10 /* offset into 4K page */
- sub $0x1000, %r10 /* subtract 4K pagesize */
-
- .p2align 4
-LABEL(loop_ashr_7):
- add $16, %r10
- jg LABEL(nibble_ashr_7)
-
-LABEL(gobble_ashr_7):
- movdqa (%rsi, %rcx), %xmm1
- movdqa (%rdi, %rcx), %xmm2
- movdqa %xmm2, %xmm4
-
-#ifndef USE_SSSE3
- psrldq $7, %xmm3
- pslldq $9, %xmm2
- por %xmm3, %xmm2 /* merge into one 16byte value */
-#else
- palignr $7, %xmm3, %xmm2 /* merge into one 16byte value */
-#endif
-
- pcmpeqb %xmm1, %xmm0
- pcmpeqb %xmm2, %xmm1
- psubb %xmm0, %xmm1
- pmovmskb %xmm1, %edx
- sub $0xffff, %edx
- jnz LABEL(exit)
+ xor %rax, %r8
+ or %rbx, %r8
+ sub %r10, %r8
+ jnz LABEL(tail)
-#ifdef USE_AS_STRNCMP
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
+ xor %rdx, %r9
+ or %rbx, %r9
+ sub %r11, %r9
+ jnz LABEL(tail)
- add $16, %rcx
- movdqa %xmm4, %xmm3
+ cmp %rdx, %rax
+ jne LABEL(tail)
- add $16, %r10
- jg LABEL(nibble_ashr_7) /* cross page boundary */
-
- movdqa (%rsi, %rcx), %xmm1
- movdqa (%rdi, %rcx), %xmm2
- movdqa %xmm2, %xmm4
-
-#ifndef USE_SSSE3
- psrldq $7, %xmm3
- pslldq $9, %xmm2
- por %xmm3, %xmm2 /* merge into one 16byte value */
-#else
- palignr $7, %xmm3, %xmm2 /* merge into one 16byte value */
-#endif
-
- pcmpeqb %xmm1, %xmm0
- pcmpeqb %xmm2, %xmm1
- psubb %xmm0, %xmm1
- pmovmskb %xmm1, %edx
- sub $0xffff, %edx
- jnz LABEL(exit)
+ mov 8 (%rsi, %rcx), %rax
+ mov 8 (%rdi, %rcx), %rdx
+ add $8, %ecx
#ifdef USE_AS_STRNCMP
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
+ sub $8, %r14
+ jl LABEL(tail)
#endif
- add $16, %rcx
- movdqa %xmm4, %xmm3
- jmp LABEL(loop_ashr_7)
-
- .p2align 4
-LABEL(nibble_ashr_7):
- pcmpeqb %xmm3, %xmm0 /* check nibble for null char */
- pmovmskb %xmm0, %edx
- test $0xff80, %edx
- jnz LABEL(ashr_7_exittail)
+ mov %rbx, %r8
+ add %rax, %r8
+ sbb %r10, %r10
-#ifdef USE_AS_STRNCMP
- cmp $8, %r11
- jbe LABEL(ashr_7_exittail)
-#endif
+ mov %rbx, %r9
+ add %rdx, %r9
+ sbb %r11, %r11
- pxor %xmm0, %xmm0
- sub $0x1000, %r10
- jmp LABEL(gobble_ashr_7)
-
- .p2align 4
-LABEL(ashr_7_exittail):
- movdqa (%rsi, %rcx), %xmm1
- psrldq $7, %xmm0
- psrldq $7, %xmm3
- jmp LABEL(aftertail)
-
-/*
- * The following cases will be handled by ashr_8
- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
- * n(8~15) n - 8 7(15 +(n - 8) - n) ashr_8
- */
- .p2align 4
-LABEL(ashr_8):
- pxor %xmm0, %xmm0
- movdqa (%rdi), %xmm2
- movdqa (%rsi), %xmm1
- pcmpeqb %xmm1, %xmm0
- pslldq $8, %xmm2
- pcmpeqb %xmm1, %xmm2
- psubb %xmm0, %xmm2
- pmovmskb %xmm2, %r9d
- shr %cl, %edx
- shr %cl, %r9d
- sub %r9d, %edx
- jnz LABEL(less32bytes)
- movdqa (%rdi), %xmm3
-
- UPDATE_STRNCMP_COUNTER
-
- pxor %xmm0, %xmm0
- mov $16, %rcx /* index for loads */
- mov $8, %r9d /* byte position left over from less32bytes case */
- /*
- * Setup %r10 value allows us to detect crossing a page boundary.
- * When %r10 goes positive we have crossed a page boundary and
- * need to do a nibble.
- */
- lea 8(%rdi), %r10
- and $0xfff, %r10 /* offset into 4K page */
- sub $0x1000, %r10 /* subtract 4K pagesize */
-
- .p2align 4
-LABEL(loop_ashr_8):
- add $16, %r10
- jg LABEL(nibble_ashr_8)
-
-LABEL(gobble_ashr_8):
- movdqa (%rsi, %rcx), %xmm1
- movdqa (%rdi, %rcx), %xmm2
- movdqa %xmm2, %xmm4
-
-#ifndef USE_SSSE3
- psrldq $8, %xmm3
- pslldq $8, %xmm2
- por %xmm3, %xmm2 /* merge into one 16byte value */
-#else
- palignr $8, %xmm3, %xmm2 /* merge into one 16byte value */
-#endif
-
- pcmpeqb %xmm1, %xmm0
- pcmpeqb %xmm2, %xmm1
- psubb %xmm0, %xmm1
- pmovmskb %xmm1, %edx
- sub $0xffff, %edx
- jnz LABEL(exit)
+ xor %rax, %r8
+ or %rbx, %r8
+ sub %r10, %r8
+ jnz LABEL(tail)
-#ifdef USE_AS_STRNCMP
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
+ xor %rdx, %r9
+ or %rbx, %r9
+ sub %r11, %r9
+ jnz LABEL(tail)
- add $16, %rcx
- movdqa %xmm4, %xmm3
+ cmp %rdx, %rax
+ jne LABEL(tail)
- add $16, %r10
- jg LABEL(nibble_ashr_8) /* cross page boundary */
-
- movdqa (%rsi, %rcx), %xmm1
- movdqa (%rdi, %rcx), %xmm2
- movdqa %xmm2, %xmm4
-
-#ifndef USE_SSSE3
- psrldq $8, %xmm3
- pslldq $8, %xmm2
- por %xmm3, %xmm2 /* merge into one 16byte value */
-#else
- palignr $8, %xmm3, %xmm2 /* merge into one 16byte value */
-#endif
-
- pcmpeqb %xmm1, %xmm0
- pcmpeqb %xmm2, %xmm1
- psubb %xmm0, %xmm1
- pmovmskb %xmm1, %edx
- sub $0xffff, %edx
- jnz LABEL(exit)
+ mov 8 (%rsi, %rcx), %rax
+ mov 8 (%rdi, %rcx), %rdx
+ add $8, %ecx
#ifdef USE_AS_STRNCMP
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
+ sub $8, %r14
+ jl LABEL(tail)
#endif
- add $16, %rcx
- movdqa %xmm4, %xmm3
- jmp LABEL(loop_ashr_8)
-
- .p2align 4
-LABEL(nibble_ashr_8):
- pcmpeqb %xmm3, %xmm0 /* check nibble for null char */
- pmovmskb %xmm0, %edx
- test $0xff00, %edx
- jnz LABEL(ashr_8_exittail)
+ mov %rbx, %r8
+ add %rax, %r8
+ sbb %r10, %r10
-#ifdef USE_AS_STRNCMP
- cmp $7, %r11
- jbe LABEL(ashr_8_exittail)
-#endif
+ mov %rbx, %r9
+ add %rdx, %r9
+ sbb %r11, %r11
- pxor %xmm0, %xmm0
- sub $0x1000, %r10
- jmp LABEL(gobble_ashr_8)
-
- .p2align 4
-LABEL(ashr_8_exittail):
- movdqa (%rsi, %rcx), %xmm1
- psrldq $8, %xmm0
- psrldq $8, %xmm3
- jmp LABEL(aftertail)
-
-/*
- * The following cases will be handled by ashr_9
- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
- * n(7~15) n - 7 8(15 +(n - 7) - n) ashr_9
- */
- .p2align 4
-LABEL(ashr_9):
- pxor %xmm0, %xmm0
- movdqa (%rdi), %xmm2
- movdqa (%rsi), %xmm1
- pcmpeqb %xmm1, %xmm0
- pslldq $7, %xmm2
- pcmpeqb %xmm1, %xmm2
- psubb %xmm0, %xmm2
- pmovmskb %xmm2, %r9d
- shr %cl, %edx
- shr %cl, %r9d
- sub %r9d, %edx
- jnz LABEL(less32bytes)
- movdqa (%rdi), %xmm3
-
- UPDATE_STRNCMP_COUNTER
-
- pxor %xmm0, %xmm0
- mov $16, %rcx /* index for loads */
- mov $9, %r9d /* byte position left over from less32bytes case */
- /*
- * Setup %r10 value allows us to detect crossing a page boundary.
- * When %r10 goes positive we have crossed a page boundary and
- * need to do a nibble.
- */
- lea 9(%rdi), %r10
- and $0xfff, %r10 /* offset into 4K page */
- sub $0x1000, %r10 /* subtract 4K pagesize */
-
- .p2align 4
-LABEL(loop_ashr_9):
- add $16, %r10
- jg LABEL(nibble_ashr_9)
-
-LABEL(gobble_ashr_9):
- movdqa (%rsi, %rcx), %xmm1
- movdqa (%rdi, %rcx), %xmm2
- movdqa %xmm2, %xmm4
-
-#ifndef USE_SSSE3
- psrldq $9, %xmm3
- pslldq $7, %xmm2
- por %xmm3, %xmm2 /* merge into one 16byte value */
-#else
- palignr $9, %xmm3, %xmm2 /* merge into one 16byte value */
-#endif
-
- pcmpeqb %xmm1, %xmm0
- pcmpeqb %xmm2, %xmm1
- psubb %xmm0, %xmm1
- pmovmskb %xmm1, %edx
- sub $0xffff, %edx
- jnz LABEL(exit)
+ xor %rax, %r8
+ or %rbx, %r8
+ sub %r10, %r8
+ jnz LABEL(tail)
-#ifdef USE_AS_STRNCMP
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
+ xor %rdx, %r9
+ or %rbx, %r9
+ sub %r11, %r9
+ jnz LABEL(tail)
- add $16, %rcx
- movdqa %xmm4, %xmm3
+ cmp %rdx, %rax
+ jne LABEL(tail)
- add $16, %r10
- jg LABEL(nibble_ashr_9) /* cross page boundary */
-
- movdqa (%rsi, %rcx), %xmm1
- movdqa (%rdi, %rcx), %xmm2
- movdqa %xmm2, %xmm4
-
-#ifndef USE_SSSE3
- psrldq $9, %xmm3
- pslldq $7, %xmm2
- por %xmm3, %xmm2 /* merge into one 16byte value */
-#else
- palignr $9, %xmm3, %xmm2 /* merge into one 16byte value */
-#endif
-
- pcmpeqb %xmm1, %xmm0
- pcmpeqb %xmm2, %xmm1
- psubb %xmm0, %xmm1
- pmovmskb %xmm1, %edx
- sub $0xffff, %edx
- jnz LABEL(exit)
+ mov 8 (%rsi, %rcx), %rax
+ mov 8 (%rdi, %rcx), %rdx
+ add $8, %ecx
#ifdef USE_AS_STRNCMP
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
+ sub $8, %r14
+ jl LABEL(tail)
#endif
- add $16, %rcx
- movdqa %xmm4, %xmm3 /* store for next cycle */
- jmp LABEL(loop_ashr_9)
-
- .p2align 4
-LABEL(nibble_ashr_9):
- pcmpeqb %xmm3, %xmm0 /* check nibble for null char */
- pmovmskb %xmm0, %edx
- test $0xfe00, %edx
- jnz LABEL(ashr_9_exittail)
+ mov %rbx, %r8
+ add %rax, %r8
+ sbb %r10, %r10
-#ifdef USE_AS_STRNCMP
- cmp $6, %r11
- jbe LABEL(ashr_9_exittail)
-#endif
+ mov %rbx, %r9
+ add %rdx, %r9
+ sbb %r11, %r11
- pxor %xmm0, %xmm0
- sub $0x1000, %r10
- jmp LABEL(gobble_ashr_9)
-
- .p2align 4
-LABEL(ashr_9_exittail):
- movdqa (%rsi, %rcx), %xmm1
- psrldq $9, %xmm0
- psrldq $9, %xmm3
- jmp LABEL(aftertail)
-
-/*
- * The following cases will be handled by ashr_10
- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
- * n(6~15) n - 6 9(15 +(n - 6) - n) ashr_10
- */
- .p2align 4
-LABEL(ashr_10):
- pxor %xmm0, %xmm0
- movdqa (%rdi), %xmm2
- movdqa (%rsi), %xmm1
- pcmpeqb %xmm1, %xmm0
- pslldq $6, %xmm2
- pcmpeqb %xmm1, %xmm2
- psubb %xmm0, %xmm2
- pmovmskb %xmm2, %r9d
- shr %cl, %edx
- shr %cl, %r9d
- sub %r9d, %edx
- jnz LABEL(less32bytes)
- movdqa (%rdi), %xmm3
-
- UPDATE_STRNCMP_COUNTER
-
- pxor %xmm0, %xmm0
- mov $16, %rcx /* index for loads */
- mov $10, %r9d /* byte position left over from less32bytes case */
- /*
- * Setup %r10 value allows us to detect crossing a page boundary.
- * When %r10 goes positive we have crossed a page boundary and
- * need to do a nibble.
- */
- lea 10(%rdi), %r10
- and $0xfff, %r10 /* offset into 4K page */
- sub $0x1000, %r10 /* subtract 4K pagesize */
-
- .p2align 4
-LABEL(loop_ashr_10):
- add $16, %r10
- jg LABEL(nibble_ashr_10)
-
-LABEL(gobble_ashr_10):
- movdqa (%rsi, %rcx), %xmm1
- movdqa (%rdi, %rcx), %xmm2
- movdqa %xmm2, %xmm4
-
-#ifndef USE_SSSE3
- psrldq $10, %xmm3
- pslldq $6, %xmm2
- por %xmm3, %xmm2 /* merge into one 16byte value */
-#else
- palignr $10, %xmm3, %xmm2 /* merge into one 16byte value */
-#endif
-
- pcmpeqb %xmm1, %xmm0
- pcmpeqb %xmm2, %xmm1
- psubb %xmm0, %xmm1
- pmovmskb %xmm1, %edx
- sub $0xffff, %edx
- jnz LABEL(exit)
+ xor %rax, %r8
+ or %rbx, %r8
+ sub %r10, %r8
+ jnz LABEL(tail)
-#ifdef USE_AS_STRNCMP
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
+ xor %rdx, %r9
+ or %rbx, %r9
+ sub %r11, %r9
+ jnz LABEL(tail)
- add $16, %rcx
- movdqa %xmm4, %xmm3
+ cmp %rdx, %rax
+ jne LABEL(tail)
- add $16, %r10
- jg LABEL(nibble_ashr_10) /* cross page boundary */
-
- movdqa (%rsi, %rcx), %xmm1
- movdqa (%rdi, %rcx), %xmm2
- movdqa %xmm2, %xmm4
-
-#ifndef USE_SSSE3
- psrldq $10, %xmm3
- pslldq $6, %xmm2
- por %xmm3, %xmm2 /* merge into one 16byte value */
-#else
- palignr $10, %xmm3, %xmm2 /* merge into one 16byte value */
-#endif
-
- pcmpeqb %xmm1, %xmm0
- pcmpeqb %xmm2, %xmm1
- psubb %xmm0, %xmm1
- pmovmskb %xmm1, %edx
- sub $0xffff, %edx
- jnz LABEL(exit)
+ mov 8 (%rsi, %rcx), %rax
+ mov 8 (%rdi, %rcx), %rdx
+ add $8, %ecx
#ifdef USE_AS_STRNCMP
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
+ sub $8, %r14
+ jl LABEL(tail)
#endif
- add $16, %rcx
- movdqa %xmm4, %xmm3
- jmp LABEL(loop_ashr_10)
-
- .p2align 4
-LABEL(nibble_ashr_10):
- pcmpeqb %xmm3, %xmm0 /* check nibble for null char */
- pmovmskb %xmm0, %edx
- test $0xfc00, %edx
- jnz LABEL(ashr_10_exittail)
+ mov %rbx, %r8
+ add %rax, %r8
+ sbb %r10, %r10
-#ifdef USE_AS_STRNCMP
- cmp $5, %r11
- jbe LABEL(ashr_10_exittail)
-#endif
+ mov %rbx, %r9
+ add %rdx, %r9
+ sbb %r11, %r11
- pxor %xmm0, %xmm0
- sub $0x1000, %r10
- jmp LABEL(gobble_ashr_10)
-
- .p2align 4
-LABEL(ashr_10_exittail):
- movdqa (%rsi, %rcx), %xmm1
- psrldq $10, %xmm0
- psrldq $10, %xmm3
- jmp LABEL(aftertail)
-
-/*
- * The following cases will be handled by ashr_11
- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
- * n(5~15) n - 5 10(15 +(n - 5) - n) ashr_11
- */
- .p2align 4
-LABEL(ashr_11):
- pxor %xmm0, %xmm0
- movdqa (%rdi), %xmm2
- movdqa (%rsi), %xmm1
- pcmpeqb %xmm1, %xmm0
- pslldq $5, %xmm2
- pcmpeqb %xmm1, %xmm2
- psubb %xmm0, %xmm2
- pmovmskb %xmm2, %r9d
- shr %cl, %edx
- shr %cl, %r9d
- sub %r9d, %edx
- jnz LABEL(less32bytes)
- movdqa (%rdi), %xmm3
-
- UPDATE_STRNCMP_COUNTER
-
- pxor %xmm0, %xmm0
- mov $16, %rcx /* index for loads */
- mov $11, %r9d /* byte position left over from less32bytes case */
- /*
- * Setup %r10 value allows us to detect crossing a page boundary.
- * When %r10 goes positive we have crossed a page boundary and
- * need to do a nibble.
- */
- lea 11(%rdi), %r10
- and $0xfff, %r10 /* offset into 4K page */
- sub $0x1000, %r10 /* subtract 4K pagesize */
-
- .p2align 4
-LABEL(loop_ashr_11):
- add $16, %r10
- jg LABEL(nibble_ashr_11)
-
-LABEL(gobble_ashr_11):
- movdqa (%rsi, %rcx), %xmm1
- movdqa (%rdi, %rcx), %xmm2
- movdqa %xmm2, %xmm4
-
-#ifndef USE_SSSE3
- psrldq $11, %xmm3
- pslldq $5, %xmm2
- por %xmm3, %xmm2 /* merge into one 16byte value */
-#else
- palignr $11, %xmm3, %xmm2 /* merge into one 16byte value */
-#endif
-
- pcmpeqb %xmm1, %xmm0
- pcmpeqb %xmm2, %xmm1
- psubb %xmm0, %xmm1
- pmovmskb %xmm1, %edx
- sub $0xffff, %edx
- jnz LABEL(exit)
+ xor %rax, %r8
+ or %rbx, %r8
+ sub %r10, %r8
+ jnz LABEL(tail)
-#ifdef USE_AS_STRNCMP
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
+ xor %rdx, %r9
+ or %rbx, %r9
+ sub %r11, %r9
+ jnz LABEL(tail)
- add $16, %rcx
- movdqa %xmm4, %xmm3
+ cmp %rdx, %rax
+ jne LABEL(tail)
- add $16, %r10
- jg LABEL(nibble_ashr_11) /* cross page boundary */
-
- movdqa (%rsi, %rcx), %xmm1
- movdqa (%rdi, %rcx), %xmm2
- movdqa %xmm2, %xmm4
-
-#ifndef USE_SSSE3
- psrldq $11, %xmm3
- pslldq $5, %xmm2
- por %xmm3, %xmm2 /* merge into one 16byte value */
-#else
- palignr $11, %xmm3, %xmm2 /* merge into one 16byte value */
-#endif
-
- pcmpeqb %xmm1, %xmm0
- pcmpeqb %xmm2, %xmm1
- psubb %xmm0, %xmm1
- pmovmskb %xmm1, %edx
- sub $0xffff, %edx
- jnz LABEL(exit)
+ mov 8 (%rsi, %rcx), %rax
+ mov 8 (%rdi, %rcx), %rdx
+ add $8, %ecx
#ifdef USE_AS_STRNCMP
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
+ sub $8, %r14
+ jl LABEL(tail)
#endif
- add $16, %rcx
- movdqa %xmm4, %xmm3
- jmp LABEL(loop_ashr_11)
-
- .p2align 4
-LABEL(nibble_ashr_11):
- pcmpeqb %xmm3, %xmm0 /* check nibble for null char */
- pmovmskb %xmm0, %edx
- test $0xf800, %edx
- jnz LABEL(ashr_11_exittail)
+ mov %rbx, %r8
+ add %rax, %r8
+ sbb %r10, %r10
-#ifdef USE_AS_STRNCMP
- cmp $4, %r11
- jbe LABEL(ashr_11_exittail)
-#endif
+ mov %rbx, %r9
+ add %rdx, %r9
+ sbb %r11, %r11
- pxor %xmm0, %xmm0
- sub $0x1000, %r10
- jmp LABEL(gobble_ashr_11)
-
- .p2align 4
-LABEL(ashr_11_exittail):
- movdqa (%rsi, %rcx), %xmm1
- psrldq $11, %xmm0
- psrldq $11, %xmm3
- jmp LABEL(aftertail)
-
-/*
- * The following cases will be handled by ashr_12
- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
- * n(4~15) n - 4 11(15 +(n - 4) - n) ashr_12
- */
- .p2align 4
-LABEL(ashr_12):
- pxor %xmm0, %xmm0
- movdqa (%rdi), %xmm2
- movdqa (%rsi), %xmm1
- pcmpeqb %xmm1, %xmm0
- pslldq $4, %xmm2
- pcmpeqb %xmm1, %xmm2
- psubb %xmm0, %xmm2
- pmovmskb %xmm2, %r9d
- shr %cl, %edx
- shr %cl, %r9d
- sub %r9d, %edx
- jnz LABEL(less32bytes)
- movdqa (%rdi), %xmm3
-
- UPDATE_STRNCMP_COUNTER
-
- pxor %xmm0, %xmm0
- mov $16, %rcx /* index for loads */
- mov $12, %r9d /* byte position left over from less32bytes case */
- /*
- * Setup %r10 value allows us to detect crossing a page boundary.
- * When %r10 goes positive we have crossed a page boundary and
- * need to do a nibble.
- */
- lea 12(%rdi), %r10
- and $0xfff, %r10 /* offset into 4K page */
- sub $0x1000, %r10 /* subtract 4K pagesize */
-
- .p2align 4
-LABEL(loop_ashr_12):
- add $16, %r10
- jg LABEL(nibble_ashr_12)
-
-LABEL(gobble_ashr_12):
- movdqa (%rsi, %rcx), %xmm1
- movdqa (%rdi, %rcx), %xmm2
- movdqa %xmm2, %xmm4
-
-#ifndef USE_SSSE3
- psrldq $12, %xmm3
- pslldq $4, %xmm2
- por %xmm3, %xmm2 /* merge into one 16byte value */
-#else
- palignr $12, %xmm3, %xmm2 /* merge into one 16byte value */
-#endif
-
- pcmpeqb %xmm1, %xmm0
- pcmpeqb %xmm2, %xmm1
- psubb %xmm0, %xmm1
- pmovmskb %xmm1, %edx
- sub $0xffff, %edx
- jnz LABEL(exit)
+ xor %rax, %r8
+ or %rbx, %r8
+ sub %r10, %r8
+ jnz LABEL(tail)
-#ifdef USE_AS_STRNCMP
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
+ xor %rdx, %r9
+ or %rbx, %r9
+ sub %r11, %r9
+ jnz LABEL(tail)
- add $16, %rcx
- movdqa %xmm4, %xmm3
+ cmp %rdx, %rax
+ jne LABEL(tail)
- add $16, %r10
- jg LABEL(nibble_ashr_12) /* cross page boundary */
-
- movdqa (%rsi, %rcx), %xmm1
- movdqa (%rdi, %rcx), %xmm2
- movdqa %xmm2, %xmm4
-
-#ifndef USE_SSSE3
- psrldq $12, %xmm3
- pslldq $4, %xmm2
- por %xmm3, %xmm2 /* merge into one 16byte value */
-#else
- palignr $12, %xmm3, %xmm2 /* merge into one 16byte value */
-#endif
-
- pcmpeqb %xmm1, %xmm0
- pcmpeqb %xmm2, %xmm1
- psubb %xmm0, %xmm1
- pmovmskb %xmm1, %edx
- sub $0xffff, %edx
- jnz LABEL(exit)
+ add $8, %ecx
-#ifdef USE_AS_STRNCMP
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
+ jmp LABEL(64loop)
+
+LABEL(64after):
- add $16, %rcx
- movdqa %xmm4, %xmm3
- jmp LABEL(loop_ashr_12)
-
- .p2align 4
-LABEL(nibble_ashr_12):
- pcmpeqb %xmm3, %xmm0 /* check nibble for null char */
- pmovmskb %xmm0, %edx
- test $0xf000, %edx
- jnz LABEL(ashr_12_exittail)
+LABEL(tailtry):
+# mov (%rsi, %rcx), %rax
+# mov (%rdi, %rcx), %rdx
+# add $8, %rcx
+LABEL(tail): # byte tail
#ifdef USE_AS_STRNCMP
- cmp $3, %r11
- jbe LABEL(ashr_12_exittail)
+ add $7, %r14
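+ # re-bias the strncmp byte budget: the quadword path subtracted
+ # 8 before branching here, so +7 leaves (remaining - 1); each
+ # further byte below costs one dec, and a count gone negative
+ # makes the exit path report equality via cmovl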
#endif
- pxor %xmm0, %xmm0
- sub $0x1000, %r10
- jmp LABEL(gobble_ashr_12)
-
- .p2align 4
-LABEL(ashr_12_exittail):
- movdqa (%rsi, %rcx), %xmm1
- psrldq $12, %xmm0
- psrldq $12, %xmm3
- jmp LABEL(aftertail)
-
-/*
- * The following cases will be handled by ashr_13
- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
- * n(3~15) n - 3 12(15 +(n - 3) - n) ashr_13
- */
- .p2align 4
-LABEL(ashr_13):
- pxor %xmm0, %xmm0
- movdqa (%rdi), %xmm2
- movdqa (%rsi), %xmm1
- pcmpeqb %xmm1, %xmm0
- pslldq $3, %xmm2
- pcmpeqb %xmm1, %xmm2
- psubb %xmm0, %xmm2
- pmovmskb %xmm2, %r9d
- shr %cl, %edx
- shr %cl, %r9d
- sub %r9d, %edx
- jnz LABEL(less32bytes)
- movdqa (%rdi), %xmm3
-
- UPDATE_STRNCMP_COUNTER
-
- pxor %xmm0, %xmm0
- mov $16, %rcx /* index for loads */
- mov $13, %r9d /* byte position left over from less32bytes case */
- /*
- * Setup %r10 value allows us to detect crossing a page boundary.
- * When %r10 goes positive we have crossed a page boundary and
- * need to do a nibble.
- */
- lea 13(%rdi), %r10
- and $0xfff, %r10 /* offset into 4K page */
- sub $0x1000, %r10 /* subtract 4K pagesize */
-
- .p2align 4
-LABEL(loop_ashr_13):
- add $16, %r10
- jg LABEL(nibble_ashr_13)
-
-LABEL(gobble_ashr_13):
- movdqa (%rsi, %rcx), %xmm1
- movdqa (%rdi, %rcx), %xmm2
- movdqa %xmm2, %xmm4
-
-#ifndef USE_SSSE3
- psrldq $13, %xmm3
- pslldq $3, %xmm2
- por %xmm3, %xmm2 /* merge into one 16byte value */
-#else
- palignr $13, %xmm3, %xmm2 /* merge into one 16byte value */
-#endif
-
- pcmpeqb %xmm1, %xmm0
- pcmpeqb %xmm2, %xmm1
- psubb %xmm0, %xmm1
- pmovmskb %xmm1, %edx
- sub $0xffff, %edx
- jnz LABEL(exit)
+ cmp %dl, %al # check if same character
+ jne .exit
+ test %al, %al # check if the character is a NUL
+ jz .exit
+
+ shr $8, %rax
+ shr $8, %rdx
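+ # expose the next byte of each quadword in %al/%dl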
#ifdef USE_AS_STRNCMP
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
+ dec %r14
+ jl .exit
#endif
- add $16, %rcx
- movdqa %xmm4, %xmm3
+ cmp %dl, %al
+ jne .exit
+ test %al, %al
+ jz .exit
- add $16, %r10
- jg LABEL(nibble_ashr_13) /* cross page boundary */
-
- movdqa (%rsi, %rcx), %xmm1
- movdqa (%rdi, %rcx), %xmm2
- movdqa %xmm2, %xmm4
-
-#ifndef USE_SSSE3
- psrldq $13, %xmm3
- pslldq $3, %xmm2
- por %xmm3, %xmm2 /* merge into one 16byte value */
-#else
- palignr $13, %xmm3, %xmm2 /* merge into one 16byte value */
-#endif
-
- pcmpeqb %xmm1, %xmm0
- pcmpeqb %xmm2, %xmm1
- psubb %xmm0, %xmm1
- pmovmskb %xmm1, %edx
- sub $0xffff, %edx
- jnz LABEL(exit)
+ shr $8, %rax
+ shr $8, %rdx
#ifdef USE_AS_STRNCMP
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
+ dec %r14
+ jl .exit
#endif
- add $16, %rcx
- movdqa %xmm4, %xmm3
- jmp LABEL(loop_ashr_13)
-
- .p2align 4
-LABEL(nibble_ashr_13):
- pcmpeqb %xmm3, %xmm0 /* check nibble for null char */
- pmovmskb %xmm0, %edx
- test $0xe000, %edx
- jnz LABEL(ashr_13_exittail)
+ cmp %dl, %al
+ jne .exit
+ test %al, %al
+ jz .exit
+
+ shr $8, %rax
+ shr $8, %rdx
#ifdef USE_AS_STRNCMP
- cmp $2, %r11
- jbe LABEL(ashr_13_exittail)
+ dec %r14
+ jl .exit
#endif
- pxor %xmm0, %xmm0
- sub $0x1000, %r10
- jmp LABEL(gobble_ashr_13)
-
- .p2align 4
-LABEL(ashr_13_exittail):
- movdqa (%rsi, %rcx), %xmm1
- psrldq $13, %xmm0
- psrldq $13, %xmm3
- jmp LABEL(aftertail)
-
-/*
- * The following cases will be handled by ashr_14
- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
- * n(2~15) n - 2 13(15 +(n - 2) - n) ashr_14
- */
- .p2align 4
-LABEL(ashr_14):
- pxor %xmm0, %xmm0
- movdqa (%rdi), %xmm2
- movdqa (%rsi), %xmm1
- pcmpeqb %xmm1, %xmm0
- pslldq $2, %xmm2
- pcmpeqb %xmm1, %xmm2
- psubb %xmm0, %xmm2
- pmovmskb %xmm2, %r9d
- shr %cl, %edx
- shr %cl, %r9d
- sub %r9d, %edx
- jnz LABEL(less32bytes)
- movdqa (%rdi), %xmm3
-
- UPDATE_STRNCMP_COUNTER
-
- pxor %xmm0, %xmm0
- mov $16, %rcx /* index for loads */
- mov $14, %r9d /* byte position left over from less32bytes case */
- /*
- * Setup %r10 value allows us to detect crossing a page boundary.
- * When %r10 goes positive we have crossed a page boundary and
- * need to do a nibble.
- */
- lea 14(%rdi), %r10
- and $0xfff, %r10 /* offset into 4K page */
- sub $0x1000, %r10 /* subtract 4K pagesize */
-
- .p2align 4
-LABEL(loop_ashr_14):
- add $16, %r10
- jg LABEL(nibble_ashr_14)
-
-LABEL(gobble_ashr_14):
- movdqa (%rsi, %rcx), %xmm1
- movdqa (%rdi, %rcx), %xmm2
- movdqa %xmm2, %xmm4
-
-#ifndef USE_SSSE3
- psrldq $14, %xmm3
- pslldq $2, %xmm2
- por %xmm3, %xmm2 /* merge into one 16byte value */
-#else
- palignr $14, %xmm3, %xmm2 /* merge into one 16byte value */
-#endif
-
- pcmpeqb %xmm1, %xmm0
- pcmpeqb %xmm2, %xmm1
- psubb %xmm0, %xmm1
- pmovmskb %xmm1, %edx
- sub $0xffff, %edx
- jnz LABEL(exit)
+ cmp %dl, %al
+ jne .exit
+ test %al, %al
+ jz .exit
+
+ shr $8, %rax
+ shr $8, %rdx
#ifdef USE_AS_STRNCMP
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
+ dec %r14
+ jl .exit
#endif
- add $16, %rcx
- movdqa %xmm4, %xmm3
+ cmp %dl, %al
+ jne .exit
+ test %al, %al
+ jz .exit
- add $16, %r10
- jg LABEL(nibble_ashr_14) /* cross page boundary */
-
- movdqa (%rsi, %rcx), %xmm1
- movdqa (%rdi, %rcx), %xmm2
- movdqa %xmm2, %xmm4
-
-#ifndef USE_SSSE3
- psrldq $14, %xmm3
- pslldq $2, %xmm2
- por %xmm3, %xmm2 /* merge into one 16byte value */
-#else
- palignr $14, %xmm3, %xmm2 /* merge into one 16byte value */
-#endif
-
- pcmpeqb %xmm1, %xmm0
- pcmpeqb %xmm2, %xmm1
- psubb %xmm0, %xmm1
- pmovmskb %xmm1, %edx
- sub $0xffff, %edx
- jnz LABEL(exit)
+ shr $8, %eax
+ shr $8, %edx
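+ # after four byte shifts only the low 32 bits can be non-zero,
+ # so the shorter 32-bit shifts suffice from here on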
#ifdef USE_AS_STRNCMP
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
+ dec %r14
+ jl .exit
#endif
- add $16, %rcx
- movdqa %xmm4, %xmm3
- jmp LABEL(loop_ashr_14)
-
- .p2align 4
-LABEL(nibble_ashr_14):
- pcmpeqb %xmm3, %xmm0 /* check nibble for null char */
- pmovmskb %xmm0, %edx
- test $0xc000, %edx
- jnz LABEL(ashr_14_exittail)
+ cmp %dl, %al
+ jne .exit
+ test %al, %al
+ jz .exit
+
+ shr $8, %eax
+ shr $8, %edx
#ifdef USE_AS_STRNCMP
- cmp $1, %r11
- jbe LABEL(ashr_14_exittail)
+ dec %r14
+ jl .exit
#endif
- pxor %xmm0, %xmm0
- sub $0x1000, %r10
- jmp LABEL(gobble_ashr_14)
-
- .p2align 4
-LABEL(ashr_14_exittail):
- movdqa (%rsi, %rcx), %xmm1
- psrldq $14, %xmm0
- psrldq $14, %xmm3
- jmp LABEL(aftertail)
-
-/*
- * The following cases will be handled by ashr_15
- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
- * n(1~15) n - 1 14(15 +(n - 1) - n) ashr_15
- */
- .p2align 4
-LABEL(ashr_15):
- pxor %xmm0, %xmm0
- movdqa (%rdi), %xmm2
- movdqa (%rsi), %xmm1
- pcmpeqb %xmm1, %xmm0
- pslldq $1, %xmm2
- pcmpeqb %xmm1, %xmm2
- psubb %xmm0, %xmm2
- pmovmskb %xmm2, %r9d
- shr %cl, %edx
- shr %cl, %r9d
- sub %r9d, %edx
- jnz LABEL(less32bytes)
-
- movdqa (%rdi), %xmm3
-
- UPDATE_STRNCMP_COUNTER
-
- pxor %xmm0, %xmm0
- mov $16, %rcx /* index for loads */
- mov $15, %r9d /* byte position left over from less32bytes case */
- /*
- * Setup %r10 value allows us to detect crossing a page boundary.
- * When %r10 goes positive we have crossed a page boundary and
- * need to do a nibble.
- */
- lea 15(%rdi), %r10
- and $0xfff, %r10 /* offset into 4K page */
-
- sub $0x1000, %r10 /* subtract 4K pagesize */
-
- .p2align 4
-LABEL(loop_ashr_15):
- add $16, %r10
- jg LABEL(nibble_ashr_15)
-
-LABEL(gobble_ashr_15):
- movdqa (%rsi, %rcx), %xmm1
- movdqa (%rdi, %rcx), %xmm2
- movdqa %xmm2, %xmm4
-
-#ifndef USE_SSSE3
- psrldq $15, %xmm3
- pslldq $1, %xmm2
- por %xmm3, %xmm2 /* merge into one 16byte value */
-#else
- palignr $15, %xmm3, %xmm2 /* merge into one 16byte value */
-#endif
-
- pcmpeqb %xmm1, %xmm0
- pcmpeqb %xmm2, %xmm1
- psubb %xmm0, %xmm1
- pmovmskb %xmm1, %edx
- sub $0xffff, %edx
- jnz LABEL(exit)
+ cmp %dl, %al
+ jne .exit
+ test %al, %al
+ jz .exit
+
+ shr $8, %eax
+ shr $8, %edx
#ifdef USE_AS_STRNCMP
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
+ dec %r14
+ jl .exit
#endif
- add $16, %rcx
- movdqa %xmm4, %xmm3
+ cmp %dl, %al
+ jne .exit
+# test %al, %al
+# jz .exit
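+ # (the NUL test seems deliberately elided on this final byte:
+ # the tail is entered once this quadword is known to hold a
+ # mismatch or a NUL, so the subtraction below is already right)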
- add $16, %r10
- jg LABEL(nibble_ashr_15) /* cross page boundary */
-
- movdqa (%rsi, %rcx), %xmm1
- movdqa (%rdi, %rcx), %xmm2
- movdqa %xmm2, %xmm4
-
-#ifndef USE_SSSE3
- psrldq $15, %xmm3
- pslldq $1, %xmm2
- por %xmm3, %xmm2 /* merge into one 16byte value */
-#else
- palignr $15, %xmm3, %xmm2 /* merge into one 16byte value */
-#endif
-
- pcmpeqb %xmm1, %xmm0
- pcmpeqb %xmm2, %xmm1
- psubb %xmm0, %xmm1
- pmovmskb %xmm1, %edx
- sub $0xffff, %edx
- jnz LABEL(exit)
+ .p2align 4,, 15
-#ifdef USE_AS_STRNCMP
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
+LABEL(tailafter):
- add $16, %rcx
- movdqa %xmm4, %xmm3
- jmp LABEL(loop_ashr_15)
-
- .p2align 4
-LABEL(nibble_ashr_15):
- pcmpeqb %xmm3, %xmm0 /* check nibble for null char */
- pmovmskb %xmm0, %edx
- test $0x8000, %edx
- jnz LABEL(ashr_15_exittail)
+.exit:
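+ # restore the callee-saved registers, presumably spilled to the
+ # red zone by the prologue outside this hunk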
+ mov -32 (%rsp), %r15
+ mov -24 (%rsp), %rbp
+ mov -16 (%rsp), %rbx
+ .p2align 4,, 3
+
+LABEL(exitafter):
#ifdef USE_AS_STRNCMP
- test %r11, %r11
- je LABEL(ashr_15_exittail)
+ test %r14, %r14
+ cmovl %edx, %eax
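+ # length budget exhausted (%r14 < 0): copy %edx over %eax so the
+ # subtraction below yields 0, i.e. equal within the first n bytes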
#endif
- pxor %xmm0, %xmm0
- sub $0x1000, %r10
- jmp LABEL(gobble_ashr_15)
-
- .p2align 4
-LABEL(ashr_15_exittail):
- movdqa (%rsi, %rcx), %xmm1
- psrldq $15, %xmm3
- psrldq $15, %xmm0
-
- .p2align 4
-LABEL(aftertail):
- pcmpeqb %xmm3, %xmm1
- psubb %xmm0, %xmm1
- pmovmskb %xmm1, %edx
- not %edx
-
- .p2align 4
-LABEL(exit):
- lea -16(%r9, %rcx), %rax /* locate the exact offset for rdi */
-LABEL(less32bytes):
- lea (%rdi, %rax), %rdi /* locate the exact address for first operand(rdi) */
- lea (%rsi, %rcx), %rsi /* locate the exact address for second operand(rsi) */
- test %r8d, %r8d
- jz LABEL(ret)
- xchg %rsi, %rdi /* recover original order according to flag(%r8d) */
-
- .p2align 4
-LABEL(ret):
-LABEL(less16bytes):
- bsf %rdx, %rdx /* find and store bit index in %rdx */
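+ # form the return value: zero-extend the current byte of each
+ # string and compute *%rdi - *%rsi, moved into %eax by the xchg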
+ movzx %al, %eax
+ movzx %dl, %edx
+ sub %eax, %edx
+ xchg %edx, %eax
#ifdef USE_AS_STRNCMP
- sub %rdx, %r11
- jbe LABEL(strcmp_exitz)
+LABEL(exitz):
+ mov -8 (%rsp), %r14
#endif
- movzbl (%rsi, %rdx), %ecx
- movzbl (%rdi, %rdx), %eax
-
- sub %ecx, %eax
- ret
+ ret
-LABEL(strcmp_exitz):
- xor %eax, %eax
- ret
-
- .p2align 4
-LABEL(Byte0):
- movzx (%rsi), %ecx
- movzx (%rdi), %eax
-
- sub %ecx, %eax
- ret
-END (BP_SYM (STRCMP))
-
- .section .rodata,"a",@progbits
- .p2align 3
-LABEL(unaligned_table):
- .int LABEL(ashr_1) - LABEL(unaligned_table)
- .int LABEL(ashr_2) - LABEL(unaligned_table)
- .int LABEL(ashr_3) - LABEL(unaligned_table)
- .int LABEL(ashr_4) - LABEL(unaligned_table)
- .int LABEL(ashr_5) - LABEL(unaligned_table)
- .int LABEL(ashr_6) - LABEL(unaligned_table)
- .int LABEL(ashr_7) - LABEL(unaligned_table)
- .int LABEL(ashr_8) - LABEL(unaligned_table)
- .int LABEL(ashr_9) - LABEL(unaligned_table)
- .int LABEL(ashr_10) - LABEL(unaligned_table)
- .int LABEL(ashr_11) - LABEL(unaligned_table)
- .int LABEL(ashr_12) - LABEL(unaligned_table)
- .int LABEL(ashr_13) - LABEL(unaligned_table)
- .int LABEL(ashr_14) - LABEL(unaligned_table)
- .int LABEL(ashr_15) - LABEL(unaligned_table)
- .int LABEL(ashr_0) - LABEL(unaligned_table)
-#endif /* NOT_IN_libc */
+END (STRCMP)
libc_hidden_builtin_def (STRCMP)
Index: sysdeps/x86_64/memcpy.S
===================================================================
--- sysdeps/x86_64/memcpy.S.orig
+++ sysdeps/x86_64/memcpy.S
@@ -39,7 +39,7 @@
.text
-#if defined PIC && !defined NOT_IN_libc
+#if defined PIC && !defined NOT_IN_libc && !defined USE_AS_BCOPY
ENTRY (__memcpy_chk)
cmpq %rdx, %rcx