forked from pool/glibc
Andreas Schwab
a41899225a
- math-c++-compat.patch: Add more C++ compatibility (BZ #22296)
- malloc-tcache-leak.patch: Fix tcache leak after thread destruction (BZ #22111)
- falkor-memcpy-memmove.patch: Optimized implementation of memcpy/memmove for Qualcomm Falkor
- aarch64-cpu-features.patch: Fix glibc.tune.cpu tunable handling
- nss-files-large-buffers.patch: Avoid large buffers with many host addresses (BZ #22078)
- sysconf-uio-maxiov.patch: Fix missing definition of UIO_MAXIOV (BZ #22321)
- glob-tilde-overflow.patch: Fix buffer overflows with GLOB_TILDE (CVE-2017-15670, CVE-2017-15671, CVE-2017-15804, bsc#1064569, bsc#1064580, bsc#1064583, BZ #22320, BZ #22325, BZ #22332)
- dl-runtime-resolve-xsave.patch: Use fxsave/xsave/xsavec in _dl_runtime_resolve (BZ #21265)

OBS-URL: https://build.opensuse.org/request/show/535960
OBS-URL: https://build.opensuse.org/package/show/Base:System/glibc?expand=0&rev=483
574 lines
17 KiB
Diff
2017-10-10 Siddhesh Poyarekar <siddhesh@sourceware.org>

	* sysdeps/aarch64/multiarch/Makefile (sysdep_routines): Add
	memmove_falkor.
	* sysdeps/aarch64/multiarch/ifunc-impl-list.c
	(__libc_ifunc_impl_list): Likewise.
	* sysdeps/aarch64/multiarch/memmove.c: Likewise.
	* sysdeps/aarch64/multiarch/memmove_falkor.S: New file.

	* benchtests/bench-memmove-walk.c: New file.
	* benchtests/Makefile (string-benchset): Add it.

	* benchtests/bench-memset-walk.c: New file.
	* benchtests/Makefile (string-benchset): Add it.

	* benchtests/bench-memcpy-walk.c: New file.
	* benchtests/Makefile (string-benchset): Add it.

2017-10-10 Siddhesh Poyarekar <siddhesh@sourceware.org>

	* manual/tunables.texi (Tunable glibc.tune.cpu): Add falkor.
	* sysdeps/aarch64/multiarch/Makefile (sysdep_routines): Add
	memcpy_falkor.
	* sysdeps/aarch64/multiarch/ifunc-impl-list.c (MAX_IFUNC):
	Bump.
	(__libc_ifunc_impl_list): Add __memcpy_falkor.
	* sysdeps/aarch64/multiarch/memcpy.c: Likewise.
	* sysdeps/aarch64/multiarch/memcpy_falkor.S: New file.
	* sysdeps/unix/sysv/linux/aarch64/cpu-features.c (cpu_list):
	Add falkor.
	* sysdeps/unix/sysv/linux/aarch64/cpu-features.h (IS_FALKOR):
	New macro.
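
(The three new bench-*-walk benchmarks listed above plug into glibc's existing benchtests harness; in a configured build tree they should be runnable through the usual "make bench" target, optionally narrowed with BENCHSET=string-benchset; see benchtests/README for the exact invocation.)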

Index: glibc-2.26/manual/tunables.texi
===================================================================
--- glibc-2.26.orig/manual/tunables.texi
+++ glibc-2.26/manual/tunables.texi
@@ -267,7 +267,7 @@ This tunable is specific to i386 and x86
@deftp Tunable glibc.tune.cpu
The @code{glibc.tune.cpu=xxx} tunable allows the user to tell @theglibc{} to
assume that the CPU is @code{xxx} where xxx may have one of these values:
-@code{generic}, @code{thunderxt88}.
+@code{generic}, @code{falkor}, @code{thunderxt88}.

This tunable is specific to aarch64.
@end deftp
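
(The glibc.tune.cpu tunable documented above can be used to force the new routines explicitly, e.g. by starting a program as GLIBC_TUNABLES=glibc.tune.cpu=falkor ./prog; without it, selection falls back to the MIDR-based CPU detection added in cpu-features.c further down.)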
Index: glibc-2.26/sysdeps/aarch64/multiarch/Makefile
===================================================================
--- glibc-2.26.orig/sysdeps/aarch64/multiarch/Makefile
+++ glibc-2.26/sysdeps/aarch64/multiarch/Makefile
@@ -1,3 +1,4 @@
ifeq ($(subdir),string)
-sysdep_routines += memcpy_generic memcpy_thunderx
+sysdep_routines += memcpy_generic memcpy_thunderx memcpy_falkor \
+ memmove_falkor
endif
Index: glibc-2.26/sysdeps/aarch64/multiarch/ifunc-impl-list.c
===================================================================
--- glibc-2.26.orig/sysdeps/aarch64/multiarch/ifunc-impl-list.c
+++ glibc-2.26/sysdeps/aarch64/multiarch/ifunc-impl-list.c
@@ -25,7 +25,7 @@
#include <stdio.h>

/* Maximum number of IFUNC implementations. */
-#define MAX_IFUNC 2
+#define MAX_IFUNC 3

size_t
__libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
@@ -40,9 +40,11 @@ __libc_ifunc_impl_list (const char *name
/* Support sysdeps/aarch64/multiarch/memcpy.c and memmove.c. */
IFUNC_IMPL (i, name, memcpy,
IFUNC_IMPL_ADD (array, i, memcpy, 1, __memcpy_thunderx)
+ IFUNC_IMPL_ADD (array, i, memcpy, 1, __memcpy_falkor)
IFUNC_IMPL_ADD (array, i, memcpy, 1, __memcpy_generic))
IFUNC_IMPL (i, name, memmove,
IFUNC_IMPL_ADD (array, i, memmove, 1, __memmove_thunderx)
+ IFUNC_IMPL_ADD (array, i, memmove, 1, __memmove_falkor)
IFUNC_IMPL_ADD (array, i, memmove, 1, __memmove_generic))

return i;
Index: glibc-2.26/sysdeps/aarch64/multiarch/memcpy.c
===================================================================
--- glibc-2.26.orig/sysdeps/aarch64/multiarch/memcpy.c
+++ glibc-2.26/sysdeps/aarch64/multiarch/memcpy.c
@@ -30,9 +30,14 @@ extern __typeof (__redirect_memcpy) __li

extern __typeof (__redirect_memcpy) __memcpy_generic attribute_hidden;
extern __typeof (__redirect_memcpy) __memcpy_thunderx attribute_hidden;
+extern __typeof (__redirect_memcpy) __memcpy_falkor attribute_hidden;

libc_ifunc (__libc_memcpy,
- IS_THUNDERX (midr) ? __memcpy_thunderx : __memcpy_generic);
+ (IS_THUNDERX (midr)
+ ? __memcpy_thunderx
+ : (IS_FALKOR (midr)
+ ? __memcpy_falkor
+ : __memcpy_generic)));

# undef memcpy
strong_alias (__libc_memcpy, memcpy);
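
A note on how the selection above takes effect: libc_ifunc installs a resolver that runs when the symbol is first bound, looks at the cached MIDR_EL1 value, and returns the matching implementation. The stand-alone C sketch below mimics that dispatch outside of glibc, under a few stated assumptions: the sysfs path used to read MIDR_EL1, the constructor-based dispatch (instead of a real IFUNC resolver), and the stub copy routines are all illustrative, not glibc internals.

#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef void *(*copy_fn) (void *, const void *, size_t);

/* Stand-ins for __memcpy_generic / __memcpy_falkor.  */
static void *copy_generic (void *d, const void *s, size_t n) { return memcpy (d, s, n); }
static void *copy_falkor  (void *d, const void *s, size_t n) { return memcpy (d, s, n); }

/* Read MIDR_EL1 as exported by the Linux arm64 kernel; 0 means "unknown",
   which keeps the generic routine selected.  */
static uint64_t read_midr (void)
{
  uint64_t midr = 0;
  FILE *f = fopen ("/sys/devices/system/cpu/cpu0/regs/identification/midr_el1", "r");
  if (f != NULL)
    {
      if (fscanf (f, "%" SCNx64, &midr) != 1)
        midr = 0;
      fclose (f);
    }
  return midr;
}

static copy_fn the_copy = copy_generic;

/* Conceptual equivalent of the resolver: pick the Falkor routine only when
   the implementer is 'Q' (Qualcomm) and the part number is 0xc00, which is
   exactly what the IS_FALKOR macro added below tests.  */
__attribute__ ((constructor))
static void pick_copy (void)
{
  uint64_t midr = read_midr ();
  if (((midr >> 24) & 0xff) == 'Q' && ((midr >> 4) & 0xfff) == 0xc00)
    the_copy = copy_falkor;
}

int main (void)
{
  char a[8] = "falkor", b[8];
  the_copy (b, a, sizeof a);   /* dispatches like the memcpy IFUNC above */
  puts (b);
  return 0;
}

On a Falkor core the constructor flips the pointer to copy_falkor; everywhere else, or when the MIDR cannot be read, it stays on the generic routine, mirroring the fallback order of the ternary in memcpy.c.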
Index: glibc-2.26/sysdeps/aarch64/multiarch/memcpy_falkor.S
===================================================================
--- /dev/null
+++ glibc-2.26/sysdeps/aarch64/multiarch/memcpy_falkor.S
@@ -0,0 +1,184 @@
+/* Optimized memcpy for Qualcomm Falkor processor.
+ Copyright (C) 2017 Free Software Foundation, Inc.
+
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+
+/* Assumptions:
+
+ ARMv8-a, AArch64, falkor, unaligned accesses. */
+
+#define dstin x0
+#define src x1
+#define count x2
+#define dst x3
+#define srcend x4
+#define dstend x5
+#define A_l x6
+#define A_lw w6
+#define A_h x7
+#define A_hw w7
+#define tmp1 x14
+
+/* Copies are split into 3 main cases:
+
+ 1. Small copies of up to 32 bytes
+ 2. Medium copies of 33..128 bytes which are fully unrolled
+ 3. Large copies of more than 128 bytes.
+
+ Large copies align the source to a quad word and use an unrolled loop
+ processing 64 bytes per iteration.
+
+ FALKOR-SPECIFIC DESIGN:
+
+ The smallest copies (32 bytes or less) focus on optimal pipeline usage,
+ which is why the redundant copies of 0-3 bytes have been replaced with
+ conditionals, since the former would unnecessarily break across multiple
+ issue groups. The medium copy group has been enlarged to 128 bytes since
+ bumping up the small copies up to 32 bytes allows us to do that without
+ cost and also allows us to reduce the size of the prep code before loop64.
+
+ All copies are done only via two registers r6 and r7. This is to ensure
+ that all loads hit a single hardware prefetcher which can get correctly
+ trained to prefetch a single stream.
+
+ The non-temporal stores help optimize cache utilization. */
+
+#if IS_IN (libc)
+ENTRY_ALIGN (__memcpy_falkor, 6)
+
+ cmp count, 32
+ add srcend, src, count
+ add dstend, dstin, count
+ b.ls L(copy32)
+ ldp A_l, A_h, [src]
+ cmp count, 128
+ stp A_l, A_h, [dstin]
+ b.hi L(copy_long)
+
+ /* Medium copies: 33..128 bytes. */
+ sub tmp1, count, 1
+ ldp A_l, A_h, [src, 16]
+ stp A_l, A_h, [dstin, 16]
+ tbz tmp1, 6, 1f
+ ldp A_l, A_h, [src, 32]
+ stp A_l, A_h, [dstin, 32]
+ ldp A_l, A_h, [src, 48]
+ stp A_l, A_h, [dstin, 48]
+ ldp A_l, A_h, [srcend, -64]
+ stp A_l, A_h, [dstend, -64]
+ ldp A_l, A_h, [srcend, -48]
+ stp A_l, A_h, [dstend, -48]
+1:
+ ldp A_l, A_h, [srcend, -32]
+ stp A_l, A_h, [dstend, -32]
+ ldp A_l, A_h, [srcend, -16]
+ stp A_l, A_h, [dstend, -16]
+ ret
+
+ .p2align 4
+ /* Small copies: 0..32 bytes. */
+L(copy32):
+ /* 16-32 */
+ cmp count, 16
+ b.lo 1f
+ ldp A_l, A_h, [src]
+ stp A_l, A_h, [dstin]
+ ldp A_l, A_h, [srcend, -16]
+ stp A_l, A_h, [dstend, -16]
+ ret
+ .p2align 4
+1:
+ /* 8-15 */
+ tbz count, 3, 1f
+ ldr A_l, [src]
+ str A_l, [dstin]
+ ldr A_l, [srcend, -8]
+ str A_l, [dstend, -8]
+ ret
+ .p2align 4
+1:
+ /* 4-7 */
+ tbz count, 2, 1f
+ ldr A_lw, [src]
+ str A_lw, [dstin]
+ ldr A_lw, [srcend, -4]
+ str A_lw, [dstend, -4]
+ ret
+ .p2align 4
+1:
+ /* 2-3 */
+ tbz count, 1, 1f
+ ldrh A_lw, [src]
+ strh A_lw, [dstin]
+ ldrh A_lw, [srcend, -2]
+ strh A_lw, [dstend, -2]
+ ret
+ .p2align 4
+1:
+ /* 0-1 */
+ tbz count, 0, 1f
+ ldrb A_lw, [src]
+ strb A_lw, [dstin]
+1:
+ ret
+
+ /* Align SRC to 16 bytes and copy; that way at least one of the
+ accesses is aligned throughout the copy sequence.
+
+ The count is off by 0 to 15 bytes, but this is OK because we trim
+ off the last 64 bytes to copy off from the end. Due to this the
+ loop never runs out of bounds. */
+ .p2align 6
+L(copy_long):
+ sub count, count, 64 + 16
+ and tmp1, src, 15
+ bic src, src, 15
+ sub dst, dstin, tmp1
+ add count, count, tmp1
+
+L(loop64):
+ ldp A_l, A_h, [src, 16]!
+ stnp A_l, A_h, [dst, 16]
+ ldp A_l, A_h, [src, 16]!
+ subs count, count, 64
+ stnp A_l, A_h, [dst, 32]
+ ldp A_l, A_h, [src, 16]!
+ stnp A_l, A_h, [dst, 48]
+ ldp A_l, A_h, [src, 16]!
+ stnp A_l, A_h, [dst, 64]
+ add dst, dst, 64
+ b.hi L(loop64)
+
+ /* Write the last full set of 64 bytes. The remainder is at most 64
+ bytes, so it is safe to always copy 64 bytes from the end even if
+ there is just 1 byte left. */
+L(last64):
+ ldp A_l, A_h, [srcend, -64]
+ stnp A_l, A_h, [dstend, -64]
+ ldp A_l, A_h, [srcend, -48]
+ stnp A_l, A_h, [dstend, -48]
+ ldp A_l, A_h, [srcend, -32]
+ stnp A_l, A_h, [dstend, -32]
+ ldp A_l, A_h, [srcend, -16]
+ stnp A_l, A_h, [dstend, -16]
+ ret
+
+END (__memcpy_falkor)
+libc_hidden_builtin_def (__memcpy_falkor)
+#endif
Index: glibc-2.26/sysdeps/aarch64/multiarch/memmove.c
===================================================================
--- glibc-2.26.orig/sysdeps/aarch64/multiarch/memmove.c
+++ glibc-2.26/sysdeps/aarch64/multiarch/memmove.c
@@ -30,9 +30,14 @@ extern __typeof (__redirect_memmove) __l

extern __typeof (__redirect_memmove) __memmove_generic attribute_hidden;
extern __typeof (__redirect_memmove) __memmove_thunderx attribute_hidden;
+extern __typeof (__redirect_memmove) __memmove_falkor attribute_hidden;

libc_ifunc (__libc_memmove,
- IS_THUNDERX (midr) ? __memmove_thunderx : __memmove_generic);
+ (IS_THUNDERX (midr)
+ ? __memmove_thunderx
+ : (IS_FALKOR (midr)
+ ? __memmove_falkor
+ : __memmove_generic)));

# undef memmove
strong_alias (__libc_memmove, memmove);
Index: glibc-2.26/sysdeps/aarch64/multiarch/memmove_falkor.S
===================================================================
--- /dev/null
+++ glibc-2.26/sysdeps/aarch64/multiarch/memmove_falkor.S
@@ -0,0 +1,232 @@
+/* Copyright (C) 2017 Free Software Foundation, Inc.
+
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+
+/* Assumptions: ARMv8-a, AArch64, falkor, unaligned accesses. */
+
+#define dstin x0
+#define src x1
+#define count x2
+#define dstlen x3
+#define dst x3
+#define srcend x4
+#define dstend x5
+#define A_l x6
+#define A_lw w6
+#define A_h x7
+#define A_hw w7
+#define B_l x8
+#define B_lw w8
+#define B_h x9
+#define C_l x10
+#define C_h x11
+#define D_l x12
+#define D_h x13
+#define E_l src
+#define E_h count
+#define F_l srcend
+#define F_h dst
+#define tmp1 x14
+
+/* Alias with A_l and A_h to train the prefetcher. */
+#define Q_l x22
+#define Q_h x23
+
+/* RATIONALE:
+
+ The copy has 4 distinct parts:
+ * Small copies of 16 bytes and under
+ * Medium sized copies of 17-96 bytes
+ * Large copies where the source address is higher than the destination
+ (forward copies)
+ * Large copies where the destination address is higher than the source
+ (copy backward, or move).
+
+ We use only two register pairs x6,x7 and x22,x23 for the copies and copy 32
+ bytes at a time to correctly train the hardware prefetcher for better
+ throughput. */
+ENTRY_ALIGN (__memmove_falkor, 6)
+
+ sub tmp1, dstin, src
+ add srcend, src, count
+ add dstend, dstin, count
+ cmp count, 96
+ ccmp tmp1, count, 2, hi
+ b.lo L(move_long)
+
+ cmp count, 16
+ b.ls L(copy16)
+ cmp count, 96
+ b.hi L(copy_long)
+
+ /* Medium copies: 17..96 bytes. */
+ sub tmp1, count, 1
+ ldp A_l, A_h, [src]
+ tbnz tmp1, 6, L(copy96)
+ ldp D_l, D_h, [srcend, -16]
+ tbz tmp1, 5, 1f
+ ldp B_l, B_h, [src, 16]
+ ldp C_l, C_h, [srcend, -32]
+ stp B_l, B_h, [dstin, 16]
+ stp C_l, C_h, [dstend, -32]
+1:
+ stp A_l, A_h, [dstin]
+ stp D_l, D_h, [dstend, -16]
+ ret
+
+ .p2align 4
+ /* Small copies: 0..16 bytes. */
+L(copy16):
+ cmp count, 8
+ b.lo 1f
+ ldr A_l, [src]
+ ldr A_h, [srcend, -8]
+ str A_l, [dstin]
+ str A_h, [dstend, -8]
+ ret
+ .p2align 4
+1:
+ /* 4-7 */
+ tbz count, 2, 1f
+ ldr A_lw, [src]
+ ldr A_hw, [srcend, -4]
+ str A_lw, [dstin]
+ str A_hw, [dstend, -4]
+ ret
+ .p2align 4
+1:
+ /* 2-3 */
+ tbz count, 1, 1f
+ ldrh A_lw, [src]
+ ldrh A_hw, [srcend, -2]
+ strh A_lw, [dstin]
+ strh A_hw, [dstend, -2]
+ ret
+ .p2align 4
+1:
+ /* 0-1 */
+ tbz count, 0, 1f
+ ldrb A_lw, [src]
+ strb A_lw, [dstin]
+1: ret
+
+ .p2align 4
+ /* Copy 64..96 bytes. Copy 64 bytes from the start and
+ 32 bytes from the end. */
+L(copy96):
+ ldp B_l, B_h, [src, 16]
+ ldp C_l, C_h, [src, 32]
+ ldp D_l, D_h, [src, 48]
+ ldp E_l, E_h, [srcend, -32]
+ ldp F_l, F_h, [srcend, -16]
+ stp A_l, A_h, [dstin]
+ stp B_l, B_h, [dstin, 16]
+ stp C_l, C_h, [dstin, 32]
+ stp D_l, D_h, [dstin, 48]
+ stp E_l, E_h, [dstend, -32]
+ stp F_l, F_h, [dstend, -16]
+ ret
+
+ /* Align SRC to 16 byte alignment so that we don't cross cache line
+ boundaries on both loads and stores. There are at least 96 bytes
+ to copy, so copy 16 bytes unaligned and then align. The loop
+ copies 32 bytes per iteration and prefetches one iteration ahead. */
+
+ .p2align 4
+L(copy_long):
+ sub count, count, 64 + 16 /* Test and readjust count. */
+ mov B_l, Q_l
+ mov B_h, Q_h
+ ldp A_l, A_h, [src]
+ and tmp1, src, 15
+ bic src, src, 15
+ sub dst, dstin, tmp1
+ add count, count, tmp1 /* Count is now 16 too large. */
+ ldp Q_l, Q_h, [src, 16]!
+ stp A_l, A_h, [dstin]
+ ldp A_l, A_h, [src, 16]!
+
+L(loop64):
+ subs count, count, 32
+ stp Q_l, Q_h, [dst, 16]
+ ldp Q_l, Q_h, [src, 16]!
+ stp A_l, A_h, [dst, 32]!
+ ldp A_l, A_h, [src, 16]!
+ b.hi L(loop64)
+
+ /* Write the last full set of 32 bytes. The remainder is at most 32
+ bytes, so it is safe to always copy 32 bytes from the end even if
+ there is just 1 byte left. */
+L(last64):
+ ldp C_l, C_h, [srcend, -32]
+ stp Q_l, Q_h, [dst, 16]
+ ldp Q_l, Q_h, [srcend, -16]
+ stp A_l, A_h, [dst, 32]
+ stp C_l, C_h, [dstend, -32]
+ stp Q_l, Q_h, [dstend, -16]
+ mov Q_l, B_l
+ mov Q_h, B_h
+ ret
+
+ .p2align 4
+L(move_long):
+ cbz tmp1, 3f
+
+ mov B_l, Q_l
+ mov B_h, Q_h
+
+ /* Align SRCEND to 16 byte alignment so that we don't cross cache line
+ boundaries on both loads and stores. There are at least 96 bytes
+ to copy, so copy 16 bytes unaligned and then align. The loop
+ copies 32 bytes per iteration and prefetches one iteration ahead. */
+
+ ldp A_l, A_h, [srcend, -16]
+ and tmp1, srcend, 15
+ sub srcend, srcend, tmp1
+ ldp Q_l, Q_h, [srcend, -16]!
+ stp A_l, A_h, [dstend, -16]
+ sub count, count, tmp1
+ ldp A_l, A_h, [srcend, -16]!
+ sub dstend, dstend, tmp1
+ sub count, count, 64
+
+1:
+ subs count, count, 32
+ stp Q_l, Q_h, [dstend, -16]
+ ldp Q_l, Q_h, [srcend, -16]!
+ stp A_l, A_h, [dstend, -32]!
+ ldp A_l, A_h, [srcend, -16]!
+ b.hi 1b
+
+ /* Write the last full set of 32 bytes. The remainder is at most 32
+ bytes, so it is safe to always copy 32 bytes from the start even if
+ there is just 1 byte left. */
+2:
+ ldp C_l, C_h, [src, 16]
+ stp Q_l, Q_h, [dstend, -16]
+ ldp Q_l, Q_h, [src]
+ stp A_l, A_h, [dstend, -32]
+ stp C_l, C_h, [dstin, 16]
+ stp Q_l, Q_h, [dstin]
+ mov Q_l, B_l
+ mov Q_h, B_h
+3: ret
+
+END (__memmove_falkor)
+libc_hidden_builtin_def (__memmove_falkor)
Index: glibc-2.26/sysdeps/unix/sysv/linux/aarch64/cpu-features.c
===================================================================
--- glibc-2.26.orig/sysdeps/unix/sysv/linux/aarch64/cpu-features.c
+++ glibc-2.26/sysdeps/unix/sysv/linux/aarch64/cpu-features.c
@@ -28,6 +28,7 @@ struct cpu_list
};

static struct cpu_list cpu_list[] = {
+ {"falkor", 0x510FC000},
{"thunderxt88", 0x430F0A10},
{"generic", 0x0}
};
Index: glibc-2.26/sysdeps/unix/sysv/linux/aarch64/cpu-features.h
===================================================================
--- glibc-2.26.orig/sysdeps/unix/sysv/linux/aarch64/cpu-features.h
+++ glibc-2.26/sysdeps/unix/sysv/linux/aarch64/cpu-features.h
@@ -41,6 +41,9 @@
#define IS_THUNDERX(midr) (MIDR_IMPLEMENTOR(midr) == 'C' \
&& MIDR_PARTNUM(midr) == 0x0a1)

+#define IS_FALKOR(midr) (MIDR_IMPLEMENTOR(midr) == 'Q' \
+ && MIDR_PARTNUM(midr) == 0xc00)
+
struct cpu_features
{
uint64_t midr_el1;
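
As a cross-check, the MIDR value 0x510FC000 registered for "falkor" in cpu_list decodes to exactly the fields that IS_FALKOR tests. The short stand-alone C snippet below restates the relevant shift/mask macros locally for illustration only; glibc's own MIDR_IMPLEMENTOR/MIDR_PARTNUM definitions live in cpu-features.h.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* MIDR_EL1 layout: implementer in bits [31:24], part number in bits [15:4].
   These local macros mirror glibc's MIDR_IMPLEMENTOR/MIDR_PARTNUM.  */
#define IMPLEMENTOR(midr) (((midr) >> 24) & 0xff)
#define PARTNUM(midr)     (((midr) >> 4) & 0xfff)

int main (void)
{
  uint32_t falkor_midr = 0x510FC000;   /* the cpu_list entry added above */

  /* Matches IS_FALKOR: implementer 'Q' (0x51, Qualcomm), part 0xc00.  */
  assert (IMPLEMENTOR (falkor_midr) == 'Q');
  assert (PARTNUM (falkor_midr) == 0xc00);

  printf ("implementer=0x%02x partnum=0x%03x\n",
          IMPLEMENTOR (falkor_midr), PARTNUM (falkor_midr));
  return 0;
}

Both assertions hold, so a glibc.tune.cpu=falkor tunable entry and a real Falkor MIDR_EL1 reading select the same memcpy/memmove code paths.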