GNU C Library master sources branch hjl/erms/nt created. glibc-2.23-160-g261ed5d
- From: hjl@sourceware.org
- To: glibc-cvs@sourceware.org
- Date: 6 Apr 2016 19:31:18 -0000
- Subject: GNU C Library master sources branch hjl/erms/nt created. glibc-2.23-160-g261ed5d
This is an automated email from the git hooks/post-receive script. It was
generated because a ref change was pushed to the repository containing
the project "GNU C Library master sources".
The branch, hjl/erms/nt, has been created
at 261ed5dff04164841575f46e46851952c68a28df (commit)
- Log -----------------------------------------------------------------
http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=261ed5dff04164841575f46e46851952c68a28df
commit 261ed5dff04164841575f46e46851952c68a28df
Author: H.J. Lu <hjl.tools@gmail.com>
Date: Sun Apr 3 17:21:45 2016 -0700
X86-64: Use non-temporal store in memmove on large data
memcpy/memmove benchmarks with large data show that there is a
regression with large data on Haswell machines. Using non-temporal
stores in memmove on large data can improve performance significantly.
This patch adds a threshold for using non-temporal stores, set to
4 times the shared cache size. When the size is above the threshold,
non-temporal stores are used.
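As a rough illustration, the threshold setup and size check could look
like the C sketch below. This is a hedged model, not glibc's code: the
names are hypothetical stand-ins for __x86_shared_non_temporal_threshold
and init_cacheinfo, and the cache size is hard-coded where glibc probes
it at startup.

#include <stddef.h>

/* Stand-in for the probed shared cache size (assumed value).  */
static long shared_cache_size = 8L * 1024 * 1024;
/* Stand-in for __x86_shared_non_temporal_threshold; the diff below
   gives it a 4 MiB static default before initialization.  */
static long non_temporal_threshold;

static void
init_threshold_sketch (void)
{
  /* Mirrors init_cacheinfo: 4 times the shared cache size.  */
  non_temporal_threshold = shared_cache_size * 4;
}

static int
use_non_temporal_stores (size_t n)
{
  /* Sizes at or above the threshold take the non-temporal path.  */
  return (long) n >= non_temporal_threshold;
}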
For sizes below 8 vector register widths, we load all data into
registers and store them together. Only forward and backward loops,
which move 4 vector registers at a time, are used to support
overlapping addresses. For the forward loop, we load the last
4 vector register widths of data and the first vector register width
of data into vector registers before the loop and store them after
the loop. For the backward loop, we load the first 4 vector register
widths of data and the last vector register width of data into vector
registers before the loop and store them after the loop.
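The following hedged C model shows why this makes overlapping addresses
safe for the forward loop (the backward loop mirrors it). VEC and the
function name are illustrative assumptions, not glibc identifiers, and
the real code additionally aligns the destination for its aligned
stores, with the saved first vector covering the bytes skipped by that
alignment.

#include <assert.h>
#include <stddef.h>
#include <string.h>

#define VEC 32                  /* e.g. one AVX ymm register */

void
forward_copy_sketch (unsigned char *d, const unsigned char *s, size_t n)
{
  unsigned char head[VEC], tail[4 * VEC], chunk[4 * VEC];
  assert (n >= 4 * VEC);        /* smaller sizes take other paths */

  /* Load the first VEC and the last 4 * VEC before the loop, so the
     loop may freely overwrite overlapping source bytes.  */
  memcpy (head, s, VEC);
  memcpy (tail, s + n - 4 * VEC, 4 * VEC);

  /* Main loop: 4 * VEC at a time.  Reading the whole chunk before
     writing any of it (4 loads into registers in the real code) is
     what makes each step safe for dst < src with any overlap.  */
  size_t i = 0;
  while (n - i > 4 * VEC)
    {
      memcpy (chunk, s + i, 4 * VEC);   /* read the chunk first */
      memcpy (d + i, chunk, 4 * VEC);   /* then store it */
      i += 4 * VEC;
    }

  /* Store the saved tail and head after the loop; together they cover
     all remaining bytes, whatever the loop left over.  */
  memcpy (d + n - 4 * VEC, tail, 4 * VEC);
  memcpy (d, head, VEC);
}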
* sysdeps/x86_64/cacheinfo.c (__x86_shared_non_temporal_threshold):
New.
(init_cacheinfo): Set __x86_shared_non_temporal_threshold to
4 times the shared cache size.
* sysdeps/x86_64/multiarch/memmove-avx-unaligned-erms.S
(PREFETCHNT): New.
(VMOVNT): Likewise.
* sysdeps/x86_64/multiarch/memmove-avx512-unaligned-erms.S
(PREFETCHNT): Likewise.
(VMOVNT): Likewise.
* sysdeps/x86_64/multiarch/memmove-sse2-unaligned-erms.S
(PREFETCHNT): Likewise.
(VMOVNT): Likewise.
(VMOVU): Changed to movups for smaller code sizes.
(VMOVA): Changed to movaps for smaller code sizes.
* sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S: Update
comments.
(PREFETCH_SIZE): New.
(PREFETCHED_LOAD_SIZE): Likewise.
(PREFETCH_ONE_SET): Likewise.
Rewrite to use forward and backward loops, which move 4 vector
registers at a time, to support overlapping addresses and use
non-temporal stores if the size is above the threshold (see the
sketch below).
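For reference, the large-size path's PREFETCHNT/VMOVNT/sfence sequence
can be approximated with SSE2 intrinsics as below. This is a simplified
sketch under stated assumptions (aligned destination, size a multiple
of 64, a single prefetch per iteration where the real code prefetches
several sets further ahead), not the glibc implementation in the diff
that follows.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <emmintrin.h>  /* _mm_loadu_si128, _mm_stream_si128, _mm_prefetch */

void
nt_forward_copy_sketch (void *dst, const void *src, size_t n)
{
  unsigned char *d = dst;
  const unsigned char *s = src;

  /* movntdq needs an aligned destination; the real code aligns it and
     patches up the head and tail with unaligned stores.  */
  assert (((uintptr_t) d % 16) == 0 && n % 64 == 0);

  while (n != 0)
    {
      /* Prefetch source data ahead of the loads (prefetchnta).  */
      _mm_prefetch ((const char *) s + 128, _MM_HINT_NTA);
      __m128i v0 = _mm_loadu_si128 ((const __m128i *) s);
      __m128i v1 = _mm_loadu_si128 ((const __m128i *) (s + 16));
      __m128i v2 = _mm_loadu_si128 ((const __m128i *) (s + 32));
      __m128i v3 = _mm_loadu_si128 ((const __m128i *) (s + 48));
      /* Non-temporal stores (movntdq) bypass the cache hierarchy.  */
      _mm_stream_si128 ((__m128i *) d, v0);
      _mm_stream_si128 ((__m128i *) (d + 16), v1);
      _mm_stream_si128 ((__m128i *) (d + 32), v2);
      _mm_stream_si128 ((__m128i *) (d + 48), v3);
      s += 64; d += 64; n -= 64;
    }
  /* Non-temporal stores are weakly ordered; the sfence makes them
     globally visible before the function returns.  */
  _mm_sfence ();
}

The trailing sfence is the reason both non-temporal loops in the diff
end with that instruction before storing the saved head/tail vectors.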
diff --git a/sysdeps/x86_64/cacheinfo.c b/sysdeps/x86_64/cacheinfo.c
index 96463df..cae9907 100644
--- a/sysdeps/x86_64/cacheinfo.c
+++ b/sysdeps/x86_64/cacheinfo.c
@@ -464,6 +464,10 @@ long int __x86_raw_shared_cache_size_half attribute_hidden = 1024 * 1024 / 2;
/* Similar to __x86_shared_cache_size, but not rounded. */
long int __x86_raw_shared_cache_size attribute_hidden = 1024 * 1024;
+/* Threshold to use non temporal stores. */
+long int __x86_shared_non_temporal_threshold attribute_hidden
+ = 1024 * 1024 * 4;
+
#ifndef DISABLE_PREFETCHW
/* PREFETCHW support flag for use in memory and string routines. */
int __x86_prefetchw attribute_hidden;
@@ -661,5 +665,6 @@ init_cacheinfo (void)
shared = shared & ~255L;
__x86_shared_cache_size_half = shared / 2;
__x86_shared_cache_size = shared;
+ __x86_shared_non_temporal_threshold = shared * 4;
}
}
diff --git a/sysdeps/x86_64/multiarch/memmove-avx-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-avx-unaligned-erms.S
index 44711c3..94201b3 100644
--- a/sysdeps/x86_64/multiarch/memmove-avx-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memmove-avx-unaligned-erms.S
@@ -1,6 +1,8 @@
#if IS_IN (libc)
# define VEC_SIZE 32
# define VEC(i) ymm##i
+# define PREFETCHNT prefetchnta
+# define VMOVNT vmovntdq
# define VMOVU vmovdqu
# define VMOVA vmovdqa
diff --git a/sysdeps/x86_64/multiarch/memmove-avx512-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-avx512-unaligned-erms.S
index c2c5293..1ebb9a3 100644
--- a/sysdeps/x86_64/multiarch/memmove-avx512-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memmove-avx512-unaligned-erms.S
@@ -1,6 +1,8 @@
#if defined HAVE_AVX512_ASM_SUPPORT && IS_IN (libc)
# define VEC_SIZE 64
# define VEC(i) zmm##i
+# define PREFETCHNT prefetchnta
+# define VMOVNT vmovntdq
# define VMOVU vmovdqu64
# define VMOVA vmovdqa64
diff --git a/sysdeps/x86_64/multiarch/memmove-sse2-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-sse2-unaligned-erms.S
index 85214fe..84b1263 100644
--- a/sysdeps/x86_64/multiarch/memmove-sse2-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memmove-sse2-unaligned-erms.S
@@ -1,8 +1,11 @@
#if IS_IN (libc)
# define VEC_SIZE 16
# define VEC(i) xmm##i
-# define VMOVU movdqu
-# define VMOVA movdqa
+# define PREFETCHNT prefetchnta
+# define VMOVNT movntdq
+/* Use movups and movaps for smaller code sizes. */
+# define VMOVU movups
+# define VMOVA movaps
# define SECTION(p) p
# define MEMMOVE_SYMBOL(p,s) p##_sse2_##s
diff --git a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
index 8a60d0f..8e28719 100644
--- a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
@@ -18,19 +18,22 @@
/* memmove/memcpy/mempcpy is implemented as:
1. Use overlapping load and store to avoid branch.
- 2. Use 8-bit or 32-bit displacements for branches and nop paddings
- to avoid long nop between instructions.
+ 2. Use 8-bit or 32-bit displacements and nop paddings to avoid long
+ nop between instructions.
3. Load all sources into registers and store them together to avoid
possible address overlap between source and destination.
- 4. If size is 2 * VEC_SIZE or less, load all sources into registers
+ 4. If size is 8 * VEC_SIZE or less, load all sources into registers
and store them together.
- 5. If there is no address overflap, copy from both ends with
- 4 * VEC_SIZE at a time.
- 6. If size is 8 * VEC_SIZE or less, load all sources into registers
- and store them together.
- 7. If address of destination > address of source, backward copy
- 8 * VEC_SIZE at a time.
- 8. Otherwise, forward copy 8 * VEC_SIZE at a time. */
+ 5. If address of destination > address of source, backward copy
+ 4 * VEC_SIZE at a time with unaligned load and aligned store.
+ Load the first 4 * VEC and last VEC before the loop and store
+ them after the loop to support overlapping addresses.
+ 6. Otherwise, forward copy 4 * VEC_SIZE at a time with unaligned
+ load and aligned store. Load the last 4 * VEC and first VEC
+ before the loop and store them after the loop to support
+ overlapping addresses.
+ 7. If size >= __x86_shared_non_temporal_threshold, use non-temporal
+ store instead of aligned store. */
#include <sysdep.h>
@@ -65,6 +68,34 @@
# define REP_MOVSB_THRESHOLD (2048 * (VEC_SIZE / 16))
#endif
+/* Assume 64-byte prefetch size. */
+#ifndef PREFETCH_SIZE
+# define PREFETCH_SIZE 64
+#endif
+
+#define PREFETCHED_LOAD_SIZE (VEC_SIZE * 4)
+
+#if PREFETCH_SIZE == 64
+# if PREFETCHED_LOAD_SIZE == PREFETCH_SIZE
+# define PREFETCH_ONE_SET(dir, base, offset) \
+ PREFETCHNT (offset)base
+# elif PREFETCHED_LOAD_SIZE == 2 * PREFETCH_SIZE
+# define PREFETCH_ONE_SET(dir, base, offset) \
+ PREFETCHNT (offset)base; \
+ PREFETCHNT (offset + dir * PREFETCH_SIZE)base
+# elif PREFETCHED_LOAD_SIZE == 4 * PREFETCH_SIZE
+# define PREFETCH_ONE_SET(dir, base, offset) \
+ PREFETCHNT (offset)base; \
+ PREFETCHNT (offset + dir * PREFETCH_SIZE)base; \
+ PREFETCHNT (offset + dir * PREFETCH_SIZE * 2)base; \
+ PREFETCHNT (offset + dir * PREFETCH_SIZE * 3)base
+# else
+# error Unsupported PREFETCHED_LOAD_SIZE!
+# endif
+#else
+# error Unsupported PREFETCH_SIZE!
+#endif
+
#ifndef SECTION
# error SECTION is not defined!
#endif
@@ -201,97 +232,8 @@ L(movsb):
rep movsb
L(nop):
ret
-
- .p2align 4
-L(movsb_more_2x_vec):
- cmpq $REP_MOVSB_THRESHOLD, %rdx
- /* Force 32-bit displacement to avoid long nop between
- instructions. */
- ja.d32 L(movsb)
#endif
- .p2align 4
-L(more_2x_vec):
- /* More than 2 * VEC. */
- cmpq %rsi, %rdi
- jb L(copy_forward)
- /* Source == destination is less common. */
- je L(nop)
- leaq (%rsi,%rdx), %rcx
- cmpq %rcx, %rdi
- jb L(more_2x_vec_overlap)
-L(copy_forward):
- leaq (%rdi,%rdx), %rcx
- cmpq %rcx, %rsi
- jb L(more_2x_vec_overlap)
- VMOVU (%rsi), %VEC(0)
- VMOVU VEC_SIZE(%rsi), %VEC(1)
- VMOVU -VEC_SIZE(%rsi,%rdx), %VEC(2)
- VMOVU -(VEC_SIZE * 2)(%rsi,%rdx), %VEC(3)
- VMOVU %VEC(0), (%rdi)
- VMOVU %VEC(1), VEC_SIZE(%rdi)
- VMOVU %VEC(2), -VEC_SIZE(%rdi,%rdx)
- VMOVU %VEC(3), -(VEC_SIZE * 2)(%rdi,%rdx)
- cmpq $(VEC_SIZE * 4), %rdx
- /* Force 32-bit displacement to avoid long nop between
- instructions. */
- jbe.d32 L(return)
- VMOVU (VEC_SIZE * 2)(%rsi), %VEC(0)
- VMOVU (VEC_SIZE * 3)(%rsi), %VEC(1)
- VMOVU -(VEC_SIZE * 3)(%rsi,%rdx), %VEC(2)
- VMOVU -(VEC_SIZE * 4)(%rsi,%rdx), %VEC(3)
- VMOVU %VEC(0), (VEC_SIZE * 2)(%rdi)
- VMOVU %VEC(1), (VEC_SIZE * 3)(%rdi)
- VMOVU %VEC(2), -(VEC_SIZE * 3)(%rdi,%rdx)
- VMOVU %VEC(3), -(VEC_SIZE * 4)(%rdi,%rdx)
- cmpq $(VEC_SIZE * 8), %rdx
-#if VEC_SIZE == 16
-# if defined USE_MULTIARCH && IS_IN (libc)
- jbe L(return)
-# else
- /* Use 32-bit displacement to avoid long nop between
- instructions. */
- jbe.d32 L(return)
-# endif
-#else
- /* Use 8-bit displacement to avoid long nop between
- instructions. */
- jbe L(return_disp8)
-#endif
- leaq (VEC_SIZE * 4)(%rdi), %rcx
- addq %rdi, %rdx
- andq $-(VEC_SIZE * 4), %rdx
- andq $-(VEC_SIZE * 4), %rcx
- movq %rcx, %r11
- subq %rdi, %r11
- addq %r11, %rsi
- cmpq %rdx, %rcx
- /* Use 8-bit displacement to avoid long nop between
- instructions. */
- je L(return_disp8)
- movq %rsi, %r10
- subq %rcx, %r10
- leaq VEC_SIZE(%r10), %r9
- leaq (VEC_SIZE * 2)(%r10), %r8
- leaq (VEC_SIZE * 3)(%r10), %r11
- .p2align 4
-L(loop):
- VMOVU (%rcx,%r10), %VEC(0)
- VMOVU (%rcx,%r9), %VEC(1)
- VMOVU (%rcx,%r8), %VEC(2)
- VMOVU (%rcx,%r11), %VEC(3)
- VMOVA %VEC(0), (%rcx)
- VMOVA %VEC(1), VEC_SIZE(%rcx)
- VMOVA %VEC(2), (VEC_SIZE * 2)(%rcx)
- VMOVA %VEC(3), (VEC_SIZE * 3)(%rcx)
- addq $(VEC_SIZE * 4), %rcx
- cmpq %rcx, %rdx
- jne L(loop)
-#if !defined USE_MULTIARCH || !IS_IN (libc)
-L(return):
-#endif
-L(return_disp8):
- VZEROUPPER
- ret
+
L(less_vec):
/* Less than 1 VEC. */
#if VEC_SIZE != 16 && VEC_SIZE != 32 && VEC_SIZE != 64
@@ -357,11 +299,23 @@ L(between_2_3):
movw %si, (%rdi)
ret
-#if VEC_SIZE > 16
- /* Align to 16 bytes to avoid long nop between instructions. */
.p2align 4
+L(more_2x_vec):
+#if IS_IN (libc)
+ cmpq __x86_shared_non_temporal_threshold(%rip), %rdx
+ jae L(large_data)
+# ifdef USE_MULTIARCH
+ jmp L(start_more_2x_vec)
+
+ .p2align 4
+L(movsb_more_2x_vec):
+ cmpq __x86_shared_non_temporal_threshold(%rip), %rdx
+ jae L(large_data)
+ cmpq $REP_MOVSB_THRESHOLD, %rdx
+ ja L(movsb)
+L(start_more_2x_vec):
+# endif
#endif
-L(more_2x_vec_overlap):
/* More than 2 * VEC and there is overlap between destination
and source. */
cmpq $(VEC_SIZE * 8), %rdx
@@ -400,84 +354,261 @@ L(last_4x_vec):
VMOVU %VEC(3), -(VEC_SIZE * 2)(%rdi,%rdx)
VZEROUPPER
ret
-L(between_0_and_4x_vec):
- /* Copy from 0 to 4 * VEC. */
- cmpl $(VEC_SIZE * 2), %edx
- jae L(last_4x_vec)
- /* Copy from 0 to 2 * VEC. */
- cmpl $VEC_SIZE, %edx
- jae L(last_2x_vec)
- /* Copy from 0 to VEC. */
- VZEROUPPER
- jmp L(less_vec)
L(more_8x_vec):
cmpq %rsi, %rdi
ja L(more_8x_vec_backward)
-
+ /* Source == destination is less common. */
+ je L(nop)
+ /* Load the first VEC and last 4 * VEC to support overlapping
+ addresses. */
+ /* Force 32-bit displacement to avoid long nop between
+ instructions. */
+#if IS_IN (libc) && (VEC_SIZE == 16 || VEC_SIZE == 64)
+ VMOVU.d32 (%rsi), %VEC(4)
+#else
+ VMOVU (%rsi), %VEC(4)
+#endif
+#if defined USE_MULTIARCH && IS_IN (libc) && VEC_SIZE == 16
+ VMOVU.d32 -VEC_SIZE(%rsi, %rdx), %VEC(5)
+#else
+ VMOVU -VEC_SIZE(%rsi, %rdx), %VEC(5)
+#endif
+ VMOVU -(VEC_SIZE * 2)(%rsi, %rdx), %VEC(6)
+ VMOVU -(VEC_SIZE * 3)(%rsi, %rdx), %VEC(7)
+ VMOVU -(VEC_SIZE * 4)(%rsi, %rdx), %VEC(8)
+ /* Save start and stop of the destination buffer. */
+ movq %rdi, %r10
+#if VEC_SIZE == 32
+ leaq -VEC_SIZE(%rdi, %rdx), %rcx
+#else
+ /* Force 32-bit displacement to avoid long nop between
+ instructions. */
+ leaq.d32 -VEC_SIZE(%rdi, %rdx), %rcx
+#endif
+ /* Align destination for aligned stores in the loop. Compute
+ how much destination is misaligned. */
+ movq %rdi, %r8
+ andq $(VEC_SIZE - 1), %r8
+ /* Get the negative of offset for alignment. */
+ subq $VEC_SIZE, %r8
+ /* Adjust source. */
+ subq %r8, %rsi
+ /* Adjust destination which should be aligned now. */
+ subq %r8, %rdi
+ /* Adjust length. */
+ addq %r8, %rdx
.p2align 4
-L(loop_8x_vec_forward):
- /* Copy 8 * VEC a time forward. */
+L(loop_4x_vec_forward):
+ /* Copy 4 * VEC a time forward. */
VMOVU (%rsi), %VEC(0)
VMOVU VEC_SIZE(%rsi), %VEC(1)
VMOVU (VEC_SIZE * 2)(%rsi), %VEC(2)
VMOVU (VEC_SIZE * 3)(%rsi), %VEC(3)
- VMOVU (VEC_SIZE * 4)(%rsi), %VEC(4)
- VMOVU (VEC_SIZE * 5)(%rsi), %VEC(5)
- VMOVU (VEC_SIZE * 6)(%rsi), %VEC(6)
- VMOVU (VEC_SIZE * 7)(%rsi), %VEC(7)
- VMOVU %VEC(0), (%rdi)
- VMOVU %VEC(1), VEC_SIZE(%rdi)
- VMOVU %VEC(2), (VEC_SIZE * 2)(%rdi)
- VMOVU %VEC(3), (VEC_SIZE * 3)(%rdi)
- VMOVU %VEC(4), (VEC_SIZE * 4)(%rdi)
- VMOVU %VEC(5), (VEC_SIZE * 5)(%rdi)
- VMOVU %VEC(6), (VEC_SIZE * 6)(%rdi)
- VMOVU %VEC(7), (VEC_SIZE * 7)(%rdi)
- addq $(VEC_SIZE * 8), %rdi
- addq $(VEC_SIZE * 8), %rsi
- subq $(VEC_SIZE * 8), %rdx
- cmpq $(VEC_SIZE * 8), %rdx
- je L(between_4x_vec_and_8x_vec)
- ja L(loop_8x_vec_forward)
- /* Less than 8 * VEC to copy. */
+ addq $(VEC_SIZE * 4), %rsi
+ subq $(VEC_SIZE * 4), %rdx
+ VMOVA %VEC(0), (%rdi)
+ VMOVA %VEC(1), VEC_SIZE(%rdi)
+ VMOVA %VEC(2), (VEC_SIZE * 2)(%rdi)
+ VMOVA %VEC(3), (VEC_SIZE * 3)(%rdi)
+ addq $(VEC_SIZE * 4), %rdi
cmpq $(VEC_SIZE * 4), %rdx
- jb L(between_0_and_4x_vec)
- jmp L(between_4x_vec_and_8x_vec)
+ ja L(loop_4x_vec_forward)
+ /* Store the last 4 * VEC. */
+ VMOVU %VEC(5), (%rcx)
+ VMOVU %VEC(6), -VEC_SIZE(%rcx)
+ VMOVU %VEC(7), -(VEC_SIZE * 2)(%rcx)
+ VMOVU %VEC(8), -(VEC_SIZE * 3)(%rcx)
+ /* Store the first VEC. */
+ VMOVU %VEC(4), (%r10)
+ VZEROUPPER
+ ret
- .p2align 4
L(more_8x_vec_backward):
- leaq -VEC_SIZE(%rsi, %rdx), %rcx
- leaq -VEC_SIZE(%rdi, %rdx), %r9
-
+ /* Load the first 4 * VEC and last VEC to support overlapping
+ addresses. */
+ /* Force 32-bit displacement to avoid long nop between
+ instructions. */
+ VMOVU.d32 (%rsi), %VEC(4)
+#if VEC_SIZE == 32
+ VMOVU.d32 VEC_SIZE(%rsi), %VEC(5)
+#else
+ VMOVU VEC_SIZE(%rsi), %VEC(5)
+#endif
+ VMOVU (VEC_SIZE * 2)(%rsi), %VEC(6)
+ VMOVU (VEC_SIZE * 3)(%rsi), %VEC(7)
+ VMOVU -VEC_SIZE(%rsi,%rdx), %VEC(8)
+ /* Save stop of the destination buffer. */
+ leaq -VEC_SIZE(%rdi, %rdx), %r11
+ /* Align destination end for aligned stores in the loop. Compute
+ how much destination end is misaligned. */
+ /* Force 32-bit displacement to avoid long nop between
+ instructions. */
+ leaq.d32 -VEC_SIZE(%rsi, %rdx), %rcx
+ movq %r11, %r9
+ movq %r11, %r8
+ andq $(VEC_SIZE - 1), %r8
+ /* Adjust source. */
+ subq %r8, %rcx
+ /* Adjust the end of destination which should be aligned now. */
+ subq %r8, %r9
+ /* Adjust length. */
+ subq %r8, %rdx
.p2align 4
-L(loop_8x_vec_backward):
- /* Copy 8 * VEC a time backward. */
+L(loop_4x_vec_backward):
+ /* Copy 4 * VEC a time backward. */
VMOVU (%rcx), %VEC(0)
VMOVU -VEC_SIZE(%rcx), %VEC(1)
VMOVU -(VEC_SIZE * 2)(%rcx), %VEC(2)
VMOVU -(VEC_SIZE * 3)(%rcx), %VEC(3)
- VMOVU -(VEC_SIZE * 4)(%rcx), %VEC(4)
- VMOVU -(VEC_SIZE * 5)(%rcx), %VEC(5)
- VMOVU -(VEC_SIZE * 6)(%rcx), %VEC(6)
- VMOVU -(VEC_SIZE * 7)(%rcx), %VEC(7)
- VMOVU %VEC(0), (%r9)
- VMOVU %VEC(1), -VEC_SIZE(%r9)
- VMOVU %VEC(2), -(VEC_SIZE * 2)(%r9)
- VMOVU %VEC(3), -(VEC_SIZE * 3)(%r9)
- VMOVU %VEC(4), -(VEC_SIZE * 4)(%r9)
- VMOVU %VEC(5), -(VEC_SIZE * 5)(%r9)
- VMOVU %VEC(6), -(VEC_SIZE * 6)(%r9)
- VMOVU %VEC(7), -(VEC_SIZE * 7)(%r9)
- subq $(VEC_SIZE * 8), %rcx
- subq $(VEC_SIZE * 8), %r9
- subq $(VEC_SIZE * 8), %rdx
- cmpq $(VEC_SIZE * 8), %rdx
- je L(between_4x_vec_and_8x_vec)
- ja L(loop_8x_vec_backward)
- /* Less than 8 * VEC to copy. */
+ subq $(VEC_SIZE * 4), %rcx
+ subq $(VEC_SIZE * 4), %rdx
+ VMOVA %VEC(0), (%r9)
+ VMOVA %VEC(1), -VEC_SIZE(%r9)
+ VMOVA %VEC(2), -(VEC_SIZE * 2)(%r9)
+ VMOVA %VEC(3), -(VEC_SIZE * 3)(%r9)
+ subq $(VEC_SIZE * 4), %r9
cmpq $(VEC_SIZE * 4), %rdx
- jb L(between_0_and_4x_vec)
- jmp L(between_4x_vec_and_8x_vec)
+ ja L(loop_4x_vec_backward)
+ /* Store the first 4 * VEC. */
+ VMOVU %VEC(4), (%rdi)
+ VMOVU %VEC(5), VEC_SIZE(%rdi)
+ VMOVU %VEC(6), (VEC_SIZE * 2)(%rdi)
+ VMOVU %VEC(7), (VEC_SIZE * 3)(%rdi)
+ /* Store the last VEC. */
+ VMOVU %VEC(8), (%r11)
+L(nop_disp8):
+ VZEROUPPER
+ ret
+
+L(large_data):
+ /* Copy very large data with non-temporal stores. */
+ cmpq %rsi, %rdi
+ ja L(copy_large_backward)
+ /* Source == destination is less common. */
+ /* Force 8-bit displacement to avoid long nop between
+ instructions. */
+#if VEC_SIZE == 16 || VEC_SIZE == 32
+ je L(nop_disp8)
+#else
+ je L(nop)
+#endif
+ PREFETCH_ONE_SET (1, (%rsi), 0)
+ PREFETCH_ONE_SET (1, (%rsi), PREFETCHED_LOAD_SIZE)
+ /* Load the first VEC and last 4 * VEC to support overlapping
+ addresses. */
+ VMOVU -VEC_SIZE(%rsi, %rdx), %VEC(5)
+ VMOVU -(VEC_SIZE * 2)(%rsi, %rdx), %VEC(6)
+ VMOVU -(VEC_SIZE * 3)(%rsi, %rdx), %VEC(7)
+ VMOVU -(VEC_SIZE * 4)(%rsi, %rdx), %VEC(8)
+ /* Force 32-bit displacement to avoid long nop between
+ instructions. */
+#if VEC_SIZE == 32 || VEC_SIZE == 64
+ VMOVU.d32 (%rsi), %VEC(4)
+#else
+ VMOVU (%rsi), %VEC(4)
+#endif
+ /* Save start and stop of the destination buffer. */
+ movq %rdi, %r10
+ leaq -VEC_SIZE(%rdi, %rdx), %rcx
+ /* Align destination for non-temporal stores in the loop.
+ Compute how much destination is misaligned. */
+ movq %rdi, %r8
+ andq $(VEC_SIZE - 1), %r8
+ /* Get the negative of offset for alignment. */
+ subq $VEC_SIZE, %r8
+ /* Adjust source. */
+ subq %r8, %rsi
+ /* Adjust destination which should be aligned now. */
+ subq %r8, %rdi
+ /* Adjust length. */
+ addq %r8, %rdx
+ .p2align 4
+L(loop_large_forward):
+ /* Copy 4 * VEC a time forward with non-temporal stores. */
+ PREFETCH_ONE_SET (1, (%rsi), PREFETCHED_LOAD_SIZE * 2)
+ PREFETCH_ONE_SET (1, (%rsi), PREFETCHED_LOAD_SIZE * 4)
+ PREFETCH_ONE_SET (1, (%rsi), PREFETCHED_LOAD_SIZE * 6)
+ VMOVU (%rsi), %VEC(0)
+ VMOVU VEC_SIZE(%rsi), %VEC(1)
+ VMOVU (VEC_SIZE * 2)(%rsi), %VEC(2)
+ VMOVU (VEC_SIZE * 3)(%rsi), %VEC(3)
+ addq $PREFETCHED_LOAD_SIZE, %rsi
+ subq $PREFETCHED_LOAD_SIZE, %rdx
+ VMOVNT %VEC(0), (%rdi)
+ VMOVNT %VEC(1), VEC_SIZE(%rdi)
+ VMOVNT %VEC(2), (VEC_SIZE * 2)(%rdi)
+ VMOVNT %VEC(3), (VEC_SIZE * 3)(%rdi)
+ addq $PREFETCHED_LOAD_SIZE, %rdi
+ cmpq $PREFETCHED_LOAD_SIZE, %rdx
+ ja L(loop_large_forward)
+ sfence
+ /* Store the last 4 * VEC. */
+ VMOVU %VEC(5), (%rcx)
+ VMOVU %VEC(6), -VEC_SIZE(%rcx)
+ VMOVU %VEC(7), -(VEC_SIZE * 2)(%rcx)
+ VMOVU %VEC(8), -(VEC_SIZE * 3)(%rcx)
+ /* Store the first VEC. */
+ VMOVU %VEC(4), (%r10)
+ VZEROUPPER
+ ret
+
+L(copy_large_backward):
+ /* Load the first 4 * VEC and last VEC to support overlapping
+ addresses. */
+ /* Force 32-bit displacement to avoid long nop between
+ instructions. */
+ VMOVU.d32 (%rsi), %VEC(4)
+ VMOVU.d32 VEC_SIZE(%rsi), %VEC(5)
+ VMOVU.d32 (VEC_SIZE * 2)(%rsi), %VEC(6)
+#if VEC_SIZE == 16 || VEC_SIZE == 32
+ VMOVU.d32 (VEC_SIZE * 3)(%rsi), %VEC(7)
+#else
+ VMOVU (VEC_SIZE * 3)(%rsi), %VEC(7)
+#endif
+ VMOVU -VEC_SIZE(%rsi,%rdx), %VEC(8)
+ /* Save stop of the destination buffer. */
+ leaq -VEC_SIZE(%rdi, %rdx), %r11
+ /* Align the destination end for non-temporal stores in the loop.
+ Compute how much destination end is misaligned. */
+ leaq -VEC_SIZE(%rsi, %rdx), %rcx
+ movq %r11, %r9
+ movq %r11, %r8
+ andq $(VEC_SIZE - 1), %r8
+ /* Adjust source. */
+ subq %r8, %rcx
+ /* Adjust the end of destination which should be aligned now. */
+ subq %r8, %r9
+ /* Adjust length. */
+ subq %r8, %rdx
+ .p2align 4
+L(loop_large_backward):
+ /* Copy 4 * VEC a time backward with non-temporal stores. */
+ PREFETCH_ONE_SET (-1, (%rsi), -PREFETCHED_LOAD_SIZE * 2)
+ PREFETCH_ONE_SET (-1, (%rsi), -PREFETCHED_LOAD_SIZE * 4)
+ PREFETCH_ONE_SET (-1, (%rsi), -PREFETCHED_LOAD_SIZE * 6)
+ VMOVU (%rcx), %VEC(0)
+ VMOVU -VEC_SIZE(%rcx), %VEC(1)
+ VMOVU -(VEC_SIZE * 2)(%rcx), %VEC(2)
+ VMOVU -(VEC_SIZE * 3)(%rcx), %VEC(3)
+ subq $PREFETCHED_LOAD_SIZE, %rcx
+ subq $PREFETCHED_LOAD_SIZE, %rdx
+ VMOVNT %VEC(0), (%r9)
+ VMOVNT %VEC(1), -VEC_SIZE(%r9)
+ VMOVNT %VEC(2), -(VEC_SIZE * 2)(%r9)
+ VMOVNT %VEC(3), -(VEC_SIZE * 3)(%r9)
+ subq $PREFETCHED_LOAD_SIZE, %r9
+ cmpq $PREFETCHED_LOAD_SIZE, %rdx
+ ja L(loop_large_backward)
+ sfence
+ /* Store the first 4 * VEC. */
+ VMOVU %VEC(4), (%rdi)
+ VMOVU %VEC(5), VEC_SIZE(%rdi)
+ VMOVU %VEC(6), (VEC_SIZE * 2)(%rdi)
+ VMOVU %VEC(7), (VEC_SIZE * 3)(%rdi)
+ /* Store the last VEC. */
+ VMOVU %VEC(8), (%r11)
+ VZEROUPPER
+ ret
END (MEMMOVE_SYMBOL (__memmove, unaligned_erms))
#ifdef SHARED
-----------------------------------------------------------------------
hooks/post-receive
--
GNU C Library master sources