[AArch64] Optimized memcpy.
This is an optimized memcpy for AArch64. Copies are split into 3 main cases: small copies of up to 16 bytes, medium copies of 17..96 bytes, which are fully unrolled, and large copies of more than 96 bytes, which align the destination and use an unrolled loop processing 64 bytes per iteration. To share code with memmove, small and medium copies read all data before writing, allowing any kind of overlap. On a random copy test memcpy is 40.8% faster on Cortex-A57 and 28.4% faster on Cortex-A53.
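For orientation, the size dispatch at the top of the new routine reduces to the C shape below. This is a sketch, not the patch itself: the helper names are hypothetical stand-ins for the three assembly paths, and their memmove bodies exist only so the sketch compiles and runs. Note that a count of exactly 16 takes the medium path (matching the cmp/b.hs pair in the diff below), so the small path only ever sees 0..15 bytes.

#include <stddef.h>
#include <string.h>

/* Hypothetical stand-ins for the three assembly code paths.  */
static void copy_small (void *d, const void *s, size_t n)  { memmove (d, s, n); }
static void copy_medium (void *d, const void *s, size_t n) { memmove (d, s, n); }
static void copy_large (void *d, const void *s, size_t n)  { memmove (d, s, n); }

void *memcpy_sketch (void *dstin, const void *src, size_t count)
{
  if (count > 96)
    copy_large (dstin, src, count);   /* align dst, 64 bytes per iteration */
  else if (count >= 16)
    copy_medium (dstin, src, count);  /* fully unrolled, reads before writes */
  else
    copy_small (dstin, src, count);   /* 0..15 bytes via bit tests on count */
  return dstin;
}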
This commit is contained in:
parent 9503c7f275
commit b295f6ba44
@@ -1,3 +1,8 @@
+2015-07-13  Wilco Dijkstra  <wdijkstr@arm.com>
+
+	* newlib/libc/machine/aarch64/memcpy.S (memcpy):
+	Rewrite of optimized memcpy.
+
 2015-07-13  Wilco Dijkstra  <wdijkstr@arm.com>
 
 	* newlib/libc/machine/aarch64/memmove.S (memmove):
@@ -24,10 +24,37 @@
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
 
+/*
+ * Copyright (c) 2015 ARM Ltd
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the company may not be used to endorse or promote
+ *    products derived from this software without specific prior written
+ *    permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
 /* Assumptions:
  *
- * ARMv8-a, AArch64
- * Unaligned accesses
+ * ARMv8-a, AArch64, unaligned accesses.
  *
  */
@@ -38,22 +65,26 @@
 #define dstin	x0
 #define src	x1
 #define count	x2
-#define tmp1	x3
-#define tmp1w	w3
-#define tmp2	x4
-#define tmp2w	w4
-#define tmp3	x5
-#define tmp3w	w5
-#define dst	x6
-
-#define A_l	x7
-#define A_h	x8
-#define B_l	x9
-#define B_h	x10
-#define C_l	x11
-#define C_h	x12
-#define D_l	x13
-#define D_h	x14
+#define dst	x3
+#define srcend	x4
+#define dstend	x5
+#define A_l	x6
+#define A_lw	w6
+#define A_h	x7
+#define A_hw	w7
+#define B_l	x8
+#define B_h	x9
+#define C_l	x10
+#define C_h	x11
+#define D_l	x12
+#define D_h	x13
+#define E_l	src
+#define E_h	count
+#define F_l	srcend
+#define F_h	dst
+#define tmp1	x9
+
+#define L(l) .L ## l
 
 .macro def_fn f p2align=0
 .text
@@ -63,133 +94,129 @@
 \f:
 .endm
 
+/* Copies are split into 3 main cases: small copies of up to 16 bytes,
+   medium copies of 17..96 bytes which are fully unrolled.  Large copies
+   of more than 96 bytes align the destination and use an unrolled loop
+   processing 64 bytes per iteration.
+   Small and medium copies read all data before writing, allowing any
+   kind of overlap, and memmove tailcalls memcpy for these cases as
+   well as non-overlapping copies.
+*/
+
 def_fn memcpy p2align=6
-	mov	dst, dstin
-	cmp	count, #64
-	b.ge	.Lcpy_not_short
-	cmp	count, #15
-	b.le	.Ltail15tiny
-
-	/* Deal with small copies quickly by dropping straight into the
-	 * exit block.  */
-.Ltail63:
-	/* Copy up to 48 bytes of data.  At this point we only need the
-	 * bottom 6 bits of count to be accurate.  */
-	ands	tmp1, count, #0x30
-	b.eq	.Ltail15
-	add	dst, dst, tmp1
-	add	src, src, tmp1
-	cmp	tmp1w, #0x20
-	b.eq	1f
-	b.lt	2f
-	ldp	A_l, A_h, [src, #-48]
-	stp	A_l, A_h, [dst, #-48]
-1:
-	ldp	A_l, A_h, [src, #-32]
-	stp	A_l, A_h, [dst, #-32]
-2:
-	ldp	A_l, A_h, [src, #-16]
-	stp	A_l, A_h, [dst, #-16]
-
-.Ltail15:
-	ands	count, count, #15
-	b.eq	1f
-	add	src, src, count
-	ldp	A_l, A_h, [src, #-16]
-	add	dst, dst, count
-	stp	A_l, A_h, [dst, #-16]
-1:
+	add	srcend, src, count
+	add	dstend, dstin, count
+	cmp	count, 96
+	b.hi	L(copy_long)
+	cmp	count, 16
+	b.hs	L(copy_medium)
+
+	/* Small copies: 0..16 bytes.  */
+L(copy16):
+	tbz	count, 3, 1f
+	ldr	A_l, [src]
+	ldr	A_h, [srcend, -8]
+	str	A_l, [dstin]
+	str	A_h, [dstend, -8]
 	ret
-
-.Ltail15tiny:
-	/* Copy up to 15 bytes of data.  Does not assume additional data
-	   being copied.  */
-	tbz	count, #3, 1f
-	ldr	tmp1, [src], #8
-	str	tmp1, [dst], #8
-1:
-	tbz	count, #2, 1f
-	ldr	tmp1w, [src], #4
-	str	tmp1w, [dst], #4
-1:
-	tbz	count, #1, 1f
-	ldrh	tmp1w, [src], #2
-	strh	tmp1w, [dst], #2
-1:
-	tbz	count, #0, 1f
-	ldrb	tmp1w, [src]
-	strb	tmp1w, [dst]
 1:
+	tbz	count, 2, 1f
+	ldr	A_lw, [src]
+	ldr	A_hw, [srcend, -4]
+	str	A_lw, [dstin]
+	str	A_hw, [dstend, -4]
 	ret
+	.p2align 4
+1:
+	cbz	count, 2f
+	ldrb	A_lw, [src]
+	tbz	count, 1, 1f
+	ldrh	A_hw, [srcend, -2]
+	strh	A_hw, [dstend, -2]
+1:	strb	A_lw, [dstin]
+2:	ret
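The small-copy path just shown never loops per byte: it covers the range with a head access and a tail access that may overlap, loading both before storing either, which is what makes it overlap-safe for memmove. A minimal C sketch of the idea follows; the function name and the memcpy-into-locals idiom (standing in for unaligned ldr/str) are illustrative, not part of the patch. The label's comment says 0..16, but a count of exactly 16 is dispatched to the medium path, so bit 3 of count cleanly separates 8..15 from smaller sizes.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Copy 0..15 bytes, reading all data before writing (overlap-safe).
   Mirrors the 8..15 / 4..7 / 1..3 byte structure of L(copy16).  */
static void copy_small (unsigned char *dst, const unsigned char *src, size_t n)
{
  if (n & 8) {                          /* 8..15 bytes */
    uint64_t a, b;
    memcpy (&a, src, 8);                /* first 8 bytes */
    memcpy (&b, src + n - 8, 8);        /* last 8 bytes; may overlap a */
    memcpy (dst, &a, 8);
    memcpy (dst + n - 8, &b, 8);
  } else if (n & 4) {                   /* 4..7 bytes */
    uint32_t a, b;
    memcpy (&a, src, 4);
    memcpy (&b, src + n - 4, 4);
    memcpy (dst, &a, 4);
    memcpy (dst + n - 4, &b, 4);
  } else if (n) {                       /* 1..3 bytes */
    unsigned char a = src[0];
    if (n & 2) {                        /* cover the last two bytes */
      uint16_t h;
      memcpy (&h, src + n - 2, 2);
      memcpy (dst + n - 2, &h, 2);
    }
    dst[0] = a;
  }
}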
 
-.Lcpy_not_short:
-	/* We don't much care about the alignment of DST, but we want SRC
-	 * to be 128-bit (16 byte) aligned so that we don't cross cache line
-	 * boundaries on both loads and stores.  */
-	neg	tmp2, src
-	ands	tmp2, tmp2, #15		/* Bytes to reach alignment.  */
-	b.eq	2f
-	sub	count, count, tmp2
-	/* Copy more data than needed; it's faster than jumping
-	 * around copying sub-Quadword quantities.  We know that
-	 * it can't overrun.  */
+	.p2align 4
+	/* Medium copies: 17..96 bytes.  */
+L(copy_medium):
 	ldp	A_l, A_h, [src]
-	add	src, src, tmp2
-	stp	A_l, A_h, [dst]
-	add	dst, dst, tmp2
-	/* There may be less than 63 bytes to go now.  */
-	cmp	count, #63
-	b.le	.Ltail63
-2:
-	subs	count, count, #128
-	b.ge	.Lcpy_body_large
-	/* Less than 128 bytes to copy, so handle 64 here and then jump
-	 * to the tail.  */
-	ldp	A_l, A_h, [src]
-	ldp	B_l, B_h, [src, #16]
-	ldp	C_l, C_h, [src, #32]
-	ldp	D_l, D_h, [src, #48]
-	stp	A_l, A_h, [dst]
-	stp	B_l, B_h, [dst, #16]
-	stp	C_l, C_h, [dst, #32]
-	stp	D_l, D_h, [dst, #48]
-	tst	count, #0x3f
-	add	src, src, #64
-	add	dst, dst, #64
-	b.ne	.Ltail63
-	ret
-
-	/* Critical loop.  Start at a new cache line boundary.  Assuming
-	 * 64 bytes per line this ensures the entire loop is in one line.  */
-	.p2align 6
-.Lcpy_body_large:
-	/* There are at least 128 bytes to copy.  */
-	ldp	A_l, A_h, [src, #0]
-	sub	dst, dst, #16		/* Pre-bias.  */
-	ldp	B_l, B_h, [src, #16]
-	ldp	C_l, C_h, [src, #32]
-	ldp	D_l, D_h, [src, #48]!	/* src += 64 - Pre-bias.  */
+	tbnz	count, 6, L(copy96)
+	ldp	D_l, D_h, [srcend, -16]
+	tbz	count, 5, 1f
+	ldp	B_l, B_h, [src, 16]
+	ldp	C_l, C_h, [srcend, -32]
+	stp	B_l, B_h, [dstin, 16]
+	stp	C_l, C_h, [dstend, -32]
 1:
-	stp	A_l, A_h, [dst, #16]
-	ldp	A_l, A_h, [src, #16]
-	stp	B_l, B_h, [dst, #32]
-	ldp	B_l, B_h, [src, #32]
-	stp	C_l, C_h, [dst, #48]
-	ldp	C_l, C_h, [src, #48]
-	stp	D_l, D_h, [dst, #64]!
-	ldp	D_l, D_h, [src, #64]!
-	subs	count, count, #64
-	b.ge	1b
-	stp	A_l, A_h, [dst, #16]
-	stp	B_l, B_h, [dst, #32]
-	stp	C_l, C_h, [dst, #48]
-	stp	D_l, D_h, [dst, #64]
-	add	src, src, #16
-	add	dst, dst, #64 + 16
-	tst	count, #0x3f
-	b.ne	.Ltail63
+	stp	A_l, A_h, [dstin]
+	stp	D_l, D_h, [dstend, -16]
 	ret
-	.size	memcpy, .-memcpy
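The medium path uses the same read-everything-first trick at a larger granularity: the first and last 16 bytes are always copied, and bit 5 of count decides whether a second 16-byte chunk from each end is needed; bit 6 (count >= 64) branches off to copy96. A C sketch of the 17..63 byte case, under the same hedges as above (illustrative names; a 16-byte struct stands in for one ldp/stp pair):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

typedef struct { uint64_t lo, hi; } pair16;   /* one ldp/stp quantum */

static pair16 load16 (const unsigned char *p) { pair16 v; memcpy (&v, p, 16); return v; }
static void store16 (unsigned char *p, pair16 v) { memcpy (p, &v, 16); }

/* Sketch of L(copy_medium) for 17..63 bytes: the first/last 16 bytes
   always, plus the second 16 from each end when n >= 32.  The chunks
   overlap in the middle, covering every byte, and all loads happen
   before any store, so overlapping buffers are handled correctly.  */
static void copy_medium (unsigned char *dst, const unsigned char *src, size_t n)
{
  pair16 a = load16 (src);              /* ldp A_l, A_h, [src] */
  pair16 d = load16 (src + n - 16);     /* ldp D_l, D_h, [srcend, -16] */
  if (n & 32) {                         /* tbz count, 5, 1f */
    pair16 b = load16 (src + 16);
    pair16 c = load16 (src + n - 32);
    store16 (dst + 16, b);
    store16 (dst + n - 32, c);
  }
  store16 (dst, a);
  store16 (dst + n - 16, d);
}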
 
+	.p2align 4
+	/* Copy 64..96 bytes.  Copy 64 bytes from the start and
+	   32 bytes from the end.  */
+L(copy96):
+	ldp	B_l, B_h, [src, 16]
+	ldp	C_l, C_h, [src, 32]
+	ldp	D_l, D_h, [src, 48]
+	ldp	E_l, E_h, [srcend, -32]
+	ldp	F_l, F_h, [srcend, -16]
+	stp	A_l, A_h, [dstin]
+	stp	B_l, B_h, [dstin, 16]
+	stp	C_l, C_h, [dstin, 32]
+	stp	D_l, D_h, [dstin, 48]
+	stp	E_l, E_h, [dstend, -32]
+	stp	F_l, F_h, [dstend, -16]
+	ret
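copy96 extends the pattern once more (A_l/A_h were already loaded in copy_medium before the branch): 64 bytes from the start plus 32 from the end cover any length in 64..96. The same hedged C sketch style, reusing the pair16 helpers shown above:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

typedef struct { uint64_t lo, hi; } pair16;
static pair16 load16 (const unsigned char *p) { pair16 v; memcpy (&v, p, 16); return v; }
static void store16 (unsigned char *p, pair16 v) { memcpy (p, &v, 16); }

/* Sketch of L(copy96): for 64..96 bytes, copy 64 bytes from the start
   and 32 bytes from the end.  The regions overlap when n < 96, which
   is harmless because all six loads precede the first store.  */
static void copy96 (unsigned char *dst, const unsigned char *src, size_t n)
{
  pair16 a = load16 (src);
  pair16 b = load16 (src + 16);
  pair16 c = load16 (src + 32);
  pair16 d = load16 (src + 48);
  pair16 e = load16 (src + n - 32);
  pair16 f = load16 (src + n - 16);
  store16 (dst, a);
  store16 (dst + 16, b);
  store16 (dst + 32, c);
  store16 (dst + 48, d);
  store16 (dst + n - 32, e);
  store16 (dst + n - 16, f);
}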
+
+	/* Align DST to 16 byte alignment so that we don't cross cache line
+	   boundaries on both loads and stores.  There are at least 96 bytes
+	   to copy, so copy 16 bytes unaligned and then align.  The loop
+	   copies 64 bytes per iteration and prefetches one iteration ahead.  */
+
+	.p2align 4
+L(copy_long):
+	and	tmp1, dstin, 15
+	bic	dst, dstin, 15
+	ldp	D_l, D_h, [src]
+	sub	src, src, tmp1
+	add	count, count, tmp1	/* Count is now 16 too large.  */
+	ldp	A_l, A_h, [src, 16]
+	stp	D_l, D_h, [dstin]
+	ldp	B_l, B_h, [src, 32]
+	ldp	C_l, C_h, [src, 48]
+	ldp	D_l, D_h, [src, 64]!
+	subs	count, count, 128 + 16	/* Test and readjust count.  */
+	b.ls	2f
+1:
+	stp	A_l, A_h, [dst, 16]
+	ldp	A_l, A_h, [src, 16]
+	stp	B_l, B_h, [dst, 32]
+	ldp	B_l, B_h, [src, 32]
+	stp	C_l, C_h, [dst, 48]
+	ldp	C_l, C_h, [src, 48]
+	stp	D_l, D_h, [dst, 64]!
+	ldp	D_l, D_h, [src, 64]!
+	subs	count, count, 64
+	b.hi	1b
+
+	/* Write the last full set of 64 bytes.  The remainder is at most 64
+	   bytes, so it is safe to always copy 64 bytes from the end even if
+	   there is just 1 byte left.  */
+2:
+	ldp	E_l, E_h, [srcend, -64]
+	stp	A_l, A_h, [dst, 16]
+	ldp	A_l, A_h, [srcend, -48]
+	stp	B_l, B_h, [dst, 32]
+	ldp	B_l, B_h, [srcend, -32]
+	stp	C_l, C_h, [dst, 48]
+	ldp	C_l, C_h, [srcend, -16]
+	stp	D_l, D_h, [dst, 64]
+	stp	E_l, E_h, [dstend, -64]
+	stp	A_l, A_h, [dstend, -48]
+	stp	B_l, B_h, [dstend, -32]
+	stp	C_l, C_h, [dstend, -16]
+	ret
+
+	.size	memcpy, . - memcpy
 #endif
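Finally, the long-copy path's destination-alignment trick, reduced to C. This is a sketch under two stated simplifications: it assumes non-overlapping buffers (overlapping cases go through memmove), and it drops the software pipelining where the loop loads one iteration ahead of its stores. Copy 16 bytes unaligned, round the destination up to the next 16-byte boundary, stream aligned 64-byte blocks, then finish with one fixed-size 64-byte copy from the end, which may rewrite up to 63 bytes the loop already wrote.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Sketch of L(copy_long) for count > 96, non-overlapping buffers.  */
static void copy_long (unsigned char *dstin, const unsigned char *src,
                       size_t count)
{
  unsigned char *dstend = dstin + count;
  const unsigned char *srcend = src + count;

  memcpy (dstin, src, 16);                      /* unaligned head */

  size_t skew = 16 - ((uintptr_t) dstin & 15);  /* 1..16, covered by head */
  unsigned char *dst = dstin + skew;            /* 16-byte aligned */
  src += skew;
  count -= skew;

  while (count > 64) {                          /* aligned 64-byte blocks */
    memcpy (dst, src, 64);
    dst += 64;
    src += 64;
    count -= 64;
  }
  memcpy (dstend - 64, srcend - 64, 64);        /* tail: last 64 bytes */
}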