	* libc/machine/arm/Makefile.am (lib_a_SOURCES): Add memchr-stub.c,
	memchr.S.
	* libc/machine/arm/arm_asm.h: Add ifdef to allow it to be included
	in .S files.
	* libc/machine/arm/memchr-stub.c: New file - just selects what to
	compile.
	* libc/machine/arm/memchr.S: New file - ARMv6t2/v7 version.

	* libc/machine/arm/Makefile.am (lib_a_SOURCES): Add strlen-armv7.S.
	* libc/machine/arm/strlen-armv7.S: New file.
	* libc/machine/arm/strlen.c: Add ifdef around optimised code so it
	isn't used for v7 or 6t2.

	* libc/machine/arm/Makefile.in: Regenerate.
Nick Clifton 2011-10-18 11:35:17 +00:00
parent 53609fd238
commit d6f6573a5f
8 changed files with 399 additions and 3 deletions

newlib/ChangeLog

@@ -1,3 +1,20 @@
2011-10-18  Dr David Alan Gilbert  <david.gilbert@linaro.org>

	* libc/machine/arm/Makefile.am (lib_a_SOURCES): Add memchr-stub.c,
	memchr.S.
	* libc/machine/arm/arm_asm.h: Add ifdef to allow it to be included
	in .S files.
	* libc/machine/arm/memchr-stub.c: New file - just selects what to
	compile.
	* libc/machine/arm/memchr.S: New file - ARMv6t2/v7 version.

	* libc/machine/arm/Makefile.am (lib_a_SOURCES): Add strlen-armv7.S.
	* libc/machine/arm/strlen-armv7.S: New file.
	* libc/machine/arm/strlen.c: Add ifdef around optimised code so it
	isn't used for v7 or 6t2.

	* libc/machine/arm/Makefile.in: Regenerate.

2011-10-11  Steven Abner  <pheonix@zoomtown.com>

	* libc/time/mktm_r.c: (__tzcalc_limits) Fix Julian day calculation.

newlib/libc/machine/arm/Makefile.am

@@ -8,7 +8,9 @@ AM_CCASFLAGS = $(INCLUDES)
noinst_LIBRARIES = lib.a
lib_a_SOURCES = setjmp.S access.c strlen.c strcmp.c strcpy.c memcpy.S memcpy-stub.c
lib_a_SOURCES = setjmp.S access.c strlen.c strcmp.c strcpy.c \
memcpy.S memcpy-stub.c memchr-stub.c memchr.S \
strlen.c strlen-armv7.S
lib_a_CCASFLAGS=$(AM_CCASFLAGS)
lib_a_CFLAGS = $(AM_CFLAGS)

newlib/libc/machine/arm/Makefile.in

@@ -55,7 +55,9 @@ lib_a_LIBADD =
am_lib_a_OBJECTS = lib_a-setjmp.$(OBJEXT) lib_a-access.$(OBJEXT) \
lib_a-strlen.$(OBJEXT) lib_a-strcmp.$(OBJEXT) \
lib_a-strcpy.$(OBJEXT) lib_a-memcpy.$(OBJEXT) \
lib_a-memcpy-stub.$(OBJEXT)
lib_a-memcpy-stub.$(OBJEXT) lib_a-memchr-stub.$(OBJEXT) \
lib_a-memchr.$(OBJEXT) lib_a-strlen.$(OBJEXT) \
lib_a-strlen-armv7.$(OBJEXT)
lib_a_OBJECTS = $(am_lib_a_OBJECTS)
DEFAULT_INCLUDES = -I.@am__isrc@
depcomp =
@@ -176,7 +178,10 @@ AUTOMAKE_OPTIONS = cygnus
INCLUDES = $(NEWLIB_CFLAGS) $(CROSS_CFLAGS) $(TARGET_CFLAGS)
AM_CCASFLAGS = $(INCLUDES)
noinst_LIBRARIES = lib.a
lib_a_SOURCES = setjmp.S access.c strlen.c strcmp.c strcpy.c memcpy.S memcpy-stub.c
lib_a_SOURCES = setjmp.S access.c strlen.c strcmp.c strcpy.c \
memcpy.S memcpy-stub.c memchr-stub.c memchr.S \
strlen.c strlen-armv7.S
lib_a_CCASFLAGS = $(AM_CCASFLAGS)
lib_a_CFLAGS = $(AM_CFLAGS)
ACLOCAL_AMFLAGS = -I ../../.. -I ../../../..
@@ -251,6 +256,18 @@ lib_a-memcpy.o: memcpy.S
lib_a-memcpy.obj: memcpy.S
$(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(lib_a_CCASFLAGS) $(CCASFLAGS) -c -o lib_a-memcpy.obj `if test -f 'memcpy.S'; then $(CYGPATH_W) 'memcpy.S'; else $(CYGPATH_W) '$(srcdir)/memcpy.S'; fi`
lib_a-memchr.o: memchr.S
$(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(lib_a_CCASFLAGS) $(CCASFLAGS) -c -o lib_a-memchr.o `test -f 'memchr.S' || echo '$(srcdir)/'`memchr.S
lib_a-memchr.obj: memchr.S
$(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(lib_a_CCASFLAGS) $(CCASFLAGS) -c -o lib_a-memchr.obj `if test -f 'memchr.S'; then $(CYGPATH_W) 'memchr.S'; else $(CYGPATH_W) '$(srcdir)/memchr.S'; fi`
lib_a-strlen-armv7.o: strlen-armv7.S
$(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(lib_a_CCASFLAGS) $(CCASFLAGS) -c -o lib_a-strlen-armv7.o `test -f 'strlen-armv7.S' || echo '$(srcdir)/'`strlen-armv7.S
lib_a-strlen-armv7.obj: strlen-armv7.S
$(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(lib_a_CCASFLAGS) $(CCASFLAGS) -c -o lib_a-strlen-armv7.obj `if test -f 'strlen-armv7.S'; then $(CYGPATH_W) 'strlen-armv7.S'; else $(CYGPATH_W) '$(srcdir)/strlen-armv7.S'; fi`
.c.o:
$(COMPILE) -c $<
@@ -287,6 +304,12 @@ lib_a-memcpy-stub.o: memcpy-stub.c
lib_a-memcpy-stub.obj: memcpy-stub.c
$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(lib_a_CFLAGS) $(CFLAGS) -c -o lib_a-memcpy-stub.obj `if test -f 'memcpy-stub.c'; then $(CYGPATH_W) 'memcpy-stub.c'; else $(CYGPATH_W) '$(srcdir)/memcpy-stub.c'; fi`
lib_a-memchr-stub.o: memchr-stub.c
$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(lib_a_CFLAGS) $(CFLAGS) -c -o lib_a-memchr-stub.o `test -f 'memchr-stub.c' || echo '$(srcdir)/'`memchr-stub.c
lib_a-memchr-stub.obj: memchr-stub.c
$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(lib_a_CFLAGS) $(CFLAGS) -c -o lib_a-memchr-stub.obj `if test -f 'memchr-stub.c'; then $(CYGPATH_W) 'memchr-stub.c'; else $(CYGPATH_W) '$(srcdir)/memchr-stub.c'; fi`
ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
unique=`for i in $$list; do \

newlib/libc/machine/arm/arm_asm.h

@@ -62,7 +62,22 @@
/* Now some macros for common instruction sequences. */
#ifdef __ASSEMBLER__
.macro RETURN cond=
#if defined (_ISA_ARM_4T) || defined (_ISA_THUMB_1)
bx\cond lr
#else
mov\cond pc, lr
#endif
.endm
.macro optpld base, offset=#0
#if defined (_ISA_ARM_7)
pld [\base, \offset]
#endif
.endm
#else
asm(".macro RETURN cond=\n\t"
#if defined (_ISA_ARM_4T) || defined (_ISA_THUMB_1)
"bx\\cond lr\n\t"
@ -78,5 +93,6 @@ asm(".macro optpld base, offset=#0\n\t"
#endif
".endm"
);
#endif
#endif /* ARM_ASM__H */
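
A side note, not part of the commit: the new __ASSEMBLER__ guard means a .S file such as memchr.S gets RETURN and optpld as real assembler macros, while a C file still gets them via the asm() string form, which is how the naked strlen in strlen.c can end with "RETURN". A minimal sketch of the C-side usage follows; the function name is invented for illustration.

#include "arm_asm.h"

/* Hypothetical naked function using the string form of the RETURN macro,
   in the same style as the existing strlen.c.  */
int __attribute__((naked))
return_zero (void)
{
  asm ("mov r0, #0\n\t"
       "RETURN");
}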

newlib/libc/machine/arm/memchr-stub.c

@@ -0,0 +1,42 @@
/* Copyright (c) 2010-2011, Linaro Limited
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Linaro Limited nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "arm_asm.h"
/* Note: This ifdef MUST match the one in memchr.S */
#if defined(_ISA_ARM_7) || defined(__ARM_ARCH_6T2__)
/* Do nothing - the memchr.S will get used */
#else
/* For an older CPU we just fall back to the .c code */
#include "../../string/memchr.c"
#endif

newlib/libc/machine/arm/memchr.S

@@ -0,0 +1,166 @@
/* Copyright (c) 2010-2011, Linaro Limited
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Linaro Limited nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Written by Dave Gilbert <david.gilbert@linaro.org>
This memchr routine is optimised on a Cortex-A9 and should work on
all ARMv7 processors. It has a fast path for short sizes, and has
an optimised path for large data sets; the worst case is finding the
match early in a large data set. */
@ 2011-02-07 david.gilbert@linaro.org
@ Extracted from local git a5b438d861
@ 2011-07-14 david.gilbert@linaro.org
@ Import endianness fix from local git ea786f1b
@ 2011-10-11 david.gilbert@linaro.org
@ Import from cortex-strings bzr rev 63
@ Flip to ldrd (as suggested by Greta Yorsh)
@ Make conditional on CPU type
@ tidy
.syntax unified
.arch armv7-a
#include "arm_asm.h"
@ NOTE: This ifdef MUST match the one in memchr-stub.c
#if defined(_ISA_ARM_7) || defined(__ARM_ARCH_6T2__)
@ this lets us check a flag in a 00/ff byte easily in either endianness
#ifdef __ARMEB__
#define CHARTSTMASK(c) 1<<(31-(c*8))
#else
#define CHARTSTMASK(c) 1<<(c*8)
#endif
.text
.thumb
@ ---------------------------------------------------------------------------
.thumb_func
.align 2
.p2align 4,,15
.global memchr
.type memchr,%function
memchr:
@ r0 = start of memory to scan
@ r1 = character to look for
@ r2 = length
@ returns r0 = pointer to character or NULL if not found
and r1,r1,#0xff @ Don't trust the caller to pass a char
cmp r2,#16 @ If short don't bother with anything clever
blt 20f
tst r0, #7 @ If it's already aligned skip the next bit
beq 10f
@ Work up to an aligned point
5:
ldrb r3, [r0],#1
subs r2, r2, #1
cmp r3, r1
beq 50f @ If it matches exit found
tst r0, #7
cbz r2, 40f @ If we run off the end, exit not found
bne 5b @ If not aligned yet then do next byte
10:
@ We are aligned, we know we have at least 8 bytes to work with
push {r4,r5,r6,r7}
orr r1, r1, r1, lsl #8 @ expand the match word across all bytes
orr r1, r1, r1, lsl #16
bic r4, r2, #7 @ Number of double words to work with * 8
mvns r7, #0 @ all F's
movs r3, #0
15:
ldrd r5,r6,[r0],#8
subs r4, r4, #8
eor r5,r5, r1 @ r5,r6 have 00's where bytes match the target
eor r6,r6, r1
uadd8 r5, r5, r7 @ Par add 0xff - sets GE bits for bytes!=0
sel r5, r3, r7 @ bytes are 00 for none-00 bytes,
@ or ff for 00 bytes - NOTE INVERSION
uadd8 r6, r6, r7 @ Par add 0xff - sets GE bits for bytes!=0
sel r6, r5, r7 @ chained....bytes are 00 for none-00 bytes
@ or ff for 00 bytes - NOTE INVERSION
cbnz r6, 60f
bne 15b @ (Flags from the subs above)
pop {r4,r5,r6,r7}
and r1,r1,#0xff @ r1 back to a single character
and r2,r2,#7 @ Leave the count remaining as the number
@ after the double words have been done
20:
cbz r2, 40f @ 0 length or hit the end already then not found
21: @ Post aligned section, or just a short call
ldrb r3,[r0],#1
subs r2,r2,#1
eor r3,r3,r1 @ r3 = 0 if match - doesn't break flags from sub
cbz r3, 50f
bne 21b @ on r2 flags
40:
movs r0,#0 @ not found
bx lr
50:
subs r0,r0,#1 @ found
bx lr
60: @ We're here because the fast path found a hit
@ now we have to track down exactly which word it was
@ r0 points to the start of the double word after the one tested
@ r5 has the 00/ff pattern for the first word, r6 has the chained value
cmp r5, #0
itte eq
moveq r5, r6 @ the end is in the 2nd word
subeq r0,r0,#3 @ Points to 2nd byte of 2nd word
subne r0,r0,#7 @ or 2nd byte of 1st word
@ r0 currently points to the 2nd byte of the word containing the hit
tst r5, # CHARTSTMASK(0) @ 1st character
bne 61f
adds r0,r0,#1
tst r5, # CHARTSTMASK(1) @ 2nd character
ittt eq
addeq r0,r0,#1
tsteq r5, # (3<<15) @ 2nd & 3rd character
@ If not the 3rd must be the last one
addeq r0,r0,#1
61:
pop {r4,r5,r6,r7}
subs r0,r0,#1
bx lr
#endif
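
An aside for readers, not part of the commit: the uadd8/sel pairs in the loop above implement a per-byte test that is easier to follow in portable C. A minimal sketch of the equivalent computation for one word is below; the helper name is made up, and the explicit loop stands in for what the SIMD instructions do in a single step.

#include <stdint.h>

/* Sketch of what "eor; uadd8; sel" computes for one 32-bit word:
   0xff in every byte lane where the word matches the search character,
   0x00 everywhere else (note the inversion, as in the assembly comments).  */
static uint32_t
match_mask (uint32_t word, uint8_t c)
{
  uint32_t rep = c * 0x01010101u;     /* search char copied into all 4 lanes */
  uint32_t x = word ^ rep;            /* 0x00 bytes exactly where word == c  */
  uint32_t mask = 0;
  for (int lane = 0; lane < 4; lane++)
    if (((x >> (lane * 8)) & 0xff) == 0)
      mask |= 0xffu << (lane * 8);
  return mask;
}

The second sel in the assembly chains the two loaded words so a single cbnz can test both; CHARTSTMASK(c) then checks bit c*8 of the mask (bit 31-(c*8) on big-endian) to locate the first matching byte.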

newlib/libc/machine/arm/strlen-armv7.S

@@ -0,0 +1,127 @@
/* Copyright (c) 2010-2011, Linaro Limited
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Linaro Limited nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Written by Dave Gilbert <david.gilbert@linaro.org>
This strlen routine is optimised on a Cortex-A9 and should work on
all ARMv7 processors. This routine is reasonably fast for short
strings, but is probably slower than a simple implementation if all
your strings are very short */
@ 2011-02-08 david.gilbert@linaro.org
@ Extracted from local git 6848613a
@ 2011-10-13 david.gilbert@linaro.org
@ Extracted from cortex-strings bzr rev 63
@ Integrate to newlib, flip to ldrd
@ Pull in Endian macro from my memchr
#include "arm_asm.h"
@ NOTE: This ifdef MUST match the ones in arm/strlen.c
@ We fallback to the one in arm/strlen.c for size optimised or
@ for older arch's
#if defined(_ISA_ARM_7) || defined(__ARM_ARCH_6T2__) && \
!(defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED) || \
(defined (__thumb__) && !defined (__thumb2__)))
@ this lets us check a flag in a 00/ff byte easily in either endianness
#ifdef __ARMEB__
#define CHARTSTMASK(c) 1<<(31-(c*8))
#else
#define CHARTSTMASK(c) 1<<(c*8)
#endif
@------------------------------------------------------------------------------
.syntax unified
.arch armv7-a
.thumb_func
.align 2
.p2align 4,,15
.global strlen
.type strlen,%function
strlen:
@ r0 = string
@ returns count of bytes in string not including terminator
mov r1, r0
push { r4,r6 }
mvns r6, #0 @ all F
movs r4, #0
tst r0, #7
beq 2f
1:
ldrb r2, [r1], #1
tst r1, #7 @ Hit alignment yet?
cbz r2, 10f @ Exit if we found the 0
bne 1b
@ So we're now aligned
2:
ldrd r2,r3,[r1],#8
uadd8 r2, r2, r6 @ Par add 0xff - sets the GE bits for bytes!=0
sel r2, r4, r6 @ bytes are 00 for none-00 bytes,
@ or ff for 00 bytes - NOTE INVERSION
uadd8 r3, r3, r6 @ Par add 0xff - sets the GE bits for bytes!=0
sel r3, r2, r6 @ chained...bytes are 00 for none-00 bytes,
@ or ff for 00 bytes - NOTE INVERSION
cmp r3, #0
beq 2b
strlenendtmp:
@ One (or more) of the bytes we loaded was 0 - but which one?
@ r2 has the mask corresponding to the first loaded word
@ r3 has a combined mask of the two words - but if r2 was all-non 0
@ then it's just the 2nd words
cmp r2, #0
itte eq
moveq r2, r3 @ the end is in the 2nd word
subeq r1,r1,#3
subne r1,r1,#7
@ r1 currently points to the 2nd byte of the word containing the 0
tst r2, # CHARTSTMASK(0) @ 1st character
bne 10f
adds r1,r1,#1
tst r2, # CHARTSTMASK(1) @ 2nd character
ittt eq
addeq r1,r1,#1
tsteq r2, # (3<<15) @ 2nd & 3rd character
@ If not the 3rd must be the last one
addeq r1,r1,#1
10:
@ r0 is still at the beginning, r1 is pointing 1 byte after the nul
sub r0, r1, r0
subs r0, r0, #1
pop { r4, r6 }
bx lr
#endif
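
For orientation only, not part of the commit, the overall strategy above can be modelled in portable C; the zero-byte test here stands in for the uadd8/sel sequence, the word loads rely on 8-byte alignment just as the ldrd does, and all names are illustrative.

#include <stddef.h>
#include <stdint.h>

/* Non-zero iff the 32-bit word contains a 0x00 byte - a portable stand-in
   for the uadd8/sel test used in the assembly.  */
static uint32_t
has_zero_byte (uint32_t x)
{
  return (x - 0x01010101u) & ~x & 0x80808080u;
}

static size_t
strlen_model (const char *str)
{
  const char *p = str;

  /* Work byte-by-byte up to an 8-byte aligned point.  */
  while (((uintptr_t) p & 7) != 0)
    if (*p++ == '\0')
      return (size_t) (p - str - 1);

  /* Two aligned words per iteration, mirroring the ldrd in the loop above.  */
  while (!has_zero_byte (((const uint32_t *) p)[0])
         && !has_zero_byte (((const uint32_t *) p)[1]))
    p += 8;

  /* The terminator is somewhere in these 8 bytes; finish byte-by-byte.  */
  while (*p != '\0')
    p++;
  return (size_t) (p - str);
}

The fix-up code after the loop in the assembly instead back-tracks with CHARTSTMASK to avoid re-reading memory; the final byte loop here is just the simplest way to express the same result.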

newlib/libc/machine/arm/strlen.c

@@ -60,6 +60,8 @@ strlen (const char* str)
}
#else
#if !(defined(_ISA_ARM_7) || defined(__ARM_ARCH_6T2__))
size_t __attribute__((naked))
strlen (const char* str)
{
@@ -178,3 +180,4 @@ strlen (const char* str)
"RETURN");
}
#endif
#endif
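
Summing up, not part of the commit: the guards above dispatch between the new assembler routines and the existing fallbacks, and as the comments stress they must stay in sync across memchr-stub.c, memchr.S, strlen.c and strlen-armv7.S (the strlen-armv7.S guard additionally rules out size-optimised and Thumb-1-only builds). A simplified view:

#if defined (_ISA_ARM_7) || defined (__ARM_ARCH_6T2__)
  /* memchr is assembled from memchr.S and strlen from strlen-armv7.S;
     memchr-stub.c compiles to nothing and the asm strlen in strlen.c is
     compiled out.  */
#else
  /* memchr-stub.c pulls in the generic ../../string/memchr.c and strlen.c
     continues to provide strlen as before.  */
#endif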