diff options
author | Matthew Gretton-Dann <matthew.gretton-dann@linaro.org> | 2013-01-16 20:57:30 +0000 |
---|---|---|
committer | Matthew Gretton-Dann <matthew.gretton-dann@linaro.org> | 2013-01-16 20:57:30 +0000 |
commit | 3f92e22221aff7771cf4f6e257b8aea30fa68424 (patch) | |
tree | 0fd904f53592515319189856357d54ac5aca1fca | |
parent | 58a5bb3790167e2599888e7a38139f3868ac6240 (diff) | |
parent | 9426e0d845bdc14aac7509cd7859e39f96d4de7f (diff) |
Merge two bug fixes: strnlen and strncmp.
-rw-r--r-- | src/aarch64/strncmp.S | 31 | ||||
-rw-r--r-- | src/aarch64/strnlen.S | 39 |
2 files changed, 47 insertions, 23 deletions
diff --git a/src/aarch64/strncmp.S b/src/aarch64/strncmp.S index 16c51ee..2136787 100644 --- a/src/aarch64/strncmp.S +++ b/src/aarch64/strncmp.S @@ -77,8 +77,10 @@ def_fn strncmp b.ne .Lmisaligned8 ands tmp1, src1, #7 b.ne .Lmutual_align - add limit_wd, limit, #7 - lsr limit_wd, limit_wd, #3 + /* Calculate the number of full and partial words -1. */ + sub limit_wd, limit, #1 /* limit != 0, so no underflow. */ + lsr limit_wd, limit_wd, #3 /* Convert to Dwords. */ + /* NUL detection works on the principle that (X - 1) & (~X) & 0x80 (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and can be done in parallel across the entire word. */ @@ -91,14 +93,14 @@ def_fn strncmp sub tmp1, data1, zeroones orr tmp2, data1, #REP8_7f eor diff, data1, data2 /* Non-zero if differences found. */ - csinv endloop, diff, xzr, ne /* Last Dword or differences. */ + csinv endloop, diff, xzr, pl /* Last Dword or differences. */ bics has_nul, tmp1, tmp2 /* Non-zero if NUL terminator. */ ccmp endloop, #0, #0, eq b.eq .Lloop_aligned /* End of performance-critical section -- one 64B cache line. */ /* Not reached the limit, must have found the end or a diff. */ - cbnz limit_wd, .Lnot_limit + tbz limit_wd, #63, .Lnot_limit /* Limit % 8 == 0 => all bytes significant. */ ands limit, limit, #7 @@ -173,26 +175,31 @@ def_fn strncmp .Lmutual_align: /* Sources are mutually aligned, but are not currently at an alignment boundary. Round down the addresses and then mask off - the bytes that precede the start point. */ + the bytes that precede the start point. + We also need to adjust the limit calculations, but without + overflowing if the limit is near ULONG_MAX. */ bic src1, src1, #7 bic src2, src2, #7 - add limit, limit, tmp1 /* Adjust the limit for the extra. */ - lsl tmp1, tmp1, #3 /* Bytes beyond alignment -> bits. */ ldr data1, [src1], #8 - neg tmp1, tmp1 /* Bits to alignment -64. */ + neg tmp3, tmp1, lsl #3 /* 64 - bits(bytes beyond align). 
*/ ldr data2, [src2], #8 mov tmp2, #~0 + sub limit_wd, limit, #1 /* limit != 0, so no underflow. */ #ifdef __AARCH64EB__ /* Big-endian. Early bytes are at MSB. */ - lsl tmp2, tmp2, tmp1 /* Shift (tmp1 & 63). */ + lsl tmp2, tmp2, tmp3 /* Shift (tmp1 & 63). */ #else /* Little-endian. Early bytes are at LSB. */ - lsr tmp2, tmp2, tmp1 /* Shift (tmp1 & 63). */ + lsr tmp2, tmp2, tmp3 /* Shift (tmp1 & 63). */ #endif - add limit_wd, limit, #7 + and tmp3, limit_wd, #7 + lsr limit_wd, limit_wd, #3 + /* Adjust the limit. Only low 3 bits used, so overflow irrelevant. */ + add limit, limit, tmp1 + add tmp3, tmp3, tmp1 orr data1, data1, tmp2 orr data2, data2, tmp2 - lsr limit_wd, limit_wd, #3 + add limit_wd, limit_wd, tmp3, lsr #3 b .Lstart_realigned .Lret0: diff --git a/src/aarch64/strnlen.S b/src/aarch64/strnlen.S index 058fae2..c0e6098 100644 --- a/src/aarch64/strnlen.S +++ b/src/aarch64/strnlen.S @@ -81,8 +81,10 @@ def_fn strnlen bic src, srcin, #15 ands tmp1, srcin, #15 b.ne .Lmisaligned - add limit_wd, limit, #15 - lsr limit_wd, limit_wd, #4 + /* Calculate the number of full and partial words -1. */ + sub limit_wd, limit, #1 /* Limit != 0, so no underflow. */ + lsr limit_wd, limit_wd, #4 /* Convert to Qwords. */ + /* NUL detection works on the principle that (X - 1) & (~X) & 0x80 (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and can be done in parallel across the entire word. */ @@ -103,7 +105,7 @@ def_fn strnlen bic has_nul2, tmp3, tmp4 subs limit_wd, limit_wd, #1 orr tmp1, has_nul1, has_nul2 - ccmp tmp1, #0, #0, ne /* NZCV = 0000 */ + ccmp tmp1, #0, #0, pl /* NZCV = 0000 */ b.eq .Lloop /* End of critical section -- keep to one 64Byte cache line. */ @@ -141,23 +143,38 @@ def_fn strnlen ret .Lmisaligned: - add tmp3, limit, tmp1 + /* Deal with a partial first word. 
+ We're doing two things in parallel here; + 1) Calculate the number of words (but avoiding overflow if + limit is near ULONG_MAX) - to do this we need to work out + limit + tmp1 - 1 as a 65-bit value before shifting it; + 2) Load and mask the initial data words - we force the bytes + before the ones we are interested in to 0xff - this ensures + early bytes will not hit any zero detection. */ + sub limit_wd, limit, #1 + neg tmp4, tmp1 cmp tmp1, #8 - neg tmp1, tmp1 - ldp data1, data2, [src], #16 - add limit_wd, tmp3, #15 - lsl tmp1, tmp1, #3 /* Bytes beyond alignment -> bits. */ - mov tmp2, #~0 + + and tmp3, limit_wd, #15 lsr limit_wd, limit_wd, #4 + mov tmp2, #~0 + + ldp data1, data2, [src], #16 + lsl tmp4, tmp4, #3 /* Bytes beyond alignment -> bits. */ + add tmp3, tmp3, tmp1 + #ifdef __AARCH64EB__ /* Big-endian. Early bytes are at MSB. */ - lsl tmp2, tmp2, tmp1 /* Shift (tmp1 & 63). */ + lsl tmp2, tmp2, tmp4 /* Shift (tmp1 & 63). */ #else /* Little-endian. Early bytes are at LSB. */ - lsr tmp2, tmp2, tmp1 /* Shift (tmp1 & 63). */ + lsr tmp2, tmp2, tmp4 /* Shift (tmp1 & 63). */ #endif + add limit_wd, limit_wd, tmp3, lsr #4 + orr data1, data1, tmp2 orr data2a, data2, tmp2 + csinv data1, data1, xzr, le csel data2, data2, data2a, le b .Lrealigned |