From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: 
Received: (qmail 97595 invoked by alias); 19 Dec 2019 19:44:49 -0000
Mailing-List: contact glibc-cvs-help@sourceware.org; run by ezmlm
Precedence: bulk
List-Id: 
List-Archive: 
List-Post: 
List-Help: 
Sender: glibc-cvs-owner@sourceware.org
List-Subscribe: 
Received: (qmail 97459 invoked by uid 9943); 19 Dec 2019 19:44:49 -0000
Date: Thu, 19 Dec 2019 19:44:00 -0000
Message-ID: <20191219194449.97457.qmail@sourceware.org>
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
From: Adhemerval Zanella 
To: glibc-cvs@sourceware.org
Subject: [glibc] aarch64: Optimized implementation of strnlen
X-Act-Checkin: glibc
X-Git-Author: Xuelei Zhang 
X-Git-Refname: refs/heads/master
X-Git-Oldrev: 0237b61526e716fa9597f521643908a4fda3b46a
X-Git-Newrev: 2911cb68ed3d6c515ad1979237e74e1fefab3674
X-SW-Source: 2019-q4/txt/msg00638.txt.bz2

https://sourceware.org/git/gitweb.cgi?p=glibc.git;h=2911cb68ed3d6c515ad1979237e74e1fefab3674

commit 2911cb68ed3d6c515ad1979237e74e1fefab3674
Author: Xuelei Zhang 
Date:   Thu Dec 19 13:49:46 2019 +0000

    aarch64: Optimized implementation of strnlen
    
    Optimize the strnlen implementation by using vector operations and
    loop unrolling in the main loop.  Compared to aarch64/strnlen.S, it
    reduces the latency of cases in bench-strnlen by 11%~24% when the
    length of src is greater than 64 bytes, with gains throughout the
    benchmark.
    
    Checked on aarch64-linux-gnu.
    
    Reviewed-by: Wilco Dijkstra

Diff:
---
 sysdeps/aarch64/strnlen.S | 52 ++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 51 insertions(+), 1 deletion(-)

diff --git a/sysdeps/aarch64/strnlen.S b/sysdeps/aarch64/strnlen.S
index 70283c8..a57753b 100644
--- a/sysdeps/aarch64/strnlen.S
+++ b/sysdeps/aarch64/strnlen.S
@@ -45,6 +45,11 @@
 #define pos		x13
 #define limit_wd	x14
 
+#define dataq		q2
+#define datav		v2
+#define datab2		b3
+#define dataq2		q3
+#define datav2		v3
 #define REP8_01 0x0101010101010101
 #define REP8_7f 0x7f7f7f7f7f7f7f7f
 #define REP8_80 0x8080808080808080
@@ -71,7 +76,7 @@ ENTRY_ALIGN_AND_PAD (__strnlen, 6, 9)
 	   cycle, as we get much better parallelism out of the operations.  */
 
 	/* Start of critical section -- keep to one 64Byte cache line.  */
-L(loop):
+
 	ldp	data1, data2, [src], #16
 L(realigned):
 	sub	tmp1, data1, zeroones
@@ -119,6 +124,51 @@ L(nul_in_data2):
 	csel	len, len, limit, ls	/* Return the lower value.  */
 	RET
 
+L(loop):
+	ldr	dataq, [src], #16
+	uminv	datab2, datav.16b
+	mov	tmp1, datav2.d[0]
+	subs	limit_wd, limit_wd, #1
+	ccmp	tmp1, #0, #4, pl	/* NZCV = 0000  */
+	b.eq	L(loop_end)
+	ldr	dataq, [src], #16
+	uminv	datab2, datav.16b
+	mov	tmp1, datav2.d[0]
+	subs	limit_wd, limit_wd, #1
+	ccmp	tmp1, #0, #4, pl	/* NZCV = 0000  */
+	b.ne	L(loop)
+L(loop_end):
+	/* End of critical section -- keep to one 64Byte cache line.  */
+
+	cbnz	tmp1, L(hit_limit)	/* No null in final Qword.  */
+
+	/* We know there's a null in the final Qword.  The easiest thing
+	   to do now is work out the length of the string and return
+	   MIN (len, limit).  */
+
+#ifdef __AARCH64EB__
+	rev64	datav.16b, datav.16b
+#endif
+	/* Set the NULL byte as 0xff and the rest as 0x00, move the data into a
+	   pair of scalars and then compute the length from the earliest NULL
+	   byte.  */
+
+	cmeq	datav.16b, datav.16b, #0
+	mov	data1, datav.d[0]
+	mov	data2, datav.d[1]
+	cmp	data1, 0
+	csel	data1, data1, data2, ne
+	sub	len, src, srcin
+	sub	len, len, #16
+	rev	data1, data1
+	add	tmp2, len, 8
+	clz	tmp1, data1
+	csel	len, len, tmp2, ne
+	add	len, len, tmp1, lsr 3
+	cmp	len, limit
+	csel	len, len, limit, ls	/* Return the lower value.  */
+	RET
+
 L(misaligned):
 	/* Deal with a partial first word.
 	   We're doing two things in parallel here;
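
For readers who want the main-loop idea in a higher-level form, here is a
minimal C sketch using NEON intrinsics.  It is illustrative only, not the
glibc code: the function name strnlen_neon_sketch is made up, little-endian
byte order is assumed (the assembly handles big-endian with rev64), and the
sketch assumes src is 16-byte aligned with at least limit readable bytes,
sidestepping the real L(misaligned) path.  vminvq_u8 is the intrinsic form
of UMINV: the unsigned minimum across all 16 lanes is zero exactly when the
chunk contains a NUL byte.

#include <arm_neon.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative sketch only -- not the glibc implementation.  Assumes
   SRC is 16-byte aligned, little-endian, and that at least LIMIT bytes
   are readable.  */
static size_t
strnlen_neon_sketch (const char *src, size_t limit)
{
  const uint8_t *p = (const uint8_t *) src;
  size_t done = 0;

  /* Main loop: 16 bytes per iteration.  vminvq_u8 (UMINV) returns the
     minimum across all 16 lanes, which is zero iff a NUL is present.  */
  while (done + 16 <= limit)
    {
      uint8x16_t data = vld1q_u8 (p + done);
      if (vminvq_u8 (data) == 0)
	{
	  /* CMEQ against zero sets each NUL lane to 0xff; extract the
	     two 64-bit halves and count trailing zero bits to locate
	     the first NUL byte (little-endian lane order).  */
	  uint8x16_t cmp = vceqzq_u8 (data);
	  uint64_t lo = vgetq_lane_u64 (vreinterpretq_u64_u8 (cmp), 0);
	  uint64_t hi = vgetq_lane_u64 (vreinterpretq_u64_u8 (cmp), 1);
	  size_t off = lo != 0
		       ? (size_t) __builtin_ctzll (lo) >> 3
		       : 8 + ((size_t) __builtin_ctzll (hi) >> 3);
	  size_t len = done + off;
	  /* Mirrors the asm's final MIN (len, limit).  */
	  return len < limit ? len : limit;
	}
      done += 16;
    }

  /* Tail: plain byte loop for the final partial chunk.  */
  while (done < limit && p[done] != 0)
    done++;
  return done;
}

The hand-written assembly goes further than this sketch: the loop body is
unrolled twice so the critical section fits in a single 64-byte cache line,
and SUBS plus CCMP fold the limit countdown and the NUL test into one set
of flags, so a single conditional branch ends the loop for either reason.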