From: Rajalakshmi Srinivasaraghavan
To: libc-alpha@sourceware.org
Cc: Rajalakshmi Srinivasaraghavan
Subject: [PATCH] powerpc: Use aligned stores in memset
Date: Fri, 18 Aug 2017 05:13:00 -0000
Message-Id: <1503033107-20047-1-git-send-email-raji@linux.vnet.ibm.com>

The powerpc hardware does not allow unaligned accesses to non-cacheable
memory.  This patch avoids misaligned stores in the small-size code paths
of memset by using byte and halfword stores that match the destination
alignment.  Tested on powerpc64 and powerpc64le.

2017-08-17  Rajalakshmi Srinivasaraghavan

	* sysdeps/powerpc/powerpc64/power8/memset.S: Store byte by byte
	for unaligned inputs if size is less than 8.
---
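Note (not part of the patch): a minimal user-space sketch of the kind of
check used while testing.  It sweeps memset over every destination
alignment and every length up to 8 bytes and verifies the bytes written.
The fill values 0xaa/0x5b are arbitrary, and this cannot reproduce the
non-cacheable-memory trap itself; it only exercises the new small-size
paths functionally.

#include <stdio.h>
#include <string.h>

int
main (void)
{
  unsigned char buf[32];

  for (size_t align = 0; align < 8; align++)
    for (size_t len = 0; len <= 8; len++)
      {
	/* Poison the buffer, then set LEN bytes at offset ALIGN.  */
	memset (buf, 0xaa, sizeof buf);
	memset (buf + align, 0x5b, len);

	for (size_t i = 0; i < sizeof buf; i++)
	  {
	    unsigned char expected
	      = (i >= align && i < align + len) ? 0x5b : 0xaa;
	    if (buf[i] != expected)
	      {
		printf ("mismatch: align=%zu len=%zu offset=%zu\n",
			align, len, i);
		return 1;
	      }
	  }
      }
  puts ("PASS");
  return 0;
}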
 sysdeps/powerpc/powerpc64/power8/memset.S | 68 ++++++++++++++++++++++++++++++-
 1 file changed, 66 insertions(+), 2 deletions(-)

diff --git a/sysdeps/powerpc/powerpc64/power8/memset.S b/sysdeps/powerpc/powerpc64/power8/memset.S
index 7ad3bb1b00..504bab0841 100644
--- a/sysdeps/powerpc/powerpc64/power8/memset.S
+++ b/sysdeps/powerpc/powerpc64/power8/memset.S
@@ -377,7 +377,8 @@ L(write_LT_32):
 	subf	r5,r0,r5
 
 2:	bf	30,1f
-	sth	r4,0(r10)
+	stb	r4,0(r10)
+	stb	r4,1(r10)
 	addi	r10,r10,2
 
 1:	bf	31,L(end_4bytes_alignment)
@@ -437,11 +438,74 @@ L(tail5):
 	/* Handles copies of 0~8 bytes.  */
 	.align	4
 L(write_LE_8):
-	bne	cr6,L(tail4)
+	/* Use stb instead of sth, which is safe for both aligned and
+	   unaligned inputs.  */
+	bne	cr6,L(LE7_tail4)
+	/* If the input is word aligned, use stw, else use stb.  */
+	andi.	r0,r10,3
+	bne	L(8_unalign)
 
 	stw	r4,0(r10)
 	stw	r4,4(r10)
 	blr
+
+	/* Unaligned input and size is 8.  */
+	.align	4
+L(8_unalign):
+	andi.	r0,r10,1
+	beq	L(8_hwalign)
+	stb	r4,0(r10)
+	sth	r4,1(r10)
+	sth	r4,3(r10)
+	sth	r4,5(r10)
+	stb	r4,7(r10)
+	blr
+
+	/* Halfword aligned input and size is 8.  */
+	.align	4
+L(8_hwalign):
+	sth	r4,0(r10)
+	sth	r4,2(r10)
+	sth	r4,4(r10)
+	sth	r4,6(r10)
+	blr
+
+	.align	4
+	/* Copies 4~7 bytes.  */
+L(LE7_tail4):
+	bf	29,L(LE7_tail2)
+	stb	r4,0(r10)
+	stb	r4,1(r10)
+	stb	r4,2(r10)
+	stb	r4,3(r10)
+	bf	30,L(LE7_tail5)
+	stb	r4,4(r10)
+	stb	r4,5(r10)
+	bflr	31
+	stb	r4,6(r10)
+	blr
+
+	.align	4
+	/* Copies 2~3 bytes.  */
+L(LE7_tail2):
+	bf	30,1f
+	stb	r4,0(r10)
+	stb	r4,1(r10)
+	bflr	31
+	stb	r4,2(r10)
+	blr
+
+	.align	4
+L(LE7_tail5):
+	bflr	31
+	stb	r4,4(r10)
+	blr
+
+	.align	4
+1:	bflr	31
+	stb	r4,0(r10)
+	blr
+
 
 END_GEN_TB (MEMSET,TB_TOCLESS)
 libc_hidden_builtin_def (memset)
-- 
2.11.0