From: "hubicka at gcc dot gnu.org"
To: gcc-bugs@gcc.gnu.org
Subject: [Bug tree-optimization/99408] s3251 benchmark of TSVC vectorized by clang runs about 7 times faster compared to gcc
Date: Wed, 11 Jan 2023 19:03:59 +0000

https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99408

--- Comment #3 from Jan Hubicka ---
With a zen4 build the loop takes 19s when compiled with gcc, while the aocc build takes 6.6s.

aocc:

.LBB0_1:                                # %for.cond22.preheader
                                        # =>This Loop Header: Depth=1
                                        #     Child Loop BB0_2 Depth 2
        vbroadcastss a(%rip), %zmm20
        xorl %ecx, %ecx
        .p2align 4, 0x90
.LBB0_2:                                # %vector.body
                                        #   Parent Loop BB0_1 Depth=1
                                        # =>  This Inner Loop Header: Depth=2
        vmovups c(%rcx), %zmm13
        vmovaps %zmm20, %zmm12
        vmovups e(%rcx), %zmm0
        vaddps b(%rcx), %zmm13, %zmm20
        vmulps %zmm13, %zmm0, %zmm13
        vmovaps %zmm20, %zmm15
        vpermt2ps %zmm12, %zmm29, %zmm15
        vmovups %zmm20, a+4(%rcx)
        vmovups %zmm13, b(%rcx)
        vmulps %zmm0, %zmm15, %zmm12
        vmovups %zmm12, d(%rcx)
        addq $64, %rcx
        cmpq $127936, %rcx              # imm = 0x1F3C0
        jne .LBB0_2
# %bb.3:                                # %middle.block
        vextractf32x4 $3, %zmm20, %xmm5
        vmovss -4(%rsp), %xmm2          # 4-byte Reload # xmm2 = mem[0],zero,zero,zero
        vmovss -12(%rsp), %xmm0         # 4-byte Reload # xmm0 = mem[0],zero,zero,zero
        incl %eax
        vaddss b+127936(%rip), %xmm2, %xmm2
        vpermilps $231, %xmm5, %xmm5    # xmm5 = xmm5[3,1,2,3]
        vmulss -8(%rsp), %xmm5, %xmm5   # 4-byte Folded Reload
        vmovss %xmm0, b+127936(%rip)
        vmovss -16(%rsp), %xmm0         # 4-byte Reload # xmm0 = mem[0],zero,zero,zero
        vmovss %xmm2, a+127940(%rip)
        vmulss -20(%rsp), %xmm2, %xmm2  # 4-byte Folded Reload
        vmovss %xmm5, d+127936(%rip)
        vaddss b+127940(%rip), %xmm0, %xmm5
        vmovss -24(%rsp), %xmm0         # 4-byte Reload # xmm0 = mem[0],zero,zero,zero
        vmovss %xmm0, b+127940(%rip)
        vmovss -28(%rsp), %xmm0         # 4-byte Reload # xmm0 = mem[0],zero,zero,zero
        vmovss %xmm2, d+127940(%rip)
        vaddss b+127944(%rip), %xmm0, %xmm2
        vmovss -36(%rsp), %xmm0         # 4-byte Reload # xmm0 = mem[0],zero,zero,zero
        vmovss %xmm5, a+127944(%rip)
        vmulss -32(%rsp), %xmm5, %xmm5  # 4-byte Folded Reload
        vmovss %xmm0, b+127944(%rip)
        vmovss -40(%rsp), %xmm0         # 4-byte Reload # xmm0 = mem[0],zero,zero,zero
        vmovss %xmm2, a+127948(%rip)
        vmulss %xmm22, %xmm2, %xmm2
        vmovss %xmm5, d+127944(%rip)
        vaddss b+127948(%rip), %xmm21, %xmm5
        vmovss %xmm2, d+127948(%rip)
        vmovss %xmm0, b+127948(%rip)
        vmovss -44(%rsp), %xmm0         # 4-byte Reload # xmm0 = mem[0],zero,zero,zero
        vaddss b+127952(%rip), %xmm24, %xmm2
        vmovss %xmm5, a+127952(%rip)
        vmulss %xmm25, %xmm5, %xmm5
        vmovss %xmm0, b+127952(%rip)
        vmovss -48(%rsp), %xmm0         # 4-byte Reload # xmm0 = mem[0],zero,zero,zero
        vmovss %xmm5, d+127952(%rip)
        vaddss b+127956(%rip), %xmm27, %xmm5
        vmovss %xmm2, a+127956(%rip)
        vmulss %xmm28, %xmm2, %xmm2
        vmovss %xmm2, d+127956(%rip)
        vmovss %xmm0, b+127956(%rip)
        vmovss -52(%rsp), %xmm0         # 4-byte Reload # xmm0 = mem[0],zero,zero,zero
        vaddss b+127960(%rip), %xmm30, %xmm2
        vmovss %xmm5, a+127960(%rip)
        vmulss %xmm31, %xmm5, %xmm5
        vmovss %xmm5, d+127960(%rip)
        vmovss %xmm0, b+127960(%rip)
        vmovss -56(%rsp), %xmm0         # 4-byte Reload # xmm0 = mem[0],zero,zero,zero
        vaddss b+127964(%rip), %xmm16, %xmm5
        vmovss %xmm2, a+127964(%rip)
        vmulss %xmm18, %xmm2, %xmm2
        vmovss %xmm2, d+127964(%rip)
        vmovss %xmm0, b+127964(%rip)
        vmovss -60(%rsp), %xmm0         # 4-byte Reload # xmm0 = mem[0],zero,zero,zero
        vaddss b+127968(%rip), %xmm19, %xmm2
        vmovss %xmm5, a+127968(%rip)
        vmulss %xmm1, %xmm5, %xmm5
        vmovss %xmm5, d+127968(%rip)
        vmovss %xmm0, b+127968(%rip)
        vmovss -64(%rsp), %xmm0         # 4-byte Reload # xmm0 = mem[0],zero,zero,zero
        vaddss b+127972(%rip), %xmm3, %xmm5
        vmovss %xmm2, a+127972(%rip)
        vmulss %xmm4, %xmm2, %xmm2
        vmovss %xmm2, d+127972(%rip)
        vmovss %xmm0, b+127972(%rip)
        vmovss -68(%rsp), %xmm0         # 4-byte Reload # xmm0 = mem[0],zero,zero,zero
        vaddss b+127976(%rip), %xmm6, %xmm2
        vmovss %xmm5, a+127976(%rip)
        vmulss %xmm7, %xmm5, %xmm5
        vmovss %xmm5, d+127976(%rip)
        vmovss %xmm0, b+127976(%rip)
        vmovss -72(%rsp), %xmm0         # 4-byte Reload # xmm0 = mem[0],zero,zero,zero
        vaddss b+127980(%rip), %xmm9, %xmm5
        vmovss %xmm2, a+127980(%rip)
        vmulss %xmm2, %xmm10, %xmm2
        vmovss %xmm2, d+127980(%rip)
        vmovss %xmm0, b+127980(%rip)
        vmovss -76(%rsp), %xmm0         # 4-byte Reload # xmm0 = mem[0],zero,zero,zero
        vaddss b+127984(%rip), %xmm14, %xmm2
        vmovss %xmm5, a+127984(%rip)
        vmulss %xmm17, %xmm5, %xmm5
        vmovss %xmm5, d+127984(%rip)
        vmovss %xmm0, b+127984(%rip)
        vmovss -80(%rsp), %xmm0         # 4-byte Reload # xmm0 = mem[0],zero,zero,zero
        vaddss b+127988(%rip), %xmm23, %xmm5
        vmovss %xmm2, a+127988(%rip)
        vmulss %xmm26, %xmm2, %xmm2
        vmovss %xmm2, d+127988(%rip)
        vmovss %xmm0, b+127988(%rip)
        vaddss b+127992(%rip), %xmm8, %xmm2
        vmulss %xmm5, %xmm11, %xmm0
        vmovss %xmm5, a+127992(%rip)
        vmovss %xmm0, d+127992(%rip)
        vmovss -84(%rsp), %xmm0         # 4-byte Reload # xmm0 = mem[0],zero,zero,zero
        vmovss %xmm2, a+127996(%rip)
        vmovss %xmm0, b+127992(%rip)
        cmpl $1000000, %eax             # imm = 0xF4240
        jne .LBB0_1

gcc:

.L2:
        vmovdqa32 %zmm5, %zmm1
        addq $320, %rax
        vpaddd %zmm2, %zmm5, %zmm5
        vmovdqa32 %zmm6, %zmm0
        vpaddd %zmm2, %zmm6, %zmm6
        vpaddd %zmm24, %zmm1, %zmm25
        vpaddd %zmm23, %zmm1, %zmm1
        valignq $3, %ymm25, %ymm25, %ymm26
        vmovq %xmm25, -320(%rax)
        vpextrq $1, %xmm25, -300(%rax)
        vmovq %xmm1, -160(%rax)
        vpextrq $1, %xmm1, -140(%rax)
        vextracti64x2 $1, %ymm25, %xmm27
        vextracti64x4 $0x1, %zmm25, %ymm25
        vmovq %xmm26, -260(%rax)
        vmovq %xmm25, -240(%rax)
        vpextrq $1, %xmm25, -220(%rax)
        vextracti64x2 $1, %ymm25, %xmm26
        vmovq %xmm27, -280(%rax)
        valignq $3, %ymm25, %ymm25, %ymm25
        vmovq %xmm26, -200(%rax)
        vmovq %xmm25, -180(%rax)
        valignq $3, %ymm1, %ymm1, %ymm25
        vextracti64x2 $1, %ymm1, %xmm26
        vextracti64x4 $0x1, %zmm1, %ymm1
        vmovq %xmm25, -100(%rax)
        vmovq %xmm1, -80(%rax)
        vpextrq $1, %xmm1, -60(%rax)
        vextracti64x2 $1, %ymm1, %xmm25
        vmovq %xmm26, -120(%rax)
        vmovdqa32 %zmm0, %zmm26
        valignq $3, %ymm1, %ymm1, %ymm1
        vmovq %xmm25, -40(%rax)
        vpaddd %zmm3, %zmm0, %zmm25
        vmovq %xmm1, -20(%rax)
        vpaddd %zmm4, %zmm0, %zmm1
        vpermt2d %zmm1, %zmm22, %zmm26
        vmovq %xmm26, -312(%rax)
        vmovdqa32 %zmm0, %zmm26
        vpermt2d %zmm1, %zmm21, %zmm26
        vmovq %xmm26, %rdx
        vmovdqa32 %zmm0, %zmm26
        movq %rdx, -292(%rax)
        vpermt2d %zmm1, %zmm20, %zmm26
        vmovq %xmm26, -272(%rax)
        vmovdqa32 %zmm0, %zmm26
        vpermt2d %zmm1, %zmm19, %zmm26
        vmovq %xmm26, %rdx
        vmovdqa32 %zmm0, %zmm26
        movq %rdx, -252(%rax)
        vpermt2d %zmm1, %zmm18, %zmm26
        vmovq %xmm26, -232(%rax)
        vmovdqa32 %zmm0, %zmm26
        vpermt2d %zmm1, %zmm17, %zmm26
        vmovq %xmm26, %rdx
        vmovdqa32 %zmm0, %zmm26
        movq %rdx, -212(%rax)
        vpermt2d %zmm1, %zmm16, %zmm26
        vmovq %xmm26, -192(%rax)
        vmovdqa32 %zmm0, %zmm26
        vpermt2d %zmm1, %zmm15, %zmm26
        vmovq %xmm26, %rdx
        vmovdqa32 %zmm0, %zmm26
        movq %rdx, -172(%rax)
        vpermt2d %zmm1, %zmm14, %zmm26
        vmovq %xmm26, -152(%rax)
        vmovdqa32 %zmm0, %zmm26
        vpermt2d %zmm1, %zmm13, %zmm26
        vmovq %xmm26, %rdx
        vmovdqa32 %zmm0, %zmm26
        movq %rdx, -132(%rax)
        vpermt2d %zmm1, %zmm12, %zmm26
        vmovq %xmm26, -112(%rax)
        vmovdqa32 %zmm0, %zmm26
        vpermt2d %zmm1, %zmm11, %zmm26
        vmovq %xmm26, %rdx
        vmovdqa32 %zmm0, %zmm26
        movq %rdx, -92(%rax)
        vpermt2d %zmm1, %zmm10, %zmm26
        vmovq %xmm26, -72(%rax)
        vmovdqa32 %zmm0, %zmm26
        vpermt2d %zmm1, %zmm9, %zmm26
        vmovq %xmm26, %rdx
        vmovdqa32 %zmm0, %zmm26
        vpermt2d %zmm1, %zmm7, %zmm0
        vmovq %xmm0, -12(%rax)
        movq %rdx, -52(%rax)
        vmovdqa32 %ymm25, %ymm0
        vpermt2d %zmm1, %zmm8, %zmm26
        vextracti32x4 $1, %ymm25, %xmm1
        vmovq %xmm26, -32(%rax)
        vmovd %xmm25, -304(%rax)
        vpextrd $1, %xmm0, -284(%rax)
        vpextrd $2, %xmm0, -264(%rax)
        vmovd %xmm1, -224(%rax)
        valignd $5, %ymm25, %ymm25, %ymm1
        vpextrd $3, %xmm0, -244(%rax)
        valignd $7, %ymm25, %ymm25, %ymm0
        vmovd %xmm1, -204(%rax)
        valignd $6, %ymm25, %ymm25, %ymm1
        vmovd %xmm0, -164(%rax)
        vextracti32x8 $0x1, %zmm25, %ymm0
        vmovd %xmm0, -144(%rax)
        vpextrd $1, %xmm0, -124(%rax)
        vmovd %xmm1, -184(%rax)
        vextracti32x4 $1, %ymm0, %xmm1
        vpextrd $2, %xmm0, -104(%rax)
        vpextrd $3, %xmm0, -84(%rax)
        vmovd %xmm1, -64(%rax)
        valignd $5, %ymm0, %ymm0, %ymm1
        vmovd %xmm1, -44(%rax)
        valignd $6, %ymm0, %ymm0, %ymm1
        valignd $7, %ymm0, %ymm0, %ymm0
        vmovd %xmm1, -24(%rax)
        vmovd %xmm0, -4(%rax)
        cmpq %rcx, %rax
        jne .L2
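
For reference, a minimal standalone version of the loop being measured is below. It assumes the usual TSVC_2 definition of s3251 (LEN_1D = 32000, which matches the cmpq $127936 vector-loop bound, and 1000000 repetitions, which matches the cmpl $1000000 above); the initialization values and the final printf are placeholders to keep the arrays live, not the suite's harness:

#include <stdio.h>

#define LEN_1D 32000
#define ITERATIONS 1000000

static float a[LEN_1D], b[LEN_1D], c[LEN_1D], d[LEN_1D], e[LEN_1D];

int main (void)
{
  /* Placeholder initialization; TSVC uses its own init routines.  */
  for (int i = 0; i < LEN_1D; i++)
    {
      a[i] = 1.0f; b[i] = 2.0f; c[i] = 3.0f; d[i] = 0.0f; e[i] = 4.0f;
    }

  for (int nl = 0; nl < ITERATIONS; nl++)
    /* s3251 kernel: the only loop-carried dependence is the store to
       a[i+1] feeding the read of a[i] one iteration later, which the
       vpermt2ps in the aocc inner loop appears to forward within the
       vector.  */
    for (int i = 0; i < LEN_1D - 1; i++)
      {
        a[i + 1] = b[i] + c[i];
        b[i]     = c[i] * e[i];
        d[i]     = a[i] * e[i];
      }

  /* Keep the results observable so the loops are not optimized away.  */
  printf ("%f\n", (double) (a[LEN_1D - 1] + b[0] + d[0]));
  return 0;
}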