public inbox for gcc-bugs@sourceware.org
help / color / mirror / Atom feed
* [Bug target/98934] New: Very poor code generation for SSE 8-bit vector right shift
@ 2021-02-02 15:39 gabravier at gmail dot com
2021-02-02 16:05 ` [Bug target/98934] " rguenth at gcc dot gnu.org
` (3 more replies)
0 siblings, 4 replies; 5+ messages in thread
From: gabravier at gmail dot com @ 2021-02-02 15:39 UTC (permalink / raw)
To: gcc-bugs
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=98934
Bug ID: 98934
Summary: Very poor code generation for SSE 8-bit vector right
shift
Product: gcc
Version: 11.0
Status: UNCONFIRMED
Severity: normal
Priority: P3
Component: target
Assignee: unassigned at gcc dot gnu.org
Reporter: gabravier at gmail dot com
Target Milestone: ---
typedef char __attribute__((vector_size(16))) v16i8;
v16i8 f(v16i8 x, v16i8 y)
{
return x >> y;
}
With -O3, LLVM outputs this:
f(char __vector(16), char __vector(16)):
punpckhbw xmm2, xmm0 # xmm2 =
xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
psllw xmm1, 5
punpckhbw xmm4, xmm1 # xmm4 =
xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15]
pxor xmm3, xmm3
pxor xmm5, xmm5
pcmpgtw xmm5, xmm4
movdqa xmm6, xmm5
pandn xmm6, xmm2
psraw xmm2, 4
pand xmm2, xmm5
por xmm2, xmm6
paddw xmm4, xmm4
pxor xmm5, xmm5
pcmpgtw xmm5, xmm4
movdqa xmm6, xmm5
pandn xmm6, xmm2
psraw xmm2, 2
pand xmm2, xmm5
por xmm2, xmm6
paddw xmm4, xmm4
pxor xmm5, xmm5
pcmpgtw xmm5, xmm4
movdqa xmm4, xmm5
pandn xmm4, xmm2
psraw xmm2, 1
pand xmm2, xmm5
por xmm2, xmm4
psrlw xmm2, 8
punpcklbw xmm0, xmm0 # xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
punpcklbw xmm1, xmm1 # xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
pxor xmm4, xmm4
pcmpgtw xmm4, xmm1
movdqa xmm5, xmm4
pandn xmm5, xmm0
psraw xmm0, 4
pand xmm0, xmm4
por xmm0, xmm5
paddw xmm1, xmm1
pxor xmm4, xmm4
pcmpgtw xmm4, xmm1
movdqa xmm5, xmm4
pandn xmm5, xmm0
psraw xmm0, 2
pand xmm0, xmm4
por xmm0, xmm5
paddw xmm1, xmm1
pcmpgtw xmm3, xmm1
movdqa xmm1, xmm3
pandn xmm1, xmm0
psraw xmm0, 1
pand xmm0, xmm3
por xmm0, xmm1
psrlw xmm0, 8
packuswb xmm0, xmm2
ret
GCC outputs this:
f(char __vector(16), char __vector(16)):
push r15
movd edx, xmm0
movd ecx, xmm1
push r14
sar dl, cl
push r13
movzx edx, dl
push r12
push rbp
push rbx
sub rsp, 400
movaps XMMWORD PTR [rsp+376], xmm0
movzx ebx, BYTE PTR [rsp+377]
movaps XMMWORD PTR [rsp+360], xmm1
movzx ecx, BYTE PTR [rsp+361]
movaps XMMWORD PTR [rsp+344], xmm0
movzx ebp, BYTE PTR [rsp+346]
sar bl, cl
movaps XMMWORD PTR [rsp+328], xmm1
movzx ecx, BYTE PTR [rsp+330]
movaps XMMWORD PTR [rsp+312], xmm0
movzx ebx, bl
movzx r12d, BYTE PTR [rsp+315]
sar bpl, cl
movaps XMMWORD PTR [rsp+296], xmm1
movzx ecx, BYTE PTR [rsp+299]
movaps XMMWORD PTR [rsp+280], xmm0
movzx ebp, bpl
movzx r13d, BYTE PTR [rsp+284]
sar r12b, cl
movaps XMMWORD PTR [rsp+264], xmm1
movzx ecx, BYTE PTR [rsp+268]
movaps XMMWORD PTR [rsp+248], xmm0
movzx r12d, r12b
movzx r14d, BYTE PTR [rsp+253]
sar r13b, cl
movaps XMMWORD PTR [rsp+232], xmm1
movzx ecx, BYTE PTR [rsp+237]
movaps XMMWORD PTR [rsp+216], xmm0
movzx r13d, r13b
movzx r15d, BYTE PTR [rsp+222]
sar r14b, cl
movaps XMMWORD PTR [rsp+200], xmm1
movzx ecx, BYTE PTR [rsp+206]
movaps XMMWORD PTR [rsp+184], xmm0
movzx r14d, r14b
movaps XMMWORD PTR [rsp+168], xmm1
sar r15b, cl
movzx eax, BYTE PTR [rsp+191]
movzx ecx, BYTE PTR [rsp+175]
movaps XMMWORD PTR [rsp+152], xmm0
movzx esi, BYTE PTR [rsp+160]
movzx r15d, r15b
sar al, cl
movaps XMMWORD PTR [rsp+136], xmm1
movzx ecx, BYTE PTR [rsp+144]
movaps XMMWORD PTR [rsp+120], xmm0
movzx edi, BYTE PTR [rsp+129]
sar sil, cl
movaps XMMWORD PTR [rsp+104], xmm1
movzx ecx, BYTE PTR [rsp+113]
movaps XMMWORD PTR [rsp+88], xmm0
sar dil, cl
mov BYTE PTR [rsp-89], sil
movaps XMMWORD PTR [rsp+72], xmm1
movzx esi, dil
movzx ecx, BYTE PTR [rsp+82]
movzx edi, BYTE PTR [rsp+98]
movaps XMMWORD PTR [rsp+56], xmm0
movzx r8d, BYTE PTR [rsp+67]
sar dil, cl
movaps XMMWORD PTR [rsp+40], xmm1
movzx ecx, BYTE PTR [rsp+51]
movaps XMMWORD PTR [rsp+24], xmm0
movzx r9d, BYTE PTR [rsp+36]
movzx edi, dil
sar r8b, cl
movaps XMMWORD PTR [rsp+8], xmm1
movzx ecx, BYTE PTR [rsp+20]
movaps XMMWORD PTR [rsp-8], xmm0
movzx r10d, BYTE PTR [rsp+5]
movzx r8d, r8b
sar r9b, cl
movaps XMMWORD PTR [rsp-24], xmm1
movzx ecx, BYTE PTR [rsp-11]
movaps XMMWORD PTR [rsp-40], xmm0
movzx r11d, BYTE PTR [rsp-26]
movzx r9d, r9b
sar r10b, cl
movaps XMMWORD PTR [rsp-56], xmm1
movzx ecx, BYTE PTR [rsp-42]
mov BYTE PTR [rsp-120], al
movzx r10d, r10b
movaps XMMWORD PTR [rsp-72], xmm0
sar r11b, cl
movzx eax, BYTE PTR [rsp-57]
movaps XMMWORD PTR [rsp-88], xmm1
movzx ecx, BYTE PTR [rsp-73]
movzx r11d, r11b
sar al, cl
movzx ecx, al
movzx eax, BYTE PTR [rsp-120]
sal rcx, 8
sal rax, 8
or rcx, r11
or rax, r15
sal rax, 8
or rax, r14
sal rax, 8
or rax, r13
sal rax, 8
or rax, r12
sal rax, 8
or rax, rbp
sal rax, 8
or rax, rbx
movzx ebx, BYTE PTR [rsp-89]
sal rax, 8
sal rcx, 8
or rcx, r10
or rax, rdx
sal rcx, 8
mov QWORD PTR [rsp-120], rax
or rcx, r9
sal rcx, 8
or rcx, r8
sal rcx, 8
or rcx, rdi
sal rcx, 8
or rcx, rsi
sal rcx, 8
or rcx, rbx
mov QWORD PTR [rsp-112], rcx
movdqa xmm0, XMMWORD PTR [rsp-120]
add rsp, 400
pop rbx
pop rbp
pop r12
pop r13
pop r14
pop r15
ret
I have not studied the generated code much, but I can tell at a glance that
GCC's code spills to memory and does the entire operation with normal
registers, whereas LLVM manages to keep it all in xmm registers, with no spills
to memory and about a third as much assembly code.
^ permalink raw reply [flat|nested] 5+ messages in thread
* [Bug target/98934] Very poor code generation for SSE 8-bit vector right shift
2021-02-02 15:39 [Bug target/98934] New: Very poor code generation for SSE 8-bit vector right shift gabravier at gmail dot com
@ 2021-02-02 16:05 ` rguenth at gcc dot gnu.org
2021-08-25 5:26 ` pinskia at gcc dot gnu.org
` (2 subsequent siblings)
3 siblings, 0 replies; 5+ messages in thread
From: rguenth at gcc dot gnu.org @ 2021-02-02 16:05 UTC (permalink / raw)
To: gcc-bugs
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=98934
Richard Biener <rguenth at gcc dot gnu.org> changed:
What |Removed |Added
----------------------------------------------------------------------------
Ever confirmed|0 |1
Status|UNCONFIRMED |NEW
Last reconfirmed| |2021-02-02
--- Comment #1 from Richard Biener <rguenth at gcc dot gnu.org> ---
Uh, it's a very inefficient lowering as well, relying on constant amount
vector word right-shift. But yeah, fancy at least ;)
^ permalink raw reply [flat|nested] 5+ messages in thread
* [Bug target/98934] Very poor code generation for SSE 8-bit vector right shift
2021-02-02 15:39 [Bug target/98934] New: Very poor code generation for SSE 8-bit vector right shift gabravier at gmail dot com
2021-02-02 16:05 ` [Bug target/98934] " rguenth at gcc dot gnu.org
@ 2021-08-25 5:26 ` pinskia at gcc dot gnu.org
2021-08-25 5:29 ` pinskia at gcc dot gnu.org
2021-08-25 7:35 ` crazylht at gmail dot com
3 siblings, 0 replies; 5+ messages in thread
From: pinskia at gcc dot gnu.org @ 2021-08-25 5:26 UTC (permalink / raw)
To: gcc-bugs
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=98934
--- Comment #2 from Andrew Pinski <pinskia at gcc dot gnu.org> ---
This is really poor with -mavx512f even. We should be able to do it like (which
is what LLVM does):
vpmovzxbd %xmm1, %zmm1
vpmovzxbd %xmm0, %zmm0
vpsravd %zmm1, %zmm0, %zmm0
vpmovdb %zmm0, %xmm0
Basically zero extend it out to from char to int and then do the shift and then
truncate back down to char.
Which we can emulate:
typedef char __attribute__((vector_size(16))) v16i8;
typedef int __attribute__((vector_size(16*sizeof(int)))) v16i32;
typedef int __attribute__((vector_size(4*sizeof(int)))) v4i32;
typedef char __attribute__((vector_size(4))) v4i8;
v16i8 f1(v16i8 x, v16i8 y)
{
v16i32 x1, y1;
x1 = __builtin_convertvector(x, __typeof(x1));
y1 = __builtin_convertvector(y, __typeof(y1));
x1 = x1 >> y1;
x = __builtin_convertvector(x1, __typeof(x));
return x;
}
^ permalink raw reply [flat|nested] 5+ messages in thread
* [Bug target/98934] Very poor code generation for SSE 8-bit vector right shift
2021-02-02 15:39 [Bug target/98934] New: Very poor code generation for SSE 8-bit vector right shift gabravier at gmail dot com
2021-02-02 16:05 ` [Bug target/98934] " rguenth at gcc dot gnu.org
2021-08-25 5:26 ` pinskia at gcc dot gnu.org
@ 2021-08-25 5:29 ` pinskia at gcc dot gnu.org
2021-08-25 7:35 ` crazylht at gmail dot com
3 siblings, 0 replies; 5+ messages in thread
From: pinskia at gcc dot gnu.org @ 2021-08-25 5:29 UTC (permalink / raw)
To: gcc-bugs
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=98934
--- Comment #3 from Andrew Pinski <pinskia at gcc dot gnu.org> ---
*** Bug 98399 has been marked as a duplicate of this bug. ***
^ permalink raw reply [flat|nested] 5+ messages in thread
* [Bug target/98934] Very poor code generation for SSE 8-bit vector right shift
2021-02-02 15:39 [Bug target/98934] New: Very poor code generation for SSE 8-bit vector right shift gabravier at gmail dot com
` (2 preceding siblings ...)
2021-08-25 5:29 ` pinskia at gcc dot gnu.org
@ 2021-08-25 7:35 ` crazylht at gmail dot com
3 siblings, 0 replies; 5+ messages in thread
From: crazylht at gmail dot com @ 2021-08-25 7:35 UTC (permalink / raw)
To: gcc-bugs
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=98934
--- Comment #4 from Hongtao.liu <crazylht at gmail dot com> ---
(In reply to Andrew Pinski from comment #2)
> This is really poor with -mavx512f even. We should be able to do it like
(define_expand "vashr<mode>3"
[(set (match_operand:VI12_128 0 "register_operand")
(ashiftrt:VI12_128
(match_operand:VI12_128 1 "register_operand")
(match_operand:VI12_128 2 "nonimmediate_operand")))]
"TARGET_XOP || (TARGET_AVX512BW && TARGET_AVX512VL)"
We can relax the condition to TARGET_AVX512F and do things like
ix86_expand_vecop_qihi, but with qisi.
^ permalink raw reply [flat|nested] 5+ messages in thread
end of thread, other threads:[~2021-08-25 7:35 UTC | newest]
Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-02-02 15:39 [Bug target/98934] New: Very poor code generation for SSE 8-bit vector right shift gabravier at gmail dot com
2021-02-02 16:05 ` [Bug target/98934] " rguenth at gcc dot gnu.org
2021-08-25 5:26 ` pinskia at gcc dot gnu.org
2021-08-25 5:29 ` pinskia at gcc dot gnu.org
2021-08-25 7:35 ` crazylht at gmail dot com
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).