diff --git a/gcc/testsuite/gcc.target/aarch64/pr103741.c b/gcc/testsuite/gcc.target/aarch64/pr103741.c
new file mode 100644
index 0000000000000000000000000000000000000000..ef3ae66ebe5e5a44e7bea7be22b6378bc23cc538
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/pr103741.c
@@ -0,0 +1,26 @@
+/* { dg-do compile } */
+/* { dg-additional-options "-march=armv8-a+sve -O1" } */
+
+long int m, n;
+
+int
+qux (int z)
+{
+  return 4 >> z ? z : 0;
+}
+
+int
+bar (long int y)
+{
+  return y ? 3 : 2;
+}
+
+__attribute__ ((simd)) int
+foo (int x)
+{
+  long int a = x & m;
+  int b = bar (x) / n;
+
+  return qux (b) == a;
+}
+
diff --git a/gcc/tree-vect-stmts.c b/gcc/tree-vect-stmts.c
index 8c427174b37e6c03c2f914c90332bcc4eac54130..ad90cdb0473a337207d6ba54c1dd0a2ecc50ab8d 100644
--- a/gcc/tree-vect-stmts.c
+++ b/gcc/tree-vect-stmts.c
@@ -6361,7 +6361,9 @@ vectorizable_operation (vec_info *vinfo,
   /* When combining two masks check if either of them is elsewhere
      combined with a loop mask, if that's the case we can mark that the
      new combined mask doesn't need to be combined with a loop mask.  */
-  if (masked_loop_p && code == BIT_AND_EXPR)
+  if (masked_loop_p
+      && code == BIT_AND_EXPR
+      && VECTOR_BOOLEAN_TYPE_P (vectype))
     {
       if (loop_vinfo->scalar_cond_masked_set.contains ({ op0, ncopies}))