Hi All,

Our zero- and sign-extend and extract patterns are currently very limited and
only work for the original register size of the instructions, i.e. they are
limited by the GPI patterns.  However, these instructions extract bits and
extend, so any register size can be used as an input as long as the
extraction makes logical sense.

The majority of the attached testcases currently fail to optimize.  (A short
standalone sketch of the ubfx/sbfx semantics is appended after the patch.)

Bootstrapped and regtested on aarch64-none-linux-gnu with no issues.

Ok for master?

Thanks,
Tamar

gcc/ChangeLog:

	* config/aarch64/aarch64-simd.md (aarch64_get_lane): Drop reload
	penalty.
	* config/aarch64/aarch64.md (*_ashl): Renamed to...
	(*_ashl): ...this.
	(*zero_extend_lshr): Renamed to...
	(*zero_extend_): ...this.
	(*extend_ashr): Renamed to...
	(*extend_): ...this.

gcc/testsuite/ChangeLog:

	* gcc.target/aarch64/bitmove_1.c: New test.
	* gcc.target/aarch64/bitmove_2.c: New test.

--- inline copy of patch --
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 8bcc9e76b1cad4a2591fb176175db72d7a190d57..23909c62638b49722568da4555b33c71fd21337e 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -4259,7 +4259,7 @@ (define_insn "*aarch64_get_lane_zero_extend"
 ;; Extracting lane zero is split into a simple move when it is between SIMD
 ;; registers or a store.
 (define_insn_and_split "aarch64_get_lane"
-  [(set (match_operand: 0 "aarch64_simd_nonimmediate_operand" "=?r, w, Utv")
+  [(set (match_operand: 0 "aarch64_simd_nonimmediate_operand" "=r, w, Utv")
 	(vec_select:
 	  (match_operand:VALL_F16_FULL 1 "register_operand" "w, w, w")
 	  (parallel [(match_operand:SI 2 "immediate_operand" "i, i, i")])))]
diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
index 85b400489cb382a01b0c469eff2b600a93805e31..3116feda4fe54e2a21dc3f990b6976d216874260 100644
--- a/gcc/config/aarch64/aarch64.md
+++ b/gcc/config/aarch64/aarch64.md
@@ -5629,13 +5629,13 @@ (define_insn "*si3_insn2_uxtw"
 )
 
 (define_insn "*3_insn"
-  [(set (match_operand:SHORT 0 "register_operand" "=r")
-	(ASHIFT:SHORT (match_operand:SHORT 1 "register_operand" "r")
+  [(set (match_operand:ALLI 0 "register_operand" "=r")
+	(ASHIFT:ALLI (match_operand:ALLI 1 "register_operand" "r")
 		      (match_operand 2 "const_int_operand" "n")))]
   "UINTVAL (operands[2]) < GET_MODE_BITSIZE (mode)"
 {
   operands[3] = GEN_INT ( - UINTVAL (operands[2]));
-  return "\t%w0, %w1, %2, %3";
+  return "\t%0, %1, %2, %3";
 }
   [(set_attr "type" "bfx")]
 )
@@ -5710,40 +5710,40 @@ (define_insn "*extrsi5_insn_di"
   [(set_attr "type" "rotate_imm")]
 )
 
-(define_insn "*_ashl"
+(define_insn "*_ashl"
   [(set (match_operand:GPI 0 "register_operand" "=r")
 	(ANY_EXTEND:GPI
-	 (ashift:SHORT (match_operand:SHORT 1 "register_operand" "r")
+	 (ashift:ALLX (match_operand:ALLX 1 "register_operand" "r")
 		       (match_operand 2 "const_int_operand" "n"))))]
-  "UINTVAL (operands[2]) < GET_MODE_BITSIZE (mode)"
+  "UINTVAL (operands[2]) < GET_MODE_BITSIZE (mode)"
 {
-  operands[3] = GEN_INT ( - UINTVAL (operands[2]));
+  operands[3] = GEN_INT ( - UINTVAL (operands[2]));
   return "bfiz\t%0, %1, %2, %3";
 }
   [(set_attr "type" "bfx")]
 )
 
-(define_insn "*zero_extend_lshr"
+(define_insn "*zero_extend_"
   [(set (match_operand:GPI 0 "register_operand" "=r")
 	(zero_extend:GPI
-	 (lshiftrt:SHORT (match_operand:SHORT 1 "register_operand" "r")
-			 (match_operand 2 "const_int_operand" "n"))))]
-  "UINTVAL (operands[2]) < GET_MODE_BITSIZE (mode)"
+	 (LSHIFTRT_ONLY:ALLX (match_operand:ALLX 1 "register_operand" "r")
+			     (match_operand 2 "const_int_operand" "n"))))]
+  "UINTVAL (operands[2]) < GET_MODE_BITSIZE (mode)"
 {
-  operands[3] = GEN_INT ( - UINTVAL (operands[2]));
+  operands[3] = GEN_INT ( - UINTVAL (operands[2]));
   return "ubfx\t%0, %1, %2, %3";
 }
   [(set_attr "type" "bfx")]
 )
 
-(define_insn "*extend_ashr"
+(define_insn "*extend_"
   [(set (match_operand:GPI 0 "register_operand" "=r")
 	(sign_extend:GPI
-	 (ashiftrt:SHORT (match_operand:SHORT 1 "register_operand" "r")
-			 (match_operand 2 "const_int_operand" "n"))))]
-  "UINTVAL (operands[2]) < GET_MODE_BITSIZE (mode)"
+	 (ASHIFTRT_ONLY:ALLX (match_operand:ALLX 1 "register_operand" "r")
+			     (match_operand 2 "const_int_operand" "n"))))]
+  "UINTVAL (operands[2]) < GET_MODE_BITSIZE (mode)"
 {
-  operands[3] = GEN_INT ( - UINTVAL (operands[2]));
+  operands[3] = GEN_INT ( - UINTVAL (operands[2]));
   return "sbfx\\t%0, %1, %2, %3";
 }
   [(set_attr "type" "bfx")]
diff --git a/gcc/testsuite/gcc.target/aarch64/bitmove_1.c b/gcc/testsuite/gcc.target/aarch64/bitmove_1.c
new file mode 100644
index 0000000000000000000000000000000000000000..8b0aa8af49cd070928bacc4995a321c7bfde58a6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/bitmove_1.c
@@ -0,0 +1,76 @@
+/* { dg-do compile } */
+/* { dg-additional-options "-O3 -std=c99" } */
+/* { dg-final { check-function-bodies "**" "" "" { target { le } } } } */
+
+#include <stdint.h>
+
+/*
+** sfoo6:
+**	asr	x0, x0, 16
+**	ret
+*/
+int64_t sfoo6 (int32_t x)
+{
+  return x >> 16;
+}
+
+/*
+** ufoo6:
+**	lsr	w0, w0, 30
+**	ret
+*/
+uint64_t ufoo6 (uint32_t x)
+{
+  return x >> 30;
+}
+
+/*
+** ufoo6s:
+**	ubfx	w0, w0, 7, 9
+**	ret
+*/
+uint32_t ufoo6s (uint16_t x)
+{
+  return x >> 7;
+}
+
+/*
+** ufoo6h:
+**	ubfx	w0, w0, 4, 4
+**	ret
+*/
+uint16_t ufoo6h (uint8_t x)
+{
+  return x >> 4;
+}
+
+/*
+** sfoo62:
+**	asr	x0, x0, 10
+**	ret
+*/
+int64_t sfoo62 (int32_t x)
+{
+  return x >> 10;
+}
+
+/*
+** ufoo62:
+**	lsr	w0, w0, 10
+**	ret
+*/
+uint64_t ufoo62 (uint32_t x)
+{
+  return x >> 10;
+}
+
+/*
+** sfoo63:
+**	asr	x0, x0, 10
+**	ret
+*/
+int64_t sfoo63 (int32_t x)
+{
+  return x >> 10;
+}
+
diff --git a/gcc/testsuite/gcc.target/aarch64/bitmove_2.c b/gcc/testsuite/gcc.target/aarch64/bitmove_2.c
new file mode 100644
index 0000000000000000000000000000000000000000..54b3071a3b4e2001f83337837e712c381683d23a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/bitmove_2.c
@@ -0,0 +1,76 @@
+/* { dg-do compile } */
+/* { dg-additional-options "-O3 -std=c99" } */
+/* { dg-final { check-function-bodies "**" "" "" { target { le } } } } */
+
+#include <stdint.h>
+
+/*
+** sfoo6:
+**	sbfiz	x0, x0, 16, 16
+**	ret
+*/
+int64_t sfoo6 (int32_t x)
+{
+  return x << 16;
+}
+
+/*
+** ufoo6:
+**	lsl	w0, w0, 30
+**	ret
+*/
+uint64_t ufoo6 (uint32_t x)
+{
+  return x << 30;
+}
+
+/*
+** ufoo6s:
+**	ubfiz	w0, w0, 7, 16
+**	ret
+*/
+uint32_t ufoo6s (uint16_t x)
+{
+  return x << 7;
+}
+
+/*
+** ufoo6h:
+**	uxtb	w0, w0
+**	ubfiz	w0, w0, 4, 12
+**	ret
+*/
+uint16_t ufoo6h (uint8_t x)
+{
+  return x << 4;
+}
+
+/*
+** sfoo62:
+**	sbfiz	x0, x0, 10, 22
+**	ret
+*/
+int64_t sfoo62 (int32_t x)
+{
+  return x << 10;
+}
+
+/*
+** ufoo62:
+**	lsl	w0, w0, 10
+**	ret
+*/
+uint64_t ufoo62 (uint32_t x)
+{
+  return x << 10;
+}
+
+/*
+** sfoo63:
+**	sbfiz	x0, x0, 10, 22
+**	ret
+*/
+int64_t sfoo63 (int32_t x)
+{
+  return x << 10;
+}
--
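
P.S. For completeness, here is a small standalone C model (not part of the patch,
and the helper names are my own) of what ubfx/sbfx compute, namely "extract a
width-bit field starting at lsb, then zero- or sign-extend it".  It only
illustrates the point in the cover letter: the source register size does not
matter as long as lsb + width stays inside it.

#include <stdint.h>
#include <stdio.h>

/* Zero-extending bitfield extract, modelling ubfx.  Assumes 0 < width < 64
   and lsb + width <= 64.  */
static uint64_t
ubfx_model (uint64_t src, unsigned lsb, unsigned width)
{
  return (src >> lsb) & ((UINT64_C (1) << width) - 1);
}

/* Sign-extending bitfield extract, modelling sbfx, same constraints.  */
static int64_t
sbfx_model (uint64_t src, unsigned lsb, unsigned width)
{
  uint64_t field = (src >> lsb) & ((UINT64_C (1) << width) - 1);
  uint64_t sign = UINT64_C (1) << (width - 1);
  return (int64_t) (field ^ sign) - (int64_t) sign;
}

int
main (void)
{
  /* ufoo6s in bitmove_1.c: (uint16_t) x >> 7 is the 9-bit field at bit 7,
     zero-extended, i.e. ubfx w0, w0, 7, 9.  */
  uint16_t x = 0xabcd;
  printf ("%llu\n", (unsigned long long) ubfx_model (x, 7, 9));  /* 343 */
  printf ("%lld\n", (long long) sbfx_model (x, 7, 9));           /* -169 */
  return 0;
}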