diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
index acc0cfe5f94..4383e4e1d0c 100644
--- a/gcc/config/aarch64/aarch64.cc
+++ b/gcc/config/aarch64/aarch64.cc
@@ -21976,7 +21976,7 @@ aarch64_simd_make_constant (rtx vals)
    initialised to contain VALS.  */
 
 void
-aarch64_expand_vector_init (rtx target, rtx vals)
+aarch64_expand_vector_init_fallback (rtx target, rtx vals)
 {
   machine_mode mode = GET_MODE (target);
   scalar_mode inner_mode = GET_MODE_INNER (mode);
@@ -22189,7 +22189,7 @@ aarch64_expand_vector_init (rtx target, rtx vals)
 	    }
 	  XVECEXP (copy, 0, i) = subst;
 	}
-      aarch64_expand_vector_init (target, copy);
+      aarch64_expand_vector_init_fallback (target, copy);
     }
 
   /* Insert the variable lanes directly.  */
@@ -22203,6 +22203,91 @@ aarch64_expand_vector_init (rtx target, rtx vals)
       emit_insn (GEN_FCN (icode) (target, x, GEN_INT (i)));
     }
 }
 
+DEBUG_FUNCTION
+static void
+aarch64_expand_vector_init_debug_seq (rtx_insn *seq, const char *s)
+{
+  fprintf (stderr, "%s: %u\n", s, seq_cost (seq, !optimize_size));
+  for (rtx_insn *i = seq; i; i = NEXT_INSN (i))
+    {
+      debug_rtx (PATTERN (i));
+      fprintf (stderr, "cost: %d\n", pattern_cost (PATTERN (i), !optimize_size));
+    }
+}
+
+/* Return a PARALLEL containing the even (EVEN_P) or odd (!EVEN_P)
+   lanes of VALS in the corresponding 64-bit vector mode.  */
+static rtx
+aarch64_expand_vector_init_split_vals (machine_mode mode, rtx vals, bool even_p)
+{
+  int n = XVECLEN (vals, 0);
+  machine_mode new_mode
+    = aarch64_simd_container_mode (GET_MODE_INNER (mode), 64);
+  rtvec vec = rtvec_alloc (n / 2);
+  for (int i = 0; i < n / 2; i++)
+    RTVEC_ELT (vec, i) = (even_p) ? XVECEXP (vals, 0, 2 * i)
+				  : XVECEXP (vals, 0, 2 * i + 1);
+  return gen_rtx_PARALLEL (new_mode, vec);
+}
+
+/* Initialise TARGET to contain VALS, choosing the cheaper of two
+   strategies:
+   (a) Split VALS into even and odd halves, recursively call this
+       function to initialise each half, and merge the results
+       with zip1.
+   (b) Expand VALS directly using aarch64_expand_vector_init_fallback.
+   The costs of the two generated sequences are compared and the
+   cheaper one is emitted.  */
+
+void
+aarch64_expand_vector_init (rtx target, rtx vals)
+{
+  machine_mode mode = GET_MODE (target);
+  int n_elts = XVECLEN (vals, 0);
+
+  if (n_elts < 8
+      || known_eq (GET_MODE_BITSIZE (mode), 64))
+    {
+      aarch64_expand_vector_init_fallback (target, vals);
+      return;
+    }
+
+  start_sequence ();
+  rtx dest[2];
+  unsigned costs[2];
+  for (int i = 0; i < 2; i++)
+    {
+      start_sequence ();
+      rtx new_vals
+	= aarch64_expand_vector_init_split_vals (mode, vals, i == 0);
+      rtx tmp_reg = gen_reg_rtx (GET_MODE (new_vals));
+      aarch64_expand_vector_init (tmp_reg, new_vals);
+      dest[i] = gen_rtx_SUBREG (mode, tmp_reg, 0);
+      rtx_insn *rec_seq = get_insns ();
+      end_sequence ();
+      costs[i] = seq_cost (rec_seq, !optimize_size);
+      emit_insn (rec_seq);
+    }
+
+  rtvec v = gen_rtvec (2, dest[0], dest[1]);
+  rtx_insn *zip1_insn
+    = emit_set_insn (target, gen_rtx_UNSPEC (mode, v, UNSPEC_ZIP1));
+  unsigned seq_total_cost
+    = (!optimize_size) ? std::max (costs[0], costs[1]) : costs[0] + costs[1];
+  seq_total_cost += insn_cost (zip1_insn, !optimize_size);
+
+  rtx_insn *seq = get_insns ();
+  end_sequence ();
+
+  start_sequence ();
+  aarch64_expand_vector_init_fallback (target, vals);
+  rtx_insn *fallback_seq = get_insns ();
+  unsigned fallback_seq_cost = seq_cost (fallback_seq, !optimize_size);
+  end_sequence ();
+
+  emit_insn (seq_total_cost < fallback_seq_cost ? seq : fallback_seq);
+}
+
 /* Emit RTL corresponding to:
    insr TARGET, ELEM.  */
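As an illustration (not part of the patch): the effect of strategy (a) on the
vec-init-18.c case below can be written with ACLE intrinsics. A minimal
sketch, assuming only <arm_neon.h>; the name interleave_x_y is made up, and
the patch emits the equivalent RTL directly rather than going through
intrinsics:

#include <arm_neon.h>

/* { x, y, x, y, x, y, x, y }: two 64-bit dups merged by one zip1,
   instead of a dup followed by seven lane inserts.  */
int16x8_t interleave_x_y (int16_t x, int16_t y)
{
  int16x4_t evens = vdup_n_s16 (x);  /* lanes 0, 2, 4, 6 */
  int16x4_t odds = vdup_n_s16 (y);   /* lanes 1, 3, 5, 7 */
  /* zip1 interleaves the low 64-bit halves of its operands; the high
     halves (duplicated here) are ignored.  */
  return vzip1q_s16 (vcombine_s16 (evens, evens),
		     vcombine_s16 (odds, odds));
}

This is exactly the shape the updated vec-init-18.c test expects: two
dup v.4h instructions feeding a single zip1 v.8h.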
diff --git a/gcc/testsuite/gcc.target/aarch64/interleave-init-1.c b/gcc/testsuite/gcc.target/aarch64/vec-init-18.c
similarity index 82%
rename from gcc/testsuite/gcc.target/aarch64/interleave-init-1.c
rename to gcc/testsuite/gcc.target/aarch64/vec-init-18.c
index ee775048589..e812d3946de 100644
--- a/gcc/testsuite/gcc.target/aarch64/interleave-init-1.c
+++ b/gcc/testsuite/gcc.target/aarch64/vec-init-18.c
@@ -7,8 +7,8 @@
 /*
 ** foo:
 ** ...
-** dup v[0-9]+\.8h, w[0-9]+
-** dup v[0-9]+\.8h, w[0-9]+
+** dup v[0-9]+\.4h, w[0-9]+
+** dup v[0-9]+\.4h, w[0-9]+
 ** zip1 v[0-9]+\.8h, v[0-9]+\.8h, v[0-9]+\.8h
 ** ...
 ** ret
@@ -23,8 +23,8 @@ int16x8_t foo(int16_t x, int y)
 /*
 ** foo2:
 ** ...
-** dup v[0-9]+\.8h, w[0-9]+
-** movi v[0-9]+\.8h, 0x1
+** dup v[0-9]+\.4h, w[0-9]+
+** movi v[0-9]+\.4h, 0x1
 ** zip1 v[0-9]+\.8h, v[0-9]+\.8h, v[0-9]+\.8h
 ** ...
 ** ret
diff --git a/gcc/testsuite/gcc.target/aarch64/vec-init-19.c b/gcc/testsuite/gcc.target/aarch64/vec-init-19.c
new file mode 100644
index 00000000000..e28fdcda29d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/vec-init-19.c
@@ -0,0 +1,21 @@
+/* { dg-do compile } */
+/* { dg-options "-O3" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#include <arm_neon.h>
+
+/*
+** f_s8:
+** ...
+** dup v[0-9]+\.8b, w[0-9]+
+** adrp x[0-9]+, \.LC[0-9]+
+** ldr d[0-9]+, \[x[0-9]+, #:lo12:\.LC[0-9]+\]
+** zip1 v[0-9]+\.16b, v[0-9]+\.16b, v[0-9]+\.16b
+** ret
+*/
+
+int8x16_t f_s8(int8_t x)
+{
+  return (int8x16_t) { x, 1, x, 2, x, 3, x, 4,
+                       x, 5, x, 6, x, 7, x, 8 };
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/vec-init-20.c b/gcc/testsuite/gcc.target/aarch64/vec-init-20.c
new file mode 100644
index 00000000000..9366ca349b6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/vec-init-20.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-options "-O3" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#include <arm_neon.h>
+
+/*
+** f_s8:
+** ...
+** adrp x[0-9]+, \.LC[0-9]+
+** dup v[0-9]+\.8b, w[0-9]+
+** ldr d[0-9]+, \[x[0-9]+, #:lo12:\.LC[0-9]+\]
+** ins v0\.b\[0\], w0
+** zip1 v[0-9]+\.16b, v[0-9]+\.16b, v[0-9]+\.16b
+** ret
+*/
+
+int8x16_t f_s8(int8_t x, int8_t y)
+{
+  return (int8x16_t) { x, y, 1, y, 2, y, 3, y,
+                       4, y, 5, y, 6, y, 7, y };
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/vec-init-21.c b/gcc/testsuite/gcc.target/aarch64/vec-init-21.c
new file mode 100644
index 00000000000..e16459486d7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/vec-init-21.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-options "-O3" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#include <arm_neon.h>
+
+/*
+** f_s8:
+** ...
+** adrp x[0-9]+, \.LC[0-9]+
+** ldr q[0-9]+, \[x[0-9]+, #:lo12:\.LC[0-9]+\]
+** ins v0\.b\[0\], w0
+** ins v0\.b\[1\], w1
+** ...
+** ret
+*/
+
+int8x16_t f_s8(int8_t x, int8_t y)
+{
+  return (int8x16_t) { x, y, 1, 2, 3, 4, 5, 6,
+                       7, 8, 9, 10, 11, 12, 13, 14 };
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/vec-init-22-size.c b/gcc/testsuite/gcc.target/aarch64/vec-init-22-size.c
new file mode 100644
index 00000000000..8f35854c008
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/vec-init-22-size.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-options "-Os" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+/* Verify that the fallback code sequence is chosen over the
+   recursively generated code sequence merged with zip1.  */
+
+/*
+** f_s16:
+** ...
+** sxth w0, w0
+** fmov s0, w0
+** ins v0\.h\[1\], w1
+** ins v0\.h\[2\], w2
+** ins v0\.h\[3\], w3
+** ins v0\.h\[4\], w4
+** ins v0\.h\[5\], w5
+** ins v0\.h\[6\], w6
+** ins v0\.h\[7\], w7
+** ...
+** ret
+*/
+
+#include "vec-init-22.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/vec-init-22-speed.c b/gcc/testsuite/gcc.target/aarch64/vec-init-22-speed.c
new file mode 100644
index 00000000000..172d56ffdf1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/vec-init-22-speed.c
@@ -0,0 +1,27 @@
+/* { dg-do compile } */
+/* { dg-options "-O3" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+/* Verify that we generate code for the even and odd halves recursively
+   rather than using the fallback sequence.  The resulting code is
+   longer, but has fewer dependencies and therefore a lower cost.  */
+
+/*
+** f_s16:
+** ...
+** sxth w0, w0
+** sxth w1, w1
+** fmov d0, x0
+** fmov d1, x1
+** ins v[0-9]+\.h\[1\], w2
+** ins v[0-9]+\.h\[1\], w3
+** ins v[0-9]+\.h\[2\], w4
+** ins v[0-9]+\.h\[2\], w5
+** ins v[0-9]+\.h\[3\], w6
+** ins v[0-9]+\.h\[3\], w7
+** zip1 v[0-9]+\.8h, v[0-9]+\.8h, v[0-9]+\.8h
+** ...
+** ret
+*/
+
+#include "vec-init-22.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/vec-init-22.h b/gcc/testsuite/gcc.target/aarch64/vec-init-22.h
new file mode 100644
index 00000000000..15b889d4097
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/vec-init-22.h
@@ -0,0 +1,7 @@
+#include <arm_neon.h>
+
+int16x8_t f_s16 (int16_t x0, int16_t x1, int16_t x2, int16_t x3,
+		 int16_t x4, int16_t x5, int16_t x6, int16_t x7)
+{
+  return (int16x8_t) { x0, x1, x2, x3, x4, x5, x6, x7 };
+}
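For reference, the same idea applied to the vec-init-22-speed.c case, as a
hedged intrinsics sketch (the name f_s16_split is hypothetical; the compiler
derives this form from the plain initialiser in vec-init-22.h):

#include <arm_neon.h>

/* Build the even and odd lanes as two independent 64-bit vectors
   (two parallel dependency chains of fmov plus three ins each),
   then merge them with a single zip1.  */
int16x8_t f_s16_split (int16_t x0, int16_t x1, int16_t x2, int16_t x3,
		       int16_t x4, int16_t x5, int16_t x6, int16_t x7)
{
  int16x4_t evens = { x0, x2, x4, x6 };  /* lanes 0, 2, 4, 6 */
  int16x4_t odds = { x1, x3, x5, x7 };   /* lanes 1, 3, 5, 7 */
  return vzip1q_s16 (vcombine_s16 (evens, evens),
		     vcombine_s16 (odds, odds));
}

At -Os the costs of the two halves are summed rather than maxed, so the
shorter fallback sequence of eight lane inserts wins; that is what
vec-init-22-size.c checks.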