diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
index 17c1e23e5b5..0090fb47d98 100644
--- a/gcc/config/aarch64/aarch64.cc
+++ b/gcc/config/aarch64/aarch64.cc
@@ -15065,6 +15065,11 @@ cost_plus:
       return false;
 
     case UNSPEC:
+      /* FIXME: What cost to use for zip1?
+         Currently using the default cost.  */
+      if (XINT (x, 1) == UNSPEC_ZIP1)
+        break;
+
       /* The floating point round to integer frint* instructions.  */
       if (aarch64_frint_unspec_p (XINT (x, 1)))
         {
@@ -21972,11 +21977,44 @@ aarch64_simd_make_constant (rtx vals)
     return NULL_RTX;
 }
 
+/* The algorithm will fill matches[*][0] with the earliest matching element,
+   and matches[X][1] with the count of duplicate elements (if X is the
+   earliest element which has duplicates).  */
+
+static void
+aarch64_expand_vector_init_get_most_repeating_elem (rtx vals, int n,
+                                                    int (*matches)[2],
+                                                    int &maxv, int &maxelement)
+{
+  memset (matches, 0, 16 * 2 * sizeof (int));
+  for (int i = 0; i < n; i++)
+    {
+      for (int j = 0; j <= i; j++)
+        {
+          if (rtx_equal_p (XVECEXP (vals, 0, i), XVECEXP (vals, 0, j)))
+            {
+              matches[i][0] = j;
+              matches[j][1]++;
+              break;
+            }
+        }
+    }
+
+  maxelement = 0;
+  maxv = 0;
+  for (int i = 0; i < n; i++)
+    if (matches[i][1] > maxv)
+      {
+        maxelement = i;
+        maxv = matches[i][1];
+      }
+}
+
 /* Expand a vector initialisation sequence, such that TARGET is
    initialised to contain VALS.  */
 
-void
-aarch64_expand_vector_init (rtx target, rtx vals)
+static void
+aarch64_expand_vector_init_fallback (rtx target, rtx vals)
 {
   machine_mode mode = GET_MODE (target);
   scalar_mode inner_mode = GET_MODE_INNER (mode);
@@ -22036,38 +22074,6 @@ aarch64_expand_vector_init (rtx target, rtx vals)
       return;
     }
 
-  /* Check for interleaving case.
-     For eg if initializer is (int16x8_t) {x, y, x, y, x, y, x, y}.
-     Generate following code:
-     dup v0.h, x
-     dup v1.h, y
-     zip1 v0.h, v0.h, v1.h
-     for "large enough" initializer.  */
-
-  if (n_elts >= 8)
-    {
-      int i;
-      for (i = 2; i < n_elts; i++)
-        if (!rtx_equal_p (XVECEXP (vals, 0, i), XVECEXP (vals, 0, i % 2)))
-          break;
-
-      if (i == n_elts)
-        {
-          machine_mode mode = GET_MODE (target);
-          rtx dest[2];
-
-          for (int i = 0; i < 2; i++)
-            {
-              rtx x = expand_vector_broadcast (mode, XVECEXP (vals, 0, i));
-              dest[i] = force_reg (mode, x);
-            }
-
-          rtvec v = gen_rtvec (2, dest[0], dest[1]);
-          emit_set_insn (target, gen_rtx_UNSPEC (mode, v, UNSPEC_ZIP1));
-          return;
-        }
-    }
-
   enum insn_code icode = optab_handler (vec_set_optab, mode);
   gcc_assert (icode != CODE_FOR_nothing);
 
@@ -22075,33 +22081,15 @@
      the insertion using dup for the most common element
      followed by insertions.  */
 
-  /* The algorithm will fill matches[*][0] with the earliest matching element,
-     and matches[X][1] with the count of duplicate elements (if X is the
-     earliest element which has duplicates).  */
 
   if (n_var == n_elts && n_elts <= 16)
     {
-      int matches[16][2] = {0};
-      for (int i = 0; i < n_elts; i++)
-        {
-          for (int j = 0; j <= i; j++)
-            {
-              if (rtx_equal_p (XVECEXP (vals, 0, i), XVECEXP (vals, 0, j)))
-                {
-                  matches[i][0] = j;
-                  matches[j][1]++;
-                  break;
-                }
-            }
-        }
-      int maxelement = 0;
-      int maxv = 0;
-      for (int i = 0; i < n_elts; i++)
-        if (matches[i][1] > maxv)
-          {
-            maxelement = i;
-            maxv = matches[i][1];
-          }
+      int matches[16][2];
+      int maxelement, maxv;
+      aarch64_expand_vector_init_get_most_repeating_elem (vals, n_elts,
+                                                          matches,
+                                                          maxv,
+                                                          maxelement);
 
       /* Create a duplicate of the most common element, unless all elements
          are equally useless to us, in which case just immediately set the
@@ -22189,7 +22177,7 @@ aarch64_expand_vector_init (rtx target, rtx vals)
             }
           XVECEXP (copy, 0, i) = subst;
         }
-      aarch64_expand_vector_init (target, copy);
+      aarch64_expand_vector_init_fallback (target, copy);
     }
 
   /* Insert the variable lanes directly.  */
@@ -22203,6 +22191,126 @@ aarch64_expand_vector_init (rtx target, rtx vals)
     }
 }
 
+/* Function to pad elements in VALS as described in the comment
+   for aarch64_expand_vector_init_split_vals.  */
+
+static rtx
+aarch64_expand_vector_init_get_padded_elem (rtx vals, int n)
+{
+  for (int i = 0; i < n; i++)
+    {
+      rtx elem = XVECEXP (vals, 0, i);
+      if (CONST_INT_P (elem) || CONST_DOUBLE_P (elem))
+        return elem;
+    }
+
+  int matches[16][2];
+  int maxv, maxelement;
+  aarch64_expand_vector_init_get_most_repeating_elem (vals, n, matches, maxv, maxelement);
+  return XVECEXP (vals, 0, maxelement);
+}
+
+/* Split VALS into an even or odd half; since the mode stays the same,
+   we have to pad the result with extra elements to fill the vector length.
+   The function uses a couple of heuristics for padding:
+   (1) If the split portion contains a constant, pad the vector with that
+       constant element.
+       E.g. if the split portion is {x, 1, 2, 3} and the mode is V8HI,
+       the result is {x, 1, 2, 3, 1, 1, 1, 1}.
+   (2) If the split portion consists entirely of variables, use the most
+       frequently repeating variable as the padding element.
+       E.g. if the split portion is {x, x, x, y} and the mode is V8HI,
+       the result is {x, x, x, y, x, x, x, x}.
+       We use the most frequently repeating variable so that dup initializes
+       most of the vector; the remaining elements are then inserted
+       individually, which is handled by aarch64_expand_vector_init_fallback.  */
+
+static rtx
+aarch64_expand_vector_init_split_vals (rtx vals, bool even_p)
+{
+  rtx new_vals = copy_rtx (vals);
+  int n = XVECLEN (vals, 0);
+  int i;
+  for (i = 0; i < n / 2; i++)
+    XVECEXP (new_vals, 0, i)
+      = XVECEXP (new_vals, 0, (even_p) ? 2 * i : 2 * i + 1);
+
+  rtx padded_val
+    = aarch64_expand_vector_init_get_padded_elem (new_vals, n / 2);
+  for (; i < n; i++)
+    XVECEXP (new_vals, 0, i) = padded_val;
+  return new_vals;
+}
+
+DEBUG_FUNCTION
+static void
+aarch64_expand_vector_init_debug_seq (rtx_insn *seq, const char *s)
+{
+  fprintf (stderr, "%s: %u\n", s, seq_cost (seq, !optimize_size));
+  for (rtx_insn *i = seq; i; i = NEXT_INSN (i))
+    {
+      debug_rtx (PATTERN (i));
+      fprintf (stderr, "cost: %d\n", pattern_cost (PATTERN (i), !optimize_size));
+    }
+}
+
+/* The function does the following:
+   (a) Generate a code sequence by splitting VALS into even and odd halves,
+       recursively calling itself to initialize each half, and then merging
+       the halves with zip1.
+   (b) Generate a code sequence directly using
+       aarch64_expand_vector_init_fallback.
+   (c) Compare the costs of the sequences generated by (a) and (b), and
+       choose the more efficient one.  */
+
+void
+aarch64_expand_vector_init_1 (rtx target, rtx vals, int n_elts)
+{
+  if (n_elts < 8)
+    {
+      aarch64_expand_vector_init_fallback (target, vals);
+      return;
+    }
+
+  machine_mode mode = GET_MODE (target);
+
+  start_sequence ();
+  rtx dest[2];
+  for (int i = 0; i < 2; i++)
+    {
+      dest[i] = gen_reg_rtx (mode);
+      rtx new_vals
+        = aarch64_expand_vector_init_split_vals (vals, (i % 2) == 0);
+      aarch64_expand_vector_init_1 (dest[i], new_vals, n_elts / 2);
+    }
+
+  rtvec v = gen_rtvec (2, dest[0], dest[1]);
+  emit_set_insn (target, gen_rtx_UNSPEC (mode, v, UNSPEC_ZIP1));
+
+  rtx_insn *seq = get_insns ();
+  end_sequence ();
+
+  start_sequence ();
+  aarch64_expand_vector_init_fallback (target, vals);
+  rtx_insn *fallback_seq = get_insns ();
+  end_sequence ();
+
+  emit_insn (seq_cost (seq, !optimize_size)
+             < seq_cost (fallback_seq, !optimize_size)
+             ? seq : fallback_seq);
+}
+
+/* Wrapper around aarch64_expand_vector_init_1.  */
+
+void
+aarch64_expand_vector_init (rtx target, rtx vals)
+{
+  aarch64_expand_vector_init_1 (target, vals, XVECLEN (vals, 0));
+}
+
 /* Emit RTL corresponding to:
    insr TARGET, ELEM.  */
diff --git a/gcc/testsuite/gcc.target/aarch64/interleave-init-1.c b/gcc/testsuite/gcc.target/aarch64/vec-init-18.c
similarity index 100%
rename from gcc/testsuite/gcc.target/aarch64/interleave-init-1.c
rename to gcc/testsuite/gcc.target/aarch64/vec-init-18.c
diff --git a/gcc/testsuite/gcc.target/aarch64/vec-init-19.c b/gcc/testsuite/gcc.target/aarch64/vec-init-19.c
new file mode 100644
index 00000000000..d204c7e1f8b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/vec-init-19.c
@@ -0,0 +1,21 @@
+/* { dg-do compile } */
+/* { dg-options "-O3" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#include <arm_neon.h>
+
+/*
+** f_s8:
+**	...
+**	dup	v[0-9]+\.16b, w[0-9]+
+**	adrp	x[0-9]+, \.LC[0-9]+
+**	ldr	q[0-9]+, \[x[0-9]+, #:lo12:.LC[0-9]+\]
+**	zip1	v[0-9]+\.16b, v[0-9]+\.16b, v[0-9]+\.16b
+**	ret
+*/
+
+int8x16_t f_s8(int8_t x)
+{
+  return (int8x16_t) { x, 1, x, 2, x, 3, x, 4,
+                       x, 5, x, 6, x, 7, x, 8 };
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/vec-init-20.c b/gcc/testsuite/gcc.target/aarch64/vec-init-20.c
new file mode 100644
index 00000000000..c2c97469940
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/vec-init-20.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-options "-O3" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#include <arm_neon.h>
+
+/*
+** f_s8:
+**	...
+**	adrp	x[0-9]+, \.LC[0-9]+
+**	dup	v[0-9]+\.16b, w[0-9]+
+**	ldr	q[0-9]+, \[x[0-9]+, #:lo12:\.LC[0-9]+\]
+**	ins	v0\.b\[0\], w0
+**	zip1	v[0-9]+\.16b, v[0-9]+\.16b, v[0-9]+\.16b
+**	ret
+*/
+
+int8x16_t f_s8(int8_t x, int8_t y)
+{
+  return (int8x16_t) { x, y, 1, y, 2, y, 3, y,
+                       4, y, 5, y, 6, y, 7, y };
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/vec-init-21.c b/gcc/testsuite/gcc.target/aarch64/vec-init-21.c
new file mode 100644
index 00000000000..e16459486d7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/vec-init-21.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-options "-O3" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#include <arm_neon.h>
+
+/*
+** f_s8:
+**	...
+**	adrp	x[0-9]+, \.LC[0-9]+
+**	ldr	q[0-9]+, \[x[0-9]+, #:lo12:\.LC[0-9]+\]
+**	ins	v0\.b\[0\], w0
+**	ins	v0\.b\[1\], w1
+**	...
+**	ret
+*/
+
+int8x16_t f_s8(int8_t x, int8_t y)
+{
+  return (int8x16_t) { x, y, 1, 2, 3, 4, 5, 6,
+                       7, 8, 9, 10, 11, 12, 13, 14 };
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/vec-init-22.c b/gcc/testsuite/gcc.target/aarch64/vec-init-22.c
new file mode 100644
index 00000000000..e5016a47a3b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/vec-init-22.c
@@ -0,0 +1,30 @@
+/* { dg-do compile } */
+/* { dg-options "-O3" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#include <arm_neon.h>
+
+/* Verify that the fallback code sequence is chosen over the
+   recursively generated code sequence merged with zip1.  */
+
+/*
+** f_s16:
+**	...
+**	sxth	w0, w0
+**	fmov	s0, w0
+**	ins	v0\.h\[1\], w1
+**	ins	v0\.h\[2\], w2
+**	ins	v0\.h\[3\], w3
+**	ins	v0\.h\[4\], w4
+**	ins	v0\.h\[5\], w5
+**	ins	v0\.h\[6\], w6
+**	ins	v0\.h\[7\], w7
+**	...
+**	ret
+*/
+
+int16x8_t f_s16 (int16_t x0, int16_t x1, int16_t x2, int16_t x3,
+                 int16_t x4, int16_t x5, int16_t x6, int16_t x7)
+{
+  return (int16x8_t) { x0, x1, x2, x3, x4, x5, x6, x7 };
+}
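
For reference, vec-init-18.c is not shown in the diff above because it is a pure rename of the existing interleave-init-1.c. A minimal sketch of the interleaved-initializer case that test covers is given below; it assumes the same dg directives as the new tests, and the function name f_s16 and the exact regex lines are illustrative rather than copied from the actual file. The expected dup/dup/zip1 sequence follows the interleaving case described in the comment removed from aarch64.cc:

/* { dg-do compile } */
/* { dg-options "-O3" } */
/* { dg-final { check-function-bodies "**" "" "" } } */

#include <arm_neon.h>

/*
** f_s16:
**	...
**	dup	v[0-9]+\.8h, w[0-9]+
**	dup	v[0-9]+\.8h, w[0-9]+
**	zip1	v[0-9]+\.8h, v[0-9]+\.8h, v[0-9]+\.8h
**	ret
*/

int16x8_t f_s16 (int16_t x, int16_t y)
{
  /* Alternating x/y lanes should be expanded as two dups merged by zip1.  */
  return (int16x8_t) { x, y, x, y, x, y, x, y };
}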