diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md index 8e1afe1704490cf399ba46e68acc8131cc932259..7d917584d5bce612ce2ebff68fd3e67b57e0d403 100644 --- a/gcc/config/aarch64/aarch64-simd.md +++ b/gcc/config/aarch64/aarch64-simd.md @@ -3211,7 +3211,7 @@ ;; In this insn, operand 1 should be low, and operand 2 the high part of the ;; dest vector. -(define_insn "*aarch64_combinez" +(define_insn "@aarch64_combinez" [(set (match_operand: 0 "register_operand" "=w,w,w") (vec_concat: (match_operand:VDC 1 "general_operand" "w,?r,m") @@ -3225,7 +3225,7 @@ (set_attr "arch" "simd,fp,simd")] ) -(define_insn "*aarch64_combinez_be" +(define_insn "@aarch64_combinez_be" [(set (match_operand: 0 "register_operand" "=w,w,w") (vec_concat: (match_operand:VDC 2 "aarch64_simd_or_scalar_imm_zero") @@ -5954,6 +5954,15 @@ DONE; }) +(define_expand "vec_init" + [(match_operand:VQ_NO2E 0 "register_operand" "") + (match_operand 1 "" "")] + "TARGET_SIMD" +{ + aarch64_expand_vector_init (operands[0], operands[1]); + DONE; +}) + (define_insn "*aarch64_simd_ld1r" [(set (match_operand:VALL_F16 0 "register_operand" "=w") (vec_duplicate:VALL_F16 diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c index 0c2c17ed8269923723d066b250974ee1ff423d26..52c933cfdac20c5c566c13ae2528f039efda4c46 100644 --- a/gcc/config/aarch64/aarch64.c +++ b/gcc/config/aarch64/aarch64.c @@ -15075,6 +15075,43 @@ aarch64_expand_vector_init (rtx target, rtx vals) rtx v0 = XVECEXP (vals, 0, 0); bool all_same = true; + /* This is a special vec_init where N is not an element mode but a + vector mode with half the elements of M. We expect to find two entries + of mode N in VALS and we must put their concatenation into TARGET. 
*/ + if (XVECLEN (vals, 0) == 2 && VECTOR_MODE_P (GET_MODE (XVECEXP (vals, 0, 0)))) + { + rtx lo = XVECEXP (vals, 0, 0); + rtx hi = XVECEXP (vals, 0, 1); + machine_mode narrow_mode = GET_MODE (lo); + gcc_assert (GET_MODE_INNER (narrow_mode) == inner_mode); + gcc_assert (narrow_mode == GET_MODE (hi)); + + /* When we want to concatenate a half-width vector with zeroes we can + use the aarch64_combinez[_be] patterns. Just make sure that the + zeroes are in the right half. */ + if (BYTES_BIG_ENDIAN + && aarch64_simd_imm_zero (lo, narrow_mode) + && general_operand (hi, narrow_mode)) + emit_insn (gen_aarch64_combinez_be (narrow_mode, target, hi, lo)); + else if (!BYTES_BIG_ENDIAN + && aarch64_simd_imm_zero (hi, narrow_mode) + && general_operand (lo, narrow_mode)) + emit_insn (gen_aarch64_combinez (narrow_mode, target, lo, hi)); + else + { + /* Else create the two half-width registers and combine them. */ + if (!REG_P (lo)) + lo = force_reg (GET_MODE (lo), lo); + if (!REG_P (hi)) + hi = force_reg (GET_MODE (hi), hi); + + if (BYTES_BIG_ENDIAN) + std::swap (lo, hi); + emit_insn (gen_aarch64_simd_combine (narrow_mode, target, lo, hi)); + } + return; + } + /* Count the number of variable elements to initialise. */ for (int i = 0; i < n_elts; ++i) { diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md index d59d72833c527da80af1ac8dd7264c8dd86047c7..c495e76b3b73e3a19603330989121fce51c3fe35 100644 --- a/gcc/config/aarch64/iterators.md +++ b/gcc/config/aarch64/iterators.md @@ -771,6 +771,7 @@ ;; Half modes of all vector modes, in lower-case. (define_mode_attr Vhalf [(V8QI "v4qi") (V16QI "v8qi") (V4HI "v2hi") (V8HI "v4hi") + (V8HF "v4hf") (V2SI "si") (V4SI "v2si") (V2DI "di") (V2SF "sf") (V4SF "v2sf") (V2DF "df")])