public inbox for gcc-patches@gcc.gnu.org
* [PATCH][RFC] aarch64: Scheduling classification for AArch64 SVE patterns
@ 2022-11-23  9:49 Kyrylo Tkachov
  0 siblings, 0 replies; 1+ messages in thread
From: Kyrylo Tkachov @ 2022-11-23  9:49 UTC (permalink / raw)
  To: gcc-patches

[-- Attachment #1: Type: text/plain, Size: 2075 bytes --]

Hi all,

This patch adds a scheduling-type classification for SVE (and SVE2)
instructions to the aarch64 backend. It introduces a new sve_type define_attr
that can be attached to MD patterns and used in scheduling descriptions.
sve_type covers the bulk of the SVE and SVE2 instruction set. A few other
small attributes are defined to describe properties such as gather/scatter
addressing modes and first-faulting or non-faulting loads.
This saves us from having to define a cross-product of sve_type values for
certain classes of instructions.
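
To give an idea of how these attributes are meant to be consumed, here is a
minimal sketch of a scheduling description keyed on them.  The automaton,
unit and tune names below are hypothetical and are not part of this patch:

(define_automaton "example_sve")
(define_cpu_unit "example_sve_load" "example_sve")

;; Hypothetical: give first-faulting contiguous loads a higher latency than
;; plain LD1s.  The auxiliary sve_load_ff attribute refines the single ld1
;; type, so no separate ld1_ff sve_type value is needed.  The more specific
;; reservation is listed first so that it takes precedence.
(define_insn_reservation "example_sve_ldff1" 8
  (and (eq_attr "tune" "example")
       (eq_attr "sve_type" "ld1")
       (eq_attr "sve_load_ff" "ff"))
  "example_sve_load")

(define_insn_reservation "example_sve_ld1" 6
  (and (eq_attr "tune" "example")
       (eq_attr "sve_type" "ld1"))
  "example_sve_load")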

While writing this patch I referred to several Software Optimization Guides
for Arm Cortex and Neoverse cores, and I believe the classification in this
patch is sufficiently granular to capture the most important instruction
groupings for latency and throughput without requiring an unmaintainably
large set of sve_type values.

I did experiment with writing a scheduling description for an SVE core that
uses these types, but my description did not give any performance
improvement, so I'm not proposing it here.

After an offline discussion with Richard, I don't think it makes sense to
push this without an accompanying CPU model that demonstrates a useful
performance uplift. My hope is that this patch can be reused by someone who
wants to write a scheduling model that shows a meaningful performance
benefit. In that case it would also be worth investigating whether the same
uplift can be achieved with a more coarse-grained classification, to save us
from maintaining more types than necessary.
So this patch is not intended for committing in GCC 13.1, but it may be of
use to folks.
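
For what it's worth, coarsening would not require changing the attribute
itself: a model can simply list several sve_type values in one reservation
condition.  Continuing the hypothetical sketch above (again, the unit and
tune names are illustrative):

(define_cpu_unit "example_sve_fp_div" "example_sve")

;; Treat all SVE floating-point divisions identically, collapsing the
;; per-element-size fdiv_h/fdiv_s/fdiv_d types into a single reservation.
(define_insn_reservation "example_sve_fdiv" 13
  (and (eq_attr "tune" "example")
       (eq_attr "sve_type" "fdiv_h,fdiv_s,fdiv_d"))
  "example_sve_fp_div")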

Bootstrapped and tested on aarch64-none-linux-gnu.

Thanks,
Kyrill

gcc/ChangeLog:

	* config/aarch64/aarch64-sched-types.md: New file.
	* config/aarch64/aarch64-sve.md: Update patterns with scheduling
	attributes.
	* config/aarch64/aarch64-sve2.md: Likewise.
	* config/aarch64/aarch64.md: Include aarch64-sched-types.md.
	* config/aarch64/iterators.md (sve_sched_code): Define.
	(sve_sched_fp_code): Likewise.
	(sve_sched): Likewise.
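
For reference, the sve_sched_code and sve_sched attributes let shared
patterns pick the right sve_type automatically: sve_sched_code maps RTL
codes onto type names and sve_sched does the same for unspecs.  A
hypothetical sketch of the former (the mappings shown are illustrative,
not copied from the patch):

;; Illustrative only: map RTL codes to sve_type values so that
;; iterator-based patterns can use (set_attr "sve_type" "<sve_sched_code>").
;; Values may themselves contain mode attributes such as <Vetype>, which
;; are resolved per pattern.
(define_code_attr sve_sched_code
  [(plus "int_simple_binary")
   (minus "int_simple_binary")
   (and "int_simple_binary")
   (ashift "simple_shift")
   (mult "mul_<Vetype>")])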

[-- Attachment #2: sve-sched-types.patch --]
[-- Type: application/octet-stream, Size: 154499 bytes --]

diff --git a/gcc/config/aarch64/aarch64-sched-types.md b/gcc/config/aarch64/aarch64-sched-types.md
new file mode 100644
index 0000000000000000000000000000000000000000..227255fc76e6c0f6e03074b865a9511d1faaae14
--- /dev/null
+++ b/gcc/config/aarch64/aarch64-sched-types.md
@@ -0,0 +1,156 @@
+;; Instruction Classification for AArch64 SVE instructions
+
+;; Copyright (C) 2022 Free Software Foundation, Inc.
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3.  If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; Attribute describing whether an SVE load has First-Faulting
+;; or Non-Faulting properties.
+(define_attr "sve_load_ff" "ff,nf,normal" (const_string "normal"))
+
+;; Attribute describing if a memory operation is non-termporal.
+(define_attr "memop_nontemporal" "yes,no" (const_string "no"))
+
+;; Attributes describing the different SVE gather/scatter addressing modes
+;; for scheduling purposes.
+(define_attr "sve_gather_scatter_vec_offset_size" "s,d,na" (const_string "na"))
+(define_attr "sve_gather_scatter_vec_offset_packed" "packed,unpacked,na" (const_string "na"))
+(define_attr "sve_gather_scatter_vec_offset_scaled" "scaled,unscaled,na" (const_string "na"))
+
+;; Attribute describing different operand types for SVE INDEX instructions.
+(define_attr "sve_index_operands" "reg_imm,imm_reg,reg_reg,imm_imm" (const_string "reg_imm"))
+
+(define_attr "sve_type"
+ "mov_z, mov_p,
+  ldr_z, ldr_p,
+  str_z, str_p,
+  mov_z_imm,
+  ptrues,
+  rdffr, rdffrs, setffr, wrffr,
+  ld1, st1,
+  ld2, st2,
+  ld3, st3,
+  ld4, st4,
+  ld1_extend, st1_truncate,
+  ld1r, ld1ro,
+  load_gather_s_imm, load_gather_d_imm,
+  load_gather_s_scalar, load_gather_d_scalar,
+  load_gather_s_vec, load_gather_d_vec,
+  store_scatter_s_imm, store_scatter_d_imm,
+  store_scatter_s_scalar, store_scatter_d_scalar,
+  store_scatter_s_vec, store_scatter_d_vec,
+  prefetch, prefetch_gather,
+  mov_from_gp, mov_from_simd, dup_from_q,
+  insr_gp, insr_simd,
+  index_b, index_h, index_s, index_d,
+  dup_imm, dup_index, dup_gp,
+  ext,
+  lastab_simd, lastab_gp,
+  int_simple_unary,
+  int_simple_binary,
+  simple_shift,
+  rev, rev_p,
+  extend,
+  fexpa, ftmad, ftsmul, ftssel,
+  flogb_h, flogb_s, flogb_d,
+  frint_h, frint_s, frint_d,
+  frec_estimate_h, frec_estimate_s, frec_estimate_d,
+  frec_step_h, frec_step_s, frec_step_d,
+  fsqrt_h, fsqrt_s, fsqrt_d,
+  not_p,
+  mul_b, mul_h, mul_s, mul_d,
+  mul_accum_b, mul_accum_h, mul_accum_s, mul_accum_d,
+  mul_long,
+  mul_long_accum,
+  pred_count_vec, pred_count_scalar,
+  pred_count_active_vec, pred_count_active_scalar,
+  adr,
+  abd,
+  qaddsub,
+  idiv_s, idiv_d,
+  fdiv_h, fdiv_s, fdiv_d,
+  shift_reverse,
+  asrd,
+  fscale,
+  farith_simple,
+  fmul,
+  fminmax,
+  fcadd, fcmla,
+  fabd,
+  pred_logical, pred_logical_setflags,
+  dot_b, dot_h,
+  usmmla,
+  fma,
+  bfcvt,
+  bfdot,
+  bfmmla, bfmlal,
+  fmmla,
+  sel_p,
+  fmov_imm,
+  cmp,
+  while,
+  fcm,
+  facm,
+  ptest,
+  clastab_simd, clastab_gp,
+  logical_reduce, int_reduce_b, int_reduce_h, int_reduce_s, int_reduce_d,
+  float_reduce_h, float_reduce_s, float_reduce_d,
+  fadda_h, fadda_s, fadda_d,
+  tbl, tbx,
+  compact, splice,
+  permute_z, permute_p,
+  unpk_z, unpk_p,
+  fcvtz_h, fcvtz_s, fcvtz_d,
+  fcvt_h, fcvt_s, fcvt_d,
+  cvtf_h, cvtf_s, cvtf_d,
+  brkab, brkabs,
+  brknpab, brknpabs,
+  piter,
+  int_arith_complex, int_shift_complex,
+  sqrdmulh_b, sqrdmulh_h, sqrdmulh_s, sqrdmulh_d,
+  arith_large_int,
+  int_arith_long,
+  eorbt,
+  sqrdmulh_b_accum, sqrdmulh_h_accum, sqrdmulh_s_accum, sqrdmulh_d_accum,
+  xar, bcax, eor3,
+  bsl,
+  shift_long,
+  shift_accum,
+  shift_insert,
+  abd_accum,
+  abd_long,
+  abd_long_accum,
+  fmlal,
+  usqxtun,
+  add_pairwise, minmax_pairwise,
+  fadd_pairwise, fminmax_pairwise,
+  adalp,
+  cadd,
+  cmla_b, cmla_h, cmla_s, cmla_d,
+  sqrdcmlah_b, sqrdcmlah_h, sqrdcmlah_s, sqrdcmlah_d,
+  cdot_b, cdot_h,
+  fcvtx,
+  urecpe,
+  pmul,
+  bdep,
+  histcnt,
+  match,
+  aes, aesmc,
+  rax1,
+  sm4e, sm4ekey,
+  untyped"
+   (const_string "untyped"))
diff --git a/gcc/config/aarch64/aarch64-sve.md b/gcc/config/aarch64/aarch64-sve.md
index b8cc47ef5fcee84f9b4c6637f99a79ca632fab61..65b958b4ccf50e1a856b744993c4512f007b19d7 100644
--- a/gcc/config/aarch64/aarch64-sve.md
+++ b/gcc/config/aarch64/aarch64-sve.md
@@ -699,6 +699,7 @@ (define_insn "*aarch64_sve_mov<mode>_ldr_str"
    str\t%1, %0
    mov\t%0.d, %1.d
    * return aarch64_output_sve_mov_immediate (operands[1]);"
+ [(set_attr "sve_type" "ldr_z, str_z, mov_z, mov_z_imm")]
 )
 
 ;; Unpredicated moves that cannot use LDR and STR, i.e. partial vectors
@@ -714,6 +715,7 @@ (define_insn "*aarch64_sve_mov<mode>_no_ldr_str"
   "@
    mov\t%0.d, %1.d
    * return aarch64_output_sve_mov_immediate (operands[1]);"
+  [(set_attr "sve_type" "mov_z, mov_z_imm")]
 )
 
 ;; Handle memory reloads for modes that can't use LDR and STR.  We use
@@ -758,6 +760,8 @@ (define_insn_and_split "@aarch64_pred_mov<mode>"
   "&& register_operand (operands[0], <MODE>mode)
    && register_operand (operands[2], <MODE>mode)"
   [(set (match_dup 0) (match_dup 2))]
+  ""
+  [(set_attr "sve_type" "mov_z, ld1, st1")]
 )
 
 ;; A pattern for optimizing SUBREGs that have a reinterpreting effect
@@ -959,6 +963,7 @@ (define_insn "*aarch64_sve_mov<mode>"
    str\t%1, %0
    ldr\t%0, %1
    * return aarch64_output_sve_mov_immediate (operands[1]);"
+  [(set_attr "sve_type" "mov_p, str_p, ldr_p, mov_z_imm")]
 )
 
 ;; Match PTRUES Pn.B when both the predicate and flags are useful.
@@ -984,6 +989,7 @@ (define_insn_and_rewrite "*aarch64_sve_ptruevnx16bi_cc"
   {
     operands[2] = operands[3] = CONSTM1_RTX (VNx16BImode);
   }
+  [(set_attr "sve_type" "ptrues")]
 )
 
 ;; Match PTRUES Pn.[HSD] when both the predicate and flags are useful.
@@ -1011,6 +1017,7 @@ (define_insn_and_rewrite "*aarch64_sve_ptrue<mode>_cc"
     operands[2] = CONSTM1_RTX (VNx16BImode);
     operands[3] = CONSTM1_RTX (<MODE>mode);
   }
+  [(set_attr "sve_type" "ptrues")]
 )
 
 ;; Match PTRUES Pn.B when only the flags result is useful (which is
@@ -1036,6 +1043,7 @@ (define_insn_and_rewrite "*aarch64_sve_ptruevnx16bi_ptest"
   {
     operands[2] = operands[3] = CONSTM1_RTX (VNx16BImode);
   }
+  [(set_attr "sve_type" "ptrues")]
 )
 
 ;; Match PTRUES Pn.[HWD] when only the flags result is useful (which is
@@ -1063,6 +1071,7 @@ (define_insn_and_rewrite "*aarch64_sve_ptrue<mode>_ptest"
     operands[2] = CONSTM1_RTX (VNx16BImode);
     operands[3] = CONSTM1_RTX (<MODE>mode);
   }
+  [(set_attr "sve_type" "ptrues")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -1086,6 +1095,7 @@ (define_insn "aarch64_wrffr"
   "@
    setffr
    wrffr\t%0.b"
+  [(set_attr "sve_type" "setffr, wrffr")]
 )
 
 ;; [L2 in the block comment above about FFR handling]
@@ -1125,6 +1135,7 @@ (define_insn "aarch64_rdffr"
 	(reg:VNx16BI FFRT_REGNUM))]
   "TARGET_SVE"
   "rdffr\t%0.b"
+  [(set_attr "sve_type" "rdffr")]
 )
 
 ;; Likewise with zero predication.
@@ -1135,6 +1146,7 @@ (define_insn "aarch64_rdffr_z"
 	  (match_operand:VNx16BI 1 "register_operand" "Upa")))]
   "TARGET_SVE"
   "rdffr\t%0.b, %1/z"
+  [(set_attr "sve_type" "rdffr")]
 )
 
 ;; Read the FFR to test for a fault, without using the predicate result.
@@ -1151,6 +1163,7 @@ (define_insn "*aarch64_rdffr_z_ptest"
    (clobber (match_scratch:VNx16BI 0 "=Upa"))]
   "TARGET_SVE"
   "rdffrs\t%0.b, %1/z"
+  [(set_attr "sve_type" "rdffrs")]
 )
 
 ;; Same for unpredicated RDFFR when tested with a known PTRUE.
@@ -1165,6 +1178,7 @@ (define_insn "*aarch64_rdffr_ptest"
    (clobber (match_scratch:VNx16BI 0 "=Upa"))]
   "TARGET_SVE"
   "rdffrs\t%0.b, %1/z"
+  [(set_attr "sve_type" "rdffrs")]
 )
 
 ;; Read the FFR with zero predication and test the result.
@@ -1184,6 +1198,7 @@ (define_insn "*aarch64_rdffr_z_cc"
 	  (match_dup 1)))]
   "TARGET_SVE"
   "rdffrs\t%0.b, %1/z"
+  [(set_attr "sve_type" "rdffrs")]
 )
 
 ;; Same for unpredicated RDFFR when tested with a known PTRUE.
@@ -1199,6 +1214,7 @@ (define_insn "*aarch64_rdffr_cc"
 	(reg:VNx16BI FFRT_REGNUM))]
   "TARGET_SVE"
   "rdffrs\t%0.b, %1/z"
+  [(set_attr "sve_type" "rdffrs")]
 )
 
 ;; [R3 in the block comment above about FFR handling]
@@ -1248,6 +1264,7 @@ (define_insn "maskload<mode><vpred>"
 	  UNSPEC_LD1_SVE))]
   "TARGET_SVE"
   "ld1<Vesize>\t%0.<Vctype>, %2/z, %1"
+  [(set_attr "sve_type" "ld1")]
 )
 
 ;; Unpredicated LD[234].
@@ -1272,6 +1289,7 @@ (define_insn "vec_mask_load_lanes<mode><vsingle>"
 	  UNSPEC_LDN))]
   "TARGET_SVE"
   "ld<vector_count><Vesize>\t%0, %2/z, %1"
+  [(set_attr "sve_type" "ld<vector_count>")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -1303,6 +1321,7 @@ (define_insn_and_rewrite "@aarch64_load<SVE_PRED_LOAD:pred_load>_<ANY_EXTEND:opt
   {
     operands[3] = CONSTM1_RTX (<SVE_HSDI:VPRED>mode);
   }
+  [(set_attr "sve_type" "ld1_extend")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -1329,6 +1348,8 @@ (define_insn "@aarch64_ld<fn>f1<mode>"
 	  SVE_LDFF1_LDNF1))]
   "TARGET_SVE"
   "ld<fn>f1<Vesize>\t%0.<Vetype>, %2/z, %1"
+  [(set_attr "sve_type" "ld1")
+   (set_attr "sve_load_ff" "<fn>f")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -1367,6 +1388,8 @@ (define_insn_and_rewrite "@aarch64_ld<fn>f1_<ANY_EXTEND:optab><SVE_HSDI:mode><SV
   {
     operands[3] = CONSTM1_RTX (<SVE_HSDI:VPRED>mode);
   }
+  [(set_attr "sve_type" "ld1_extend")
+   (set_attr "sve_load_ff" "<fn>f")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -1388,6 +1411,8 @@ (define_insn "@aarch64_ldnt1<mode>"
 	  UNSPEC_LDNT1_SVE))]
   "TARGET_SVE"
   "ldnt1<Vesize>\t%0.<Vetype>, %2/z, %1"
+  [(set_attr "sve_type" "ld1")
+   (set_attr "memop_nontemporal" "yes")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -1435,6 +1461,14 @@ (define_insn "mask_gather_load<mode><v_int_container>"
    ld1<Vesize>\t%0.s, %5/z, [%1, %2.s, uxtw]
    ld1<Vesize>\t%0.s, %5/z, [%1, %2.s, sxtw %p4]
    ld1<Vesize>\t%0.s, %5/z, [%1, %2.s, uxtw %p4]"
+  [(set_attr "sve_type" "load_gather_s_imm,load_gather_s_imm,
+			 load_gather_s_vec,load_gather_s_vec,load_gather_s_vec,
+			 load_gather_s_vec")
+   (set_attr "sve_gather_scatter_vec_offset_size" "na,na,s,s,s,s")
+   (set_attr "sve_gather_scatter_vec_offset_packed" "na,na,packed,packed,
+						    packed,packed")
+   (set_attr "sve_gather_scatter_vec_offset_scaled" "na,na,unscaled,unscaled,
+						    scaled,scaled")]
 )
 
 ;; Predicated gather loads for 64-bit elements.  The value of operand 3
@@ -1455,6 +1489,11 @@ (define_insn "mask_gather_load<mode><v_int_container>"
    ld1<Vesize>\t%0.d, %5/z, [%2.d, #%1]
    ld1<Vesize>\t%0.d, %5/z, [%1, %2.d]
    ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, lsl %p4]"
+  [(set_attr "sve_type" "load_gather_d_imm,load_gather_d_imm,
+			 load_gather_d_vec,load_gather_d_vec")
+   (set_attr "sve_gather_scatter_vec_offset_size" "na,na,d,d")
+   (set_attr "sve_gather_scatter_vec_offset_packed" "na,na,unpacked,unpacked")
+   (set_attr "sve_gather_scatter_vec_offset_scaled" "na,na,unscaled,scaled")]
 )
 
 ;; Likewise, but with the offset being extended from 32 bits.
@@ -1480,6 +1519,10 @@ (define_insn_and_rewrite "*mask_gather_load<mode><v_int_container>_<su>xtw_unpac
   {
     operands[6] = CONSTM1_RTX (VNx2BImode);
   }
+ [(set_attr "sve_type" "load_gather_d_vec")
+  (set_attr "sve_gather_scatter_vec_offset_size" "d")
+  (set_attr "sve_gather_scatter_vec_offset_packed" "packed")
+  (set_attr "sve_gather_scatter_vec_offset_scaled" "unscaled,scaled")]
 )
 
 ;; Likewise, but with the offset being truncated to 32 bits and then
@@ -1507,6 +1550,10 @@ (define_insn_and_rewrite "*mask_gather_load<mode><v_int_container>_sxtw"
   {
     operands[6] = CONSTM1_RTX (VNx2BImode);
   }
+ [(set_attr "sve_type" "load_gather_d_vec")
+  (set_attr "sve_gather_scatter_vec_offset_size" "d")
+  (set_attr "sve_gather_scatter_vec_offset_packed" "packed")
+  (set_attr "sve_gather_scatter_vec_offset_scaled" "unscaled,scaled")]
 )
 
 ;; Likewise, but with the offset being truncated to 32 bits and then
@@ -1527,6 +1574,10 @@ (define_insn "*mask_gather_load<mode><v_int_container>_uxtw"
   "@
    ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, uxtw]
    ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, uxtw %p4]"
+  [(set_attr "sve_type" "load_gather_d_vec")
+   (set_attr "sve_gather_scatter_vec_offset_size" "d")
+   (set_attr "sve_gather_scatter_vec_offset_packed" "packed")
+   (set_attr "sve_gather_scatter_vec_offset_scaled" "unscaled,scaled")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -1569,6 +1620,14 @@ (define_insn_and_rewrite "@aarch64_gather_load_<ANY_EXTEND:optab><SVE_4HSI:mode>
   {
     operands[6] = CONSTM1_RTX (VNx4BImode);
   }
+ [(set_attr "sve_type" "load_gather_s_imm,load_gather_s_imm,
+                        load_gather_s_vec,load_gather_s_vec,load_gather_s_vec,
+                        load_gather_s_vec")
+  (set_attr "sve_gather_scatter_vec_offset_size" "na,na,s,s,s,s")
+  (set_attr "sve_gather_scatter_vec_offset_packed" "na,na,packed,packed,
+                                                   packed,packed")
+  (set_attr "sve_gather_scatter_vec_offset_scaled" "na,na,unscaled,unscaled,
+                                                   scaled,scaled")]
 )
 
 ;; Predicated extending gather loads for 64-bit elements.  The value of
@@ -1597,6 +1656,11 @@ (define_insn_and_rewrite "@aarch64_gather_load_<ANY_EXTEND:optab><SVE_2HSDI:mode
   {
     operands[6] = CONSTM1_RTX (VNx2BImode);
   }
+  [(set_attr "sve_type" "load_gather_d_imm,load_gather_d_imm,
+                         load_gather_d_vec,load_gather_d_vec")
+   (set_attr "sve_gather_scatter_vec_offset_size" "na,na,d,d")
+   (set_attr "sve_gather_scatter_vec_offset_packed" "na,na,unpacked,unpacked")
+   (set_attr "sve_gather_scatter_vec_offset_scaled" "na,na,unscaled,scaled")]
 )
 
 ;; Likewise, but with the offset being extended from 32 bits.
@@ -1627,6 +1691,10 @@ (define_insn_and_rewrite "*aarch64_gather_load_<ANY_EXTEND:optab><SVE_2HSDI:mode
     operands[6] = CONSTM1_RTX (VNx2BImode);
     operands[7] = CONSTM1_RTX (VNx2BImode);
   }
+ [(set_attr "sve_type" "load_gather_d_vec")
+  (set_attr "sve_gather_scatter_vec_offset_size" "d")
+  (set_attr "sve_gather_scatter_vec_offset_packed" "packed")
+  (set_attr "sve_gather_scatter_vec_offset_scaled" "unscaled,scaled")]
 )
 
 ;; Likewise, but with the offset being truncated to 32 bits and then
@@ -1659,6 +1727,10 @@ (define_insn_and_rewrite "*aarch64_gather_load_<ANY_EXTEND:optab><SVE_2HSDI:mode
     operands[6] = CONSTM1_RTX (VNx2BImode);
     operands[7] = CONSTM1_RTX (VNx2BImode);
   }
+ [(set_attr "sve_type" "load_gather_d_vec")
+  (set_attr "sve_gather_scatter_vec_offset_size" "d")
+  (set_attr "sve_gather_scatter_vec_offset_packed" "packed")
+  (set_attr "sve_gather_scatter_vec_offset_scaled" "unscaled,scaled")]
 )
 
 ;; Likewise, but with the offset being truncated to 32 bits and then
@@ -1687,6 +1759,10 @@ (define_insn_and_rewrite "*aarch64_gather_load_<ANY_EXTEND:optab><SVE_2HSDI:mode
   {
     operands[7] = CONSTM1_RTX (VNx2BImode);
   }
+  [(set_attr "sve_type" "load_gather_d_vec")
+   (set_attr "sve_gather_scatter_vec_offset_size" "d")
+   (set_attr "sve_gather_scatter_vec_offset_packed" "packed")
+   (set_attr "sve_gather_scatter_vec_offset_scaled" "unscaled,scaled")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -1718,6 +1794,15 @@ (define_insn "@aarch64_ldff1_gather<mode>"
    ldff1w\t%0.s, %5/z, [%1, %2.s, uxtw]
    ldff1w\t%0.s, %5/z, [%1, %2.s, sxtw %p4]
    ldff1w\t%0.s, %5/z, [%1, %2.s, uxtw %p4]"
+  [(set_attr "sve_type" "load_gather_s_imm,load_gather_s_imm,
+                         load_gather_s_vec,load_gather_s_vec,load_gather_s_vec,
+                         load_gather_s_vec")
+   (set_attr "sve_gather_scatter_vec_offset_size" "na,na,s,s,s,s")
+   (set_attr "sve_gather_scatter_vec_offset_packed" "na,na,packed,packed,
+                                                    packed,packed")
+   (set_attr "sve_gather_scatter_vec_offset_scaled" "na,na,unscaled,unscaled,
+                                                    scaled,scaled")
+   (set_attr "sve_load_ff" "ff")]
 )
 
 ;; Predicated first-faulting gather loads for 64-bit elements.  The value
@@ -1739,6 +1825,12 @@ (define_insn "@aarch64_ldff1_gather<mode>"
    ldff1d\t%0.d, %5/z, [%2.d, #%1]
    ldff1d\t%0.d, %5/z, [%1, %2.d]
    ldff1d\t%0.d, %5/z, [%1, %2.d, lsl %p4]"
+  [(set_attr "sve_type" "load_gather_d_imm,load_gather_d_imm,
+                         load_gather_d_vec,load_gather_d_vec")
+   (set_attr "sve_gather_scatter_vec_offset_size" "na,na,d,d")
+   (set_attr "sve_gather_scatter_vec_offset_packed" "na,na,unpacked,unpacked")
+   (set_attr "sve_gather_scatter_vec_offset_scaled" "na,na,unscaled,scaled")
+   (set_attr "sve_load_ff" "ff")]
 )
 
 ;; Likewise, but with the offset being sign-extended from 32 bits.
@@ -1766,6 +1858,11 @@ (define_insn_and_rewrite "*aarch64_ldff1_gather<mode>_sxtw"
   {
     operands[6] = CONSTM1_RTX (VNx2BImode);
   }
+  [(set_attr "sve_type" "load_gather_d_vec")
+   (set_attr "sve_gather_scatter_vec_offset_size" "d")
+   (set_attr "sve_gather_scatter_vec_offset_packed" "packed")
+   (set_attr "sve_gather_scatter_vec_offset_scaled" "unscaled,scaled")
+   (set_attr "sve_load_ff" "ff")]
 )
 
 ;; Likewise, but with the offset being zero-extended from 32 bits.
@@ -1786,6 +1883,11 @@ (define_insn "*aarch64_ldff1_gather<mode>_uxtw"
   "@
    ldff1d\t%0.d, %5/z, [%1, %2.d, uxtw]
    ldff1d\t%0.d, %5/z, [%1, %2.d, uxtw %p4]"
+  [(set_attr "sve_type" "load_gather_d_vec")
+   (set_attr "sve_gather_scatter_vec_offset_size" "d")
+   (set_attr "sve_gather_scatter_vec_offset_packed" "packed")
+   (set_attr "sve_gather_scatter_vec_offset_scaled" "unscaled,scaled")
+   (set_attr "sve_load_ff" "ff")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -1829,6 +1931,15 @@ (define_insn_and_rewrite "@aarch64_ldff1_gather_<ANY_EXTEND:optab><VNx4_WIDE:mod
   {
     operands[6] = CONSTM1_RTX (VNx4BImode);
   }
+  [(set_attr "sve_type" "load_gather_s_imm,load_gather_s_imm,
+                         load_gather_s_vec,load_gather_s_vec,load_gather_s_vec,
+                         load_gather_s_vec")
+   (set_attr "sve_gather_scatter_vec_offset_size" "na,na,s,s,s,s")
+   (set_attr "sve_gather_scatter_vec_offset_packed" "na,na,packed,packed,
+                                                    packed,packed")
+   (set_attr "sve_gather_scatter_vec_offset_scaled" "na,na,unscaled,unscaled,
+                                                    scaled,scaled")
+   (set_attr "sve_load_ff" "ff")]
 )
 
 ;; Predicated extending first-faulting gather loads for 64-bit elements.
@@ -1858,6 +1969,12 @@ (define_insn_and_rewrite "@aarch64_ldff1_gather_<ANY_EXTEND:optab><VNx2_WIDE:mod
   {
     operands[6] = CONSTM1_RTX (VNx2BImode);
   }
+  [(set_attr "sve_type" "load_gather_d_imm,load_gather_d_imm,
+                         load_gather_d_vec,load_gather_d_vec")
+   (set_attr "sve_gather_scatter_vec_offset_size" "na,na,d,d")
+   (set_attr "sve_gather_scatter_vec_offset_packed" "na,na,unpacked,unpacked")
+   (set_attr "sve_gather_scatter_vec_offset_scaled" "na,na,unscaled,scaled")
+   (set_attr "sve_load_ff" "ff")]
 )
 
 ;; Likewise, but with the offset being sign-extended from 32 bits.
@@ -1890,6 +2007,11 @@ (define_insn_and_rewrite "*aarch64_ldff1_gather_<ANY_EXTEND:optab><VNx2_WIDE:mod
     operands[6] = CONSTM1_RTX (VNx2BImode);
     operands[7] = CONSTM1_RTX (VNx2BImode);
   }
+  [(set_attr "sve_type" "load_gather_d_vec")
+   (set_attr "sve_gather_scatter_vec_offset_size" "d")
+   (set_attr "sve_gather_scatter_vec_offset_packed" "packed")
+   (set_attr "sve_gather_scatter_vec_offset_scaled" "unscaled,scaled")
+   (set_attr "sve_load_ff" "ff")]
 )
 
 ;; Likewise, but with the offset being zero-extended from 32 bits.
@@ -1918,6 +2040,11 @@ (define_insn_and_rewrite "*aarch64_ldff1_gather_<ANY_EXTEND:optab><VNx2_WIDE:mod
   {
     operands[7] = CONSTM1_RTX (VNx2BImode);
   }
+  [(set_attr "sve_type" "load_gather_d_vec")
+   (set_attr "sve_gather_scatter_vec_offset_size" "d")
+   (set_attr "sve_gather_scatter_vec_offset_packed" "packed")
+   (set_attr "sve_gather_scatter_vec_offset_scaled" "unscaled,scaled")
+   (set_attr "sve_load_ff" "ff")]
 )
 
 ;; =========================================================================
@@ -1950,6 +2077,7 @@ (define_insn "@aarch64_sve_prefetch<mode>"
     operands[1] = gen_rtx_MEM (<MODE>mode, operands[1]);
     return aarch64_output_sve_prefetch ("prf<Vesize>", operands[2], "%0, %1");
   }
+  [(set_attr "sve_type" "prefetch")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -1998,6 +2126,7 @@ (define_insn "@aarch64_sve_gather_prefetch<SVE_FULL_I:mode><VNx4SI_ONLY:mode>"
     const char *const *parts = insns[which_alternative];
     return aarch64_output_sve_prefetch (parts[0], operands[6], parts[1]);
   }
+  [(set_attr "sve_type" "prefetch_gather")]
 )
 
 ;; Predicated gather prefetches for 64-bit elements.  The value of operand 3
@@ -2025,6 +2154,7 @@ (define_insn "@aarch64_sve_gather_prefetch<SVE_FULL_I:mode><VNx2DI_ONLY:mode>"
     const char *const *parts = insns[which_alternative];
     return aarch64_output_sve_prefetch (parts[0], operands[6], parts[1]);
   }
+  [(set_attr "sve_type" "prefetch_gather")]
 )
 
 ;; Likewise, but with the offset being sign-extended from 32 bits.
@@ -2058,6 +2188,7 @@ (define_insn_and_rewrite "*aarch64_sve_gather_prefetch<SVE_FULL_I:mode><VNx2DI_O
   {
     operands[9] = copy_rtx (operands[0]);
   }
+  [(set_attr "sve_type" "prefetch_gather")]
 )
 
 ;; Likewise, but with the offset being zero-extended from 32 bits.
@@ -2084,6 +2215,7 @@ (define_insn "*aarch64_sve_gather_prefetch<SVE_FULL_I:mode><VNx2DI_ONLY:mode>_ux
     const char *const *parts = insns[which_alternative];
     return aarch64_output_sve_prefetch (parts[0], operands[6], parts[1]);
   }
+  [(set_attr "sve_type" "prefetch_gather")]
 )
 
 ;; =========================================================================
@@ -2122,6 +2254,7 @@ (define_insn "maskstore<mode><vpred>"
 	  UNSPEC_ST1_SVE))]
   "TARGET_SVE"
   "st1<Vesize>\t%1.<Vctype>, %2, %0"
+  [(set_attr "sve_type" "st1")]
 )
 
 ;; Unpredicated ST[234].  This is always a full update, so the dependence
@@ -2152,6 +2285,7 @@ (define_insn "vec_mask_store_lanes<mode><vsingle>"
 	  UNSPEC_STN))]
   "TARGET_SVE"
   "st<vector_count><Vesize>\t%1, %2, %0"
+  [(set_attr "sve_type" "st<vector_count>")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -2174,6 +2308,7 @@ (define_insn "@aarch64_store_trunc<VNx8_NARROW:mode><VNx8_WIDE:mode>"
 	  UNSPEC_ST1_SVE))]
   "TARGET_SVE"
   "st1<VNx8_NARROW:Vesize>\t%1.<VNx8_WIDE:Vetype>, %2, %0"
+  [(set_attr "sve_type" "st1_truncate")]
 )
 
 ;; Predicated truncate and store, with 4 elements per 128-bit block.
@@ -2187,6 +2322,7 @@ (define_insn "@aarch64_store_trunc<VNx4_NARROW:mode><VNx4_WIDE:mode>"
 	  UNSPEC_ST1_SVE))]
   "TARGET_SVE"
   "st1<VNx4_NARROW:Vesize>\t%1.<VNx4_WIDE:Vetype>, %2, %0"
+  [(set_attr "sve_type" "st1_truncate")]
 )
 
 ;; Predicated truncate and store, with 2 elements per 128-bit block.
@@ -2200,6 +2336,7 @@ (define_insn "@aarch64_store_trunc<VNx2_NARROW:mode><VNx2_WIDE:mode>"
 	  UNSPEC_ST1_SVE))]
   "TARGET_SVE"
   "st1<VNx2_NARROW:Vesize>\t%1.<VNx2_WIDE:Vetype>, %2, %0"
+ [(set_attr "sve_type" "st1_truncate")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -2221,6 +2358,8 @@ (define_insn "@aarch64_stnt1<mode>"
 	  UNSPEC_STNT1_SVE))]
   "TARGET_SVE"
   "stnt1<Vesize>\t%1.<Vetype>, %2, %0"
+  [(set_attr "sve_type" "st1")
+  (set_attr "memop_nontemporal" "yes")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -2268,6 +2407,14 @@ (define_insn "mask_scatter_store<mode><v_int_container>"
    st1<Vesize>\t%4.s, %5, [%0, %1.s, uxtw]
    st1<Vesize>\t%4.s, %5, [%0, %1.s, sxtw %p3]
    st1<Vesize>\t%4.s, %5, [%0, %1.s, uxtw %p3]"
+   [(set_attr "sve_type" "store_scatter_s_imm,store_scatter_s_imm,
+                          store_scatter_s_vec,store_scatter_s_vec,store_scatter_s_vec,
+                          store_scatter_s_vec")
+    (set_attr "sve_gather_scatter_vec_offset_size" "na,na,s,s,s,s")
+    (set_attr "sve_gather_scatter_vec_offset_packed" "na,na,packed,packed,
+                                                     packed,packed")
+    (set_attr "sve_gather_scatter_vec_offset_scaled" "na,na,unscaled,unscaled,
+                                                     scaled,scaled")]
 )
 
 ;; Predicated scatter stores for 64-bit elements.  The value of operand 2
@@ -2288,6 +2435,11 @@ (define_insn "mask_scatter_store<mode><v_int_container>"
    st1<Vesize>\t%4.d, %5, [%1.d, #%0]
    st1<Vesize>\t%4.d, %5, [%0, %1.d]
    st1<Vesize>\t%4.d, %5, [%0, %1.d, lsl %p3]"
+   [(set_attr "sve_type" "store_scatter_d_imm,store_scatter_d_imm,
+                          store_scatter_d_vec,store_scatter_d_vec")
+    (set_attr "sve_gather_scatter_vec_offset_size" "na,na,d,d")
+    (set_attr "sve_gather_scatter_vec_offset_packed" "na,na,unpacked,unpacked")
+    (set_attr "sve_gather_scatter_vec_offset_scaled" "na,na,unscaled,scaled")]
 )
 
 ;; Likewise, but with the offset being extended from 32 bits.
@@ -2313,6 +2465,10 @@ (define_insn_and_rewrite "*mask_scatter_store<mode><v_int_container>_<su>xtw_unp
   {
     operands[6] = CONSTM1_RTX (<VPRED>mode);
   }
+   [(set_attr "sve_type" "store_scatter_d_vec")
+    (set_attr "sve_gather_scatter_vec_offset_size" "d")
+    (set_attr "sve_gather_scatter_vec_offset_packed" "packed")
+    (set_attr "sve_gather_scatter_vec_offset_scaled" "unscaled,scaled")]
 )
 
 ;; Likewise, but with the offset being truncated to 32 bits and then
@@ -2340,6 +2496,10 @@ (define_insn_and_rewrite "*mask_scatter_store<mode><v_int_container>_sxtw"
   {
     operands[6] = CONSTM1_RTX (<VPRED>mode);
   }
+   [(set_attr "sve_type" "store_scatter_d_vec")
+    (set_attr "sve_gather_scatter_vec_offset_size" "d")
+    (set_attr "sve_gather_scatter_vec_offset_packed" "packed")
+    (set_attr "sve_gather_scatter_vec_offset_scaled" "unscaled,scaled")]
 )
 
 ;; Likewise, but with the offset being truncated to 32 bits and then
@@ -2360,6 +2520,10 @@ (define_insn "*mask_scatter_store<mode><v_int_container>_uxtw"
   "@
    st1<Vesize>\t%4.d, %5, [%0, %1.d, uxtw]
    st1<Vesize>\t%4.d, %5, [%0, %1.d, uxtw %p3]"
+   [(set_attr "sve_type" "store_scatter_d_vec")
+    (set_attr "sve_gather_scatter_vec_offset_size" "d")
+    (set_attr "sve_gather_scatter_vec_offset_packed" "packed")
+    (set_attr "sve_gather_scatter_vec_offset_scaled" "unscaled,scaled")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -2392,6 +2556,14 @@ (define_insn "@aarch64_scatter_store_trunc<VNx4_NARROW:mode><VNx4_WIDE:mode>"
    st1<VNx4_NARROW:Vesize>\t%4.s, %5, [%0, %1.s, uxtw]
    st1<VNx4_NARROW:Vesize>\t%4.s, %5, [%0, %1.s, sxtw %p3]
    st1<VNx4_NARROW:Vesize>\t%4.s, %5, [%0, %1.s, uxtw %p3]"
+   [(set_attr "sve_type" "store_scatter_s_imm,store_scatter_s_imm,
+                          store_scatter_s_vec,store_scatter_s_vec,store_scatter_s_vec,
+                          store_scatter_s_vec")
+    (set_attr "sve_gather_scatter_vec_offset_size" "na,na,s,s,s,s")
+    (set_attr "sve_gather_scatter_vec_offset_packed" "na,na,packed,packed,
+                                                     packed,packed")
+    (set_attr "sve_gather_scatter_vec_offset_scaled" "na,na,unscaled,unscaled,
+                                                     scaled,scaled")]
 )
 
 ;; Predicated truncating scatter stores for 64-bit elements.  The value of
@@ -2413,6 +2585,11 @@ (define_insn "@aarch64_scatter_store_trunc<VNx2_NARROW:mode><VNx2_WIDE:mode>"
    st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%1.d, #%0]
    st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%0, %1.d]
    st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%0, %1.d, lsl %p3]"
+   [(set_attr "sve_type" "store_scatter_d_imm,store_scatter_d_imm,
+                          store_scatter_d_vec,store_scatter_d_vec")
+    (set_attr "sve_gather_scatter_vec_offset_size" "na,na,d,d")
+    (set_attr "sve_gather_scatter_vec_offset_packed" "na,na,unpacked,unpacked")
+    (set_attr "sve_gather_scatter_vec_offset_scaled" "na,na,unscaled,scaled")]
 )
 
 ;; Likewise, but with the offset being sign-extended from 32 bits.
@@ -2440,6 +2617,10 @@ (define_insn_and_rewrite "*aarch64_scatter_store_trunc<VNx2_NARROW:mode><VNx2_WI
   {
     operands[6] = copy_rtx (operands[5]);
   }
+   [(set_attr "sve_type" "store_scatter_d_vec")
+    (set_attr "sve_gather_scatter_vec_offset_size" "d")
+    (set_attr "sve_gather_scatter_vec_offset_packed" "packed")
+    (set_attr "sve_gather_scatter_vec_offset_scaled" "unscaled,scaled")]
 )
 
 ;; Likewise, but with the offset being zero-extended from 32 bits.
@@ -2460,6 +2641,10 @@ (define_insn "*aarch64_scatter_store_trunc<VNx2_NARROW:mode><VNx2_WIDE:mode>_uxt
   "@
    st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%0, %1.d, uxtw]
    st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%0, %1.d, uxtw %p3]"
+   [(set_attr "sve_type" "store_scatter_d_vec")
+    (set_attr "sve_gather_scatter_vec_offset_size" "d")
+    (set_attr "sve_gather_scatter_vec_offset_packed" "packed")
+    (set_attr "sve_gather_scatter_vec_offset_scaled" "unscaled,scaled")]
 )
 
 ;; =========================================================================
@@ -2529,7 +2714,8 @@ (define_insn_and_split "*vec_duplicate<mode>_reg"
 				   CONST0_RTX (<MODE>mode)));
     DONE;
   }
-  [(set_attr "length" "4,4,8")]
+  [(set_attr "length" "4,4,8")
+   (set_attr "sve_type" "dup_gp, mov_from_simd, *")]
 )
 
 ;; Duplicate an Advanced SIMD vector to fill an SVE vector (LE version).
@@ -2542,6 +2728,7 @@ (define_insn "@aarch64_vec_duplicate_vq<mode>_le"
     operands[1] = gen_rtx_REG (<MODE>mode, REGNO (operands[1]));
     return "dup\t%0.q, %1.q[0]";
   }
+ [(set_attr "sve_type" "dup_from_q")]
 )
 
 ;; Duplicate an Advanced SIMD vector to fill an SVE vector (BE version).
@@ -2563,6 +2750,7 @@ (define_insn "@aarch64_vec_duplicate_vq<mode>_be"
     operands[1] = gen_rtx_REG (<MODE>mode, REGNO (operands[1]));
     return "dup\t%0.q, %1.q[0]";
   }
+  [(set_attr "sve_type" "dup_from_q")]
 )
 
 ;; This is used for vec_duplicate<mode>s from memory, but can also
@@ -2578,6 +2766,7 @@ (define_insn "sve_ld1r<mode>"
 	  UNSPEC_SEL))]
   "TARGET_SVE"
   "ld1r<Vesize>\t%0.<Vetype>, %1/z, %2"
+  [(set_attr "sve_type" "ld1r")]
 )
 
 ;; Load 128 bits from memory under predicate control and duplicate to
@@ -2593,6 +2782,7 @@ (define_insn "@aarch64_sve_ld1rq<mode>"
     operands[1] = gen_rtx_MEM (<VEL>mode, XEXP (operands[1], 0));
     return "ld1rq<Vesize>\t%0.<Vetype>, %2/z, %1";
   }
+  [(set_attr "sve_type" "ld1r")]
 )
 
 (define_insn "@aarch64_sve_ld1ro<mode>"
@@ -2607,6 +2797,7 @@ (define_insn "@aarch64_sve_ld1ro<mode>"
     operands[1] = gen_rtx_MEM (<VEL>mode, XEXP (operands[1], 0));
     return "ld1ro<Vesize>\t%0.<Vetype>, %2/z, %1";
   }
+  [(set_attr "sve_type" "ld1ro")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -2639,7 +2830,8 @@ (define_insn "vec_shl_insert_<mode>"
    insr\t%0.<Vetype>, %<Vetype>2
    movprfx\t%0, %1\;insr\t%0.<Vetype>, %<vwcore>2
    movprfx\t%0, %1\;insr\t%0.<Vetype>, %<Vetype>2"
-  [(set_attr "movprfx" "*,*,yes,yes")]
+  [(set_attr "movprfx" "*,*,yes,yes")
+   (set_attr "sve_type" "insr_gp,insr_simd,insr_gp,insr_simd")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -2659,6 +2851,8 @@ (define_insn "vec_series<mode>"
    index\t%0.<Vctype>, #%1, %<vccore>2
    index\t%0.<Vctype>, %<vccore>1, #%2
    index\t%0.<Vctype>, %<vccore>1, %<vccore>2"
+  [(set_attr "sve_type" "index_<Vctype>")
+   (set_attr "sve_index_operands" "imm_reg,reg_imm,reg_reg")]
 )
 
 ;; Optimize {x, x, x, x, ...} + {0, n, 2*n, 3*n, ...} if n is in range
@@ -2674,6 +2868,8 @@ (define_insn "*vec_series<mode>_plus"
     operands[2] = aarch64_check_zero_based_sve_index_immediate (operands[2]);
     return "index\t%0.<Vctype>, %<vccore>1, #%2";
   }
+  [(set_attr "sve_type" "index_<Vctype>")
+   (set_attr "sve_index_operands" "reg_imm")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -2826,6 +3022,7 @@ (define_insn "*vec_extract<mode><Vel>_dup"
     operands[0] = gen_rtx_REG (<MODE>mode, REGNO (operands[0]));
     return "dup\t%0.<Vetype>, %1.<Vetype>[%2]";
   }
+  [(set_attr "sve_type" "dup_index")]
 )
 
 ;; Extract an element outside the range of DUP.  This pattern requires the
@@ -2843,7 +3040,8 @@ (define_insn "*vec_extract<mode><Vel>_ext"
 	    ? "ext\t%0.b, %0.b, %0.b, #%2"
 	    : "movprfx\t%0, %1\;ext\t%0.b, %0.b, %1.b, #%2");
   }
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "ext")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -2866,6 +3064,7 @@ (define_insn "@extract_<last_op>_<mode>"
   "@
    last<ab>\t%<vwcore>0, %1, %2.<Vetype>
    last<ab>\t%<Vetype>0, %1, %2.<Vetype>"
+  [(set_attr "sve_type" "lastab_gp, lastab_simd")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -2935,7 +3134,8 @@ (define_insn "@aarch64_pred_<optab><mode>"
   "@
    <sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
    movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "int_simple_unary")]
 )
 
 ;; Predicated integer unary arithmetic with merging.
@@ -2963,7 +3163,8 @@ (define_insn "*cond_<optab><mode>_2"
   "@
    <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>
    movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "int_simple_unary")]
 )
 
 ;; Predicated integer unary arithmetic, merging with an independent value.
@@ -2986,7 +3187,8 @@ (define_insn "*cond_<optab><mode>_any"
    <sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
    movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
    movprfx\t%0, %3\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
-  [(set_attr "movprfx" "*,yes,yes")]
+  [(set_attr "movprfx" "*,yes,yes")
+   (set_attr "sve_type" "int_simple_unary")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -3012,7 +3214,8 @@ (define_insn "@aarch64_pred_<optab><mode>"
   "@
    <sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
    movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "rev")]
 )
 
 ;; Another way of expressing the REVB, REVH and REVW patterns, with this
@@ -3031,7 +3234,8 @@ (define_insn "@aarch64_sve_revbhw_<SVE_ALL:mode><PRED_HSD:mode>"
   "@
    rev<SVE_ALL:Vcwtype>\t%0.<PRED_HSD:Vetype>, %1/m, %2.<PRED_HSD:Vetype>
    movprfx\t%0, %2\;rev<SVE_ALL:Vcwtype>\t%0.<PRED_HSD:Vetype>, %1/m, %2.<PRED_HSD:Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "rev")]
 )
 
 ;; Predicated integer unary operations with merging.
@@ -3049,7 +3253,8 @@ (define_insn "@cond_<optab><mode>"
    <sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
    movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
    movprfx\t%0, %3\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
-  [(set_attr "movprfx" "*,yes,yes")]
+  [(set_attr "movprfx" "*,yes,yes")
+   (set_attr "sve_type" "rev")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -3090,7 +3295,8 @@ (define_insn "*<optab><SVE_PARTIAL_I:mode><SVE_HSDI:mode>2"
   "@
    <su>xt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_HSDI:Vetype>, %1/m, %2.<SVE_HSDI:Vetype>
    movprfx\t%0, %2\;<su>xt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_HSDI:Vetype>, %1/m, %2.<SVE_HSDI:Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "extend")]
 )
 
 ;; Predicated truncate-and-sign-extend operations.
@@ -3107,7 +3313,8 @@ (define_insn "@aarch64_pred_sxt<SVE_FULL_HSDI:mode><SVE_PARTIAL_I:mode>"
   "@
    sxt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
    movprfx\t%0, %2\;sxt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "extend")]
 )
 
 ;; Predicated truncate-and-sign-extend operations with merging.
@@ -3126,7 +3333,8 @@ (define_insn "@aarch64_cond_sxt<SVE_FULL_HSDI:mode><SVE_PARTIAL_I:mode>"
    sxt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
    movprfx\t%0.<SVE_FULL_HSDI:Vetype>, %1/z, %2.<SVE_FULL_HSDI:Vetype>\;sxt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
    movprfx\t%0, %3\;sxt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>"
-  [(set_attr "movprfx" "*,yes,yes")]
+  [(set_attr "movprfx" "*,yes,yes")
+   (set_attr "sve_type" "extend")]
 )
 
 ;; Predicated truncate-and-zero-extend operations, merging with the
@@ -3147,7 +3355,8 @@ (define_insn "*cond_uxt<mode>_2"
   "@
    uxt%e3\t%0.<Vetype>, %1/m, %0.<Vetype>
    movprfx\t%0, %2\;uxt%e3\t%0.<Vetype>, %1/m, %2.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "extend")]
 )
 
 ;; Predicated truncate-and-zero-extend operations, merging with an
@@ -3172,7 +3381,8 @@ (define_insn "*cond_uxt<mode>_any"
    uxt%e3\t%0.<Vetype>, %1/m, %2.<Vetype>
    movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;uxt%e3\t%0.<Vetype>, %1/m, %2.<Vetype>
    movprfx\t%0, %4\;uxt%e3\t%0.<Vetype>, %1/m, %2.<Vetype>"
-  [(set_attr "movprfx" "*,yes,yes")]
+  [(set_attr "movprfx" "*,yes,yes")
+   (set_attr "sve_type" "extend")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -3243,7 +3453,8 @@ (define_insn "*cnot<mode>"
   "@
    cnot\t%0.<Vetype>, %1/m, %2.<Vetype>
    movprfx\t%0, %2\;cnot\t%0.<Vetype>, %1/m, %2.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "int_simple_unary")]
 )
 
 ;; Predicated logical inverse with merging.
@@ -3299,7 +3510,8 @@ (define_insn_and_rewrite "*cond_cnot<mode>_2"
   {
     operands[5] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "int_simple_unary")]
 )
 
 ;; Predicated logical inverse, merging with an independent value.
@@ -3336,7 +3548,8 @@ (define_insn_and_rewrite "*cond_cnot<mode>_any"
   {
     operands[5] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes,yes")]
+  [(set_attr "movprfx" "*,yes,yes")
+   (set_attr "sve_type" "int_simple_unary")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -3354,6 +3567,7 @@ (define_insn "@aarch64_sve_<optab><mode>"
 	  SVE_FP_UNARY_INT))]
   "TARGET_SVE"
   "<sve_fp_op>\t%0.<Vetype>, %1.<Vetype>"
+  [(set_attr "sve_type" "fexpa")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -3383,6 +3597,7 @@ (define_insn "@aarch64_sve_<optab><mode>"
 	  SVE_FP_UNARY))]
   "TARGET_SVE"
   "<sve_fp_op>\t%0.<Vetype>, %1.<Vetype>"
+  [(set_attr "sve_type" "<sve_sched>")]
 )
 
 ;; Unpredicated floating-point unary operations.
@@ -3411,7 +3626,8 @@ (define_insn "@aarch64_pred_<optab><mode>"
   "@
    <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
    movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "<sve_sched>")]
 )
 
 ;; Predicated floating-point unary arithmetic with merging.
@@ -3449,7 +3665,8 @@ (define_insn_and_rewrite "*cond_<optab><mode>_2_relaxed"
   {
     operands[3] = copy_rtx (operands[1]);
   }
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "<sve_sched>")]
 )
 
 (define_insn "*cond_<optab><mode>_2_strict"
@@ -3467,7 +3684,8 @@ (define_insn "*cond_<optab><mode>_2_strict"
   "@
    <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>
    movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "<sve_sched>")]
 )
 
 ;; Predicated floating-point unary arithmetic, merging with an independent
@@ -3498,7 +3716,8 @@ (define_insn_and_rewrite "*cond_<optab><mode>_any_relaxed"
   {
     operands[4] = copy_rtx (operands[1]);
   }
-  [(set_attr "movprfx" "*,yes,yes")]
+  [(set_attr "movprfx" "*,yes,yes")
+   (set_attr "sve_type" "<sve_sched>")]
 )
 
 (define_insn "*cond_<optab><mode>_any_strict"
@@ -3517,7 +3736,8 @@ (define_insn "*cond_<optab><mode>_any_strict"
    <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
    movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
    movprfx\t%0, %3\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
-  [(set_attr "movprfx" "*,yes,yes")]
+  [(set_attr "movprfx" "*,yes,yes")
+   (set_attr "sve_type" "<sve_sched>")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -3597,6 +3817,7 @@ (define_insn "*one_cmpl<mode>3"
 	  (match_operand:PRED_ALL 1 "register_operand" "Upa")))]
   "TARGET_SVE"
   "not\t%0.b, %1/z, %2.b"
+  [(set_attr "sve_type" "not_p")]
 )
 
 ;; =========================================================================
@@ -3667,7 +3888,8 @@ (define_insn_and_split "@aarch64_pred_<optab><mode>"
   [(set (match_dup 0)
 	(SVE_INT_BINARY_IMM:SVE_I (match_dup 2) (match_dup 3)))]
   ""
-  [(set_attr "movprfx" "*,*,yes,yes")]
+  [(set_attr "movprfx" "*,*,yes,yes")
+   (set_attr "sve_type" "<sve_sched_code>")]
 )
 
 ;; Unpredicated binary operations with a constant (post-RA only).
@@ -3682,7 +3904,8 @@ (define_insn "*post_ra_<optab><mode>3"
   "@
    <sve_int_op>\t%0.<Vetype>, %0.<Vetype>, #%<sve_imm_prefix>2
    movprfx\t%0, %1\;<sve_int_op>\t%0.<Vetype>, %0.<Vetype>, #%<sve_imm_prefix>2"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "<sve_sched_code>")]
 )
 
 ;; Predicated integer operations with merging.
@@ -3712,7 +3935,8 @@ (define_insn "*cond_<optab><mode>_2"
   "@
    <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
    movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "<sve_sched_code>")]
 )
 
 ;; Predicated integer operations, merging with the second input.
@@ -3729,7 +3953,8 @@ (define_insn "*cond_<optab><mode>_3"
   "@
    <sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
    movprfx\t%0, %3\;<sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "<sve_sched_code>")]
 )
 
 ;; Predicated integer operations, merging with an independent value.
@@ -3759,7 +3984,8 @@ (define_insn_and_rewrite "*cond_<optab><mode>_any"
 					     operands[4], operands[1]));
     operands[4] = operands[2] = operands[0];
   }
-  [(set_attr "movprfx" "yes")]
+  [(set_attr "movprfx" "yes")
+   (set_attr "sve_type" "<sve_sched_code>")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -3791,7 +4017,9 @@ (define_insn "add<mode>3"
    movprfx\t%0, %1\;add\t%0.<Vetype>, %0.<Vetype>, #%D2
    movprfx\t%0, %1\;sub\t%0.<Vetype>, %0.<Vetype>, #%N2
    add\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>"
-  [(set_attr "movprfx" "*,*,*,yes,yes,*")]
+  [(set_attr "movprfx" "*,*,*,yes,yes,*")
+   (set_attr "sve_type" "int_simple_binary,int_simple_binary,pred_count_vec,int_simple_binary,int_simple_binary,int_simple_binary")]
 )
 
 ;; Merging forms are handled through SVE_INT_BINARY.
@@ -3814,7 +4041,8 @@ (define_insn "sub<mode>3"
    sub\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>
    subr\t%0.<Vetype>, %0.<Vetype>, #%D1
    movprfx\t%0, %2\;subr\t%0.<Vetype>, %0.<Vetype>, #%D1"
-  [(set_attr "movprfx" "*,*,yes")]
+  [(set_attr "movprfx" "*,*,yes")
+   (set_attr "sve_type" "int_simple_binary")]
 )
 
 ;; Merging forms are handled through SVE_INT_BINARY.
@@ -3836,6 +4064,7 @@ (define_insn "@aarch64_adr<mode>"
 	  UNSPEC_ADR))]
   "TARGET_SVE"
   "adr\t%0.<Vetype>, [%1.<Vetype>, %2.<Vetype>]"
+  [(set_attr "sve_type" "adr")]
 )
 
 ;; Same, but with the offset being sign-extended from the low 32 bits.
@@ -3856,6 +4085,7 @@ (define_insn_and_rewrite "*aarch64_adr_sxtw"
   {
     operands[3] = CONSTM1_RTX (VNx2BImode);
   }
+  [(set_attr "sve_type" "adr")]
 )
 
 ;; Same, but with the offset being zero-extended from the low 32 bits.
@@ -3869,6 +4099,7 @@ (define_insn "*aarch64_adr_uxtw_unspec"
 	  UNSPEC_ADR))]
   "TARGET_SVE"
   "adr\t%0.d, [%1.d, %2.d, uxtw]"
+  [(set_attr "sve_type" "adr")]
 )
 
 ;; Same, matching as a PLUS rather than unspec.
@@ -3881,6 +4112,7 @@ (define_insn "*aarch64_adr_uxtw_and"
 	  (match_operand:VNx2DI 1 "register_operand" "w")))]
   "TARGET_SVE"
   "adr\t%0.d, [%1.d, %2.d, uxtw]"
+  [(set_attr "sve_type" "adr")]
 )
 
 ;; ADR with a nonzero shift.
@@ -3916,6 +4148,7 @@ (define_insn_and_rewrite "*aarch64_adr<mode>_shift"
   {
     operands[4] = CONSTM1_RTX (<VPRED>mode);
   }
+  [(set_attr "sve_type" "adr")]
 )
 
 ;; Same, but with the index being sign-extended from the low 32 bits.
@@ -3940,6 +4173,7 @@ (define_insn_and_rewrite "*aarch64_adr_shift_sxtw"
   {
     operands[5] = operands[4] = CONSTM1_RTX (VNx2BImode);
   }
+  [(set_attr "sve_type" "adr")]
 )
 
 ;; Same, but with the index being zero-extended from the low 32 bits.
@@ -3961,6 +4195,7 @@ (define_insn_and_rewrite "*aarch64_adr_shift_uxtw"
   {
     operands[5] = CONSTM1_RTX (VNx2BImode);
   }
+  [(set_attr "sve_type" "adr")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -4006,7 +4241,8 @@ (define_insn "@aarch64_pred_<su>abd<mode>"
   "@
    <su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
    movprfx\t%0, %2\;<su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "abd")]
 )
 
 (define_expand "@aarch64_cond_<su>abd<mode>"
@@ -4093,7 +4329,8 @@ (define_insn_and_rewrite "*aarch64_cond_<su>abd<mode>_3"
   {
     operands[4] = operands[5] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "abd")]
 )
 
 ;; Predicated integer absolute difference, merging with an independent value.
@@ -4140,7 +4377,8 @@ (define_insn_and_rewrite "*aarch64_cond_<su>abd<mode>_any"
     else
       FAIL;
   }
-  [(set_attr "movprfx" "yes")]
+  [(set_attr "movprfx" "yes")
+   (set_attr "sve_type" "abd")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -4165,7 +4403,8 @@ (define_insn "@aarch64_sve_<optab><mode>"
    movprfx\t%0, %1\;<binqops_op>\t%0.<Vetype>, %0.<Vetype>, #%D2
    movprfx\t%0, %1\;<binqops_op_rev>\t%0.<Vetype>, %0.<Vetype>, #%N2
    <binqops_op>\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>"
-  [(set_attr "movprfx" "*,*,yes,yes,*")]
+  [(set_attr "movprfx" "*,*,yes,yes,*")
+   (set_attr "sve_type" "qaddsub")]
 )
 
 ;; Unpredicated saturating unsigned addition and subtraction.
@@ -4179,7 +4418,8 @@ (define_insn "@aarch64_sve_<optab><mode>"
    <binqops_op>\t%0.<Vetype>, %0.<Vetype>, #%D2
    movprfx\t%0, %1\;<binqops_op>\t%0.<Vetype>, %0.<Vetype>, #%D2
    <binqops_op>\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>"
-  [(set_attr "movprfx" "*,yes,*")]
+  [(set_attr "movprfx" "*,yes,*")
+   (set_attr "sve_type" "qaddsub")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -4220,7 +4460,8 @@ (define_insn "@aarch64_pred_<optab><mode>"
   "@
    <su>mulh\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
    movprfx\t%0, %2\;<su>mulh\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "mul_<Vetype>")]
 )
 
 ;; Predicated highpart multiplications with merging.
@@ -4257,7 +4498,8 @@ (define_insn "*cond_<optab><mode>_2"
   "@
    <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
    movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,yes")])
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "mul_<Vetype>")])
 
 ;; Predicated highpart multiplications, merging with zero.
 (define_insn "*cond_<optab><mode>_z"
@@ -4274,7 +4516,8 @@ (define_insn "*cond_<optab><mode>_z"
   "@
    movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
    movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "yes")])
+  [(set_attr "movprfx" "yes")
+   (set_attr "sve_type" "mul_<Vetype>")])
 
 ;; -------------------------------------------------------------------------
 ;; ---- [INT] Division
@@ -4315,7 +4558,8 @@ (define_insn "@aarch64_pred_<optab><mode>"
    <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
    <sve_int_op>r\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
    movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,*,yes")]
+  [(set_attr "movprfx" "*,*,yes")
+   (set_attr "sve_type" "idiv_<Vetype>")]
 )
 
 ;; Predicated integer division with merging.
@@ -4345,7 +4589,8 @@ (define_insn "*cond_<optab><mode>_2"
   "@
    <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
    movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "idiv_<Vetype>")]
 )
 
 ;; Predicated integer division, merging with the second input.
@@ -4362,7 +4607,8 @@ (define_insn "*cond_<optab><mode>_3"
   "@
    <sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
    movprfx\t%0, %3\;<sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "idiv_<Vetype>")]
 )
 
 ;; Predicated integer division, merging with an independent value.
@@ -4392,7 +4638,8 @@ (define_insn_and_rewrite "*cond_<optab><mode>_any"
 					     operands[4], operands[1]));
     operands[4] = operands[2] = operands[0];
   }
-  [(set_attr "movprfx" "yes")]
+  [(set_attr "movprfx" "yes")
+   (set_attr "sve_type" "idiv_<Vetype>")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -4415,7 +4662,8 @@ (define_insn "<optab><mode>3"
    <logical>\t%0.<Vetype>, %0.<Vetype>, #%C2
    movprfx\t%0, %1\;<logical>\t%0.<Vetype>, %0.<Vetype>, #%C2
    <logical>\t%0.d, %1.d, %2.d"
-  [(set_attr "movprfx" "*,yes,*")]
+  [(set_attr "movprfx" "*,yes,*")
+   (set_attr "sve_type" "int_simple_binary")]
 )
 
 ;; Merging forms are handled through SVE_INT_BINARY.
@@ -4458,6 +4706,7 @@ (define_insn_and_rewrite "*bic<mode>3"
   {
     operands[3] = CONSTM1_RTX (<VPRED>mode);
   }
+  [(set_attr "sve_type" "int_simple_binary")]
 )
 
 ;; Predicated BIC with merging.
@@ -4488,7 +4737,8 @@ (define_insn "*cond_bic<mode>_2"
   "@
    bic\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
    movprfx\t%0, %2\;bic\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "int_simple_binary")]
 )
 
 ;; Predicated integer BIC, merging with an independent value.
@@ -4516,7 +4766,8 @@ (define_insn_and_rewrite "*cond_bic<mode>_any"
 					     operands[4], operands[1]));
     operands[4] = operands[2] = operands[0];
   }
-  [(set_attr "movprfx" "yes")]
+  [(set_attr "movprfx" "yes")
+   (set_attr "sve_type" "int_simple_binary")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -4594,7 +4845,8 @@ (define_insn_and_split "@aarch64_pred_<optab><mode>"
    && !register_operand (operands[3], <MODE>mode)"
   [(set (match_dup 0) (ASHIFT:SVE_I (match_dup 2) (match_dup 3)))]
   ""
-  [(set_attr "movprfx" "*,*,*,yes")]
+  [(set_attr "movprfx" "*,*,*,yes")
+   (set_attr "sve_type" "simple_shift,simple_shift,shift_reverse,simple_shift")]
 )
 
 ;; Unpredicated shift operations by a constant (post-RA only).
@@ -4607,6 +4859,7 @@ (define_insn "*post_ra_v<optab><mode>3"
 	  (match_operand:SVE_I 2 "aarch64_simd_<lr>shift_imm")))]
   "TARGET_SVE && reload_completed"
   "<shift>\t%0.<Vetype>, %1.<Vetype>, #%2"
+  [(set_attr "sve_type" "simple_shift")]
 )
 
 ;; Predicated integer shift, merging with the first input.
@@ -4623,7 +4876,8 @@ (define_insn "*cond_<optab><mode>_2_const"
   "@
    <shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
    movprfx\t%0, %2\;<shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "simple_shift")]
 )
 
 ;; Predicated integer shift, merging with an independent value.
@@ -4649,7 +4903,8 @@ (define_insn_and_rewrite "*cond_<optab><mode>_any_const"
 					     operands[4], operands[1]));
     operands[4] = operands[2] = operands[0];
   }
-  [(set_attr "movprfx" "yes")]
+  [(set_attr "movprfx" "yes")
+   (set_attr "sve_type" "simple_shift")]
 )
 
 ;; Unpredicated shifts of narrow elements by 64-bit amounts.
@@ -4661,6 +4916,7 @@ (define_insn "@aarch64_sve_<sve_int_op><mode>"
 	  SVE_SHIFT_WIDE))]
   "TARGET_SVE"
   "<sve_int_op>\t%0.<Vetype>, %1.<Vetype>, %2.d"
+  [(set_attr "sve_type" "simple_shift")]
 )
 
 ;; Merging predicated shifts of narrow elements by 64-bit amounts.
@@ -4693,7 +4949,9 @@ (define_insn "*cond_<sve_int_op><mode>_m"
   "@
    <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.d
    movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.d"
-  [(set_attr "movprfx" "*, yes")])
+  [(set_attr "movprfx" "*, yes")
+   (set_attr "sve_type" "simple_shift")]
+)
 
 ;; Predicated shifts of narrow elements by 64-bit amounts, merging with zero.
 (define_insn "*cond_<sve_int_op><mode>_z"
@@ -4710,7 +4968,9 @@ (define_insn "*cond_<sve_int_op><mode>_z"
   "@
    movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.d
    movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.d"
-  [(set_attr "movprfx" "yes")])
+  [(set_attr "movprfx" "yes")
+   (set_attr "sve_type" "simple_shift")]
+)
 
 ;; -------------------------------------------------------------------------
 ;; ---- [INT] Shifts (rounding towards 0)
@@ -4752,7 +5012,8 @@ (define_insn "*sdiv_pow2<mode>3"
   "@
    asrd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
    movprfx\t%0, %2\;asrd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3"
-  [(set_attr "movprfx" "*,yes")])
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "asrd")])
 
 ;; Predicated shift with merging.
 (define_expand "@cond_<sve_int_op><mode>"
@@ -4796,7 +5057,8 @@ (define_insn_and_rewrite "*cond_<sve_int_op><mode>_2"
   {
     operands[4] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes")])
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "<sve_sched>")])
 
 ;; Predicated shift, merging with an independent value.
 (define_insn_and_rewrite "*cond_<sve_int_op><mode>_any"
@@ -4825,7 +5087,8 @@ (define_insn_and_rewrite "*cond_<sve_int_op><mode>_any"
 					     operands[4], operands[1]));
     operands[4] = operands[2] = operands[0];
   }
-  [(set_attr "movprfx" "yes")]
+  [(set_attr "movprfx" "yes")
+   (set_attr "sve_type" "<sve_sched>")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -4847,6 +5110,7 @@ (define_insn "@aarch64_sve_<optab><mode>"
 	  SVE_FP_BINARY_INT))]
   "TARGET_SVE"
   "<sve_fp_op>\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>"
+  [(set_attr "sve_type" "<sve_sched>")]
 )
 
 ;; Predicated floating-point binary operations that take an integer
@@ -4863,7 +5127,8 @@ (define_insn "@aarch64_pred_<optab><mode>"
   "@
    <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
    movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "<sve_sched>")]
 )
 
 ;; Predicated floating-point binary operations with merging, taking an
@@ -4905,7 +5170,8 @@ (define_insn_and_rewrite "*cond_<optab><mode>_2_relaxed"
   {
     operands[4] = copy_rtx (operands[1]);
   }
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "<sve_sched>")]
 )
 
 (define_insn "*cond_<optab><mode>_2_strict"
@@ -4924,7 +5190,8 @@ (define_insn "*cond_<optab><mode>_2_strict"
   "@
    <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
    movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "<sve_sched>")]
 )
 
 ;; Predicated floating-point binary operations that take an integer as
@@ -4963,7 +5230,8 @@ (define_insn_and_rewrite "*cond_<optab><mode>_any_relaxed"
     else
       FAIL;
   }
-  [(set_attr "movprfx" "yes")]
+  [(set_attr "movprfx" "yes")
+   (set_attr "sve_type" "<sve_sched>")]
 )
 
 (define_insn_and_rewrite "*cond_<optab><mode>_any_strict"
@@ -4992,7 +5260,8 @@ (define_insn_and_rewrite "*cond_<optab><mode>_any_strict"
 					     operands[4], operands[1]));
     operands[4] = operands[2] = operands[0];
   }
-  [(set_attr "movprfx" "yes")]
+  [(set_attr "movprfx" "yes")
+   (set_attr "sve_type" "<sve_sched>")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -5013,7 +5282,9 @@ (define_insn "*post_ra_<sve_fp_op><mode>3"
 	  (match_operand:SVE_FULL_F 1 "register_operand" "w")
 	  (match_operand:SVE_FULL_F 2 "register_operand" "w")))]
   "TARGET_SVE && reload_completed"
-  "<sve_fp_op>\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>")
+  "<sve_fp_op>\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>"
+  [(set_attr "sve_type" "<sve_sched_fp_code>")]
+)
 
 ;; -------------------------------------------------------------------------
 ;; ---- [FP] General binary arithmetic corresponding to unspecs
@@ -5043,6 +5314,7 @@ (define_insn "@aarch64_sve_<optab><mode>"
 	  SVE_FP_BINARY))]
   "TARGET_SVE"
   "<sve_fp_op>\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>"
+  [(set_attr "sve_type" "<sve_sched>")]
 )
 
 ;; Unpredicated floating-point binary operations that need to be predicated
@@ -5075,7 +5347,8 @@ (define_insn "@aarch64_pred_<optab><mode>"
    <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
    <sve_fp_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
    movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,*,yes")]
+  [(set_attr "movprfx" "*,*,yes")
+   (set_attr "sve_type" "<sve_sched>")]
 )
 
 ;; Predicated floating-point operations with merging.
@@ -5115,7 +5388,8 @@ (define_insn_and_rewrite "*cond_<optab><mode>_2_relaxed"
   {
     operands[4] = copy_rtx (operands[1]);
   }
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "<sve_sched>")]
 )
 
 (define_insn "*cond_<optab><mode>_2_strict"
@@ -5134,7 +5408,8 @@ (define_insn "*cond_<optab><mode>_2_strict"
   "@
    <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
    movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "<sve_sched>")]
 )
 
 ;; Same for operations that take a 1-bit constant.
@@ -5158,7 +5433,8 @@ (define_insn_and_rewrite "*cond_<optab><mode>_2_const_relaxed"
   {
     operands[4] = copy_rtx (operands[1]);
   }
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "<sve_sched>")]
 )
 
 (define_insn "*cond_<optab><mode>_2_const_strict"
@@ -5177,7 +5453,8 @@ (define_insn "*cond_<optab><mode>_2_const_strict"
   "@
    <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
    movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "<sve_sched>")]
 )
 
 ;; Predicated floating-point operations, merging with the second input.
@@ -5201,7 +5478,8 @@ (define_insn_and_rewrite "*cond_<optab><mode>_3_relaxed"
   {
     operands[4] = copy_rtx (operands[1]);
   }
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "<sve_sched>")]
 )
 
 (define_insn "*cond_<optab><mode>_3_strict"
@@ -5220,7 +5498,8 @@ (define_insn "*cond_<optab><mode>_3_strict"
   "@
    <sve_fp_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
    movprfx\t%0, %3\;<sve_fp_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "<sve_sched>")]
 )
 
 ;; Predicated floating-point operations, merging with an independent value.
@@ -5260,7 +5539,8 @@ (define_insn_and_rewrite "*cond_<optab><mode>_any_relaxed"
     else
       FAIL;
   }
-  [(set_attr "movprfx" "yes")]
+  [(set_attr "movprfx" "yes")
+   (set_attr "sve_type" "<sve_sched>")]
 )
 
 (define_insn_and_rewrite "*cond_<optab><mode>_any_strict"
@@ -5292,7 +5572,8 @@ (define_insn_and_rewrite "*cond_<optab><mode>_any_strict"
 					     operands[4], operands[1]));
     operands[4] = operands[2] = operands[0];
   }
-  [(set_attr "movprfx" "yes")]
+  [(set_attr "movprfx" "yes")
+   (set_attr "sve_type" "<sve_sched>")]
 )
 
 ;; Same for operations that take a 1-bit constant.
@@ -5328,7 +5609,8 @@ (define_insn_and_rewrite "*cond_<optab><mode>_any_const_relaxed"
     else
       FAIL;
   }
-  [(set_attr "movprfx" "yes")]
+  [(set_attr "movprfx" "yes")
+   (set_attr "sve_type" "<sve_sched>")]
 )
 
 (define_insn_and_rewrite "*cond_<optab><mode>_any_const_strict"
@@ -5356,7 +5638,8 @@ (define_insn_and_rewrite "*cond_<optab><mode>_any_const_strict"
 					     operands[4], operands[1]));
     operands[4] = operands[2] = operands[0];
   }
-  [(set_attr "movprfx" "yes")]
+  [(set_attr "movprfx" "yes")
+   (set_attr "sve_type" "<sve_sched>")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -5392,7 +5675,8 @@ (define_insn_and_split "@aarch64_pred_<optab><mode>"
    && INTVAL (operands[4]) == SVE_RELAXED_GP"
   [(set (match_dup 0) (plus:SVE_FULL_F (match_dup 2) (match_dup 3)))]
   ""
-  [(set_attr "movprfx" "*,*,*,*,yes,yes,yes")]
+  [(set_attr "movprfx" "*,*,*,*,yes,yes,yes")
+   (set_attr "sve_type" "<sve_sched>")]
 )
 
 ;; Predicated floating-point addition of a constant, merging with the
@@ -5419,7 +5703,8 @@ (define_insn_and_rewrite "*cond_add<mode>_2_const_relaxed"
   {
     operands[4] = copy_rtx (operands[1]);
   }
-  [(set_attr "movprfx" "*,*,yes,yes")]
+  [(set_attr "movprfx" "*,*,yes,yes")
+   (set_attr "sve_type" "farith_simple")]
 )
 
 (define_insn "*cond_add<mode>_2_const_strict"
@@ -5440,7 +5725,8 @@ (define_insn "*cond_add<mode>_2_const_strict"
    fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
    movprfx\t%0, %2\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
    movprfx\t%0, %2\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3"
-  [(set_attr "movprfx" "*,*,yes,yes")]
+  [(set_attr "movprfx" "*,*,yes,yes")
+   (set_attr "sve_type" "farith_simple")]
 )
 
 ;; Predicated floating-point addition of a constant, merging with an
@@ -5480,7 +5766,8 @@ (define_insn_and_rewrite "*cond_add<mode>_any_const_relaxed"
     else
       FAIL;
   }
-  [(set_attr "movprfx" "yes")]
+  [(set_attr "movprfx" "yes")
+   (set_attr "sve_type" "farith_simple")]
 )
 
 (define_insn_and_rewrite "*cond_add<mode>_any_const_strict"
@@ -5511,7 +5798,8 @@ (define_insn_and_rewrite "*cond_add<mode>_any_const_strict"
 					     operands[4], operands[1]));
     operands[4] = operands[2] = operands[0];
   }
-  [(set_attr "movprfx" "yes")]
+  [(set_attr "movprfx" "yes")
+   (set_attr "sve_type" "farith_simple")]
 )
 
 ;; Register merging forms are handled through SVE_COND_FP_BINARY.
@@ -5536,7 +5824,8 @@ (define_insn "@aarch64_pred_<optab><mode>"
   "@
    fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
    movprfx\t%0, %2\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "fcadd")]
 )
 
 ;; Predicated FCADD with merging.
@@ -5590,7 +5879,8 @@ (define_insn_and_rewrite "*cond_<optab><mode>_2_relaxed"
   {
     operands[4] = copy_rtx (operands[1]);
   }
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "fcadd")]
 )
 
 (define_insn "*cond_<optab><mode>_2_strict"
@@ -5609,7 +5899,8 @@ (define_insn "*cond_<optab><mode>_2_strict"
   "@
    fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
    movprfx\t%0, %2\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "fcadd")]
 )
 
 ;; Predicated FCADD, merging with an independent value.
@@ -5646,7 +5937,8 @@ (define_insn_and_rewrite "*cond_<optab><mode>_any_relaxed"
     else
       FAIL;
   }
-  [(set_attr "movprfx" "yes")]
+  [(set_attr "movprfx" "yes")
+   (set_attr "sve_type" "fcadd")]
 )
 
 (define_insn_and_rewrite "*cond_<optab><mode>_any_strict"
@@ -5675,7 +5967,8 @@ (define_insn_and_rewrite "*cond_<optab><mode>_any_strict"
 					     operands[4], operands[1]));
     operands[4] = operands[2] = operands[0];
   }
-  [(set_attr "movprfx" "yes")]
+  [(set_attr "movprfx" "yes")
+   (set_attr "sve_type" "fcadd")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -5710,7 +6003,8 @@ (define_insn_and_split "@aarch64_pred_<optab><mode>"
    && INTVAL (operands[4]) == SVE_RELAXED_GP"
   [(set (match_dup 0) (minus:SVE_FULL_F (match_dup 2) (match_dup 3)))]
   ""
-  [(set_attr "movprfx" "*,*,*,*,yes,yes")]
+  [(set_attr "movprfx" "*,*,*,*,yes,yes")
+   (set_attr "sve_type" "farith_simple")]
 )
 
 ;; Predicated floating-point subtraction from a constant, merging with the
@@ -5735,7 +6029,8 @@ (define_insn_and_rewrite "*cond_sub<mode>_3_const_relaxed"
   {
     operands[4] = copy_rtx (operands[1]);
   }
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "farith_simple")]
 )
 
 (define_insn "*cond_sub<mode>_3_const_strict"
@@ -5754,7 +6049,8 @@ (define_insn "*cond_sub<mode>_3_const_strict"
   "@
    fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
    movprfx\t%0, %3\;fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "farith_simple")]
 )
 
 ;; Predicated floating-point subtraction from a constant, merging with an
@@ -5791,7 +6087,8 @@ (define_insn_and_rewrite "*cond_sub<mode>_const_relaxed"
     else
       FAIL;
   }
-  [(set_attr "movprfx" "yes")]
+  [(set_attr "movprfx" "yes")
+   (set_attr "sve_type" "farith_simple")]
 )
 
 (define_insn_and_rewrite "*cond_sub<mode>_const_strict"
@@ -5819,7 +6116,8 @@ (define_insn_and_rewrite "*cond_sub<mode>_const_strict"
                                              operands[4], operands[1]));
     operands[4] = operands[3] = operands[0];
   }
-  [(set_attr "movprfx" "yes")]
+  [(set_attr "movprfx" "yes")
+   (set_attr "sve_type" "farith_simple")]
 )
 ;; Register merging forms are handled through SVE_COND_FP_BINARY.
 
@@ -5867,7 +6165,8 @@ (define_insn_and_rewrite "*aarch64_pred_abd<mode>_relaxed"
   {
     operands[5] = copy_rtx (operands[1]);
   }
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "fabd")]
 )
 
 (define_insn "*aarch64_pred_abd<mode>_strict"
@@ -5886,7 +6185,8 @@ (define_insn "*aarch64_pred_abd<mode>_strict"
   "@
    fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
    movprfx\t%0, %2\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "fabd")]
 )
 
 (define_expand "@aarch64_cond_abd<mode>"
@@ -5939,7 +6239,8 @@ (define_insn_and_rewrite "*aarch64_cond_abd<mode>_2_relaxed"
     operands[4] = copy_rtx (operands[1]);
     operands[5] = copy_rtx (operands[1]);
   }
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "fabd")]
 )
 
 (define_insn "*aarch64_cond_abd<mode>_2_strict"
@@ -5962,7 +6263,8 @@ (define_insn "*aarch64_cond_abd<mode>_2_strict"
   "@
    fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
    movprfx\t%0, %2\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "fabd")]
 )
 
 ;; Predicated floating-point absolute difference, merging with the second
@@ -5993,7 +6295,8 @@ (define_insn_and_rewrite "*aarch64_cond_abd<mode>_3_relaxed"
     operands[4] = copy_rtx (operands[1]);
     operands[5] = copy_rtx (operands[1]);
   }
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "fabd")]
 )
 
 (define_insn "*aarch64_cond_abd<mode>_3_strict"
@@ -6016,7 +6319,8 @@ (define_insn "*aarch64_cond_abd<mode>_3_strict"
   "@
    fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
    movprfx\t%0, %3\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "fabd")]
 )
 
 ;; Predicated floating-point absolute difference, merging with an
@@ -6065,7 +6369,8 @@ (define_insn_and_rewrite "*aarch64_cond_abd<mode>_any_relaxed"
     else
       FAIL;
   }
-  [(set_attr "movprfx" "yes")]
+  [(set_attr "movprfx" "yes")
+   (set_attr "sve_type" "fabd")]
 )
 
 (define_insn_and_rewrite "*aarch64_cond_abd<mode>_any_strict"
@@ -6101,7 +6406,8 @@ (define_insn_and_rewrite "*aarch64_cond_abd<mode>_any_strict"
 					     operands[4], operands[1]));
     operands[4] = operands[3] = operands[0];
   }
-  [(set_attr "movprfx" "yes")]
+  [(set_attr "movprfx" "yes")
+   (set_attr "sve_type" "fabd")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -6134,7 +6440,8 @@ (define_insn_and_split "@aarch64_pred_<optab><mode>"
    && INTVAL (operands[4]) == SVE_RELAXED_GP"
   [(set (match_dup 0) (mult:SVE_FULL_F (match_dup 2) (match_dup 3)))]
   ""
-  [(set_attr "movprfx" "*,*,*,yes,yes")]
+  [(set_attr "movprfx" "*,*,*,yes,yes")
+   (set_attr "sve_type" "fmul")]
 )
 
 ;; Merging forms are handled through SVE_COND_FP_BINARY and
@@ -6151,6 +6458,7 @@ (define_insn "@aarch64_mul_lane_<mode>"
 	  (match_operand:SVE_FULL_F 1 "register_operand" "w")))]
   "TARGET_SVE"
   "fmul\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>[%3]"
+  [(set_attr "sve_type" "fmul")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -6214,6 +6522,7 @@ (define_insn "*<optab><mode>3"
 	  LOGICALF))]
   "TARGET_SVE"
   "<logicalf_op>\t%0.d, %1.d, %2.d"
+  [(set_attr "sve_type" "<sve_sched>")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -6334,7 +6643,8 @@ (define_insn "@aarch64_pred_<optab><mode>"
    <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
    movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
    movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,*,yes,yes")]
+  [(set_attr "movprfx" "*,*,yes,yes")
+   (set_attr "sve_type" "fminmax")]
 )
 
 ;; Merging forms are handled through SVE_COND_FP_BINARY and
@@ -6361,6 +6671,7 @@ (define_insn "and<mode>3"
 		      (match_operand:PRED_ALL 2 "register_operand" "Upa")))]
   "TARGET_SVE"
   "and\t%0.b, %1/z, %2.b, %2.b"
+  [(set_attr "sve_type" "pred_logical")]
 )
 
 ;; Unpredicated predicate EOR and ORR.
@@ -6387,6 +6698,7 @@ (define_insn "@aarch64_pred_<optab><mode>_z"
 	  (match_operand:PRED_ALL 1 "register_operand" "Upa")))]
   "TARGET_SVE"
   "<logical>\t%0.b, %1/z, %2.b, %3.b"
+  [(set_attr "sve_type" "pred_logical")]
 )
 
 ;; Perform a logical operation on operands 2 and 3, using operand 1 as
@@ -6409,6 +6721,7 @@ (define_insn "*<optab><mode>3_cc"
 		      (match_dup 4)))]
   "TARGET_SVE"
   "<logical>s\t%0.b, %1/z, %2.b, %3.b"
+  [(set_attr "sve_type" "pred_logical_setflags")]
 )
 
 ;; Same with just the flags result.
@@ -6427,6 +6740,7 @@ (define_insn "*<optab><mode>3_ptest"
    (clobber (match_scratch:VNx16BI 0 "=Upa"))]
   "TARGET_SVE"
   "<logical>s\t%0.b, %1/z, %2.b, %3.b"
+  [(set_attr "sve_type" "pred_logical_setflags")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -6447,6 +6761,7 @@ (define_insn "aarch64_pred_<nlogical><mode>_z"
 	  (match_operand:PRED_ALL 1 "register_operand" "Upa")))]
   "TARGET_SVE"
   "<nlogical>\t%0.b, %1/z, %2.b, %3.b"
+  [(set_attr "sve_type" "pred_logical")]
 )
 
 ;; Same, but set the flags as a side-effect.
@@ -6470,6 +6785,7 @@ (define_insn "*<nlogical><mode>3_cc"
 		      (match_dup 4)))]
   "TARGET_SVE"
   "<nlogical>s\t%0.b, %1/z, %2.b, %3.b"
+  [(set_attr "sve_type" "pred_logical_setflags")]
 )
 
 ;; Same with just the flags result.
@@ -6489,6 +6805,7 @@ (define_insn "*<nlogical><mode>3_ptest"
    (clobber (match_scratch:VNx16BI 0 "=Upa"))]
   "TARGET_SVE"
   "<nlogical>s\t%0.b, %1/z, %2.b, %3.b"
+  [(set_attr "sve_type" "pred_logical_setflags")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -6509,6 +6826,7 @@ (define_insn "aarch64_pred_<logical_nn><mode>_z"
 	  (match_operand:PRED_ALL 1 "register_operand" "Upa")))]
   "TARGET_SVE"
   "<logical_nn>\t%0.b, %1/z, %2.b, %3.b"
+  [(set_attr "sve_type" "pred_logical")]
 )
 
 ;; Same, but set the flags as a side-effect.
@@ -6533,6 +6851,7 @@ (define_insn "*<logical_nn><mode>3_cc"
 		      (match_dup 4)))]
   "TARGET_SVE"
   "<logical_nn>s\t%0.b, %1/z, %2.b, %3.b"
+  [(set_attr "sve_type" "pred_logical_setflags")]
 )
 
 ;; Same with just the flags result.
@@ -6553,6 +6872,7 @@ (define_insn "*<logical_nn><mode>3_ptest"
    (clobber (match_scratch:VNx16BI 0 "=Upa"))]
   "TARGET_SVE"
   "<logical_nn>s\t%0.b, %1/z, %2.b, %3.b"
+  [(set_attr "sve_type" "pred_logical_setflags")]
 )
 
 ;; =========================================================================
@@ -6602,7 +6922,8 @@ (define_insn "@aarch64_pred_fma<mode>"
    mad\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
    mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
    movprfx\t%0, %4\;mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,*,yes")]
+  [(set_attr "movprfx" "*,*,yes")
+   (set_attr "sve_type" "mul_accum_<Vetype>")]
 )
 
 ;; Predicated integer addition of product with merging.
@@ -6644,7 +6965,8 @@ (define_insn "*cond_fma<mode>_2"
   "@
    mad\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
    movprfx\t%0, %2\;mad\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "mul_accum_<Vetype>")]
 )
 
 ;; Predicated integer addition of product, merging with the third input.
@@ -6663,7 +6985,8 @@ (define_insn "*cond_fma<mode>_4"
   "@
    mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
    movprfx\t%0, %4\;mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "mul_accum_<Vetype>")]
 )
 
 ;; Predicated integer addition of product, merging with an independent value.
@@ -6697,7 +7020,8 @@ (define_insn_and_rewrite "*cond_fma<mode>_any"
 					     operands[5], operands[1]));
     operands[5] = operands[4] = operands[0];
   }
-  [(set_attr "movprfx" "yes")]
+  [(set_attr "movprfx" "yes")
+   (set_attr "sve_type" "mul_accum_<Vetype>")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -6743,7 +7067,8 @@ (define_insn "@aarch64_pred_fnma<mode>"
    msb\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
    mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
    movprfx\t%0, %4\;mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,*,yes")]
+  [(set_attr "movprfx" "*,*,yes")
+   (set_attr "sve_type" "mul_accum_<Vetype>")]
 )
 
 ;; Predicated integer subtraction of product with merging.
@@ -6785,7 +7110,8 @@ (define_insn "*cond_fnma<mode>_2"
   "@
    msb\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
    movprfx\t%0, %2\;msb\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "mul_accum_<Vetype>")]
 )
 
 ;; Predicated integer subtraction of product, merging with the third input.
@@ -6804,7 +7130,8 @@ (define_insn "*cond_fnma<mode>_4"
   "@
    mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
    movprfx\t%0, %4\;mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "mul_accum_<Vetype>")]
 )
 
 ;; Predicated integer subtraction of product, merging with an
@@ -6839,7 +7166,8 @@ (define_insn_and_rewrite "*cond_fnma<mode>_any"
 					     operands[5], operands[1]));
     operands[5] = operands[4] = operands[0];
   }
-  [(set_attr "movprfx" "yes")]
+  [(set_attr "movprfx" "yes")
+   (set_attr "sve_type" "mul_accum_<Vetype>")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -6865,7 +7193,8 @@ (define_insn "<sur>dot_prod<vsi2qi>"
   "@
    <sur>dot\\t%0.<Vetype>, %1.<Vetype_fourth>, %2.<Vetype_fourth>
    movprfx\t%0, %3\;<sur>dot\\t%0.<Vetype>, %1.<Vetype_fourth>, %2.<Vetype_fourth>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "dot_<Vetype_fourth>")]
 )
 
 ;; Four-element integer dot-product by selected lanes with accumulation.
@@ -6884,7 +7213,8 @@ (define_insn "@aarch64_<sur>dot_prod_lane<vsi2qi>"
   "@
    <sur>dot\\t%0.<Vetype>, %1.<Vetype_fourth>, %2.<Vetype_fourth>[%3]
    movprfx\t%0, %4\;<sur>dot\\t%0.<Vetype>, %1.<Vetype_fourth>, %2.<Vetype_fourth>[%3]"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "dot_<Vetype_fourth>")]
 )
 
 (define_insn "@<sur>dot_prod<vsi2qi>"
@@ -6899,7 +7229,8 @@ (define_insn "@<sur>dot_prod<vsi2qi>"
   "@
    <sur>dot\\t%0.s, %1.b, %2.b
    movprfx\t%0, %3\;<sur>dot\\t%0.s, %1.b, %2.b"
-   [(set_attr "movprfx" "*,yes")]
+   [(set_attr "movprfx" "*,yes")
+    (set_attr "sve_type" "dot_<Vetype_fourth>")]
 )
 
 (define_insn "@aarch64_<sur>dot_prod_lane<vsi2qi>"
@@ -6917,7 +7248,8 @@ (define_insn "@aarch64_<sur>dot_prod_lane<vsi2qi>"
   "@
    <sur>dot\\t%0.s, %1.b, %2.b[%3]
    movprfx\t%0, %4\;<sur>dot\\t%0.s, %1.b, %2.b[%3]"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "dot_<Vetype_fourth>")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -6971,7 +7303,8 @@ (define_insn "@aarch64_sve_add_<optab><vsi2qi>"
   "@
    <sur>mmla\\t%0.s, %2.b, %3.b
    movprfx\t%0, %1\;<sur>mmla\\t%0.s, %2.b, %3.b"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "usmmla")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -7019,7 +7352,8 @@ (define_insn "@aarch64_pred_<optab><mode>"
    <sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
    <sve_fmad_op>\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
    movprfx\t%0, %4\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,*,yes")]
+  [(set_attr "movprfx" "*,*,yes")
+   (set_attr "sve_type" "fma")]
 )
 
 ;; Predicated floating-point ternary operations with merging.
@@ -7067,7 +7401,8 @@ (define_insn_and_rewrite "*cond_<optab><mode>_2_relaxed"
   {
     operands[5] = copy_rtx (operands[1]);
   }
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "fma")]
 )
 
 (define_insn "*cond_<optab><mode>_2_strict"
@@ -7087,7 +7422,8 @@ (define_insn "*cond_<optab><mode>_2_strict"
   "@
    <sve_fmad_op>\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
    movprfx\t%0, %2\;<sve_fmad_op>\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "fma")]
 )
 
 ;; Predicated floating-point ternary operations, merging with the
@@ -7113,7 +7449,8 @@ (define_insn_and_rewrite "*cond_<optab><mode>_4_relaxed"
   {
     operands[5] = copy_rtx (operands[1]);
   }
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "fma")]
 )
 
 (define_insn "*cond_<optab><mode>_4_strict"
@@ -7133,7 +7470,8 @@ (define_insn "*cond_<optab><mode>_4_strict"
   "@
    <sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
    movprfx\t%0, %4\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "fma")]
 )
 
 ;; Predicated floating-point ternary operations, merging with an
@@ -7177,7 +7515,8 @@ (define_insn_and_rewrite "*cond_<optab><mode>_any_relaxed"
     else
       FAIL;
   }
-  [(set_attr "movprfx" "yes")]
+  [(set_attr "movprfx" "yes")
+   (set_attr "sve_type" "fma")]
 )
 
 (define_insn_and_rewrite "*cond_<optab><mode>_any_strict"
@@ -7212,7 +7551,8 @@ (define_insn_and_rewrite "*cond_<optab><mode>_any_strict"
 					     operands[5], operands[1]));
     operands[5] = operands[4] = operands[0];
   }
-  [(set_attr "movprfx" "yes")]
+  [(set_attr "movprfx" "yes")
+   (set_attr "sve_type" "fma")]
 )
 
 ;; Unpredicated FMLA and FMLS by selected lanes.  It doesn't seem worth using
@@ -7231,7 +7571,8 @@ (define_insn "@aarch64_<optab>_lane_<mode>"
   "@
    <sve_fp_op>\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>[%3]
    movprfx\t%0, %4\;<sve_fp_op>\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>[%3]"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "fma")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -7255,7 +7596,8 @@ (define_insn "@aarch64_pred_<optab><mode>"
   "@
    fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
    movprfx\t%0, %4\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "fcmla")]
 )
 
 ;; unpredicated optab pattern for auto-vectorizer
@@ -7353,7 +7695,8 @@ (define_insn_and_rewrite "*cond_<optab><mode>_4_relaxed"
   {
     operands[5] = copy_rtx (operands[1]);
   }
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "fcmla")]
 )
 
 (define_insn "*cond_<optab><mode>_4_strict"
@@ -7373,7 +7716,8 @@ (define_insn "*cond_<optab><mode>_4_strict"
   "@
    fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
    movprfx\t%0, %4\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "fcmla")]
 )
 
 ;; Predicated FCMLA, merging with an independent value.
@@ -7411,7 +7755,8 @@ (define_insn_and_rewrite "*cond_<optab><mode>_any_relaxed"
     else
       FAIL;
   }
-  [(set_attr "movprfx" "yes")]
+  [(set_attr "movprfx" "yes")
+   (set_attr "sve_type" "fcmla")]
 )
 
 (define_insn_and_rewrite "*cond_<optab><mode>_any_strict"
@@ -7441,7 +7786,8 @@ (define_insn_and_rewrite "*cond_<optab><mode>_any_strict"
 					     operands[5], operands[1]));
     operands[5] = operands[4] = operands[0];
   }
-  [(set_attr "movprfx" "yes")]
+  [(set_attr "movprfx" "yes")
+   (set_attr "sve_type" "fcmla")]
 )
 
 ;; Unpredicated FCMLA with indexing.
@@ -7459,7 +7805,8 @@ (define_insn "@aarch64_<optab>_lane_<mode>"
   "@
    fcmla\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>[%3], #<rot>
    movprfx\t%0, %4\;fcmla\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>[%3], #<rot>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "fcmla")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -7480,7 +7827,8 @@ (define_insn "@aarch64_sve_tmad<mode>"
   "@
    ftmad\t%0.<Vetype>, %0.<Vetype>, %2.<Vetype>, #%3
    movprfx\t%0, %1\;ftmad\t%0.<Vetype>, %0.<Vetype>, %2.<Vetype>, #%3"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "ftmad")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -7504,7 +7852,8 @@ (define_insn "@aarch64_sve_<sve_fp_op>vnx4sf"
   "@
    <sve_fp_op>\t%0.s, %2.h, %3.h
    movprfx\t%0, %1\;<sve_fp_op>\t%0.s, %2.h, %3.h"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "<sve_sched>")]
 )
 
 ;; The immediate range is enforced before generating the instruction.
@@ -7520,7 +7869,8 @@ (define_insn "@aarch64_sve_<sve_fp_op>_lanevnx4sf"
   "@
    <sve_fp_op>\t%0.s, %2.h, %3.h[%4]
    movprfx\t%0, %1\;<sve_fp_op>\t%0.s, %2.h, %3.h[%4]"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "<sve_sched>")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -7542,7 +7892,8 @@ (define_insn "@aarch64_sve_<sve_fp_op><mode>"
   "@
    <sve_fp_op>\\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>
    movprfx\t%0, %1\;<sve_fp_op>\\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "fmmla")]
 )
 
 ;; =========================================================================
@@ -7610,7 +7961,8 @@ (define_insn "*vcond_mask_<mode><vpred>"
    movprfx\t%0.<Vetype>, %3/z, %0.<Vetype>\;fmov\t%0.<Vetype>, %3/m, #%1
    movprfx\t%0, %2\;mov\t%0.<Vetype>, %3/m, #%I1
    movprfx\t%0, %2\;fmov\t%0.<Vetype>, %3/m, #%1"
-  [(set_attr "movprfx" "*,*,*,*,yes,yes,yes")]
+  [(set_attr "movprfx" "*,*,*,*,yes,yes,yes")
+   (set_attr "sve_type" "mov_z, mov_z_imm, mov_z_imm, mov_z_imm, mov_z_imm, mov_z_imm, mov_z_imm")]
 )
 
 ;; Optimize selects between a duplicated scalar variable and another vector,
@@ -7633,7 +7985,8 @@ (define_insn "@aarch64_sel_dup<mode>"
    movprfx\t%0.<Vetype>, %3/z, %0.<Vetype>\;mov\t%0.<Vetype>, %3/m, %<Vetype>1
    movprfx\t%0, %2\;mov\t%0.<Vetype>, %3/m, %<vwcore>1
    movprfx\t%0, %2\;mov\t%0.<Vetype>, %3/m, %<Vetype>1"
-  [(set_attr "movprfx" "*,*,yes,yes,yes,yes")]
+  [(set_attr "movprfx" "*,*,yes,yes,yes,yes")
+   (set_attr "sve_type" "mov_from_gp, mov_from_simd, mov_from_gp, mov_from_simd, mov_from_gp, mov_from_simd")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -7784,6 +8137,7 @@ (define_insn "@aarch64_pred_cmp<cmp_op><mode>"
   "@
    cmp<cmp_op>\t%0.<Vetype>, %1/z, %3.<Vetype>, #%4
    cmp<cmp_op>\t%0.<Vetype>, %1/z, %3.<Vetype>, %4.<Vetype>"
+  [(set_attr "sve_type" "cmp")]
 )
 
 ;; Predicated integer comparisons in which both the flag and predicate
@@ -7820,6 +8174,7 @@ (define_insn_and_rewrite "*cmp<cmp_op><mode>_cc"
     operands[6] = copy_rtx (operands[4]);
     operands[7] = operands[5];
   }
+  [(set_attr "sve_type" "cmp")]
 )
 
 ;; Predicated integer comparisons in which only the flags result is
@@ -7849,6 +8204,7 @@ (define_insn_and_rewrite "*cmp<cmp_op><mode>_ptest"
     operands[6] = copy_rtx (operands[4]);
     operands[7] = operands[5];
   }
+  [(set_attr "sve_type" "cmp")]
 )
 
 ;; Predicated integer comparisons, formed by combining a PTRUE-predicated
@@ -7896,6 +8252,7 @@ (define_insn "@aarch64_pred_cmp<cmp_op><mode>_wide"
    (clobber (reg:CC_NZC CC_REGNUM))]
   "TARGET_SVE"
   "cmp<cmp_op>\t%0.<Vetype>, %1/z, %3.<Vetype>, %4.d"
+  [(set_attr "sve_type" "cmp")]
 )
 
 ;; Predicated integer wide comparisons in which both the flag and
@@ -7927,6 +8284,7 @@ (define_insn "*aarch64_pred_cmp<cmp_op><mode>_wide_cc"
   "TARGET_SVE
    && aarch64_sve_same_pred_for_ptest_p (&operands[4], &operands[6])"
   "cmp<cmp_op>\t%0.<Vetype>, %1/z, %2.<Vetype>, %3.d"
+  [(set_attr "sve_type" "cmp")]
 )
 
 ;; Predicated integer wide comparisons in which only the flags result
@@ -7950,6 +8308,7 @@ (define_insn "*aarch64_pred_cmp<cmp_op><mode>_wide_ptest"
   "TARGET_SVE
    && aarch64_sve_same_pred_for_ptest_p (&operands[4], &operands[6])"
   "cmp<cmp_op>\t%0.<Vetype>, %1/z, %2.<Vetype>, %3.d"
+  [(set_attr "sve_type" "cmp")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -7978,6 +8337,7 @@ (define_insn "@while_<while_optab_cmp><GPI:mode><PRED_ALL:mode>"
    (clobber (reg:CC_NZC CC_REGNUM))]
   "TARGET_SVE"
   "while<cmp_op>\t%0.<PRED_ALL:Vetype>, %<w>1, %<w>2"
+  [(set_attr "sve_type" "while")]
 )
 
 ;; The WHILE instructions set the flags in the same way as a PTEST with
@@ -8007,6 +8367,7 @@ (define_insn_and_rewrite "*while_<while_optab_cmp><GPI:mode><PRED_ALL:mode>_cc"
     operands[3] = CONSTM1_RTX (VNx16BImode);
     operands[4] = CONSTM1_RTX (<PRED_ALL:MODE>mode);
   }
+  [(set_attr "sve_type" "while")]
 )
 
 ;; Same, but handle the case in which only the flags result is useful.
@@ -8031,6 +8392,7 @@ (define_insn_and_rewrite "@while_<while_optab_cmp><GPI:mode><PRED_ALL:mode>_ptes
     operands[3] = CONSTM1_RTX (VNx16BImode);
     operands[4] = CONSTM1_RTX (<PRED_ALL:MODE>mode);
   }
+  [(set_attr "sve_type" "while")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -8075,6 +8437,7 @@ (define_insn "@aarch64_pred_fcm<cmp_op><mode>"
   "@
    fcm<cmp_op>\t%0.<Vetype>, %1/z, %3.<Vetype>, #0.0
    fcm<cmp_op>\t%0.<Vetype>, %1/z, %3.<Vetype>, %4.<Vetype>"
+  [(set_attr "sve_type" "fcm")]
 )
 
 ;; Same for unordered comparisons.
@@ -8088,6 +8451,7 @@ (define_insn "@aarch64_pred_fcmuo<mode>"
 	  UNSPEC_COND_FCMUO))]
   "TARGET_SVE"
   "fcmuo\t%0.<Vetype>, %1/z, %3.<Vetype>, %4.<Vetype>"
+  [(set_attr "sve_type" "fcm")]
 )
 
 ;; Floating-point comparisons predicated on a PTRUE, with the results ANDed
@@ -8351,6 +8715,7 @@ (define_insn_and_rewrite "*aarch64_pred_fac<cmp_op><mode>_relaxed"
     operands[5] = copy_rtx (operands[1]);
     operands[6] = copy_rtx (operands[1]);
   }
+  [(set_attr "sve_type" "facm")]
 )
 
 (define_insn "*aarch64_pred_fac<cmp_op><mode>_strict"
@@ -8371,6 +8736,7 @@ (define_insn "*aarch64_pred_fac<cmp_op><mode>_strict"
 	  SVE_COND_FP_ABS_CMP))]
   "TARGET_SVE"
   "fac<cmp_op>\t%0.<Vetype>, %1/z, %2.<Vetype>, %3.<Vetype>"
+  [(set_attr "sve_type" "facm")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -8391,6 +8757,7 @@ (define_insn "@vcond_mask_<mode><mode>"
 	    (match_operand:PRED_ALL 2 "register_operand" "Upa"))))]
   "TARGET_SVE"
   "sel\t%0.b, %3, %1.b, %2.b"
+  [(set_attr "sve_type" "sel_p")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -8439,6 +8806,7 @@ (define_insn "aarch64_ptest<mode>"
 		       UNSPEC_PTEST))]
   "TARGET_SVE"
   "ptest\t%0, %3.b"
+  [(set_attr "sve_type" "ptest")]
 )
 
 ;; =========================================================================
@@ -8466,6 +8834,7 @@ (define_insn "@fold_extract_<last_op>_<mode>"
   "@
    clast<ab>\t%<vwcore>0, %2, %<vwcore>0, %3.<Vetype>
    clast<ab>\t%<Vetype>0, %2, %<Vetype>0, %3.<Vetype>"
+  [(set_attr "sve_type" "clastab_gp, clastab_simd")]
 )
 
 (define_insn "@aarch64_fold_extract_vector_<last_op>_<mode>"
@@ -8479,6 +8848,7 @@ (define_insn "@aarch64_fold_extract_vector_<last_op>_<mode>"
   "@
    clast<ab>\t%0.<Vetype>, %2, %0.<Vetype>, %3.<Vetype>
    movprfx\t%0, %1\;clast<ab>\t%0.<Vetype>, %2, %0.<Vetype>, %3.<Vetype>"
+  [(set_attr "sve_type" "clastab_simd")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -8519,6 +8889,7 @@ (define_insn "@aarch64_pred_reduc_<optab>_<mode>"
 		   SVE_INT_ADDV))]
   "TARGET_SVE && <max_elem_bits> >= <elem_bits>"
   "<su>addv\t%d0, %1, %2.<Vetype>"
+  [(set_attr "sve_type" "int_reduce_<Vetype>")]
 )
 
 ;; Unpredicated integer reductions.
@@ -8541,6 +8912,7 @@ (define_insn "@aarch64_pred_reduc_<optab>_<mode>"
 		      SVE_INT_REDUCTION))]
   "TARGET_SVE"
   "<sve_int_op>\t%<Vetype>0, %1, %2.<Vetype>"
+  [(set_attr "sve_type" "<sve_sched>")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -8585,6 +8957,7 @@ (define_insn "@aarch64_pred_reduc_<optab>_<mode>"
 		      SVE_FP_REDUCTION))]
   "TARGET_SVE"
   "<sve_fp_op>\t%<Vetype>0, %1, %2.<Vetype>"
+  [(set_attr "sve_type" "float_reduce_<Vetype>")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -8616,6 +8989,7 @@ (define_insn "mask_fold_left_plus_<mode>"
 		      UNSPEC_FADDA))]
   "TARGET_SVE"
   "fadda\t%<Vetype>0, %3, %<Vetype>0, %2.<Vetype>"
+  [(set_attr "sve_type" "fadda_<Vetype>")]
 )
 
 ;; =========================================================================
@@ -8650,6 +9024,7 @@ (define_insn "@aarch64_sve_tbl<mode>"
 	  UNSPEC_TBL))]
   "TARGET_SVE"
   "tbl\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>"
+  [(set_attr "sve_type" "tbl")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -8670,6 +9045,7 @@ (define_insn "@aarch64_sve_compact<mode>"
 	  UNSPEC_SVE_COMPACT))]
   "TARGET_SVE"
   "compact\t%0.<Vetype>, %1, %2.<Vetype>"
+  [(set_attr "sve_type" "compact")]
 )
 
 ;; Duplicate one element of a vector.
@@ -8682,6 +9058,7 @@ (define_insn "@aarch64_sve_dup_lane<mode>"
   "TARGET_SVE
    && IN_RANGE (INTVAL (operands[2]) * <container_bits> / 8, 0, 63)"
   "dup\t%0.<Vctype>, %1.<Vctype>[%2]"
+  [(set_attr "sve_type" "dup_index")]
 )
 
 ;; Use DUP.Q to duplicate a 128-bit segment of a register.
@@ -8718,6 +9095,7 @@ (define_insn "@aarch64_sve_dupq_lane<mode>"
     operands[2] = gen_int_mode (byte / 16, DImode);
     return "dup\t%0.q, %1.q[%2]";
   }
+  [(set_attr "sve_type" "dup_from_q")]
 )
 
 ;; Reverse the order of elements within a full vector.
@@ -8727,7 +9105,9 @@ (define_insn "@aarch64_sve_rev<mode>"
 	  [(match_operand:SVE_ALL 1 "register_operand" "w")]
 	  UNSPEC_REV))]
   "TARGET_SVE"
-  "rev\t%0.<Vctype>, %1.<Vctype>")
+  "rev\t%0.<Vctype>, %1.<Vctype>"
+  [(set_attr "sve_type" "rev")]
+)
 
 ;; -------------------------------------------------------------------------
 ;; ---- [INT,FP] Special-purpose binary permutes
@@ -8755,7 +9135,8 @@ (define_insn "@aarch64_sve_splice<mode>"
   "@
    splice\t%0.<Vetype>, %1, %0.<Vetype>, %3.<Vetype>
    movprfx\t%0, %2\;splice\t%0.<Vetype>, %1, %0.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*, yes")]
+  [(set_attr "movprfx" "*, yes")
+   (set_attr "sve_type" "splice")]
 )
 
 ;; Permutes that take half the elements from one vector and half the
@@ -8768,6 +9149,7 @@ (define_insn "@aarch64_sve_<perm_insn><mode>"
 	  PERMUTE))]
   "TARGET_SVE"
   "<perm_insn>\t%0.<Vctype>, %1.<Vctype>, %2.<Vctype>"
+  [(set_attr "sve_type" "permute_z")]
 )
 
 ;; Apply PERMUTE to 128-bit sequences.  The behavior of these patterns
@@ -8780,6 +9162,7 @@ (define_insn "@aarch64_sve_<optab><mode>"
 	  PERMUTEQ))]
   "TARGET_SVE_F64MM"
   "<perm_insn>\t%0.q, %1.q, %2.q"
+  [(set_attr "sve_type" "permute_z")]
 )
 
 ;; Concatenate two vectors and extract a subvector.  Note that the
@@ -8799,7 +9182,8 @@ (define_insn "@aarch64_sve_ext<mode>"
 	    ? "ext\\t%0.b, %0.b, %2.b, #%3"
 	    : "movprfx\t%0, %1\;ext\\t%0.b, %0.b, %2.b, #%3");
   }
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "ext")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -8814,7 +9198,9 @@ (define_insn "@aarch64_sve_rev<mode>"
 	(unspec:PRED_ALL [(match_operand:PRED_ALL 1 "register_operand" "Upa")]
 			 UNSPEC_REV))]
   "TARGET_SVE"
-  "rev\t%0.<Vetype>, %1.<Vetype>")
+  "rev\t%0.<Vetype>, %1.<Vetype>"
+  [(set_attr "sve_type" "rev_p")]
+)
 
 ;; -------------------------------------------------------------------------
 ;; ---- [PRED] Special-purpose binary permutes
@@ -8837,6 +9223,7 @@ (define_insn "@aarch64_sve_<perm_insn><mode>"
 			 PERMUTE))]
   "TARGET_SVE"
   "<perm_insn>\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>"
+  [(set_attr "sve_type" "permute_p")]
 )
 
 ;; Special purpose permute used by the predicate generation instructions.
@@ -8851,6 +9238,7 @@ (define_insn "@aarch64_sve_trn1_conv<mode>"
 			UNSPEC_TRN1_CONV))]
   "TARGET_SVE"
   "trn1\t%0.<PRED_ALL:Vetype>, %1.<PRED_ALL:Vetype>, %2.<PRED_ALL:Vetype>"
+  [(set_attr "sve_type" "permute_p")]
 )
 
 ;; =========================================================================
@@ -8874,6 +9262,7 @@ (define_insn "vec_pack_trunc_<Vwide>"
 	  UNSPEC_PACK))]
   "TARGET_SVE"
   "uzp1\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>"
+  [(set_attr "sve_type" "permute_z")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -8910,6 +9299,7 @@ (define_insn "@aarch64_sve_<su>unpk<perm_hilo>_<SVE_FULL_BHSI:mode>"
 	  UNPACK))]
   "TARGET_SVE"
   "<su>unpk<perm_hilo>\t%0.<Vewtype>, %1.<Vetype>"
+  [(set_attr "sve_type" "unpk_z")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -8947,7 +9337,8 @@ (define_insn "@aarch64_sve_<optab>_nontrunc<SVE_FULL_F:mode><SVE_FULL_HSDI:mode>
   "@
    fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>
    movprfx\t%0, %2\;fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "fcvtz_<SVE_FULL_F:Vetype>")]
 )
 
 ;; Predicated narrowing float-to-integer conversion.
@@ -8962,7 +9353,8 @@ (define_insn "@aarch64_sve_<optab>_trunc<VNx2DF_ONLY:mode><VNx4SI_ONLY:mode>"
   "@
    fcvtz<su>\t%0.<VNx4SI_ONLY:Vetype>, %1/m, %2.<VNx2DF_ONLY:Vetype>
    movprfx\t%0, %2\;fcvtz<su>\t%0.<VNx4SI_ONLY:Vetype>, %1/m, %2.<VNx2DF_ONLY:Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "fcvtz_<VNx2DF_ONLY:Vetype>")]
 )
 
 ;; Predicated float-to-integer conversion with merging, either to the same
@@ -9006,7 +9398,8 @@ (define_insn_and_rewrite "*cond_<optab>_nontrunc<SVE_FULL_F:mode><SVE_FULL_HSDI:
   {
     operands[4] = copy_rtx (operands[1]);
   }
-  [(set_attr "movprfx" "*,yes,yes")]
+  [(set_attr "movprfx" "*,yes,yes")
+   (set_attr "sve_type" "fcvtz_<SVE_FULL_F:Vetype>")]
 )
 
 (define_insn "*cond_<optab>_nontrunc<SVE_FULL_F:mode><SVE_FULL_HSDI:mode>_strict"
@@ -9025,7 +9418,8 @@ (define_insn "*cond_<optab>_nontrunc<SVE_FULL_F:mode><SVE_FULL_HSDI:mode>_strict
    fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>
    movprfx\t%0.<SVE_FULL_HSDI:Vetype>, %1/z, %2.<SVE_FULL_HSDI:Vetype>\;fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>
    movprfx\t%0, %3\;fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>"
-  [(set_attr "movprfx" "*,yes,yes")]
+  [(set_attr "movprfx" "*,yes,yes")
+   (set_attr "sve_type" "fcvtz_<SVE_FULL_F:Vetype>")]
 )
 
 ;; Predicated narrowing float-to-integer conversion with merging.
@@ -9059,7 +9453,8 @@ (define_insn "*cond_<optab>_trunc<VNx2DF_ONLY:mode><VNx4SI_ONLY:mode>"
    fcvtz<su>\t%0.<VNx4SI_ONLY:Vetype>, %1/m, %2.<VNx2DF_ONLY:Vetype>
    movprfx\t%0.<VNx2DF_ONLY:Vetype>, %1/z, %2.<VNx2DF_ONLY:Vetype>\;fcvtz<su>\t%0.<VNx4SI_ONLY:Vetype>, %1/m, %2.<VNx2DF_ONLY:Vetype>
    movprfx\t%0, %3\;fcvtz<su>\t%0.<VNx4SI_ONLY:Vetype>, %1/m, %2.<VNx2DF_ONLY:Vetype>"
-  [(set_attr "movprfx" "*,yes,yes")]
+  [(set_attr "movprfx" "*,yes,yes")
+   (set_attr "sve_type" "fcvtz_<VNx2DF_ONLY:Vetype>")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -9134,7 +9529,8 @@ (define_insn "@aarch64_sve_<optab>_nonextend<SVE_FULL_HSDI:mode><SVE_FULL_F:mode
   "@
    <su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
    movprfx\t%0, %2\;<su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "cvtf_<SVE_FULL_HSDI:Vetype>")]
 )
 
 ;; Predicated widening integer-to-float conversion.
@@ -9149,7 +9545,8 @@ (define_insn "@aarch64_sve_<optab>_extend<VNx4SI_ONLY:mode><VNx2DF_ONLY:mode>"
   "@
    <su>cvtf\t%0.<VNx2DF_ONLY:Vetype>, %1/m, %2.<VNx4SI_ONLY:Vetype>
    movprfx\t%0, %2\;<su>cvtf\t%0.<VNx2DF_ONLY:Vetype>, %1/m, %2.<VNx4SI_ONLY:Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "cvtf_<VNx4SI_ONLY:Vetype>")]
 )
 
 ;; Predicated integer-to-float conversion with merging, either to the same
@@ -9193,7 +9590,8 @@ (define_insn_and_rewrite "*cond_<optab>_nonextend<SVE_FULL_HSDI:mode><SVE_FULL_F
   {
     operands[4] = copy_rtx (operands[1]);
   }
-  [(set_attr "movprfx" "*,yes,yes")]
+  [(set_attr "movprfx" "*,yes,yes")
+   (set_attr "sve_type" "cvtf_<SVE_FULL_HSDI:Vetype>")]
 )
 
 (define_insn "*cond_<optab>_nonextend<SVE_FULL_HSDI:mode><SVE_FULL_F:mode>_strict"
@@ -9212,7 +9610,8 @@ (define_insn "*cond_<optab>_nonextend<SVE_FULL_HSDI:mode><SVE_FULL_F:mode>_stric
    <su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
    movprfx\t%0.<SVE_FULL_HSDI:Vetype>, %1/z, %2.<SVE_FULL_HSDI:Vetype>\;<su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
    movprfx\t%0, %3\;<su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>"
-  [(set_attr "movprfx" "*,yes,yes")]
+  [(set_attr "movprfx" "*,yes,yes")
+   (set_attr "sve_type" "cvtf_<SVE_FULL_HSDI:Vetype>")]
 )
 
 ;; Predicated widening integer-to-float conversion with merging.
@@ -9246,7 +9645,8 @@ (define_insn "*cond_<optab>_extend<VNx4SI_ONLY:mode><VNx2DF_ONLY:mode>"
    <su>cvtf\t%0.<VNx2DF_ONLY:Vetype>, %1/m, %2.<VNx4SI_ONLY:Vetype>
    movprfx\t%0.<VNx2DF_ONLY:Vetype>, %1/z, %2.<VNx2DF_ONLY:Vetype>\;<su>cvtf\t%0.<VNx2DF_ONLY:Vetype>, %1/m, %2.<VNx4SI_ONLY:Vetype>
    movprfx\t%0, %3\;<su>cvtf\t%0.<VNx2DF_ONLY:Vetype>, %1/m, %2.<VNx4SI_ONLY:Vetype>"
-  [(set_attr "movprfx" "*,yes,yes")]
+  [(set_attr "movprfx" "*,yes,yes")
+   (set_attr "sve_type" "cvtf_<VNx4SI_ONLY:Vetype>")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -9332,7 +9732,8 @@ (define_insn "@aarch64_sve_<optab>_trunc<SVE_FULL_SDF:mode><SVE_FULL_HSF:mode>"
   "@
    fcvt\t%0.<SVE_FULL_HSF:Vetype>, %1/m, %2.<SVE_FULL_SDF:Vetype>
    movprfx\t%0, %2\;fcvt\t%0.<SVE_FULL_HSF:Vetype>, %1/m, %2.<SVE_FULL_SDF:Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "fcvt_<SVE_FULL_SDF:Vetype>")]
 )
 
 ;; Predicated float-to-float truncation with merging.
@@ -9366,7 +9767,8 @@ (define_insn "*cond_<optab>_trunc<SVE_FULL_SDF:mode><SVE_FULL_HSF:mode>"
    fcvt\t%0.<SVE_FULL_HSF:Vetype>, %1/m, %2.<SVE_FULL_SDF:Vetype>
    movprfx\t%0.<SVE_FULL_SDF:Vetype>, %1/z, %2.<SVE_FULL_SDF:Vetype>\;fcvt\t%0.<SVE_FULL_HSF:Vetype>, %1/m, %2.<SVE_FULL_SDF:Vetype>
    movprfx\t%0, %3\;fcvt\t%0.<SVE_FULL_HSF:Vetype>, %1/m, %2.<SVE_FULL_SDF:Vetype>"
-  [(set_attr "movprfx" "*,yes,yes")]
+  [(set_attr "movprfx" "*,yes,yes")
+   (set_attr "sve_type" "fcvt_<SVE_FULL_SDF:Vetype>")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -9389,7 +9791,8 @@ (define_insn "@aarch64_sve_<optab>_trunc<VNx4SF_ONLY:mode><VNx8BF_ONLY:mode>"
   "@
    bfcvt\t%0.h, %1/m, %2.s
    movprfx\t%0, %2\;bfcvt\t%0.h, %1/m, %2.s"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "bfcvt")]
 )
 
 ;; Predicated BFCVT with merging.
@@ -9423,7 +9826,8 @@ (define_insn "*cond_<optab>_trunc<VNx4SF_ONLY:mode><VNx8BF_ONLY:mode>"
    bfcvt\t%0.h, %1/m, %2.s
    movprfx\t%0.s, %1/z, %2.s\;bfcvt\t%0.h, %1/m, %2.s
    movprfx\t%0, %3\;bfcvt\t%0.h, %1/m, %2.s"
-  [(set_attr "movprfx" "*,yes,yes")]
+  [(set_attr "movprfx" "*,yes,yes")
+   (set_attr "sve_type" "bfcvt")]
 )
 
 ;; Predicated BFCVTNT.  This doesn't give a natural aarch64_pred_*/cond_*
@@ -9441,6 +9845,7 @@ (define_insn "@aarch64_sve_cvtnt<mode>"
 	  UNSPEC_COND_FCVTNT))]
   "TARGET_SVE_BF16"
   "bfcvtnt\t%0.h, %2/m, %3.s"
+  [(set_attr "sve_type" "bfcvt")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -9489,7 +9894,8 @@ (define_insn "@aarch64_sve_<optab>_nontrunc<SVE_FULL_HSF:mode><SVE_FULL_SDF:mode
   "@
    fcvt\t%0.<SVE_FULL_SDF:Vetype>, %1/m, %2.<SVE_FULL_HSF:Vetype>
    movprfx\t%0, %2\;fcvt\t%0.<SVE_FULL_SDF:Vetype>, %1/m, %2.<SVE_FULL_HSF:Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "fcvt_<SVE_FULL_HSF:Vetype>")]
 )
 
 ;; Predicated float-to-float extension with merging.
@@ -9523,7 +9929,8 @@ (define_insn "*cond_<optab>_nontrunc<SVE_FULL_HSF:mode><SVE_FULL_SDF:mode>"
    fcvt\t%0.<SVE_FULL_SDF:Vetype>, %1/m, %2.<SVE_FULL_HSF:Vetype>
    movprfx\t%0.<SVE_FULL_SDF:Vetype>, %1/z, %2.<SVE_FULL_SDF:Vetype>\;fcvt\t%0.<SVE_FULL_SDF:Vetype>, %1/m, %2.<SVE_FULL_HSF:Vetype>
    movprfx\t%0, %3\;fcvt\t%0.<SVE_FULL_SDF:Vetype>, %1/m, %2.<SVE_FULL_HSF:Vetype>"
-  [(set_attr "movprfx" "*,yes,yes")]
+  [(set_attr "movprfx" "*,yes,yes")
+   (set_attr "sve_type" "fcvt_<SVE_FULL_HSF:Vetype>")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -9543,6 +9950,7 @@ (define_insn "vec_pack_trunc_<Vwide>"
 	  UNSPEC_PACK))]
   "TARGET_SVE"
   "uzp1\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>"
+  [(set_attr "sve_type" "permute_p")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -9576,6 +9984,7 @@ (define_insn "@aarch64_sve_punpk<perm_hilo>_<mode>"
 			UNPACK_UNSIGNED))]
   "TARGET_SVE"
   "punpk<perm_hilo>\t%0.h, %1.b"
+  [(set_attr "sve_type" "unpk_p")]
 )
 
 ;; =========================================================================
@@ -9606,6 +10015,7 @@ (define_insn "@aarch64_brk<brk_op>"
   "@
    brk<brk_op>\t%0.b, %1/z, %2.b
    brk<brk_op>\t%0.b, %1/m, %2.b"
+  [(set_attr "sve_type" "brkab")]
 )
 
 ;; Same, but also producing a flags result.
@@ -9629,6 +10039,7 @@ (define_insn "*aarch64_brk<brk_op>_cc"
 	  SVE_BRK_UNARY))]
   "TARGET_SVE"
   "brk<brk_op>s\t%0.b, %1/z, %2.b"
+  [(set_attr "sve_type" "brkab")]
 )
 
 ;; Same, but with only the flags result being interesting.
@@ -9647,6 +10058,7 @@ (define_insn "*aarch64_brk<brk_op>_ptest"
    (clobber (match_scratch:VNx16BI 0 "=Upa"))]
   "TARGET_SVE"
   "brk<brk_op>s\t%0.b, %1/z, %2.b"
+  [(set_attr "sve_type" "brkabs")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -9671,6 +10083,7 @@ (define_insn "@aarch64_brk<brk_op>"
 	  SVE_BRK_BINARY))]
   "TARGET_SVE"
   "brk<brk_op>\t%0.b, %1/z, %2.b, %<brk_reg_opno>.b"
+  [(set_attr "sve_type" "brknpab")]
 )
 
 ;; BRKN, producing both a predicate and a flags result.  Unlike other
@@ -9748,6 +10161,7 @@ (define_insn "*aarch64_brk<brk_op>_cc"
 	  SVE_BRKP))]
   "TARGET_SVE"
   "brk<brk_op>s\t%0.b, %1/z, %2.b, %3.b"
+  [(set_attr "sve_type" "brknpabs")]
 )
 
 ;; Same, but with only the flags result being interesting.
@@ -9766,6 +10180,7 @@ (define_insn "*aarch64_brk<brk_op>_ptest"
    (clobber (match_scratch:VNx16BI 0 "=Upa"))]
   "TARGET_SVE"
   "brk<brk_op>s\t%0.b, %1/z, %2.b, %3.b"
+[(set_attr "sve_type" "brknpabs")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -9786,6 +10201,7 @@ (define_insn "@aarch64_sve_<sve_pred_op><mode>"
    (clobber (reg:CC_NZC CC_REGNUM))]
   "TARGET_SVE && <max_elem_bits> >= <elem_bits>"
   "<sve_pred_op>\t%0.<Vetype>, %1, %0.<Vetype>"
+  [(set_attr "sve_type" "piter")]
 )
 
 ;; Same, but also producing a flags result.
@@ -9816,6 +10232,7 @@ (define_insn_and_rewrite "*aarch64_sve_<sve_pred_op><mode>_cc"
     operands[4] = operands[2];
     operands[5] = operands[3];
   }
+  [(set_attr "sve_type" "piter")]
 )
 
 ;; Same, but with only the flags result being interesting.
@@ -9841,6 +10258,7 @@ (define_insn_and_rewrite "*aarch64_sve_<sve_pred_op><mode>_ptest"
     operands[4] = operands[2];
     operands[5] = operands[3];
   }
+  [(set_attr "sve_type" "piter")]
 )
 
 ;; =========================================================================
@@ -9873,6 +10291,7 @@ (define_insn "aarch64_sve_cnt_pat"
   {
     return aarch64_output_sve_cnt_pat_immediate ("cnt", "%x0", operands + 1);
   }
+  [(set_attr "sve_type" "pred_count_scalar")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -9899,6 +10318,7 @@ (define_insn "@aarch64_sve_<inc_dec><mode>_pat"
     return aarch64_output_sve_cnt_pat_immediate ("<inc_dec>", "%x0",
 						 operands + 2);
   }
+  [(set_attr "sve_type" "pred_count_scalar")]
 )
 
 ;; Increment an SImode register by the number of elements in an svpattern
@@ -9915,6 +10335,7 @@ (define_insn "*aarch64_sve_incsi_pat"
   {
     return aarch64_output_sve_cnt_pat_immediate ("inc", "%x0", operands + 2);
   }
+  [(set_attr "sve_type" "pred_count_scalar")]
 )
 
 ;; Increment an SImode register by the number of elements in an svpattern
@@ -9936,6 +10357,7 @@ (define_insn "@aarch64_sve_<inc_dec><mode>_pat"
     return aarch64_output_sve_cnt_pat_immediate ("<inc_dec>", registers,
 						 operands + 2);
   }
+  [(set_attr "sve_type" "pred_count_scalar")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -9966,7 +10388,8 @@ (define_insn "@aarch64_sve_<inc_dec><mode>_pat"
     return aarch64_output_sve_cnt_pat_immediate ("<inc_dec>", "%0.<Vetype>",
 						 operands + 2);
   }
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "pred_count_vec")]
 )
 
 ;; Increment a vector of SIs by the number of elements in an svpattern.
@@ -9987,7 +10410,8 @@ (define_insn "@aarch64_sve_<inc_dec><mode>_pat"
     return aarch64_output_sve_cnt_pat_immediate ("<inc_dec>", "%0.<Vetype>",
 						 operands + 2);
   }
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "pred_count_vec")]
 )
 
 ;; Increment a vector of HIs by the number of elements in an svpattern.
@@ -10022,7 +10446,8 @@ (define_insn "*aarch64_sve_<inc_dec><mode>_pat"
     return aarch64_output_sve_cnt_pat_immediate ("<inc_dec>", "%0.<Vetype>",
 						 operands + 2);
   }
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "pred_count_vec")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -10049,6 +10474,7 @@ (define_insn "@aarch64_sve_<inc_dec><mode>_pat"
     return aarch64_output_sve_cnt_pat_immediate ("<inc_dec>", "%x0",
 						 operands + 2);
   }
+  [(set_attr "sve_type" "pred_count_scalar")]
 )
 
 ;; Decrement an SImode register by the number of elements in an svpattern
@@ -10065,6 +10491,7 @@ (define_insn "*aarch64_sve_decsi_pat"
   {
     return aarch64_output_sve_cnt_pat_immediate ("dec", "%x0", operands + 2);
   }
+  [(set_attr "sve_type" "pred_count_scalar")]
 )
 
 ;; Decrement an SImode register by the number of elements in an svpattern
@@ -10116,7 +10543,8 @@ (define_insn "@aarch64_sve_<inc_dec><mode>_pat"
     return aarch64_output_sve_cnt_pat_immediate ("<inc_dec>", "%0.<Vetype>",
 						 operands + 2);
   }
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "pred_count_vec")]
 )
 
 ;; Decrement a vector of SIs by the number of elements in an svpattern.
@@ -10137,7 +10565,8 @@ (define_insn "@aarch64_sve_<inc_dec><mode>_pat"
     return aarch64_output_sve_cnt_pat_immediate ("<inc_dec>", "%0.<Vetype>",
 						 operands + 2);
   }
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "pred_count_vec")]
 )
 
 ;; Decrement a vector of HIs by the number of elements in an svpattern.
@@ -10172,7 +10601,8 @@ (define_insn "*aarch64_sve_<inc_dec><mode>_pat"
     return aarch64_output_sve_cnt_pat_immediate ("<inc_dec>", "%0.<Vetype>",
 						 operands + 2);
   }
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "pred_count_vec")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -10192,7 +10622,9 @@ (define_insn "@aarch64_pred_cntp<mode>"
 		      (match_operand:PRED_ALL 3 "register_operand" "Upa")]
 		     UNSPEC_CNTP)))]
   "TARGET_SVE"
-  "cntp\t%x0, %1, %3.<Vetype>")
+  "cntp\t%x0, %1, %3.<Vetype>"
+  [(set_attr "sve_type" "pred_count_active_scalar")]
+)
 
 ;; -------------------------------------------------------------------------
 ;; ---- [INT] Increment by the number of elements in a predicate (scalar)
@@ -10235,6 +10667,7 @@ (define_insn_and_rewrite "*aarch64_sve_<inc_dec><DI_ONLY:mode><PRED_ALL:mode>_cn
   {
     operands[3] = CONSTM1_RTX (<PRED_ALL:MODE>mode);
   }
+  [(set_attr "sve_type" "pred_count_active_scalar")]
 )
 
 ;; Increment an SImode register by the number of set bits in a predicate
@@ -10254,6 +10687,7 @@ (define_insn_and_rewrite "*aarch64_incsi<mode>_cntp"
   {
     operands[3] = CONSTM1_RTX (<MODE>mode);
   }
+  [(set_attr "sve_type" "pred_count_active_scalar")]
 )
 
 ;; Increment an SImode register by the number of set bits in a predicate
@@ -10295,6 +10729,7 @@ (define_insn_and_rewrite "*aarch64_sve_<inc_dec><SI_ONLY:mode><PRED_ALL:mode>_cn
   {
     operands[3] = CONSTM1_RTX (<PRED_ALL:MODE>mode);
   }
+  [(set_attr "sve_type" "pred_count_active_scalar")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -10344,7 +10779,8 @@ (define_insn_and_rewrite "*aarch64_sve_<inc_dec><mode>_cntp"
   {
     operands[3] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "pred_count_active_vec")]
 )
 
 ;; Increment a vector of SIs by the number of set bits in a predicate.
@@ -10383,7 +10819,8 @@ (define_insn_and_rewrite "*aarch64_sve_<inc_dec><mode>_cntp"
   {
     operands[3] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "pred_count_active_vec")]
 )
 
 ;; Increment a vector of HIs by the number of set bits in a predicate.
@@ -10424,7 +10861,8 @@ (define_insn_and_rewrite "*aarch64_sve_<inc_dec><mode>_cntp"
   {
     operands[4] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "pred_count_active_vec")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -10468,6 +10906,7 @@ (define_insn_and_rewrite "*aarch64_sve_<inc_dec><DI_ONLY:mode><PRED_ALL:mode>_cn
   {
     operands[3] = CONSTM1_RTX (<PRED_ALL:MODE>mode);
   }
+  [(set_attr "sve_type" "pred_count_active_scalar")]
 )
 
 ;; Decrement an SImode register by the number of set bits in a predicate
@@ -10487,6 +10926,7 @@ (define_insn_and_rewrite "*aarch64_decsi<mode>_cntp"
   {
     operands[3] = CONSTM1_RTX (<MODE>mode);
   }
+  [(set_attr "sve_type" "pred_count_active_scalar")]
 )
 
 ;; Decrement an SImode register by the number of set bits in a predicate
@@ -10528,6 +10968,7 @@ (define_insn_and_rewrite "*aarch64_sve_<inc_dec><SI_ONLY:mode><PRED_ALL:mode>_cn
   {
     operands[3] = CONSTM1_RTX (<PRED_ALL:MODE>mode);
   }
+  [(set_attr "sve_type" "pred_count_active_scalar")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -10577,7 +11018,8 @@ (define_insn_and_rewrite "*aarch64_sve_<inc_dec><mode>_cntp"
   {
     operands[3] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "pred_count_active_vec")]
 )
 
 ;; Decrement a vector of SIs by the number of set bits in a predicate.
@@ -10616,7 +11058,8 @@ (define_insn_and_rewrite "*aarch64_sve_<inc_dec><mode>_cntp"
   {
     operands[3] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "pred_count_active_vec")]
 )
 
 ;; Decrement a vector of HIs by the number of set bits in a predicate.
@@ -10657,5 +11100,6 @@ (define_insn_and_rewrite "*aarch64_sve_<inc_dec><mode>_cntp"
   {
     operands[4] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "pred_count_active_vec")]
 )
diff --git a/gcc/config/aarch64/aarch64-sve2.md b/gcc/config/aarch64/aarch64-sve2.md
index 5df38e3f951c30b51796395c667797dce03750bd..68e297f8dcbe0b9641ae2511ce16e6f54c6544e8 100644
--- a/gcc/config/aarch64/aarch64-sve2.md
+++ b/gcc/config/aarch64/aarch64-sve2.md
@@ -113,6 +113,8 @@ (define_insn "@aarch64_gather_ldnt<mode>"
   "@
    ldnt1<Vesize>\t%0.<Vetype>, %1/z, [%3.<Vetype>]
    ldnt1<Vesize>\t%0.<Vetype>, %1/z, [%3.<Vetype>, %2]"
+  [(set_attr "sve_type" "load_gather_<Vetype>_scalar")
+   (set_attr "memop_nontemporal" "yes")]
 )
 
 ;; Extending loads.
@@ -137,6 +139,8 @@ (define_insn_and_rewrite "@aarch64_gather_ldnt_<ANY_EXTEND:optab><SVE_FULL_SDI:m
   {
     operands[4] = CONSTM1_RTX (<SVE_FULL_SDI:VPRED>mode);
   }
+  [(set_attr "sve_type" "load_gather_<SVE_FULL_SDI:Vetype>_scalar")
+   (set_attr "memop_nontemporal" "yes")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -163,6 +167,8 @@ (define_insn "@aarch64_scatter_stnt<mode>"
   "@
    stnt1<Vesize>\t%3.<Vetype>, %0, [%2.<Vetype>]
    stnt1<Vesize>\t%3.<Vetype>, %0, [%2.<Vetype>, %1]"
+  [(set_attr "sve_type" "store_scatter_<Vetype>_scalar")
+   (set_attr "memop_nontemporal" "yes")]
 )
 
 ;; Truncating stores.
@@ -180,6 +186,8 @@ (define_insn "@aarch64_scatter_stnt_<SVE_FULL_SDI:mode><SVE_PARTIAL_I:mode>"
   "@
    stnt1<SVE_PARTIAL_I:Vesize>\t%3.<SVE_FULL_SDI:Vetype>, %0, [%2.<SVE_FULL_SDI:Vetype>]
    stnt1<SVE_PARTIAL_I:Vesize>\t%3.<SVE_FULL_SDI:Vetype>, %0, [%2.<SVE_FULL_SDI:Vetype>, %1]"
+  [(set_attr "sve_type" "store_scatter_<SVE_FULL_SDI:Vetype>_scalar")
+   (set_attr "memop_nontemporal" "yes")]
 )
 
 ;; =========================================================================
@@ -203,6 +211,7 @@ (define_insn "@aarch64_mul_lane_<mode>"
 	  (match_operand:SVE_FULL_HSDI 1 "register_operand" "w")))]
   "TARGET_SVE2"
   "mul\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>[%3]"
+  [(set_attr "sve_type" "mul_accum_<Vetype>")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -322,7 +331,8 @@ (define_insn "@aarch64_sve_suqadd<mode>_const"
   "@
    sqadd\t%0.<Vetype>, %0.<Vetype>, #%D2
    movprfx\t%0, %1\;sqadd\t%0.<Vetype>, %0.<Vetype>, #%D2"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "int_arith_complex")]
 )
 
 ;; General predicated binary arithmetic.  All operations handled here
@@ -341,7 +351,8 @@ (define_insn "@aarch64_pred_<sve_int_op><mode>"
    <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
    <sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
    movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,*,yes")]
+  [(set_attr "movprfx" "*,*,yes")
+   (set_attr "sve_type" "<sve_sched>")]
 )
 
 ;; Predicated binary arithmetic with merging.
@@ -386,7 +397,8 @@ (define_insn_and_rewrite "*cond_<sve_int_op><mode>_2"
   {
     operands[4] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "<sve_sched>")]
 )
 
 ;; Predicated binary arithmetic, merging with the second input.
@@ -411,7 +423,8 @@ (define_insn_and_rewrite "*cond_<sve_int_op><mode>_3"
   {
     operands[4] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "<sve_sched>")]
 )
 
 ;; Predicated binary operations, merging with an independent value.
@@ -452,7 +465,8 @@ (define_insn_and_rewrite "*cond_<sve_int_op><mode>_any"
     else
       FAIL;
   }
-  [(set_attr "movprfx" "yes")]
+  [(set_attr "movprfx" "yes")
+   (set_attr "sve_type" "<sve_sched>")]
 )
 
 ;; Predicated binary operations with no reverse form, merging with zero.
@@ -480,7 +494,8 @@ (define_insn_and_rewrite "*cond_<sve_int_op><mode>_z"
   {
     operands[5] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "yes")]
+  [(set_attr "movprfx" "yes")
+   (set_attr "sve_type" "<sve_sched>")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -499,6 +514,7 @@ (define_insn "@aarch64_sve_<sve_int_op><mode>"
 	  SVE2_INT_BINARY))]
   "TARGET_SVE2"
   "<sve_int_op>\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>"
+  [(set_attr "sve_type" "<sve_sched>")]
 )
 
 (define_insn "@aarch64_sve_<sve_int_op>_lane_<mode>"
@@ -512,6 +528,7 @@ (define_insn "@aarch64_sve_<sve_int_op>_lane_<mode>"
 	  SVE2_INT_BINARY_LANE))]
   "TARGET_SVE2"
   "<sve_int_op>\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>[%3]"
+  [(set_attr "sve_type" "<sve_sched>")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -541,7 +558,8 @@ (define_insn "@aarch64_pred_<sve_int_op><mode>"
    <sve_int_op>r\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
    movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
    movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,*,*,yes,yes")]
+  [(set_attr "movprfx" "*,*,*,yes,yes")
+   (set_attr "sve_type" "<sve_sched>")]
 )
 
 ;; Predicated left shifts with merging.
@@ -588,7 +606,8 @@ (define_insn_and_rewrite "*cond_<sve_int_op><mode>_2"
   {
     operands[4] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,*,yes,yes")]
+  [(set_attr "movprfx" "*,*,yes,yes")
+   (set_attr "sve_type" "<sve_sched>")]
 )
 
 ;; Predicated left shifts, merging with the second input.
@@ -613,7 +632,8 @@ (define_insn_and_rewrite "*cond_<sve_int_op><mode>_3"
   {
     operands[4] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "<sve_sched>")]
 )
 
 ;; Predicated left shifts, merging with an independent value.
@@ -658,7 +678,8 @@ (define_insn_and_rewrite "*cond_<sve_int_op><mode>_any"
     else
       FAIL;
   }
-  [(set_attr "movprfx" "yes")]
+  [(set_attr "movprfx" "yes")
+   (set_attr "sve_type" "<sve_sched>")]
 )
 
 ;; =========================================================================
@@ -690,7 +711,8 @@ (define_insn "@aarch64_sve_<sve_int_op><mode>"
   "@
    <sve_int_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>
    movprfx\t%0, %1\;<sve_int_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "<sve_sched>")]
 )
 
 (define_insn "@aarch64_sve_<sve_int_op>_lane_<mode>"
@@ -707,7 +729,8 @@ (define_insn "@aarch64_sve_<sve_int_op>_lane_<mode>"
   "@
    <sve_int_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>[%4]
    movprfx\t%0, %1\;<sve_int_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>[%4]"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "<sve_sched>")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -732,7 +755,8 @@ (define_insn "@aarch64_sve_add_mul_lane_<mode>"
   "@
    mla\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>[%4]
    movprfx\t%0, %1\;mla\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>[%4]"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "mul_accum_<Vetype>")]
 )
 
 (define_insn "@aarch64_sve_sub_mul_lane_<mode>"
@@ -749,7 +773,8 @@ (define_insn "@aarch64_sve_sub_mul_lane_<mode>"
   "@
    mls\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>[%4]
    movprfx\t%0, %1\;mls\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>[%4]"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "mul_accum_<Vetype>")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -770,7 +795,8 @@ (define_insn "@aarch64_sve2_xar<mode>"
   "@
   xar\t%0.<Vetype>, %0.<Vetype>, %2.<Vetype>, #%3
   movprfx\t%0, %1\;xar\t%0.<Vetype>, %0.<Vetype>, %2.<Vetype>, #%3"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "xar")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -822,7 +848,8 @@ (define_insn_and_rewrite "*aarch64_sve2_bcax<mode>"
   {
     operands[4] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "bcax")]
 )
 
 ;; Unpredicated 3-way exclusive OR.
@@ -839,7 +866,8 @@ (define_insn "@aarch64_sve2_eor3<mode>"
   eor3\t%0.d, %0.d, %1.d, %3.d
   eor3\t%0.d, %0.d, %1.d, %2.d
   movprfx\t%0, %1\;eor3\t%0.d, %0.d, %2.d, %3.d"
-  [(set_attr "movprfx" "*,*,*,yes")]
+  [(set_attr "movprfx" "*,*,*,yes")
+   (set_attr "sve_type" "eor3")]
 )
 
 ;; Use NBSL for vector NOR.
@@ -861,7 +889,8 @@ (define_insn_and_rewrite "*aarch64_sve2_nor<mode>"
   {
     operands[3] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "bsl")]
 )
 
 ;; Use NBSL for vector NAND.
@@ -883,7 +912,8 @@ (define_insn_and_rewrite "*aarch64_sve2_nand<mode>"
   {
     operands[3] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "bsl")]
 )
 
 ;; Unpredicated bitwise select.
@@ -913,7 +943,8 @@ (define_insn "*aarch64_sve2_bsl<mode>"
   "@
   bsl\t%0.d, %0.d, %<bsl_dup>.d, %3.d
   movprfx\t%0, %<bsl_mov>\;bsl\t%0.d, %0.d, %<bsl_dup>.d, %3.d"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "bsl")]
 )
 
 ;; Unpredicated bitwise inverted select.
@@ -958,7 +989,8 @@ (define_insn_and_rewrite "*aarch64_sve2_nbsl<mode>"
   {
     operands[4] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "bsl")]
 )
 
 ;; Unpredicated bitwise select with inverted first operand.
@@ -1003,7 +1035,8 @@ (define_insn_and_rewrite "*aarch64_sve2_bsl1n<mode>"
   {
     operands[4] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "bsl")]
 )
 
 ;; Unpredicated bitwise select with inverted second operand.
@@ -1050,7 +1083,8 @@ (define_insn_and_rewrite "*aarch64_sve2_bsl2n<mode>"
   {
     operands[4] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "bsl")]
 )
 
 ;; Unpredicated bitwise select with inverted second operand, alternative form.
@@ -1077,7 +1111,8 @@ (define_insn_and_rewrite "*aarch64_sve2_bsl2n<mode>"
   {
     operands[4] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "bsl")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -1127,7 +1162,8 @@ (define_insn_and_rewrite "*aarch64_sve2_sra<mode>"
   {
     operands[4] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "shift_accum")]
 )
 
 ;; SRSRA and URSRA.
@@ -1143,7 +1179,8 @@ (define_insn "@aarch64_sve_add_<sve_int_op><mode>"
   "@
    <sur>sra\t%0.<Vetype>, %2.<Vetype>, #%3
    movprfx\t%0, %1\;<sur>sra\t%0.<Vetype>, %2.<Vetype>, #%3"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "shift_accum")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -1164,6 +1201,7 @@ (define_insn "@aarch64_sve_<sve_int_op><mode>"
 	  SVE2_INT_SHIFT_INSERT))]
   "TARGET_SVE2"
   "<sve_int_op>\t%0.<Vetype>, %2.<Vetype>, #%3"
+  [(set_attr "sve_type" "shift_insert")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -1221,7 +1259,8 @@ (define_insn "*aarch64_sve2_<su>aba<mode>"
   "@
    <su>aba\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>
    movprfx\t%0, %1\;<su>aba\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "abd_accum")]
 )
 
 ;; =========================================================================
@@ -1250,6 +1289,7 @@ (define_insn "@aarch64_sve_<sve_int_op><mode>"
 	  SVE2_INT_BINARY_WIDE))]
   "TARGET_SVE2"
   "<sve_int_op>\t%0.<Vetype>, %1.<Vetype>, %2.<Ventype>"
+  [(set_attr "sve_type" "int_arith_long")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -1287,6 +1327,7 @@ (define_insn "@aarch64_sve_<sve_int_op><mode>"
 	  SVE2_INT_BINARY_LONG))]
   "TARGET_SVE2"
   "<sve_int_op>\t%0.<Vetype>, %1.<Ventype>, %2.<Ventype>"
+  [(set_attr "sve_type" "<sve_sched>")]
 )
 
 (define_insn "@aarch64_sve_<sve_int_op>_lane_<mode>"
@@ -1300,6 +1341,7 @@ (define_insn "@aarch64_sve_<sve_int_op>_lane_<mode>"
 	  SVE2_INT_BINARY_LONG_LANE))]
   "TARGET_SVE2"
   "<sve_int_op>\t%0.<Vetype>, %1.<Ventype>, %2.<Ventype>[%3]"
+  [(set_attr "sve_type" "<sve_sched>")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -1321,6 +1363,7 @@ (define_insn "@aarch64_sve_<sve_int_op><mode>"
 	  SVE2_INT_SHIFT_IMM_LONG))]
   "TARGET_SVE2"
   "<sve_int_op>\t%0.<Vetype>, %1.<Ventype>, #%2"
+  [(set_attr "sve_type" "shift_long")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -1360,7 +1403,8 @@ (define_insn "@aarch64_sve_add_<sve_int_op><mode>"
   "@
    <sve_int_add_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>
    movprfx\t%0, %1\;<sve_int_add_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "<sve_sched>_accum")]
 )
 
 ;; Non-saturating MLA operations with lane select.
@@ -1379,7 +1423,8 @@ (define_insn "@aarch64_sve_add_<sve_int_op>_lane_<mode>"
   "@
    <sve_int_add_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>[%4]
    movprfx\t%0, %1\;<sve_int_add_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>[%4]"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "<sve_sched>_accum")]
 )
 
 ;; Saturating MLA operations.
@@ -1395,7 +1440,8 @@ (define_insn "@aarch64_sve_qadd_<sve_int_op><mode>"
   "@
    <sve_int_qadd_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>
    movprfx\t%0, %1\;<sve_int_qadd_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "<sve_sched>_accum")]
 )
 
 ;; Saturating MLA operations with lane select.
@@ -1414,7 +1460,8 @@ (define_insn "@aarch64_sve_qadd_<sve_int_op>_lane_<mode>"
   "@
    <sve_int_qadd_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>[%4]
    movprfx\t%0, %1\;<sve_int_qadd_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>[%4]"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "<sve_sched>_accum")]
 )
 
 ;; Non-saturating MLS operations.
@@ -1430,7 +1477,8 @@ (define_insn "@aarch64_sve_sub_<sve_int_op><mode>"
   "@
    <sve_int_sub_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>
    movprfx\t%0, %1\;<sve_int_sub_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "<sve_sched>_accum")]
 )
 
 ;; Non-saturating MLS operations with lane select.
@@ -1449,7 +1497,8 @@ (define_insn "@aarch64_sve_sub_<sve_int_op>_lane_<mode>"
   "@
    <sve_int_sub_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>[%4]
    movprfx\t%0, %1\;<sve_int_sub_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>[%4]"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "<sve_sched>_accum")]
 )
 
 ;; Saturating MLS operations.
@@ -1465,7 +1514,8 @@ (define_insn "@aarch64_sve_qsub_<sve_int_op><mode>"
   "@
    <sve_int_qsub_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>
    movprfx\t%0, %1\;<sve_int_qsub_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "<sve_sched>_accum")]
 )
 
 ;; Saturating MLS operations with lane select.
@@ -1484,7 +1534,8 @@ (define_insn "@aarch64_sve_qsub_<sve_int_op>_lane_<mode>"
   "@
    <sve_int_qsub_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>[%4]
    movprfx\t%0, %1\;<sve_int_qsub_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>[%4]"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "<sve_sched>_accum")]
 )
 ;; -------------------------------------------------------------------------
 ;; ---- [FP] Long multiplication with accumulation
@@ -1507,7 +1558,8 @@ (define_insn "@aarch64_sve_<sve_fp_op><mode>"
   "@
    <sve_fp_op>\t%0.<Vetype>, %1.<Ventype>, %2.<Ventype>
    movprfx\t%0, %3\;<sve_fp_op>\t%0.<Vetype>, %1.<Ventype>, %2.<Ventype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "<sve_sched>")]
 )
 
 (define_insn "@aarch64_<sve_fp_op>_lane_<mode>"
@@ -1524,7 +1576,8 @@ (define_insn "@aarch64_<sve_fp_op>_lane_<mode>"
   "@
    <sve_fp_op>\t%0.<Vetype>, %1.<Ventype>, %2.<Ventype>[%3]
    movprfx\t%0, %4\;<sve_fp_op>\t%0.<Vetype>, %1.<Ventype>, %2.<Ventype>[%3]"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "<sve_sched>")]
 )
 
 ;; =========================================================================
@@ -1550,6 +1603,7 @@ (define_insn "@aarch64_sve_<sve_int_op><mode>"
 	  SVE2_INT_UNARY_NARROWB))]
   "TARGET_SVE2"
   "<sve_int_op>\t%0.<Ventype>, %1.<Vetype>"
+  [(set_attr "sve_type" "usqxtun")]
 )
 
 ;; These instructions do not take MOVPRFX.
@@ -1561,6 +1615,7 @@ (define_insn "@aarch64_sve_<sve_int_op><mode>"
 	  SVE2_INT_UNARY_NARROWT))]
   "TARGET_SVE2"
   "<sve_int_op>\t%0.<Ventype>, %2.<Vetype>"
+  [(set_attr "sve_type" "usqxtun")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -1585,6 +1640,7 @@ (define_insn "@aarch64_sve_<sve_int_op><mode>"
 	  SVE2_INT_BINARY_NARROWB))]
   "TARGET_SVE2"
   "<sve_int_op>\t%0.<Ventype>, %1.<Vetype>, %2.<Vetype>"
+  [(set_attr "sve_type" "int_arith_complex")]
 )
 
 ;; These instructions do not take MOVPRFX.
@@ -1597,6 +1653,7 @@ (define_insn "@aarch64_sve_<sve_int_op><mode>"
 	  SVE2_INT_BINARY_NARROWT))]
   "TARGET_SVE2"
   "<sve_int_op>\t%0.<Ventype>, %2.<Vetype>, %3.<Vetype>"
+  [(set_attr "sve_type" "int_arith_complex")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -1630,6 +1687,7 @@ (define_insn "@aarch64_sve_<sve_int_op><mode>"
 	  SVE2_INT_SHIFT_IMM_NARROWB))]
   "TARGET_SVE2"
   "<sve_int_op>\t%0.<Ventype>, %1.<Vetype>, #%2"
+  [(set_attr "sve_type" "int_shift_complex")]
 )
 
 ;; The immediate range is enforced before generating the instruction.
@@ -1643,6 +1701,7 @@ (define_insn "@aarch64_sve_<sve_int_op><mode>"
 	  SVE2_INT_SHIFT_IMM_NARROWT))]
   "TARGET_SVE2"
   "<sve_int_op>\t%0.<Ventype>, %2.<Vetype>, #%3"
+  [(set_attr "sve_type" "int_shift_complex")]
 )
 
 ;; =========================================================================
@@ -1671,7 +1730,8 @@ (define_insn "@aarch64_pred_<sve_int_op><mode>"
   "@
    <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
    movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "<sve_sched>")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -1696,7 +1756,8 @@ (define_insn "@aarch64_pred_<sve_fp_op><mode>"
   "@
    <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
    movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "<sve_sched>")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -1748,7 +1809,8 @@ (define_insn_and_rewrite "*cond_<sve_int_op><mode>_2"
   {
     operands[4] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "adalp")]
 )
 
 ;; Predicated pairwise absolute difference and accumulate, merging with zero.
@@ -1771,7 +1833,8 @@ (define_insn_and_rewrite "*cond_<sve_int_op><mode>_z"
   {
     operands[5] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "yes")]
+  [(set_attr "movprfx" "yes")
+   (set_attr "sve_type" "adalp")]
 )
 
 ;; =========================================================================
@@ -1796,7 +1859,8 @@ (define_insn "@aarch64_sve_<optab><mode>"
   "@
    <sve_int_op>\t%0.<Vetype>, %0.<Vetype>, %2.<Vetype>, #<rot>
    movprfx\t%0, %1\;<sve_int_op>\t%0.<Vetype>, %0.<Vetype>, %2.<Vetype>, #<rot>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "cadd")]
 )
 
 ;; unpredicated optab pattern for auto-vectorizer
@@ -1828,7 +1892,8 @@ (define_insn "@aarch64_sve_<optab><mode>"
   "@
    <sve_int_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>, #<rot>
    movprfx\t%0, %1\;<sve_int_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>, #<rot>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "<sve_sched>")]
 )
 
 (define_insn "@aarch64_<optab>_lane_<mode>"
@@ -1845,7 +1910,8 @@ (define_insn "@aarch64_<optab>_lane_<mode>"
   "@
    <sve_int_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>[%4], #<rot>
    movprfx\t%0, %1\;<sve_int_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>[%4], #<rot>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "<sve_sched>")]
 )
 
 ;; unpredicated optab pattern for auto-vectorizer
@@ -1908,7 +1974,8 @@ (define_insn "@aarch64_sve_<optab><mode>"
   "@
    <sve_int_op>\t%0.<Vetype>, %2.<Vetype_fourth>, %3.<Vetype_fourth>, #<rot>
    movprfx\t%0, %1\;<sve_int_op>\t%0.<Vetype>, %2.<Vetype_fourth>, %3.<Vetype_fourth>, #<rot>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "cdot_<Vetype_fourth>")]
 )
 
 (define_insn "@aarch64_<optab>_lane_<mode>"
@@ -1925,7 +1992,8 @@ (define_insn "@aarch64_<optab>_lane_<mode>"
   "@
    <sve_int_op>\t%0.<Vetype>, %2.<Vetype_fourth>, %3.<Vetype_fourth>[%4], #<rot>
    movprfx\t%0, %1\;<sve_int_op>\t%0.<Vetype>, %2.<Vetype_fourth>, %3.<Vetype_fourth>[%4], #<rot>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "cdot_<Vetype_fourth>")]
 )
 
 ;; =========================================================================
@@ -1949,6 +2017,7 @@ (define_insn "@aarch64_pred_<sve_fp_op><mode>"
 	  SVE2_COND_FP_UNARY_LONG))]
   "TARGET_SVE2"
   "<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Ventype>"
+  [(set_attr "sve_type" "fcvt_<Ventype>")]
 )
 
 ;; Predicated convert long top with merging.
@@ -1984,6 +2053,7 @@ (define_insn_and_rewrite "*cond_<sve_fp_op><mode>_relaxed"
   {
     operands[4] = copy_rtx (operands[1]);
   }
+  [(set_attr "sve_type" "fcvt_<Ventype>")]
 )
 
 (define_insn "*cond_<sve_fp_op><mode>_strict"
@@ -1999,6 +2069,7 @@ (define_insn "*cond_<sve_fp_op><mode>_strict"
 	  UNSPEC_SEL))]
   "TARGET_SVE2"
   "<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Ventype>"
+  [(set_attr "sve_type" "fcvt_<Ventype>")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -2025,6 +2096,7 @@ (define_insn "@aarch64_sve_cvtnt<mode>"
 	  UNSPEC_COND_FCVTNT))]
   "TARGET_SVE2"
   "fcvtnt\t%0.<Vetype>, %2/m, %3.<Vewtype>"
+  [(set_attr "sve_type" "fcvt_<Vewtype>")]
 )
 
 ;; Predicated FCVTX (equivalent to what would be FCVTXNB, except that
@@ -2040,7 +2112,8 @@ (define_insn "@aarch64_pred_<sve_fp_op><mode>"
   "@
    <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vewtype>
    movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vewtype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "fcvtx")]
 )
 
 ;; Predicated FCVTX with merging.
@@ -2078,7 +2151,8 @@ (define_insn_and_rewrite "*cond_<sve_fp_op><mode>_any_relaxed"
   {
     operands[4] = copy_rtx (operands[1]);
   }
-  [(set_attr "movprfx" "*,yes,yes")]
+  [(set_attr "movprfx" "*,yes,yes")
+   (set_attr "sve_type" "fcvtx")]
 )
 
 (define_insn "*cond_<sve_fp_op><mode>_any_strict"
@@ -2097,7 +2171,8 @@ (define_insn "*cond_<sve_fp_op><mode>_any_strict"
    <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vewtype>
    movprfx\t%0.<Vewtype>, %1/z, %2.<Vewtype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vewtype>
    movprfx\t%0, %3\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vewtype>"
-  [(set_attr "movprfx" "*,yes,yes")]
+  [(set_attr "movprfx" "*,yes,yes")
+   (set_attr "sve_type" "fcvtx")]
 )
 
 ;; Predicated FCVTXNT.  This doesn't give a natural aarch64_pred_*/cond_*
@@ -2115,6 +2190,7 @@ (define_insn "@aarch64_sve2_cvtxnt<mode>"
 	  UNSPEC_COND_FCVTXNT))]
   "TARGET_SVE2"
   "fcvtxnt\t%0.<Ventype>, %2/m, %3.<Vetype>"
+  [(set_attr "sve_type" "fcvtx")]
 )
 
 ;; =========================================================================
@@ -2142,7 +2218,8 @@ (define_insn "@aarch64_pred_<sve_int_op><mode>"
   "@
    <sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
    movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "urecpe")]
 )
 
 ;; Predicated integer unary operations with merging.
@@ -2185,7 +2262,8 @@ (define_insn_and_rewrite "*cond_<sve_int_op><mode>"
   {
     operands[4] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes,yes")]
+  [(set_attr "movprfx" "*,yes,yes")
+   (set_attr "sve_type" "urecpe")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -2207,7 +2285,8 @@ (define_insn "@aarch64_pred_<sve_fp_op><mode>"
   "@
    <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
    movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  [(set_attr "movprfx" "*,yes")
+   (set_attr "sve_type" "flogb_<Vetype>")]
 )
 
 ;; Predicated FLOGB with merging.
@@ -2245,7 +2324,8 @@ (define_insn_and_rewrite "*cond_<sve_fp_op><mode>"
   {
     operands[4] = copy_rtx (operands[1]);
   }
-  [(set_attr "movprfx" "*,yes,yes")]
+  [(set_attr "movprfx" "*,yes,yes")
+   (set_attr "sve_type" "flogb_<Vetype>")]
 )
 
 (define_insn "*cond_<sve_fp_op><mode>_strict"
@@ -2264,7 +2344,8 @@ (define_insn "*cond_<sve_fp_op><mode>_strict"
    <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
    movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
    movprfx\t%0, %3\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
-  [(set_attr "movprfx" "*,yes,yes")]
+  [(set_attr "movprfx" "*,yes,yes")
+   (set_attr "sve_type" "flogb_<Vetype>")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -2285,6 +2366,7 @@ (define_insn "@aarch64_sve2_pmul<mode>"
 	  UNSPEC_PMUL))]
   "TARGET_SVE2"
   "pmul\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>"
+  [(set_attr "sve_type" "pmul")]
 )
 
 ;; Extending PMUL, with the results modeled as wider vectors.
@@ -2297,6 +2379,7 @@ (define_insn "@aarch64_sve_<optab><mode>"
 	  SVE2_PMULL))]
   "TARGET_SVE2"
   "<sve_int_op>\t%0.<Vetype>, %1.<Ventype>, %2.<Ventype>"
+  [(set_attr "sve_type" "pmul")]
 )
 
 ;; Extending PMUL, with the results modeled as pairs of values.
@@ -2310,6 +2393,7 @@ (define_insn "@aarch64_sve_<optab><mode>"
 	  SVE2_PMULL_PAIR))]
   "TARGET_SVE2"
   "<sve_int_op>\t%0.<Vewtype>, %1.<Vetype>, %2.<Vetype>"
+  [(set_attr "sve_type" "pmul")]
 )
 
 ;; =========================================================================
@@ -2333,6 +2417,7 @@ (define_insn "@aarch64_sve2_tbl2<mode>"
 	  UNSPEC_TBL2))]
   "TARGET_SVE2"
   "tbl\t%0.<Vetype>, %1, %2.<Vetype>"
+  [(set_attr "sve_type" "tbl")]
 )
 
 ;; TBX.  These instructions do not take MOVPRFX.
@@ -2345,6 +2430,7 @@ (define_insn "@aarch64_sve2_tbx<mode>"
 	  UNSPEC_TBX))]
   "TARGET_SVE2"
   "tbx\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>"
+  [(set_attr "sve_type" "tbx")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -2364,6 +2450,7 @@ (define_insn "@aarch64_sve_<sve_int_op><mode>"
 	  SVE2_INT_BITPERM))]
   "TARGET_SVE2_BITPERM"
   "<sve_int_op>\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>"
+  [(set_attr "sve_type" "bdep")]
 )
 
 ;; =========================================================================
@@ -2428,6 +2515,7 @@ (define_insn "@aarch64_sve2_histcnt<mode>"
 	  UNSPEC_HISTCNT))]
   "TARGET_SVE2"
   "histcnt\t%0.<Vetype>, %1/z, %2.<Vetype>, %3.<Vetype>"
+  [(set_attr "sve_type" "histcnt")]
 )
 
 (define_insn "@aarch64_sve2_histseg<mode>"
@@ -2438,6 +2526,7 @@ (define_insn "@aarch64_sve2_histseg<mode>"
 	  UNSPEC_HISTSEG))]
   "TARGET_SVE2"
   "histseg\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>"
+  [(set_attr "sve_type" "histcnt")]
 )
 
 ;; -------------------------------------------------------------------------
@@ -2462,6 +2551,7 @@ (define_insn "@aarch64_pred_<sve_int_op><mode>"
    (clobber (reg:CC_NZC CC_REGNUM))]
   "TARGET_SVE2"
   "<sve_int_op>\t%0.<Vetype>, %1/z, %3.<Vetype>, %4.<Vetype>"
+  [(set_attr "sve_type" "match")]
 )
 
 ;; Predicated string matching in which both the flag and predicate results
@@ -2498,6 +2588,7 @@ (define_insn_and_rewrite "*aarch64_pred_<sve_int_op><mode>_cc"
     operands[6] = copy_rtx (operands[4]);
     operands[7] = operands[5];
   }
+  [(set_attr "sve_type" "match")]
 )
 
 ;; Predicated string matching in which only the flags result is interesting.
@@ -2525,6 +2616,7 @@ (define_insn_and_rewrite "*aarch64_pred_<sve_int_op><mode>_ptest"
     operands[6] = copy_rtx (operands[4]);
     operands[7] = operands[5];
   }
+  [(set_attr "sve_type" "match")]
 )
 
 ;; =========================================================================
diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
index f2e3d905dbbeb2949f2947f5cfd68208c94c9272..e69581002ee77412535d89672364aa560bdfd5ae 100644
--- a/gcc/config/aarch64/aarch64.md
+++ b/gcc/config/aarch64/aarch64.md
@@ -359,6 +359,8 @@ (define_constants
 ; to share pipeline descriptions.
 (include "../arm/types.md")
 
+(include "aarch64-sched-types.md")
+
 ;; It is important to set the fp or simd attributes to yes when a pattern
 ;; alternative uses the FP or SIMD register files, usually signified by use of
 ;; the 'w' constraint.  This will ensure that the alternative will be
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index a8ad4e5ff215ade06c3ca13a24ef18d259afcb6c..5e6ff595c0e2a084ecf88ac9b60158d9983039f8 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
@@ -2552,6 +2552,27 @@ (define_code_attr sve_pred_int_rhs2_operand
 (define_code_attr inc_dec [(minus "dec") (ss_minus "sqdec") (us_minus "uqdec")
 			   (plus "inc") (ss_plus "sqinc") (us_plus "uqinc")])
 
+
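+;; Map RTL codes for SVE integer operations to the scheduling class
+;; ("sve_type" value) they belong to.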
+(define_code_attr sve_sched_code [(mult "mul_<Vetype>")
+				  (smax "int_simple_binary")
+				  (smin "int_simple_binary")
+				  (umin "int_simple_binary")
+				  (umax "int_simple_binary")
+				  (plus "int_simple_binary")
+				  (minus "int_simple_binary")
+				  (and "int_simple_binary")
+				  (ior "int_simple_binary")
+				  (xor "int_simple_binary")
+				  (ashift "simple_shift")
+				  (ashiftrt "simple_shift")
+				  (lshiftrt "simple_shift")
+				  (ss_plus "int_arith_complex")
+				  (us_plus "int_arith_complex")
+				  (ss_minus "int_arith_complex")
+				  (us_minus "int_arith_complex")])
+
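+;; Likewise for RTL codes of floating-point operations.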
+(define_code_attr sve_sched_fp_code [(plus "farith_simple")
+				      (minus "farith_simple")
+				      (mult "fmul")])
+
 ;; -------------------------------------------------------------------
 ;; Int Iterators.
 ;; -------------------------------------------------------------------
@@ -4014,3 +4035,120 @@ (define_int_attr fpscr_name
    (UNSPECV_SET_FPSR "fpsr")
    (UNSPECV_GET_FPCR "fpcr")
    (UNSPECV_SET_FPCR "fpcr")])
+
+
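+;; Map the unspecs used in SVE and SVE2 patterns to scheduling classes
+;; ("sve_type" values).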
+(define_int_attr sve_sched [(UNSPEC_COND_FABS "farith_simple")
+			    (UNSPEC_COND_FNEG "farith_simple")
+			    (UNSPEC_COND_FADD "farith_simple")
+                            (UNSPEC_COND_FSUB "farith_simple")
+			    (UNSPEC_COND_FMAX "fminmax")
+			    (UNSPEC_COND_FMAXNM "fminmax")
+                            (UNSPEC_COND_FMIN "fminmax")
+                            (UNSPEC_COND_FMINNM "fminmax")
+                            (UNSPEC_COND_FMUL "fmul")
+                            (UNSPEC_COND_FMULX "fmul")
+			    (UNSPEC_COND_FDIV "fdiv_<Vetype>")
+			    (UNSPEC_COND_FSQRT "fsqrt_<Vetype>")
+			    (UNSPEC_COND_FRECPX "frec_estimate_<Vetype>")
+			    (UNSPEC_COND_FRINTA "frint_<Vetype>")
+			    (UNSPEC_COND_FRINTI "frint_<Vetype>")
+			    (UNSPEC_COND_FRINTM "frint_<Vetype>")
+			    (UNSPEC_COND_FRINTN "frint_<Vetype>")
+			    (UNSPEC_COND_FRINTP "frint_<Vetype>")
+			    (UNSPEC_COND_FRINTX "frint_<Vetype>")
+			    (UNSPEC_COND_FRINTZ "frint_<Vetype>")
+			    (UNSPEC_FRECPS "frec_step_<Vetype>")
+			    (UNSPEC_RSQRTS "frec_step_<Vetype>")
+                            (UNSPEC_FRECPE "frec_estimate_<Vetype>")
+                            (UNSPEC_FRECPX "frec_estimate_<Vetype>")
+			    (UNSPEC_RSQRTE "frec_estimate_<Vetype>")
+			    (UNSPEC_ASRD "asrd")
+			    (UNSPEC_BFDOT "bfdot")
+                            (UNSPEC_BFMLALB "bfmlal")
+                            (UNSPEC_BFMLALT "bfmlal")
+                            (UNSPEC_BFMMLA "bfmmla")
+			    (UNSPEC_ANDF "int_simple_binary")
+                            (UNSPEC_IORF "int_simple_binary")
+                            (UNSPEC_XORF "int_simple_binary")
+			    (UNSPEC_SQSHLU "int_shift_complex")
+			    (UNSPEC_SRSHR "int_shift_complex")
+			    (UNSPEC_URSHR "int_shift_complex")
+			    (UNSPEC_FTSMUL "ftsmul")
+			    (UNSPEC_FTSSEL "ftssel")
+			    (UNSPEC_COND_FSCALE "fscale")
+			    (UNSPEC_ANDV "logical_reduce")
+                            (UNSPEC_IORV "logical_reduce")
+                            (UNSPEC_XORV "logical_reduce")
+                            (UNSPEC_SMAXV "int_reduce_<Vetype>")
+                            (UNSPEC_SMINV "int_reduce_<Vetype>")
+                            (UNSPEC_UMAXV "int_reduce_<Vetype>")
+                            (UNSPEC_UMINV "int_reduce_<Vetype>")
+                            (UNSPEC_SHADD "int_arith_complex")
+                            (UNSPEC_SHSUB "int_arith_complex")
+                            (UNSPEC_SRHADD "int_arith_complex")
+                            (UNSPEC_UHADD "int_arith_complex")
+                            (UNSPEC_URHADD "int_arith_complex")
+                            (UNSPEC_UHSUB "int_arith_complex")
+                            (UNSPEC_SUQADD "int_arith_complex")
+                            (UNSPEC_USQADD "int_arith_complex")
+                            (UNSPEC_SQRSHL "int_shift_complex")
+                            (UNSPEC_SRSHL "int_shift_complex")
+                            (UNSPEC_UQRSHL "int_shift_complex")
+                            (UNSPEC_URSHL "int_shift_complex")
+                            (UNSPEC_SQSHL "int_shift_complex")
+                            (UNSPEC_UQSHL "int_shift_complex")
+                            (UNSPEC_SQDMULH "sqrdmulh_<Vetype>")
+                            (UNSPEC_SQRDMULH "sqrdmulh_<Vetype>")
+                            (UNSPEC_ADCLB "arith_large_int")
+                            (UNSPEC_ADCLT "arith_large_int")
+                            (UNSPEC_SBCLB "arith_large_int")
+                            (UNSPEC_SBCLT "arith_large_int")
+                            (UNSPEC_EORBT "eorbt")
+                            (UNSPEC_EORTB "eorbt")
+                            (UNSPEC_SQRDMLAH "sqrdmulh_<Vetype>_accum")
+                            (UNSPEC_SQRDMLSH "sqrdmulh_<Vetype>_accum")
+                            (UNSPEC_SABDLB "abd_long")
+                            (UNSPEC_SABDLT "abd_long")
+                            (UNSPEC_SADDLB "int_arith_long")
+                            (UNSPEC_SADDLBT "int_arith_long")
+                            (UNSPEC_SADDLT "int_arith_long")
+                            (UNSPEC_SMULLB "mul_long")
+                            (UNSPEC_SMULLT "mul_long")
+                            (UNSPEC_SQDMULLB "mul_long")
+                            (UNSPEC_SQDMULLBT "mul_long")
+                            (UNSPEC_SQDMULLT "mul_long")
+                            (UNSPEC_SSUBLB "int_arith_long")
+                            (UNSPEC_SSUBLBT "int_arith_long")
+                            (UNSPEC_SSUBLT "int_arith_long")
+                            (UNSPEC_SSUBLTB "int_arith_long")
+                            (UNSPEC_UADDLB "int_arith_long")
+                            (UNSPEC_UADDLT "int_arith_long")
+                            (UNSPEC_USUBLB "int_arith_long")
+                            (UNSPEC_USUBLT "int_arith_long")
+                            (UNSPEC_UABDLB "abd_long")
+                            (UNSPEC_UABDLT "abd_long")
+                            (UNSPEC_UMULLB "mul_long")
+                            (UNSPEC_UMULLT "mul_long")
+                            (UNSPEC_FMLALB "fmlal")
+                            (UNSPEC_FMLALT "fmlal")
+                            (UNSPEC_FMLSLB "fmlal")
+                            (UNSPEC_FMLSLT "fmlal")
+                            (UNSPEC_ADDP "add_pairwise")
+                            (UNSPEC_SMAXP "minmax_pairwise")
+                            (UNSPEC_SMINP "minmax_pairwise")
+                            (UNSPEC_UMAXP "minmax_pairwise")
+                            (UNSPEC_UMINP "minmax_pairwise")
+                            (UNSPEC_FADDP "fadd_pairwise")
+                            (UNSPEC_FMAXP "fminmax_pairwise")
+                            (UNSPEC_FMAXNMP "fminmax_pairwise")
+                            (UNSPEC_FMINP "fminmax_pairwise")
+                            (UNSPEC_FMINNMP "fminmax_pairwise")
+                            (UNSPEC_CMLA "cmla_<Vetype>")
+                            (UNSPEC_CMLA90 "cmla_<Vetype>")
+                            (UNSPEC_CMLA180 "cmla_<Vetype>")
+                            (UNSPEC_CMLA270 "cmla_<Vetype>")
+                            (UNSPEC_SQRDCMLAH "sqrdcmlah_<Vetype>")
+                            (UNSPEC_SQRDCMLAH90 "sqrdcmlah_<Vetype>")
+                            (UNSPEC_SQRDCMLAH180 "sqrdcmlah_<Vetype>")
+                            (UNSPEC_SQRDCMLAH270 "sqrdcmlah_<Vetype>")])
+

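As a reference for anyone building on this, here is a minimal sketch of a
scheduling description that consumes the new "sve_type" attribute.  The
automaton, unit and core names and all latencies below are hypothetical
placeholders, not part of the patch; a real description would use the "tune"
value of an actual core and its measured latencies.

;; Hypothetical example only: "examplecore" and "ec_sve_v0" do not exist.
(define_automaton "examplecore_sve")
(define_cpu_unit "ec_sve_v0" "examplecore_sve")

;; Simple integer arithmetic: fully pipelined, 2-cycle latency.
(define_insn_reservation "ec_sve_int_simple" 2
  (and (eq_attr "tune" "examplecore")
       (eq_attr "sve_type" "int_simple_binary"))
  "ec_sve_v0")

;; Double-precision FP division ("fdiv_<Vetype>" with <Vetype> = d):
;; long latency, with the unit held for several cycles to model an
;; unpipelined divider.
(define_insn_reservation "ec_sve_fdiv_d" 15
  (and (eq_attr "tune" "examplecore")
       (eq_attr "sve_type" "fdiv_d"))
  "ec_sve_v0*7")

Because the classes are ordinary attribute values, a coarser model can fold
several of them into one reservation with a comma-separated list, e.g.
(eq_attr "sve_type" "int_simple_binary,simple_shift").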