[Resending this with the patch compressed as it's more than 400 KB...]

Hi all,

this patch converts a number of multi-choice patterns within the
aarch64 backend to the new compact syntax.  The list of the converted
patterns is in the ChangeLog; a simplified before/after sketch of the
conversion follows the ChangeLog below.

For completeness, here follows the list of multi-choice patterns that
were rejected for conversion by my parser; they typically have some C
code as asm output and require manual intervention (a hypothetical
sketch of this shape also follows the ChangeLog): aarch64_simd_vec_set,
aarch64_get_lane, aarch64_cmdi, aarch64_cmdi, aarch64_cmtstdi,
*aarch64_movv8di, *aarch64_be_mov, *aarch64_be_movci, *aarch64_be_mov,
*aarch64_be_movxi, *aarch64_sve_mov_le, *aarch64_sve_mov_be,
@aarch64_pred_mov, @aarch64_sve_gather_prefetch,
@aarch64_sve_gather_prefetch, *aarch64_sve_gather_prefetch_sxtw,
*aarch64_sve_gather_prefetch_uxtw, @aarch64_vec_duplicate_vq_le,
*vec_extract_0, *vec_extract_v128, *cmp_and, *fcm_and_combine,
@aarch64_sve_ext, @aarch64_sve2_aba, *sibcall_insn,
*sibcall_value_insn, *xor_one_cmpl3, *insv_reg_, *aarch64_bfi_,
*aarch64_bfidi_subreg_, *aarch64_bfxil, *aarch64_bfxilsi_uxtw,
*aarch64_cvtf2_mult, atomic_store.

Bootstrapped and regression tested on aarch64-unknown-linux-gnu.  I
also compared tmp-mddump.md (from 'make mddump') before and after the
patch and could not find any effective difference.  Okay for trunk?

Bests

  Andrea

gcc/ChangeLog:

	* config/aarch64/aarch64.md (@ccmp)
	(@ccmp_rev, *call_insn, *call_value_insn)
	(*mov_aarch64, load_pair_sw_)
	(load_pair_dw_)
	(store_pair_sw_)
	(store_pair_dw_, *extendsidi2_aarch64)
	(*zero_extendsidi2_aarch64, *load_pair_zero_extendsidi2_aarch64)
	(*extend2_aarch64)
	(*zero_extend2_aarch64)
	(*extendqihi2_aarch64, *zero_extendqihi2_aarch64)
	(*add3_aarch64, *addsi3_aarch64_uxtw, *add3_poly_1)
	(add3_compare0, *addsi3_compare0_uxtw)
	(*add3_compareC_cconly, add3_compareC)
	(*add3_compareV_cconly_imm, add3_compareV_imm)
	(*add3nr_compare0, subdi3, subv_imm)
	(*cmpv_insn, sub3_compare1_imm, neg2)
	(cmp, fcmp, fcmpe, *cmov_insn)
	(*cmovsi_insn_uxtw, 3, *si3_uxtw)
	(*and3_compare0, *andsi3_compare0_uxtw, one_cmpl2)
	(*_one_cmpl3, *and3nr_compare0)
	(*aarch64_ashl_sisd_or_int_3)
	(*aarch64_lshr_sisd_or_int_3)
	(*aarch64_ashr_sisd_or_int_3, *ror3_insn)
	(*si3_insn_uxtw, _trunc2)
	(2)
	(3)
	(3)
	(*aarch64_3_cssc, copysign3_insn): Update to new syntax.
	* config/aarch64/aarch64-sve2.md (@aarch64_scatter_stnt)
	(@aarch64_scatter_stnt_)
	(*aarch64_mul_unpredicated_)
	(@aarch64_pred_, *cond__2)
	(*cond__3, *cond__any)
	(*cond__z, @aarch64_pred_)
	(*cond__2, *cond__3)
	(*cond__any, @aarch64_sve_)
	(@aarch64_sve__lane_)
	(@aarch64_sve_add_mul_lane_)
	(@aarch64_sve_sub_mul_lane_, @aarch64_sve2_xar)
	(*aarch64_sve2_bcax, @aarch64_sve2_eor3)
	(*aarch64_sve2_nor, *aarch64_sve2_nand)
	(*aarch64_sve2_bsl, *aarch64_sve2_nbsl)
	(*aarch64_sve2_bsl1n, *aarch64_sve2_bsl2n)
	(*aarch64_sve2_sra, @aarch64_sve_add_)
	(*aarch64_sve2_aba, @aarch64_sve_add_)
	(@aarch64_sve_add__lane_)
	(@aarch64_sve_qadd_)
	(@aarch64_sve_qadd__lane_)
	(@aarch64_sve_sub_)
	(@aarch64_sve_sub__lane_)
	(@aarch64_sve_qsub_)
	(@aarch64_sve_qsub__lane_)
	(@aarch64_sve_, @aarch64__lane_)
	(@aarch64_pred_)
	(@aarch64_pred_, *cond__2)
	(*cond__z, @aarch64_sve_)
	(@aarch64__lane_, @aarch64_sve_)
	(@aarch64__lane_, @aarch64_pred_)
	(*cond__any_relaxed)
	(*cond__any_strict)
	(@aarch64_pred_, *cond_)
	(@aarch64_pred_, *cond_)
	(*cond__strict): Update to new syntax.
	* config/aarch64/aarch64-sve.md (*aarch64_sve_mov_ldr_str)
	(*aarch64_sve_mov_no_ldr_str, @aarch64_pred_mov)
	(*aarch64_sve_mov, aarch64_wrffr)
	(mask_scatter_store)
	(*mask_scatter_store_xtw_unpacked)
	(*mask_scatter_store_sxtw)
	(*mask_scatter_store_uxtw)
	(@aarch64_scatter_store_trunc)
	(@aarch64_scatter_store_trunc)
	(*aarch64_scatter_store_trunc_sxtw)
	(*aarch64_scatter_store_trunc_uxtw)
	(*vec_duplicate_reg, vec_shl_insert_)
	(vec_series, @extract__)
	(@aarch64_pred_, *cond__2)
	(*cond__any, @aarch64_pred_)
	(@aarch64_sve_revbhw_)
	(@cond_)
	(*2)
	(@aarch64_pred_sxt)
	(@aarch64_cond_sxt)
	(*cond_uxt_2, *cond_uxt_any, *cnot)
	(*cond_cnot_2, *cond_cnot_any)
	(@aarch64_pred_, *cond__2_relaxed)
	(*cond__2_strict, *cond__any_relaxed)
	(*cond__any_strict, @aarch64_pred_)
	(*cond__2, *cond__3)
	(*cond__any, add3, sub3)
	(@aarch64_pred_abd, *aarch64_cond_abd_2)
	(*aarch64_cond_abd_3, *aarch64_cond_abd_any)
	(@aarch64_sve_, @aarch64_pred_)
	(*cond__2, *cond__z)
	(@aarch64_pred_, *cond__2)
	(*cond__3, *cond__any, 3)
	(*cond_bic_2, *cond_bic_any)
	(@aarch64_pred_, *cond__2_const)
	(*cond__any_const, *cond__m)
	(*cond__z, *sdiv_pow23)
	(*cond__2, *cond__any)
	(@aarch64_pred_, *cond__2_relaxed)
	(*cond__2_strict, *cond__any_relaxed)
	(*cond__any_strict, @aarch64_pred_)
	(*cond__2_relaxed, *cond__2_strict)
	(*cond__2_const_relaxed)
	(*cond__2_const_strict)
	(*cond__3_relaxed, *cond__3_strict)
	(*cond__any_relaxed, *cond__any_strict)
	(*cond__any_const_relaxed)
	(*cond__any_const_strict)
	(@aarch64_pred_, *cond_add_2_const_relaxed)
	(*cond_add_2_const_strict)
	(*cond_add_any_const_relaxed)
	(*cond_add_any_const_strict, @aarch64_pred_)
	(*cond__2_relaxed, *cond__2_strict)
	(*cond__any_relaxed, *cond__any_strict)
	(@aarch64_pred_, *cond_sub_3_const_relaxed)
	(*cond_sub_3_const_strict, *cond_sub_const_relaxed)
	(*cond_sub_const_strict, *aarch64_pred_abd_relaxed)
	(*aarch64_pred_abd_strict)
	(*aarch64_cond_abd_2_relaxed)
	(*aarch64_cond_abd_2_strict)
	(*aarch64_cond_abd_3_relaxed)
	(*aarch64_cond_abd_3_strict)
	(*aarch64_cond_abd_any_relaxed)
	(*aarch64_cond_abd_any_strict, @aarch64_pred_)
	(@aarch64_pred_fma, *cond_fma_2, *cond_fma_4)
	(*cond_fma_any, @aarch64_pred_fnma)
	(*cond_fnma_2, *cond_fnma_4, *cond_fnma_any)
	(dot_prod, @aarch64_dot_prod_lane)
	(@dot_prod, @aarch64_dot_prod_lane)
	(@aarch64_sve_add_, @aarch64_pred_)
	(*cond__2_relaxed, *cond__2_strict)
	(*cond__4_relaxed, *cond__4_strict)
	(*cond__any_relaxed, *cond__any_strict)
	(@aarch64__lane_, @aarch64_pred_)
	(*cond__4_relaxed, *cond__4_strict)
	(*cond__any_relaxed, *cond__any_strict)
	(@aarch64__lane_, @aarch64_sve_tmad)
	(@aarch64_sve_vnx4sf)
	(@aarch64_sve__lanevnx4sf)
	(@aarch64_sve_, *vcond_mask_)
	(@aarch64_sel_dup, @aarch64_pred_cmp)
	(*cmp_cc, *cmp_ptest)
	(@aarch64_pred_fcm, @fold_extract__)
	(@aarch64_fold_extract_vector__)
	(@aarch64_sve_splice)
	(@aarch64_sve__nontrunc)
	(@aarch64_sve__trunc)
	(*cond__nontrunc_relaxed)
	(*cond__nontrunc_strict)
	(*cond__trunc)
	(@aarch64_sve__nonextend)
	(@aarch64_sve__extend)
	(*cond__nonextend_relaxed)
	(*cond__nonextend_strict)
	(*cond__extend)
	(@aarch64_sve__trunc)
	(*cond__trunc)
	(@aarch64_sve__trunc)
	(*cond__trunc)
	(@aarch64_sve__nontrunc)
	(*cond__nontrunc)
	(@aarch64_brk, *aarch64_sve__cntp): Update to new syntax.
	* config/aarch64/aarch64-simd.md (aarch64_simd_dup)
	(load_pair)
	(vec_store_pair, aarch64_simd_stp)
	(aarch64_simd_mov_from_low)
	(aarch64_simd_mov_from_high, and3)
	(ior3, aarch64_simd_ashr)
	(aarch64_simd_bsl_internal)
	(*aarch64_simd_bsl_alt)
	(aarch64_simd_bsldi_internal, aarch64_simd_bsldi_alt)
	(store_pair_lanes, *aarch64_combine_internal)
	(*aarch64_combine_internal_be, *aarch64_combinez)
	(*aarch64_combinez_be)
	(aarch64_cm, *aarch64_cmdi)
	(aarch64_cm, *aarch64_mov)
	(*aarch64_be_mov, *aarch64_be_movoi): Update to new syntax.
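
For reference, the fragment below is a simplified, hypothetical pattern
(not one taken verbatim from the patch) showing roughly what such a
conversion looks like.  In the old syntax the constraints and
attributes live in parallel comma-separated lists, one element per
alternative; in the new compact syntax each alternative becomes a
single row pairing its constraints, attributes and asm template:

  ;; Old multi-choice syntax.
  (define_insn "*example_zero_extendsidi2"
    [(set (match_operand:DI 0 "register_operand" "=r,r,w")
          (zero_extend:DI
            (match_operand:SI 1 "nonimmediate_operand" "r,m,r")))]
    ""
    "@
     uxtw\t%0, %w1
     ldr\t%w0, %1
     fmov\t%s0, %w1"
    [(set_attr "type" "mov_reg,load_4,f_mcr")]
  )

  ;; Same pattern in the new compact syntax: the constraints move out
  ;; of the match_operands and into a table, one row per alternative.
  (define_insn "*example_zero_extendsidi2"
    [(set (match_operand:DI 0 "register_operand")
          (zero_extend:DI
            (match_operand:SI 1 "nonimmediate_operand")))]
    ""
    {@ [cons: =0, 1; attrs: type]
       [r, r; mov_reg] uxtw\t%0, %w1
       [r, m; load_4 ] ldr\t%w0, %1
       [w, r; f_mcr  ] fmov\t%s0, %w1
    }
  )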
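
The rejected patterns, by contrast, compute their output in a C block
instead of listing one asm template per alternative, so there is no
template list for the converter to rewrite mechanically.  A
hypothetical sketch of that shape (again not taken from the patch):

  ;; The output below is C code selecting the template at output time;
  ;; my parser does not attempt to convert this form.
  (define_insn "*example_mov"
    [(set (match_operand:SI 0 "register_operand" "=r,w")
          (match_operand:SI 1 "register_operand" "r,r"))]
    ""
    {
      switch (which_alternative)
        {
        case 0:
          return "mov\t%w0, %w1";
        case 1:
          return "fmov\t%s0, %w1";
        default:
          gcc_unreachable ();
        }
    }
  )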