public inbox for gcc-cvs@sourceware.org
help / color / mirror / Atom feed
* [gcc(refs/vendors/ARM/heads/morello)] Fix alternative-base addresses for 8- to 64-bit scalar moves
@ 2022-05-05 12:05 Matthew Malcomson
  0 siblings, 0 replies; only message in thread
From: Matthew Malcomson @ 2022-05-05 12:05 UTC (permalink / raw)
  To: gcc-cvs

https://gcc.gnu.org/g:8fe2aec9967f4b991747fa3ce7616ba2b500ed1f

commit 8fe2aec9967f4b991747fa3ce7616ba2b500ed1f
Author: Richard Sandiford <richard.sandiford@arm.com>
Date:   Thu Apr 7 13:25:22 2022 +0100

    Fix alternative-base addresses for 8- to 64-bit scalar moves
    
    Alternative-base addresses are more restricted than normal-base
    addresses:
    
    - There are no register-index forms of FP LDR and FP STR.
    
    - LDRB Wn, LDR Wn and LDR Xn only support 9-bit unsigned scaled
      offsets, rather than the 12-bit range for normal-base addresses.
    
    - There are no immediate-offset forms of LDRH Wn, FP LDR, and FP STR.
    
    The patch takes the following approach:
    
    - For integer modes:
      - Make "m" accept what GPR loads and stores accept.
      - Keep "m" for the GPR alternatives of the move patterns.
      - Use a new "UAu" constraint for the FPR alternatives of the
        move patterns, where "UAu" is:
        - equivalent to "m" for normal-base addresses
        - enforces the LDUR range for alternative-base addresses
    
    - For FP modes:
      - Make "m" accept what FPR loads and stores accept.  This means
        that the GPR move alternatives miss out on some possibilities,
        but that seems like the right trade-off; see the comment in the
        patch for details.
    
    The patch only handles scalar integer and floating-point move patterns
    (up to 64 bits in size).  Later patches will handle more.
    
    Unfortunately, we need forms of the pointer_plus base+index patterns
    that use MULT instead of ASHIFT for scaling, in order to support the
    address-like arithmetic that LRA generates for reloads.  These patterns
    are based on existing plus patterns in aarch64.md.  (Alex has removed
    the need for this on trunk, but the changes are probably too invasive
    to backport.)

Diff:
---
 gcc/config/aarch64/aarch64-morello.md              |  58 ++++
 gcc/config/aarch64/aarch64-protos.h                |   2 +
 gcc/config/aarch64/aarch64.c                       |  88 +++++-
 gcc/config/aarch64/aarch64.md                      |  12 +-
 gcc/config/aarch64/constraints.md                  |   6 +
 gcc/config/aarch64/predicates.md                   |   5 +
 .../aarch64/morello/alt-base-load-df-1.c           | 209 ++++++++++++++
 .../aarch64/morello/alt-base-load-di-1.c           | 314 +++++++++++++++++++++
 .../aarch64/morello/alt-base-load-hf-1.c           | 211 ++++++++++++++
 .../aarch64/morello/alt-base-load-hi-1.c           | 283 +++++++++++++++++++
 .../aarch64/morello/alt-base-load-qi-1.c           | 202 +++++++++++++
 .../aarch64/morello/alt-base-load-sf-1.c           | 209 ++++++++++++++
 .../aarch64/morello/alt-base-load-si-1.c           | 314 +++++++++++++++++++++
 .../aarch64/morello/alt-base-store-df-1.c          | 209 ++++++++++++++
 .../aarch64/morello/alt-base-store-df-2.c          | 108 +++++++
 .../aarch64/morello/alt-base-store-di-1.c          | 314 +++++++++++++++++++++
 .../aarch64/morello/alt-base-store-di-2.c          | 172 +++++++++++
 .../aarch64/morello/alt-base-store-hf-1.c          | 211 ++++++++++++++
 .../aarch64/morello/alt-base-store-hf-2.c          | 110 ++++++++
 .../aarch64/morello/alt-base-store-hi-1.c          | 283 +++++++++++++++++++
 .../aarch64/morello/alt-base-store-hi-2.c          | 142 ++++++++++
 .../aarch64/morello/alt-base-store-qi-1.c          | 202 +++++++++++++
 .../aarch64/morello/alt-base-store-qi-2.c          | 110 ++++++++
 .../aarch64/morello/alt-base-store-sf-1.c          | 209 ++++++++++++++
 .../aarch64/morello/alt-base-store-sf-2.c          | 108 +++++++
 .../aarch64/morello/alt-base-store-si-1.c          | 314 +++++++++++++++++++++
 .../aarch64/morello/alt-base-store-si-2.c          | 172 +++++++++++
 .../gcc.target/aarch64/morello/load-store-utils.h  |  44 ++-
 28 files changed, 4594 insertions(+), 27 deletions(-)

diff --git a/gcc/config/aarch64/aarch64-morello.md b/gcc/config/aarch64/aarch64-morello.md
index 7cddee9c075..e594d6d7f1d 100644
--- a/gcc/config/aarch64/aarch64-morello.md
+++ b/gcc/config/aarch64/aarch64-morello.md
@@ -41,6 +41,17 @@
   [(set_attr "type" "alu_shift_imm")]
 )
 
+(define_insn "*pointer_plus_multp2_cadi"
+  [(set (match_operand:CADI 0 "register_operand" "=rk")
+	(pointer_plus:CADI
+	  (match_operand:CADI 1 "register_operand" "r")
+	  (mult:DI (match_operand:DI 2 "register_operand" "r")
+		   (match_operand:DI 3 "aarch64_pwr_imm3"))))]
+  "TARGET_CAPABILITY_ANY"
+  "add\t%0, %1, %2, lsl %p3"
+  [(set_attr "type" "alu_shift_imm")]
+)
+
 (define_insn "*pointer_plus_<optab><ALLX:mode>_cadi"
   [(set (match_operand:CADI 0 "register_operand" "=rk")
 	(pointer_plus:CADI
@@ -64,6 +75,34 @@
   [(set_attr "type" "alu_ext")]
 )
 
+(define_insn "*pointer_plus_<optab><ALLX:mode>_multp2_cadi"
+  [(set (match_operand:CADI 0 "register_operand" "=rk")
+	(pointer_plus:CADI
+	  (match_operand:CADI 1 "register_operand" "r")
+	  (mult:DI
+	    (ANY_EXTEND:DI
+	      (match_operand:ALLX 2 "register_operand" "r"))
+	    (match_operand:DI 3 "aarch64_pwr_imm3"))))]
+  "TARGET_CAPABILITY_ANY"
+  "add\t%0, %1, %w2, <su>xt<ALLX:size> %p3"
+  [(set_attr "type" "alu_ext")]
+)
+
+(define_insn "*pointer_plus_<optab>_multp2_cadi"
+  [(set (match_operand:CADI 0 "register_operand" "=rk")
+	(pointer_plus:CADI
+	  (match_operand:CADI 1 "register_operand" "r")
+	  (ANY_EXTRACT:DI
+	    (mult:DI (match_operand:DI 2 "register_operand" "r")
+		     (match_operand:DI 3 "aarch64_pwr_imm3"))
+	    (match_operand 4 "const_int_operand")
+	    (const_int 0))))]
+  "TARGET_CAPABILITY_ANY
+   && aarch64_is_extend_from_extract (DImode, operands[3], operands[4])"
+  "add\t%0, %1, %w2, <su>xt%e4 %p3"
+  [(set_attr "type" "alu_ext")]
+)
+
 (define_insn "*pointer_plus_and_lsl_cadi"
   [(set (match_operand:CADI 0 "register_operand" "=rk")
 	(pointer_plus:CADI
@@ -82,6 +121,25 @@
   [(set_attr "type" "alu_ext")]
 )
 
+(define_insn "*pointer_plus_uxt_multp2_cadi"
+  [(set (match_operand:CADI 0 "register_operand" "=rk")
+	(pointer_plus:CADI
+	  (match_operand:CADI 1 "register_operand" "r")
+	  (and:DI
+	    (mult:DI (match_operand:DI 2 "register_operand" "r")
+		     (match_operand 3 "aarch64_pwr_imm3"))
+	    (match_operand 4 "const_int_operand"))))]
+  "TARGET_CAPABILITY_ANY
+   && aarch64_uxt_size (exact_log2 (INTVAL (operands[3])),
+			INTVAL (operands[4])) != 0"
+  {
+    operands[4] = GEN_INT (aarch64_uxt_size (exact_log2 (INTVAL (operands[3])),
+					     INTVAL (operands[4])));
+    return "add\t%0, %1, %w2, uxt%e4 %p3";
+  }
+  [(set_attr "type" "alu_ext")]
+)
+
 ; TODO: many more alternatives.
 (define_insn "*movcadi_aarch64"
   [(set (match_operand:CADI 0 "nonimmediate_operand" "=rk,r,r,m,r,r")
diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
index cff310d407b..3b4a3f2c165 100644
--- a/gcc/config/aarch64/aarch64-protos.h
+++ b/gcc/config/aarch64/aarch64-protos.h
@@ -328,6 +328,7 @@ enum aarch64_address_type {
 /* Address information.  */
 struct aarch64_address_info {
   enum aarch64_address_type type;
+  bool alt_base_p;
   rtx base;
   rtx offset;
   poly_int64 const_offset;
@@ -581,6 +582,7 @@ const char *aarch64_output_move_struct (rtx *operands);
 rtx aarch64_return_addr_rtx (void);
 rtx aarch64_return_addr (int, rtx);
 rtx aarch64_simd_gen_const_vector_dup (machine_mode, HOST_WIDE_INT);
+bool aarch64_ldr_or_alt_ldur_address_p (machine_mode, rtx);
 bool aarch64_simd_mem_operand_p (rtx);
 bool aarch64_sve_ld1r_operand_p (rtx);
 bool aarch64_sve_ld1rq_operand_p (rtx);
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index cb7e4b7ff74..a328e30db61 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -7935,6 +7935,17 @@ offset_9bit_signed_scaled_p (machine_mode mode, poly_int64 offset)
 	  && IN_RANGE (multiple, -256, 255));
 }
 
+/* Return true if OFFSET is an unsigned 9-bit value multiplied by the size
+   of MODE.  */
+
+static inline bool
+offset_9bit_unsigned_scaled_p (machine_mode mode, poly_int64 offset)
+{
+  HOST_WIDE_INT multiple;
+  return (constant_multiple_p (offset, GET_MODE_SIZE (mode), &multiple)
+	  && IN_RANGE (multiple, 0, 511));
+}
+
 /* Return true if OFFSET is an unsigned 12-bit value multiplied by the size
    of MODE.  */
 
@@ -9654,6 +9665,25 @@ aarch64_classify_address (struct aarch64_address_info *info,
   unsigned int vec_flags = aarch64_classify_vector_mode (mode);
   vec_flags &= ~VEC_PARTIAL;
 
+  /* Whether we're using "alternative" rather than "normal" base registers,
+     i.e. Cn bases outside C64 or Xn bases within C64.  (We currently don't
+     support the latter.)
+
+     The GPR and FP/SIMD ranges for alternative bases are different from
+     each other.  Here we enforce the GPR range for integer modes and the
+     FP/SIMD range for floating-point and vector modes.  This means that
+     GPR loads and stores miss out on some addressing modes for floats, but:
+
+     - Such loads and stores are rare.
+
+     - We want ivopts and other gimple passes to optimize for the FPR range.
+
+     - Advertising extra modes for GPRs might encourage the RTL passes
+       to load and store via GPR temporaries instead of reloading the
+       address.  This would lead to unnecessary cross-file moves.  */
+  bool alt_base_p = (TARGET_CAPABILITY_HYBRID
+		     && CAPABILITY_MODE_P (GET_MODE (x)));
+
   /* On BE, we use load/store pair for all large int mode load/stores.
      TI/TFmode may also use a load/store pair.  */
   bool advsimd_struct_p = (vec_flags == (VEC_ADVSIMD | VEC_STRUCT));
@@ -9674,7 +9704,10 @@ aarch64_classify_address (struct aarch64_address_info *info,
 			    && (known_lt (GET_MODE_SIZE (mode), 16)
 				|| mode == CADImode
 				|| vec_flags == VEC_ADVSIMD
-				|| vec_flags & VEC_SVE_DATA));
+				|| vec_flags & VEC_SVE_DATA)
+			    && !(alt_base_p
+				 && (FLOAT_MODE_P (mode)
+				     || VECTOR_MODE_P (mode))));
 
   /* For SVE, only accept [Rn], [Rn, Rm, LSL #shift] and
      [Rn, #offset, MUL VL].  */
@@ -9698,6 +9731,7 @@ aarch64_classify_address (struct aarch64_address_info *info,
   gcc_checking_assert (GET_MODE (x) == VOIDmode
 		       || SCALAR_ADDR_MODE_P (GET_MODE (x)));
 
+  info->alt_base_p = alt_base_p;
   switch (code)
     {
     case REG:
@@ -9800,9 +9834,30 @@ aarch64_classify_address (struct aarch64_address_info *info,
 		     || known_eq (GET_MODE_SIZE (mode), 8)
 		     || known_eq (GET_MODE_SIZE (mode), 16))
 		    && aarch64_offset_7bit_signed_scaled_p (mode, offset));
-	  else
-	    return (aarch64_offset_9bit_signed_unscaled_p (mode, offset)
-		    || offset_12bit_unsigned_scaled_p (mode, offset));
+
+	  /* Match LDUR forms, which exist for all remaining
+	     (access mode x base mode) combinations.  */
+	  if (aarch64_offset_9bit_signed_unscaled_p (mode, offset))
+	    return true;
+
+	  if (alt_base_p)
+	    switch (mode)
+	      {
+	      case E_QImode:
+	      case E_SImode:
+	      case E_DImode:
+	      case E_CADImode:
+		/* LDRB Wn, LDR Wn, LDR Xn and LDR Cn.  */
+		return offset_9bit_unsigned_scaled_p (mode, offset);
+
+	      default:
+		/* There is no immediate form of LDRH Wn.  Similarly for
+		   FP/SIMD versions of LDR, which take precedence over
+		   the GPR forms when dealing with FP and vector modes.  */
+		return false;
+	      }
+
+	  return offset_12bit_unsigned_scaled_p (mode, offset);
 	}
 
       if (allow_reg_index_p)
@@ -11468,6 +11523,7 @@ aarch64_legitimize_address (rtx x, rtx /* orig_x  */, machine_mode mode)
 
   if (any_plus_p (x) && CONST_INT_P (XEXP (x, 1)))
     {
+      auto addr_mode = as_a<scalar_addr_mode> (GET_MODE (x));
       rtx base = XEXP (x, 0);
       rtx offset_rtx = XEXP (x, 1);
       HOST_WIDE_INT offset = INTVAL (offset_rtx);
@@ -11516,9 +11572,9 @@ aarch64_legitimize_address (rtx x, rtx /* orig_x  */, machine_mode mode)
 							     cap);
 	  if (base_offset != 0)
 	    {
-	      base = plus_constant (Pmode, base, base_offset);
+	      base = plus_constant (addr_mode, base, base_offset);
 	      base = force_operand (base, NULL_RTX);
-	      return plus_constant (Pmode, base, offset - base_offset);
+	      return plus_constant (addr_mode, base, offset - base_offset);
 	    }
 	}
     }
@@ -19253,6 +19309,26 @@ aarch64_endian_lane_rtx (machine_mode mode, unsigned int n)
   return gen_int_mode (ENDIAN_LANE_N (GET_MODE_NUNITS (mode), n), SImode);
 }
 
+/* Return true if X is either:
+
+   - a valid normal-base memory address for an LDR of mode MODE
+   - a valid alternative-base memory address for an LDUR of mode MODE.  */
+
+bool
+aarch64_ldr_or_alt_ldur_address_p (machine_mode mode, rtx x)
+{
+  struct aarch64_address_info addr;
+
+  if (!aarch64_classify_address (&addr, x, mode, false))
+    return false;
+
+  if (!addr.alt_base_p)
+    return true;
+
+  return (addr.type == ADDRESS_REG_IMM
+	  && aarch64_offset_9bit_signed_unscaled_p (mode, addr.const_offset));
+}
+
 /* Return TRUE if OP is a valid vector addressing mode.  */
 
 bool
diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
index 2d989044f46..bf16e7c7256 100644
--- a/gcc/config/aarch64/aarch64.md
+++ b/gcc/config/aarch64/aarch64.md
@@ -1207,8 +1207,8 @@
 )
 
 (define_insn "*mov<mode>_aarch64"
-  [(set (match_operand:SHORT 0 "nonimmediate_operand" "=r,r,    w,r  ,r,w, m,m,r,w,w")
-	(match_operand:SHORT 1 "aarch64_mov_operand"  " r,M,D<hq>,Usv,m,m,rZ,w,w,r,w"))]
+  [(set (match_operand:SHORT 0 "nonimmediate_operand" "=r,r,    w,  r,r,  w, m,UAu,r,w,w")
+	(match_operand:SHORT 1 "aarch64_mov_operand"  " r,M,D<hq>,Usv,m,UAu,rZ,  w,w,r,w"))]
   "(register_operand (operands[0], <MODE>mode)
     || aarch64_reg_or_zero (operands[1], <MODE>mode))"
 {
@@ -1272,8 +1272,8 @@
 )
 
 (define_insn_and_split "*movsi_aarch64"
-  [(set (match_operand:SI 0 "nonimmediate_operand" "=r,k,r,r,r,r, r,w, m, m,  r,  r, w,r,w, w")
-	(match_operand:SI 1 "aarch64_mov_operand"  " r,r,k,M,n,Usv,m,m,rZ,w,Usa,Ush,rZ,w,w,Ds"))]
+  [(set (match_operand:SI 0 "nonimmediate_operand" "=r,k,r,r,r,  r,r,  w, m,UAu,  r,  r, w,r,w, w")
+	(match_operand:SI 1 "aarch64_mov_operand"  " r,r,k,M,n,Usv,m,UAu,rZ,  w,Usa,Ush,rZ,w,w,Ds"))]
   "(register_operand (operands[0], SImode)
     || aarch64_reg_or_zero (operands[1], SImode))"
   "@
@@ -1307,8 +1307,8 @@
 )
 
 (define_insn_and_split "*movdi_aarch64"
-  [(set (match_operand:DI 0 "nonimmediate_operand" "=r,k,r,r,r,r,r, r,w, m,m,  r,  r, w,r,w, w")
-	(match_operand:DI 1 "aarch64_mov_operand"  " r,r,k,N,M,n,Usv,m,m,rZ,w,Usa,Ush,rZ,w,w,Dd"))]
+  [(set (match_operand:DI 0 "nonimmediate_operand" "=r,k,r,r,r,r,  r,r,  w, m,UAu,  r,  r, w,r,w, w")
+	(match_operand:DI 1 "aarch64_mov_operand"  " r,r,k,N,M,n,Usv,m,UAu,rZ,  w,Usa,Ush,rZ,w,w,Dd"))]
   "(register_operand (operands[0], DImode)
     || aarch64_reg_or_zero (operands[1], DImode))"
   "@
diff --git a/gcc/config/aarch64/constraints.md b/gcc/config/aarch64/constraints.md
index 8cc6f508881..579c89dd4f5 100644
--- a/gcc/config/aarch64/constraints.md
+++ b/gcc/config/aarch64/constraints.md
@@ -373,6 +373,12 @@
    LD[234] and ST[234] patterns)."
   (match_operand 0 "aarch64_sve_struct_memory_operand"))
 
+(define_memory_constraint "UAu"
+  "@internal
+   Either a general memory operand with a normal base register or
+   a memory operand suitable for alternative-base forms of LDUR"
+  (match_operand 0 "aarch64_ldr_or_alt_ldur_operand"))
+
 (define_constraint "Ufc"
   "A floating point constant which can be used with an\
    FMOV immediate operation."
diff --git a/gcc/config/aarch64/predicates.md b/gcc/config/aarch64/predicates.md
index 2d0c8f570f3..0f5570d59a4 100644
--- a/gcc/config/aarch64/predicates.md
+++ b/gcc/config/aarch64/predicates.md
@@ -250,6 +250,11 @@
        (match_test "INTVAL (op) != 0
 		    && (unsigned) exact_log2 (INTVAL (op)) < 64")))
 
+(define_predicate "aarch64_ldr_or_alt_ldur_operand"
+  (and (match_code "mem")
+       (match_test "aarch64_ldr_or_alt_ldur_address_p (GET_MODE (op),
+						       XEXP (op, 0))")))
+
 (define_predicate "aarch64_mem_pair_offset"
   (and (match_code "const_int")
        (match_test "aarch64_offset_7bit_signed_scaled_p (mode, INTVAL (op))")))
diff --git a/gcc/testsuite/gcc.target/aarch64/morello/alt-base-load-df-1.c b/gcc/testsuite/gcc.target/aarch64/morello/alt-base-load-df-1.c
new file mode 100644
index 00000000000..f731d3e4a2b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/morello/alt-base-load-df-1.c
@@ -0,0 +1,209 @@
+/* { dg-do assemble } */
+/* { dg-additional-options "-save-temps" } */
+/* { dg-final { check-function-bodies "**" ""  { {-O[123s]} } } } */
+/* { dg-skip-if "" { *-*-* } { "-mabi=purecap" "-mfake-capability" } { "" } }  */
+
+#define ALT_BASE
+#include "load-store-utils.h"
+
+/*
+** load_x10_double_m264:
+**	sub	(c[0-9]+), c0, #264
+**	ldr	x10, \[\1\]
+**	ret
+*/
+LOAD_REG_OFFSET (x10, double, m264)
+
+/*
+** load_x10_double_m257:
+**	sub	(c[0-9]+), c0, #257
+**	ldr	x10, \[\1\]
+**	ret
+*/
+LOAD_REG_OFFSET (x10, double, m257)
+
+/*
+** load_x10_double_m256:
+**	ldr	x10, \[c0, #?-256\]
+**	ret
+*/
+LOAD_REG_OFFSET (x10, double, m256)
+
+/*
+** load_x10_double_m248:
+**	ldr	x10, \[c0, #?-248\]
+**	ret
+*/
+LOAD_REG_OFFSET (x10, double, m248)
+
+/*
+** load_x10_double_m8:
+**	ldr	x10, \[c0, #?-8\]
+**	ret
+*/
+LOAD_REG_OFFSET (x10, double, m8)
+
+/*
+** load_x10_double_m1:
+**	ldr	x10, \[c0, #?-1\]
+**	ret
+*/
+LOAD_REG_OFFSET (x10, double, m1)
+
+/*
+** load_x10_double_1:
+**	ldr	x10, \[c0, #?1\]
+**	ret
+*/
+LOAD_REG_OFFSET (x10, double, 1)
+
+/*
+** load_x10_double_8:
+**	ldr	x10, \[c0, #?8\]
+**	ret
+*/
+LOAD_REG_OFFSET (x10, double, 8)
+
+/*
+** load_x10_double_248:
+**	ldr	x10, \[c0, #?248\]
+**	ret
+*/
+LOAD_REG_OFFSET (x10, double, 248)
+
+/*
+** load_x10_double_255:
+**	ldr	x10, \[c0, #?255\]
+**	ret
+*/
+LOAD_REG_OFFSET (x10, double, 255)
+
+/*
+** load_x10_double_256:
+**	add	(c[0-9]+), c0, #?256
+**	ldr	x10, \[\1\]
+**	ret
+*/
+LOAD_REG_OFFSET (x10, double, 256)
+
+/* Check for valid asm, but don't mandate a particular sequence.  */
+LOAD_REG_INDEX (x10, double, int32_t, 1)
+LOAD_REG_INDEX (x10, double, uint32_t, 1)
+LOAD_REG_INDEX (x10, double, uint64_t, 1)
+
+LOAD_REG_INDEX (x10, double, int32_t, 2)
+LOAD_REG_INDEX (x10, double, uint32_t, 2)
+LOAD_REG_INDEX (x10, double, uint64_t, 2)
+
+LOAD_REG_INDEX (x10, double, int32_t, 4)
+LOAD_REG_INDEX (x10, double, uint32_t, 4)
+LOAD_REG_INDEX (x10, double, uint64_t, 4)
+
+LOAD_REG_INDEX (x10, double, int32_t, 8)
+LOAD_REG_INDEX (x10, double, uint32_t, 8)
+LOAD_REG_INDEX (x10, double, uint64_t, 8)
+
+LOAD_REG_INDEX (x10, double, int32_t, 16)
+LOAD_REG_INDEX (x10, double, uint32_t, 16)
+LOAD_REG_INDEX (x10, double, uint64_t, 16)
+
+/*
+** load_d20_double_m264:
+**	sub	(c[0-9]+), c0, #264
+**	ldr	d20, \[\1\]
+**	ret
+*/
+LOAD_REG_OFFSET (d20, double, m264)
+
+/*
+** load_d20_double_m257:
+**	sub	(c[0-9]+), c0, #257
+**	ldr	d20, \[\1\]
+**	ret
+*/
+LOAD_REG_OFFSET (d20, double, m257)
+
+/*
+** load_d20_double_m256:
+**	ldr	d20, \[c0, #?-256\]
+**	ret
+*/
+LOAD_REG_OFFSET (d20, double, m256)
+
+/*
+** load_d20_double_m248:
+**	ldr	d20, \[c0, #?-248\]
+**	ret
+*/
+LOAD_REG_OFFSET (d20, double, m248)
+
+/*
+** load_d20_double_m8:
+**	ldr	d20, \[c0, #?-8\]
+**	ret
+*/
+LOAD_REG_OFFSET (d20, double, m8)
+
+/*
+** load_d20_double_m1:
+**	ldr	d20, \[c0, #?-1\]
+**	ret
+*/
+LOAD_REG_OFFSET (d20, double, m1)
+
+/*
+** load_d20_double_1:
+**	ldr	d20, \[c0, #?1\]
+**	ret
+*/
+LOAD_REG_OFFSET (d20, double, 1)
+
+/*
+** load_d20_double_8:
+**	ldr	d20, \[c0, #?8\]
+**	ret
+*/
+LOAD_REG_OFFSET (d20, double, 8)
+
+/*
+** load_d20_double_248:
+**	ldr	d20, \[c0, #?248\]
+**	ret
+*/
+LOAD_REG_OFFSET (d20, double, 248)
+
+/*
+** load_d20_double_255:
+**	ldr	d20, \[c0, #?255\]
+**	ret
+*/
+LOAD_REG_OFFSET (d20, double, 255)
+
+/*
+** load_d20_double_256:
+**	add	(c[0-9]+), c0, #?256
+**	ldr	d20, \[\1\]
+**	ret
+*/
+LOAD_REG_OFFSET (d20, double, 256)
+
+/* Check for valid asm, but don't mandate a particular sequence.  */
+LOAD_REG_INDEX (d10, double, int32_t, 1)
+LOAD_REG_INDEX (d10, double, uint32_t, 1)
+LOAD_REG_INDEX (d10, double, uint64_t, 1)
+
+LOAD_REG_INDEX (d10, double, int32_t, 2)
+LOAD_REG_INDEX (d10, double, uint32_t, 2)
+LOAD_REG_INDEX (d10, double, uint64_t, 2)
+
+LOAD_REG_INDEX (d10, double, int32_t, 4)
+LOAD_REG_INDEX (d10, double, uint32_t, 4)
+LOAD_REG_INDEX (d10, double, uint64_t, 4)
+
+LOAD_REG_INDEX (d10, double, int32_t, 8)
+LOAD_REG_INDEX (d10, double, uint32_t, 8)
+LOAD_REG_INDEX (d10, double, uint64_t, 8)
+
+LOAD_REG_INDEX (d10, double, int32_t, 16)
+LOAD_REG_INDEX (d10, double, uint32_t, 16)
+LOAD_REG_INDEX (d10, double, uint64_t, 16)
diff --git a/gcc/testsuite/gcc.target/aarch64/morello/alt-base-load-di-1.c b/gcc/testsuite/gcc.target/aarch64/morello/alt-base-load-di-1.c
new file mode 100644
index 00000000000..7d6bacc4b10
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/morello/alt-base-load-di-1.c
@@ -0,0 +1,314 @@
+/* { dg-do assemble } */
+/* { dg-additional-options "-save-temps" } */
+/* { dg-final { check-function-bodies "**" ""  { {-O[123s]} } } } */
+/* { dg-skip-if "" { *-*-* } { "-mabi=purecap" "-mfake-capability" } { "" } }  */
+
+#define ALT_BASE
+#include "load-store-utils.h"
+
+/*
+** load_x10_uint64_t_m264:
+**	sub	(c[0-9]+), c0, #264
+**	ldr	x10, \[\1\]
+**	ret
+*/
+LOAD_REG_OFFSET (x10, uint64_t, m264)
+
+/*
+** load_x10_uint64_t_m257:
+**	sub	(c[0-9]+), c0, #257
+**	ldr	x10, \[\1\]
+**	ret
+*/
+LOAD_REG_OFFSET (x10, uint64_t, m257)
+
+/*
+** load_x10_uint64_t_m256:
+**	ldr	x10, \[c0, #?-256\]
+**	ret
+*/
+LOAD_REG_OFFSET (x10, uint64_t, m256)
+
+/*
+** load_x10_uint64_t_m248:
+**	ldr	x10, \[c0, #?-248\]
+**	ret
+*/
+LOAD_REG_OFFSET (x10, uint64_t, m248)
+
+/*
+** load_x10_uint64_t_m8:
+**	ldr	x10, \[c0, #?-8\]
+**	ret
+*/
+LOAD_REG_OFFSET (x10, uint64_t, m8)
+
+/*
+** load_x10_uint64_t_m1:
+**	ldr	x10, \[c0, #?-1\]
+**	ret
+*/
+LOAD_REG_OFFSET (x10, uint64_t, m1)
+
+/*
+** load_x10_uint64_t_1:
+**	ldr	x10, \[c0, #?1\]
+**	ret
+*/
+LOAD_REG_OFFSET (x10, uint64_t, 1)
+
+/*
+** load_x10_uint64_t_8:
+**	ldr	x10, \[c0, #?8\]
+**	ret
+*/
+LOAD_REG_OFFSET (x10, uint64_t, 8)
+
+/*
+** load_x10_uint64_t_248:
+**	ldr	x10, \[c0, #?248\]
+**	ret
+*/
+LOAD_REG_OFFSET (x10, uint64_t, 248)
+
+/*
+** load_x10_uint64_t_255:
+**	ldr	x10, \[c0, #?255\]
+**	ret
+*/
+LOAD_REG_OFFSET (x10, uint64_t, 255)
+
+/*
+** load_x10_uint64_t_256:
+**	ldr	x10, \[c0, #?256\]
+**	ret
+*/
+LOAD_REG_OFFSET (x10, uint64_t, 256)
+
+/*
+** load_x10_uint64_t_257:
+**	add	(c[0-9]+), c0, #?256
+**	ldr	x10, \[\1, #?1\]
+**	ret
+*/
+LOAD_REG_OFFSET (x10, uint64_t, 257)
+
+/*
+** load_x10_uint64_t_264:
+**	ldr	x10, \[c0, #?264\]
+**	ret
+*/
+LOAD_REG_OFFSET (x10, uint64_t, 264)
+
+/*
+** load_x10_uint64_t_4088:
+**	ldr	x10, \[c0, #?4088\]
+**	ret
+*/
+LOAD_REG_OFFSET (x10, uint64_t, 4088)
+
+/*
+** load_x10_uint64_t_4096:
+**	add	(c[0-9]+), c0, #?4096
+**	ldr	x10, \[\1\]
+**	ret
+*/
+LOAD_REG_OFFSET (x10, uint64_t, 4096)
+
+/*
+** load_x10_uint64_t_int32_t_1:
+**	ldr	x10, \[c0, w1, sxtw\]
+**	ret
+*/
+LOAD_REG_INDEX (x10, uint64_t, int32_t, 1)
+
+/*
+** load_x10_uint64_t_uint32_t_1:
+**	ldr	x10, \[c0, w1, uxtw\]
+**	ret
+*/
+LOAD_REG_INDEX (x10, uint64_t, uint32_t, 1)
+
+/*
+** load_x10_uint64_t_uint64_t_1:
+**	ldr	x10, \[c0, x1\]
+**	ret
+*/
+LOAD_REG_INDEX (x10, uint64_t, uint64_t, 1)
+
+/* Check for valid asm, but don't mandate a particular sequence.  */
+LOAD_REG_INDEX (x10, uint64_t, int32_t, 2)
+LOAD_REG_INDEX (x10, uint64_t, uint32_t, 2)
+LOAD_REG_INDEX (x10, uint64_t, uint64_t, 2)
+
+LOAD_REG_INDEX (x10, uint64_t, int32_t, 4)
+LOAD_REG_INDEX (x10, uint64_t, uint32_t, 4)
+LOAD_REG_INDEX (x10, uint64_t, uint64_t, 4)
+
+/*
+** load_x10_uint64_t_int32_t_8:
+**	ldr	x10, \[c0, w1, sxtw #?3\]
+**	ret
+*/
+LOAD_REG_INDEX (x10, uint64_t, int32_t, 8)
+
+/*
+** load_x10_uint64_t_uint32_t_8:
+**	ldr	x10, \[c0, w1, uxtw #?3\]
+**	ret
+*/
+LOAD_REG_INDEX (x10, uint64_t, uint32_t, 8)
+
+/*
+** load_x10_uint64_t_uint64_t_8:
+**	ldr	x10, \[c0, x1, lsl #?3\]
+**	ret
+*/
+LOAD_REG_INDEX (x10, uint64_t, uint64_t, 8)
+
+/* Check for valid asm, but don't mandate a particular sequence.  */
+LOAD_REG_INDEX (x10, uint64_t, int32_t, 16)
+LOAD_REG_INDEX (x10, uint64_t, uint32_t, 16)
+LOAD_REG_INDEX (x10, uint64_t, uint64_t, 16)
+
+/*
+** load_d20_uint64_t_m264:
+**	sub	(c[0-9]+), c0, #264
+**	ldr	d20, \[\1\]
+**	ret
+*/
+LOAD_REG_OFFSET (d20, uint64_t, m264)
+
+/*
+** load_d20_uint64_t_m257:
+**	sub	(c[0-9]+), c0, #257
+**	ldr	d20, \[\1\]
+**	ret
+*/
+LOAD_REG_OFFSET (d20, uint64_t, m257)
+
+/*
+** load_d20_uint64_t_m256:
+**	ldr	d20, \[c0, #?-256\]
+**	ret
+*/
+LOAD_REG_OFFSET (d20, uint64_t, m256)
+
+/*
+** load_d20_uint64_t_m248:
+**	ldr	d20, \[c0, #?-248\]
+**	ret
+*/
+LOAD_REG_OFFSET (d20, uint64_t, m248)
+
+/*
+** load_d20_uint64_t_m8:
+**	ldr	d20, \[c0, #?-8\]
+**	ret
+*/
+LOAD_REG_OFFSET (d20, uint64_t, m8)
+
+/*
+** load_d20_uint64_t_m1:
+**	ldr	d20, \[c0, #?-1\]
+**	ret
+*/
+LOAD_REG_OFFSET (d20, uint64_t, m1)
+
+/*
+** load_d20_uint64_t_1:
+**	ldr	d20, \[c0, #?1\]
+**	ret
+*/
+LOAD_REG_OFFSET (d20, uint64_t, 1)
+
+/*
+** load_d20_uint64_t_8:
+**	ldr	d20, \[c0, #?8\]
+**	ret
+*/
+LOAD_REG_OFFSET (d20, uint64_t, 8)
+
+/*
+** load_d20_uint64_t_248:
+**	ldr	d20, \[c0, #?248\]
+**	ret
+*/
+LOAD_REG_OFFSET (d20, uint64_t, 248)
+
+/*
+** load_d20_uint64_t_255:
+**	ldr	d20, \[c0, #?255\]
+**	ret
+*/
+LOAD_REG_OFFSET (d20, uint64_t, 255)
+
+/*
+** load_d20_uint64_t_256:
+**	add	(c[0-9]+), c0, #?256
+**	ldr	d20, \[\1\]
+**	ret
+*/
+LOAD_REG_OFFSET (d20, uint64_t, 256)
+
+/*
+** load_d20_uint64_t_int32_t_1:
+**	add	(c[0-9]+), c0, w1, sxtw
+**	ldr	d20, \[\1\]
+**	ret
+*/
+LOAD_REG_INDEX (d20, uint64_t, int32_t, 1)
+
+/*
+** load_d20_uint64_t_uint32_t_1:
+**	add	(c[0-9]+), c0, w1, uxtw
+**	ldr	d20, \[\1\]
+**	ret
+*/
+LOAD_REG_INDEX (d20, uint64_t, uint32_t, 1)
+
+/*
+** load_d20_uint64_t_uint64_t_1:
+**	add	(c[0-9]+), c0, x1
+**	ldr	d20, \[\1\]
+**	ret
+*/
+LOAD_REG_INDEX (d20, uint64_t, uint64_t, 1)
+
+/* Check for valid asm, but don't mandate a particular sequence.  */
+LOAD_REG_INDEX (d20, uint64_t, int32_t, 2)
+LOAD_REG_INDEX (d20, uint64_t, uint32_t, 2)
+LOAD_REG_INDEX (d20, uint64_t, uint64_t, 2)
+
+LOAD_REG_INDEX (d20, uint64_t, int32_t, 4)
+LOAD_REG_INDEX (d20, uint64_t, uint32_t, 4)
+LOAD_REG_INDEX (d20, uint64_t, uint64_t, 4)
+
+/*
+** load_d20_uint64_t_int32_t_8:
+**	add	(c[0-9]+), c0, w1, sxtw #?3
+**	ldr	d20, \[\1\]
+**	ret
+*/
+LOAD_REG_INDEX (d20, uint64_t, int32_t, 8)
+
+/*
+** load_d20_uint64_t_uint32_t_8:
+**	add	(c[0-9]+), c0, w1, uxtw #?3
+**	ldr	d20, \[\1\]
+**	ret
+*/
+LOAD_REG_INDEX (d20, uint64_t, uint32_t, 8)
+
+/*
+** load_d20_uint64_t_uint64_t_8:
+**	add	(c[0-9]+), c0, x1, lsl #?3
+**	ldr	d20, \[\1\]
+**	ret
+*/
+LOAD_REG_INDEX (d20, uint64_t, uint64_t, 8)
+
+/* Check for valid asm, but don't mandate a particular sequence.  */
+LOAD_REG_INDEX (d20, uint64_t, int32_t, 16)
+LOAD_REG_INDEX (d20, uint64_t, uint32_t, 16)
+LOAD_REG_INDEX (d20, uint64_t, uint64_t, 16)
diff --git a/gcc/testsuite/gcc.target/aarch64/morello/alt-base-load-hf-1.c b/gcc/testsuite/gcc.target/aarch64/morello/alt-base-load-hf-1.c
new file mode 100644
index 00000000000..c2ffb6ff3e9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/morello/alt-base-load-hf-1.c
@@ -0,0 +1,211 @@
+/* { dg-do assemble } */
+/* { dg-additional-options "-save-temps" } */
+/* { dg-final { check-function-bodies "**" ""  { {-O[123s]} } } } */
+/* { dg-skip-if "" { *-*-* } { "-mabi=purecap" "-mfake-capability" } { "" } }  */
+
+#define ALT_BASE
+#include "load-store-utils.h"
+
+typedef __fp16 fp16;
+
+/*
+** load_w10_fp16_m258:
+**	sub	(c[0-9]+), c0, #258
+**	ldrh	w10, \[\1\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, fp16, m258)
+
+/*
+** load_w10_fp16_m257:
+**	sub	(c[0-9]+), c0, #257
+**	ldrh	w10, \[\1\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, fp16, m257)
+
+/*
+** load_w10_fp16_m256:
+**	ldrh	w10, \[c0, #?-256\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, fp16, m256)
+
+/*
+** load_w10_fp16_m254:
+**	ldrh	w10, \[c0, #?-254\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, fp16, m254)
+
+/*
+** load_w10_fp16_m2:
+**	ldrh	w10, \[c0, #?-2\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, fp16, m2)
+
+/*
+** load_w10_fp16_m1:
+**	ldrh	w10, \[c0, #?-1\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, fp16, m1)
+
+/*
+** load_w10_fp16_1:
+**	ldrh	w10, \[c0, #?1\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, fp16, 1)
+
+/*
+** load_w10_fp16_2:
+**	ldrh	w10, \[c0, #?2\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, fp16, 2)
+
+/*
+** load_w10_fp16_254:
+**	ldrh	w10, \[c0, #?254\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, fp16, 254)
+
+/*
+** load_w10_fp16_255:
+**	ldrh	w10, \[c0, #?255\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, fp16, 255)
+
+/*
+** load_w10_fp16_256:
+**	add	(c[0-9]+), c0, #?256
+**	ldrh	w10, \[\1\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, fp16, 256)
+
+/* Check for valid asm, but don't mandate a particular sequence.  */
+LOAD_REG_INDEX (w10, fp16, int32_t, 1)
+LOAD_REG_INDEX (w10, fp16, uint32_t, 1)
+LOAD_REG_INDEX (w10, fp16, uint64_t, 1)
+
+LOAD_REG_INDEX (w10, fp16, int32_t, 2)
+LOAD_REG_INDEX (w10, fp16, uint32_t, 2)
+LOAD_REG_INDEX (w10, fp16, uint64_t, 2)
+
+LOAD_REG_INDEX (w10, fp16, int32_t, 4)
+LOAD_REG_INDEX (w10, fp16, uint32_t, 4)
+LOAD_REG_INDEX (w10, fp16, uint64_t, 4)
+
+LOAD_REG_INDEX (w10, fp16, int32_t, 8)
+LOAD_REG_INDEX (w10, fp16, uint32_t, 8)
+LOAD_REG_INDEX (w10, fp16, uint64_t, 8)
+
+LOAD_REG_INDEX (w10, fp16, int32_t, 16)
+LOAD_REG_INDEX (w10, fp16, uint32_t, 16)
+LOAD_REG_INDEX (w10, fp16, uint64_t, 16)
+
+/*
+** load_h20_fp16_m258:
+**	sub	(c[0-9]+), c0, #258
+**	ldr	h20, \[\1\]
+**	ret
+*/
+LOAD_REG_OFFSET (h20, fp16, m258)
+
+/*
+** load_h20_fp16_m257:
+**	sub	(c[0-9]+), c0, #257
+**	ldr	h20, \[\1\]
+**	ret
+*/
+LOAD_REG_OFFSET (h20, fp16, m257)
+
+/*
+** load_h20_fp16_m256:
+**	ldr	h20, \[c0, #?-256\]
+**	ret
+*/
+LOAD_REG_OFFSET (h20, fp16, m256)
+
+/*
+** load_h20_fp16_m254:
+**	ldr	h20, \[c0, #?-254\]
+**	ret
+*/
+LOAD_REG_OFFSET (h20, fp16, m254)
+
+/*
+** load_h20_fp16_m2:
+**	ldr	h20, \[c0, #?-2\]
+**	ret
+*/
+LOAD_REG_OFFSET (h20, fp16, m2)
+
+/*
+** load_h20_fp16_m1:
+**	ldr	h20, \[c0, #?-1\]
+**	ret
+*/
+LOAD_REG_OFFSET (h20, fp16, m1)
+
+/*
+** load_h20_fp16_1:
+**	ldr	h20, \[c0, #?1\]
+**	ret
+*/
+LOAD_REG_OFFSET (h20, fp16, 1)
+
+/*
+** load_h20_fp16_2:
+**	ldr	h20, \[c0, #?2\]
+**	ret
+*/
+LOAD_REG_OFFSET (h20, fp16, 2)
+
+/*
+** load_h20_fp16_254:
+**	ldr	h20, \[c0, #?254\]
+**	ret
+*/
+LOAD_REG_OFFSET (h20, fp16, 254)
+
+/*
+** load_h20_fp16_255:
+**	ldr	h20, \[c0, #?255\]
+**	ret
+*/
+LOAD_REG_OFFSET (h20, fp16, 255)
+
+/*
+** load_h20_fp16_256:
+**	add	(c[0-9]+), c0, #?256
+**	ldr	h20, \[\1\]
+**	ret
+*/
+LOAD_REG_OFFSET (h20, fp16, 256)
+
+/* Check for valid asm, but don't mandate a particular sequence.  */
+LOAD_REG_INDEX (h20, fp16, int32_t, 1)
+LOAD_REG_INDEX (h20, fp16, uint32_t, 1)
+LOAD_REG_INDEX (h20, fp16, uint64_t, 1)
+
+LOAD_REG_INDEX (h20, fp16, int32_t, 2)
+LOAD_REG_INDEX (h20, fp16, uint32_t, 2)
+LOAD_REG_INDEX (h20, fp16, uint64_t, 2)
+
+LOAD_REG_INDEX (h20, fp16, int32_t, 4)
+LOAD_REG_INDEX (h20, fp16, uint32_t, 4)
+LOAD_REG_INDEX (h20, fp16, uint64_t, 4)
+
+LOAD_REG_INDEX (h20, fp16, int32_t, 8)
+LOAD_REG_INDEX (h20, fp16, uint32_t, 8)
+LOAD_REG_INDEX (h20, fp16, uint64_t, 8)
+
+LOAD_REG_INDEX (h20, fp16, int32_t, 16)
+LOAD_REG_INDEX (h20, fp16, uint32_t, 16)
+LOAD_REG_INDEX (h20, fp16, uint64_t, 16)
diff --git a/gcc/testsuite/gcc.target/aarch64/morello/alt-base-load-hi-1.c b/gcc/testsuite/gcc.target/aarch64/morello/alt-base-load-hi-1.c
new file mode 100644
index 00000000000..4338c36d7fc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/morello/alt-base-load-hi-1.c
@@ -0,0 +1,283 @@
+/* { dg-do assemble } */
+/* { dg-additional-options "-save-temps" } */
+/* { dg-final { check-function-bodies "**" ""  { {-O[123s]} } } } */
+/* { dg-skip-if "" { *-*-* } { "-mabi=purecap" "-mfake-capability" } { "" } }  */
+
+#define ALT_BASE
+#include "load-store-utils.h"
+
+/*
+** load_w10_uint16_t_m258:
+**	sub	(c[0-9]+), c0, #258
+**	ldrh	w10, \[\1\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, uint16_t, m258)
+
+/*
+** load_w10_uint16_t_m257:
+**	sub	(c[0-9]+), c0, #257
+**	ldrh	w10, \[\1\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, uint16_t, m257)
+
+/*
+** load_w10_uint16_t_m256:
+**	ldrh	w10, \[c0, #?-256\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, uint16_t, m256)
+
+/*
+** load_w10_uint16_t_m254:
+**	ldrh	w10, \[c0, #?-254\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, uint16_t, m254)
+
+/*
+** load_w10_uint16_t_m2:
+**	ldrh	w10, \[c0, #?-2\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, uint16_t, m2)
+
+/*
+** load_w10_uint16_t_m1:
+**	ldrh	w10, \[c0, #?-1\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, uint16_t, m1)
+
+/*
+** load_w10_uint16_t_1:
+**	ldrh	w10, \[c0, #?1\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, uint16_t, 1)
+
+/*
+** load_w10_uint16_t_2:
+**	ldrh	w10, \[c0, #?2\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, uint16_t, 2)
+
+/*
+** load_w10_uint16_t_254:
+**	ldrh	w10, \[c0, #?254\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, uint16_t, 254)
+
+/*
+** load_w10_uint16_t_255:
+**	ldrh	w10, \[c0, #?255\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, uint16_t, 255)
+
+/*
+** load_w10_uint16_t_256:
+**	add	(c[0-9]+), c0, #?256
+**	ldrh	w10, \[\1\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, uint16_t, 256)
+
+/*
+** load_w10_uint16_t_int32_t_1:
+**	ldrh	w10, \[c0, w1, sxtw\]
+**	ret
+*/
+LOAD_REG_INDEX (w10, uint16_t, int32_t, 1)
+
+/*
+** load_w10_uint16_t_uint32_t_1:
+**	ldrh	w10, \[c0, w1, uxtw\]
+**	ret
+*/
+LOAD_REG_INDEX (w10, uint16_t, uint32_t, 1)
+
+/*
+** load_w10_uint16_t_uint64_t_1:
+**	ldrh	w10, \[c0, x1\]
+**	ret
+*/
+LOAD_REG_INDEX (w10, uint16_t, uint64_t, 1)
+
+/*
+** load_w10_uint16_t_int32_t_2:
+**	ldrh	w10, \[c0, w1, sxtw #?1\]
+**	ret
+*/
+LOAD_REG_INDEX (w10, uint16_t, int32_t, 2)
+
+/*
+** load_w10_uint16_t_uint32_t_2:
+**	ldrh	w10, \[c0, w1, uxtw #?1\]
+**	ret
+*/
+LOAD_REG_INDEX (w10, uint16_t, uint32_t, 2)
+
+/*
+** load_w10_uint16_t_uint64_t_2:
+**	ldrh	w10, \[c0, x1, lsl #?1\]
+**	ret
+*/
+LOAD_REG_INDEX (w10, uint16_t, uint64_t, 2)
+
+/* Check for valid asm, but don't mandate a particular sequence.  */
+LOAD_REG_INDEX (w10, uint16_t, int32_t, 4)
+LOAD_REG_INDEX (w10, uint16_t, uint32_t, 4)
+LOAD_REG_INDEX (w10, uint16_t, uint64_t, 4)
+
+LOAD_REG_INDEX (w10, uint16_t, int32_t, 8)
+LOAD_REG_INDEX (w10, uint16_t, uint32_t, 8)
+LOAD_REG_INDEX (w10, uint16_t, uint64_t, 8)
+
+LOAD_REG_INDEX (w10, uint16_t, int32_t, 16)
+LOAD_REG_INDEX (w10, uint16_t, uint32_t, 16)
+LOAD_REG_INDEX (w10, uint16_t, uint64_t, 16)
+
+/*
+** load_h20_uint16_t_m258:
+**	sub	(c[0-9]+), c0, #258
+**	ldr	h20, \[\1\]
+**	ret
+*/
+LOAD_REG_OFFSET (h20, uint16_t, m258)
+
+/*
+** load_h20_uint16_t_m257:
+**	sub	(c[0-9]+), c0, #257
+**	ldr	h20, \[\1\]
+**	ret
+*/
+LOAD_REG_OFFSET (h20, uint16_t, m257)
+
+/*
+** load_h20_uint16_t_m256:
+**	ldr	h20, \[c0, #?-256\]
+**	ret
+*/
+LOAD_REG_OFFSET (h20, uint16_t, m256)
+
+/*
+** load_h20_uint16_t_m254:
+**	ldr	h20, \[c0, #?-254\]
+**	ret
+*/
+LOAD_REG_OFFSET (h20, uint16_t, m254)
+
+/*
+** load_h20_uint16_t_m2:
+**	ldr	h20, \[c0, #?-2\]
+**	ret
+*/
+LOAD_REG_OFFSET (h20, uint16_t, m2)
+
+/*
+** load_h20_uint16_t_m1:
+**	ldr	h20, \[c0, #?-1\]
+**	ret
+*/
+LOAD_REG_OFFSET (h20, uint16_t, m1)
+
+/*
+** load_h20_uint16_t_1:
+**	ldr	h20, \[c0, #?1\]
+**	ret
+*/
+LOAD_REG_OFFSET (h20, uint16_t, 1)
+
+/*
+** load_h20_uint16_t_2:
+**	ldr	h20, \[c0, #?2\]
+**	ret
+*/
+LOAD_REG_OFFSET (h20, uint16_t, 2)
+
+/*
+** load_h20_uint16_t_254:
+**	ldr	h20, \[c0, #?254\]
+**	ret
+*/
+LOAD_REG_OFFSET (h20, uint16_t, 254)
+
+/*
+** load_h20_uint16_t_255:
+**	ldr	h20, \[c0, #?255\]
+**	ret
+*/
+LOAD_REG_OFFSET (h20, uint16_t, 255)
+
+/*
+** load_h20_uint16_t_256:
+**	add	(c[0-9]+), c0, #?256
+**	ldr	h20, \[\1\]
+**	ret
+*/
+LOAD_REG_OFFSET (h20, uint16_t, 256)
+
+/*
+** load_h20_uint16_t_int32_t_1:
+**	add	(c[0-9]+), c0, w1, sxtw
+**	ldr	h20, \[\1\]
+**	ret
+*/
+LOAD_REG_INDEX (h20, uint16_t, int32_t, 1)
+
+/*
+** load_h20_uint16_t_uint32_t_1:
+**	add	(c[0-9]+), c0, w1, uxtw
+**	ldr	h20, \[\1\]
+**	ret
+*/
+LOAD_REG_INDEX (h20, uint16_t, uint32_t, 1)
+
+/*
+** load_h20_uint16_t_uint64_t_1:
+**	add	(c[0-9]+), c0, x1
+**	ldr	h20, \[\1\]
+**	ret
+*/
+LOAD_REG_INDEX (h20, uint16_t, uint64_t, 1)
+
+/*
+** load_h20_uint16_t_int32_t_2:
+**	add	(c[0-9]+), c0, w1, sxtw #?1
+**	ldr	h20, \[\1\]
+**	ret
+*/
+LOAD_REG_INDEX (h20, uint16_t, int32_t, 2)
+
+/*
+** load_h20_uint16_t_uint32_t_2:
+**	add	(c[0-9]+), c0, w1, uxtw #?1
+**	ldr	h20, \[\1\]
+**	ret
+*/
+LOAD_REG_INDEX (h20, uint16_t, uint32_t, 2)
+
+/*
+** load_h20_uint16_t_uint64_t_2:
+**	add	(c[0-9]+), c0, x1, lsl #?1
+**	ldr	h20, \[\1\]
+**	ret
+*/
+LOAD_REG_INDEX (h20, uint16_t, uint64_t, 2)
+
+/* Check for valid asm, but don't mandate a particular sequence.  */
+LOAD_REG_INDEX (h20, uint16_t, int32_t, 4)
+LOAD_REG_INDEX (h20, uint16_t, uint32_t, 4)
+LOAD_REG_INDEX (h20, uint16_t, uint64_t, 4)
+
+LOAD_REG_INDEX (h20, uint16_t, int32_t, 8)
+LOAD_REG_INDEX (h20, uint16_t, uint32_t, 8)
+LOAD_REG_INDEX (h20, uint16_t, uint64_t, 8)
+
+LOAD_REG_INDEX (h20, uint16_t, int32_t, 16)
+LOAD_REG_INDEX (h20, uint16_t, uint32_t, 16)
+LOAD_REG_INDEX (h20, uint16_t, uint64_t, 16)
diff --git a/gcc/testsuite/gcc.target/aarch64/morello/alt-base-load-qi-1.c b/gcc/testsuite/gcc.target/aarch64/morello/alt-base-load-qi-1.c
new file mode 100644
index 00000000000..5069fa284eb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/morello/alt-base-load-qi-1.c
@@ -0,0 +1,202 @@
+/* { dg-do assemble } */
+/* { dg-additional-options "-save-temps" } */
+/* { dg-final { check-function-bodies "**" ""  { {-O[123s]} } } } */
+/* { dg-skip-if "" { *-*-* } { "-mabi=purecap" "-mfake-capability" } { "" } }  */
+
+#define ALT_BASE
+#include "load-store-utils.h"
+
+/*
+** load_w10_uint8_t_m257:
+**	sub	(c[0-9]+), c0, #257
+**	ldrb	w10, \[\1\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, uint8_t, m257)
+
+/*
+** load_w10_uint8_t_m256:
+**	ldrb	w10, \[c0, #?-256\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, uint8_t, m256)
+
+/*
+** load_w10_uint8_t_m255:
+**	ldrb	w10, \[c0, #?-255\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, uint8_t, m255)
+
+/*
+** load_w10_uint8_t_m1:
+**	ldrb	w10, \[c0, #?-1\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, uint8_t, m1)
+
+/*
+** load_w10_uint8_t_1:
+**	ldrb	w10, \[c0, #?1\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, uint8_t, 1)
+
+/*
+** load_w10_uint8_t_255:
+**	ldrb	w10, \[c0, #?255\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, uint8_t, 255)
+
+/*
+** load_w10_uint8_t_256:
+**	ldrb	w10, \[c0, #?256\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, uint8_t, 256)
+
+/*
+** load_w10_uint8_t_511:
+**	ldrb	w10, \[c0, #?511\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, uint8_t, 511)
+
+/*
+** load_w10_uint8_t_512:
+**	add	(c[0-9]+), c0, #?512
+**	ldrb	w10, \[\1\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, uint8_t, 512)
+
+/*
+** load_w10_uint8_t_int32_t_1:
+**	ldrb	w10, \[c0, w1, sxtw\]
+**	ret
+*/
+LOAD_REG_INDEX (w10, uint8_t, int32_t, 1)
+
+/*
+** load_w10_uint8_t_uint32_t_1:
+**	ldrb	w10, \[c0, w1, uxtw\]
+**	ret
+*/
+LOAD_REG_INDEX (w10, uint8_t, uint32_t, 1)
+
+/*
+** load_w10_uint8_t_uint64_t_1:
+**	ldrb	w10, \[c0, x1\]
+**	ret
+*/
+LOAD_REG_INDEX (w10, uint8_t, uint64_t, 1)
+
+/* Check for valid asm, but don't mandate a particular sequence.  */
+LOAD_REG_INDEX (w10, uint8_t, int32_t, 2)
+LOAD_REG_INDEX (w10, uint8_t, uint32_t, 2)
+LOAD_REG_INDEX (w10, uint8_t, uint64_t, 2)
+
+LOAD_REG_INDEX (w10, uint8_t, int32_t, 4)
+LOAD_REG_INDEX (w10, uint8_t, uint32_t, 4)
+LOAD_REG_INDEX (w10, uint8_t, uint64_t, 4)
+
+LOAD_REG_INDEX (w10, uint8_t, int32_t, 8)
+LOAD_REG_INDEX (w10, uint8_t, uint32_t, 8)
+LOAD_REG_INDEX (w10, uint8_t, uint64_t, 8)
+
+LOAD_REG_INDEX (w10, uint8_t, int32_t, 16)
+LOAD_REG_INDEX (w10, uint8_t, uint32_t, 16)
+LOAD_REG_INDEX (w10, uint8_t, uint64_t, 16)
+
+/*
+** load_b20_uint8_t_m257:
+**	sub	(c[0-9]+), c0, #257
+**	ldr	b20, \[\1\]
+**	ret
+*/
+LOAD_REG_OFFSET (b20, uint8_t, m257)
+
+/*
+** load_b20_uint8_t_m256:
+**	ldr	b20, \[c0, #?-256\]
+**	ret
+*/
+LOAD_REG_OFFSET (b20, uint8_t, m256)
+
+/*
+** load_b20_uint8_t_m255:
+**	ldr	b20, \[c0, #?-255\]
+**	ret
+*/
+LOAD_REG_OFFSET (b20, uint8_t, m255)
+
+/*
+** load_b20_uint8_t_m1:
+**	ldr	b20, \[c0, #?-1\]
+**	ret
+*/
+LOAD_REG_OFFSET (b20, uint8_t, m1)
+
+/*
+** load_b20_uint8_t_1:
+**	ldr	b20, \[c0, #?1\]
+**	ret
+*/
+LOAD_REG_OFFSET (b20, uint8_t, 1)
+
+/*
+** load_b20_uint8_t_255:
+**	ldr	b20, \[c0, #?255\]
+**	ret
+*/
+LOAD_REG_OFFSET (b20, uint8_t, 255)
+
+/*
+** load_b20_uint8_t_256:
+**	add	(c[0-9]+), c0, #?256
+**	ldr	b20, \[\1\]
+**	ret
+*/
+LOAD_REG_OFFSET (b20, uint8_t, 256)
+
+/*
+** load_b20_uint8_t_int32_t_1:
+**	add	(c[0-9]+), c0, w1, sxtw
+**	ldr	b20, \[\1\]
+**	ret
+*/
+LOAD_REG_INDEX (b20, uint8_t, int32_t, 1)
+
+/*
+** load_b20_uint8_t_uint32_t_1:
+**	add	(c[0-9]+), c0, w1, uxtw
+**	ldr	b20, \[\1\]
+**	ret
+*/
+LOAD_REG_INDEX (b20, uint8_t, uint32_t, 1)
+
+/*
+** load_b20_uint8_t_uint64_t_1:
+**	add	(c[0-9]+), c0, x1
+**	ldr	b20, \[\1\]
+**	ret
+*/
+LOAD_REG_INDEX (b20, uint8_t, uint64_t, 1)
+
+/* Check for valid asm, but don't mandate a particular sequence.  */
+LOAD_REG_INDEX (b20, uint8_t, int32_t, 2)
+LOAD_REG_INDEX (b20, uint8_t, uint32_t, 2)
+LOAD_REG_INDEX (b20, uint8_t, uint64_t, 2)
+
+LOAD_REG_INDEX (b20, uint8_t, int32_t, 4)
+LOAD_REG_INDEX (b20, uint8_t, uint32_t, 4)
+LOAD_REG_INDEX (b20, uint8_t, uint64_t, 4)
+
+LOAD_REG_INDEX (b20, uint8_t, int32_t, 8)
+LOAD_REG_INDEX (b20, uint8_t, uint32_t, 8)
+LOAD_REG_INDEX (b20, uint8_t, uint64_t, 8)
+
+LOAD_REG_INDEX (b20, uint8_t, int32_t, 16)
+LOAD_REG_INDEX (b20, uint8_t, uint32_t, 16)
+LOAD_REG_INDEX (b20, uint8_t, uint64_t, 16)
diff --git a/gcc/testsuite/gcc.target/aarch64/morello/alt-base-load-sf-1.c b/gcc/testsuite/gcc.target/aarch64/morello/alt-base-load-sf-1.c
new file mode 100644
index 00000000000..89883f93354
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/morello/alt-base-load-sf-1.c
@@ -0,0 +1,209 @@
+/* { dg-do assemble } */
+/* { dg-additional-options "-save-temps" } */
+/* { dg-final { check-function-bodies "**" ""  { {-O[123s]} } } } */
+/* { dg-skip-if "" { *-*-* } { "-mabi=purecap" "-mfake-capability" } { "" } }  */
+
+#define ALT_BASE
+#include "load-store-utils.h"
+
+/*
+** load_w10_float_m260:
+**	sub	(c[0-9]+), c0, #260
+**	ldr	w10, \[\1\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, float, m260)
+
+/*
+** load_w10_float_m257:
+**	sub	(c[0-9]+), c0, #257
+**	ldr	w10, \[\1\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, float, m257)
+
+/*
+** load_w10_float_m256:
+**	ldr	w10, \[c0, #?-256\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, float, m256)
+
+/*
+** load_w10_float_m252:
+**	ldr	w10, \[c0, #?-252\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, float, m252)
+
+/*
+** load_w10_float_m4:
+**	ldr	w10, \[c0, #?-4\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, float, m4)
+
+/*
+** load_w10_float_m1:
+**	ldr	w10, \[c0, #?-1\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, float, m1)
+
+/*
+** load_w10_float_1:
+**	ldr	w10, \[c0, #?1\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, float, 1)
+
+/*
+** load_w10_float_4:
+**	ldr	w10, \[c0, #?4\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, float, 4)
+
+/*
+** load_w10_float_252:
+**	ldr	w10, \[c0, #?252\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, float, 252)
+
+/*
+** load_w10_float_255:
+**	ldr	w10, \[c0, #?255\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, float, 255)
+
+/*
+** load_w10_float_256:
+**	add	(c[0-9]+), c0, #?256
+**	ldr	w10, \[\1\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, float, 256)
+
+/* Check for valid asm, but don't mandate a particular sequence.  */
+LOAD_REG_INDEX (w10, float, int32_t, 1)
+LOAD_REG_INDEX (w10, float, uint32_t, 1)
+LOAD_REG_INDEX (w10, float, uint64_t, 1)
+
+LOAD_REG_INDEX (w10, float, int32_t, 2)
+LOAD_REG_INDEX (w10, float, uint32_t, 2)
+LOAD_REG_INDEX (w10, float, uint64_t, 2)
+
+LOAD_REG_INDEX (w10, float, int32_t, 4)
+LOAD_REG_INDEX (w10, float, uint32_t, 4)
+LOAD_REG_INDEX (w10, float, uint64_t, 4)
+
+LOAD_REG_INDEX (w10, float, int32_t, 8)
+LOAD_REG_INDEX (w10, float, uint32_t, 8)
+LOAD_REG_INDEX (w10, float, uint64_t, 8)
+
+LOAD_REG_INDEX (w10, float, int32_t, 16)
+LOAD_REG_INDEX (w10, float, uint32_t, 16)
+LOAD_REG_INDEX (w10, float, uint64_t, 16)
+
+/*
+** load_s20_float_m260:
+**	sub	(c[0-9]+), c0, #260
+**	ldr	s20, \[\1\]
+**	ret
+*/
+LOAD_REG_OFFSET (s20, float, m260)
+
+/*
+** load_s20_float_m257:
+**	sub	(c[0-9]+), c0, #257
+**	ldr	s20, \[\1\]
+**	ret
+*/
+LOAD_REG_OFFSET (s20, float, m257)
+
+/*
+** load_s20_float_m256:
+**	ldr	s20, \[c0, #?-256\]
+**	ret
+*/
+LOAD_REG_OFFSET (s20, float, m256)
+
+/*
+** load_s20_float_m252:
+**	ldr	s20, \[c0, #?-252\]
+**	ret
+*/
+LOAD_REG_OFFSET (s20, float, m252)
+
+/*
+** load_s20_float_m4:
+**	ldr	s20, \[c0, #?-4\]
+**	ret
+*/
+LOAD_REG_OFFSET (s20, float, m4)
+
+/*
+** load_s20_float_m1:
+**	ldr	s20, \[c0, #?-1\]
+**	ret
+*/
+LOAD_REG_OFFSET (s20, float, m1)
+
+/*
+** load_s20_float_1:
+**	ldr	s20, \[c0, #?1\]
+**	ret
+*/
+LOAD_REG_OFFSET (s20, float, 1)
+
+/*
+** load_s20_float_4:
+**	ldr	s20, \[c0, #?4\]
+**	ret
+*/
+LOAD_REG_OFFSET (s20, float, 4)
+
+/*
+** load_s20_float_252:
+**	ldr	s20, \[c0, #?252\]
+**	ret
+*/
+LOAD_REG_OFFSET (s20, float, 252)
+
+/*
+** load_s20_float_255:
+**	ldr	s20, \[c0, #?255\]
+**	ret
+*/
+LOAD_REG_OFFSET (s20, float, 255)
+
+/*
+** load_s20_float_256:
+**	add	(c[0-9]+), c0, #?256
+**	ldr	s20, \[\1\]
+**	ret
+*/
+LOAD_REG_OFFSET (s20, float, 256)
+
+/* Check for valid asm, but don't mandate a particular sequence.  */
+LOAD_REG_INDEX (s20, float, int32_t, 1)
+LOAD_REG_INDEX (s20, float, uint32_t, 1)
+LOAD_REG_INDEX (s20, float, uint64_t, 1)
+
+LOAD_REG_INDEX (s20, float, int32_t, 2)
+LOAD_REG_INDEX (s20, float, uint32_t, 2)
+LOAD_REG_INDEX (s20, float, uint64_t, 2)
+
+LOAD_REG_INDEX (s20, float, int32_t, 4)
+LOAD_REG_INDEX (s20, float, uint32_t, 4)
+LOAD_REG_INDEX (s20, float, uint64_t, 4)
+
+LOAD_REG_INDEX (s20, float, int32_t, 8)
+LOAD_REG_INDEX (s20, float, uint32_t, 8)
+LOAD_REG_INDEX (s20, float, uint64_t, 8)
+
+LOAD_REG_INDEX (s20, float, int32_t, 16)
+LOAD_REG_INDEX (s20, float, uint32_t, 16)
+LOAD_REG_INDEX (s20, float, uint64_t, 16)
diff --git a/gcc/testsuite/gcc.target/aarch64/morello/alt-base-load-si-1.c b/gcc/testsuite/gcc.target/aarch64/morello/alt-base-load-si-1.c
new file mode 100644
index 00000000000..33d217419b1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/morello/alt-base-load-si-1.c
@@ -0,0 +1,314 @@
+/* { dg-do assemble } */
+/* { dg-additional-options "-save-temps" } */
+/* { dg-final { check-function-bodies "**" ""  { {-O[123s]} } } } */
+/* { dg-skip-if "" { *-*-* } { "-mabi=purecap" "-mfake-capability" } { "" } }  */
+
+#define ALT_BASE
+#include "load-store-utils.h"
+
+/*
+** load_w10_uint32_t_m260:
+**	sub	(c[0-9]+), c0, #260
+**	ldr	w10, \[\1\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, uint32_t, m260)
+
+/*
+** load_w10_uint32_t_m257:
+**	sub	(c[0-9]+), c0, #257
+**	ldr	w10, \[\1\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, uint32_t, m257)
+
+/*
+** load_w10_uint32_t_m256:
+**	ldr	w10, \[c0, #?-256\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, uint32_t, m256)
+
+/*
+** load_w10_uint32_t_m252:
+**	ldr	w10, \[c0, #?-252\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, uint32_t, m252)
+
+/*
+** load_w10_uint32_t_m4:
+**	ldr	w10, \[c0, #?-4\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, uint32_t, m4)
+
+/*
+** load_w10_uint32_t_m1:
+**	ldr	w10, \[c0, #?-1\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, uint32_t, m1)
+
+/*
+** load_w10_uint32_t_1:
+**	ldr	w10, \[c0, #?1\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, uint32_t, 1)
+
+/*
+** load_w10_uint32_t_4:
+**	ldr	w10, \[c0, #?4\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, uint32_t, 4)
+
+/*
+** load_w10_uint32_t_252:
+**	ldr	w10, \[c0, #?252\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, uint32_t, 252)
+
+/*
+** load_w10_uint32_t_255:
+**	ldr	w10, \[c0, #?255\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, uint32_t, 255)
+
+/*
+** load_w10_uint32_t_256:
+**	ldr	w10, \[c0, #?256\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, uint32_t, 256)
+
+/*
+** load_w10_uint32_t_257:
+**	add	(c[0-9]+), c0, #?256
+**	ldr	w10, \[\1, #?1\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, uint32_t, 257)
+
+/*
+** load_w10_uint32_t_260:
+**	ldr	w10, \[c0, #?260\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, uint32_t, 260)
+
+/*
+** load_w10_uint32_t_2044:
+**	ldr	w10, \[c0, #?2044\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, uint32_t, 2044)
+
+/*
+** load_w10_uint32_t_2048:
+**	add	(c[0-9]+), c0, #?2048
+**	ldr	w10, \[\1\]
+**	ret
+*/
+LOAD_REG_OFFSET (w10, uint32_t, 2048)
+
+/*
+** load_w10_uint32_t_int32_t_1:
+**	ldr	w10, \[c0, w1, sxtw\]
+**	ret
+*/
+LOAD_REG_INDEX (w10, uint32_t, int32_t, 1)
+
+/*
+** load_w10_uint32_t_uint32_t_1:
+**	ldr	w10, \[c0, w1, uxtw\]
+**	ret
+*/
+LOAD_REG_INDEX (w10, uint32_t, uint32_t, 1)
+
+/*
+** load_w10_uint32_t_uint64_t_1:
+**	ldr	w10, \[c0, x1\]
+**	ret
+*/
+LOAD_REG_INDEX (w10, uint32_t, uint64_t, 1)
+
+/* Check for valid asm, but don't mandate a particular sequence.  */
+LOAD_REG_INDEX (w10, uint32_t, int32_t, 2)
+LOAD_REG_INDEX (w10, uint32_t, uint32_t, 2)
+LOAD_REG_INDEX (w10, uint32_t, uint64_t, 2)
+
+/*
+** load_w10_uint32_t_int32_t_4:
+**	ldr	w10, \[c0, w1, sxtw #?2\]
+**	ret
+*/
+LOAD_REG_INDEX (w10, uint32_t, int32_t, 4)
+
+/*
+** load_w10_uint32_t_uint32_t_4:
+**	ldr	w10, \[c0, w1, uxtw #?2\]
+**	ret
+*/
+LOAD_REG_INDEX (w10, uint32_t, uint32_t, 4)
+
+/*
+** load_w10_uint32_t_uint64_t_4:
+**	ldr	w10, \[c0, x1, lsl #?2\]
+**	ret
+*/
+LOAD_REG_INDEX (w10, uint32_t, uint64_t, 4)
+
+/* Check for valid asm, but don't mandate a particular sequence.  */
+LOAD_REG_INDEX (w10, uint32_t, int32_t, 8)
+LOAD_REG_INDEX (w10, uint32_t, uint32_t, 8)
+LOAD_REG_INDEX (w10, uint32_t, uint64_t, 8)
+
+LOAD_REG_INDEX (w10, uint32_t, int32_t, 16)
+LOAD_REG_INDEX (w10, uint32_t, uint32_t, 16)
+LOAD_REG_INDEX (w10, uint32_t, uint64_t, 16)
+
+/*
+** load_s20_uint32_t_m260:
+**	sub	(c[0-9]+), c0, #260
+**	ldr	s20, \[\1\]
+**	ret
+*/
+LOAD_REG_OFFSET (s20, uint32_t, m260)
+
+/*
+** load_s20_uint32_t_m257:
+**	sub	(c[0-9]+), c0, #257
+**	ldr	s20, \[\1\]
+**	ret
+*/
+LOAD_REG_OFFSET (s20, uint32_t, m257)
+
+/*
+** load_s20_uint32_t_m256:
+**	ldr	s20, \[c0, #?-256\]
+**	ret
+*/
+LOAD_REG_OFFSET (s20, uint32_t, m256)
+
+/*
+** load_s20_uint32_t_m252:
+**	ldr	s20, \[c0, #?-252\]
+**	ret
+*/
+LOAD_REG_OFFSET (s20, uint32_t, m252)
+
+/*
+** load_s20_uint32_t_m4:
+**	ldr	s20, \[c0, #?-4\]
+**	ret
+*/
+LOAD_REG_OFFSET (s20, uint32_t, m4)
+
+/*
+** load_s20_uint32_t_m1:
+**	ldr	s20, \[c0, #?-1\]
+**	ret
+*/
+LOAD_REG_OFFSET (s20, uint32_t, m1)
+
+/*
+** load_s20_uint32_t_1:
+**	ldr	s20, \[c0, #?1\]
+**	ret
+*/
+LOAD_REG_OFFSET (s20, uint32_t, 1)
+
+/*
+** load_s20_uint32_t_4:
+**	ldr	s20, \[c0, #?4\]
+**	ret
+*/
+LOAD_REG_OFFSET (s20, uint32_t, 4)
+
+/*
+** load_s20_uint32_t_252:
+**	ldr	s20, \[c0, #?252\]
+**	ret
+*/
+LOAD_REG_OFFSET (s20, uint32_t, 252)
+
+/*
+** load_s20_uint32_t_255:
+**	ldr	s20, \[c0, #?255\]
+**	ret
+*/
+LOAD_REG_OFFSET (s20, uint32_t, 255)
+
+/*
+** load_s20_uint32_t_256:
+**	add	(c[0-9]+), c0, #?256
+**	ldr	s20, \[\1\]
+**	ret
+*/
+LOAD_REG_OFFSET (s20, uint32_t, 256)
+
+/*
+** load_s20_uint32_t_int32_t_1:
+**	add	(c[0-9]+), c0, w1, sxtw
+**	ldr	s20, \[\1\]
+**	ret
+*/
+LOAD_REG_INDEX (s20, uint32_t, int32_t, 1)
+
+/*
+** load_s20_uint32_t_uint32_t_1:
+**	add	(c[0-9]+), c0, w1, uxtw
+**	ldr	s20, \[\1\]
+**	ret
+*/
+LOAD_REG_INDEX (s20, uint32_t, uint32_t, 1)
+
+/*
+** load_s20_uint32_t_uint64_t_1:
+**	add	(c[0-9]+), c0, x1
+**	ldr	s20, \[\1\]
+**	ret
+*/
+LOAD_REG_INDEX (s20, uint32_t, uint64_t, 1)
+
+/* Check for valid asm, but don't mandate a particular sequence.  */
+LOAD_REG_INDEX (s20, uint32_t, int32_t, 2)
+LOAD_REG_INDEX (s20, uint32_t, uint32_t, 2)
+LOAD_REG_INDEX (s20, uint32_t, uint64_t, 2)
+
+/*
+** load_s20_uint32_t_int32_t_4:
+**	add	(c[0-9]+), c0, w1, sxtw #?2
+**	ldr	s20, \[\1\]
+**	ret
+*/
+LOAD_REG_INDEX (s20, uint32_t, int32_t, 4)
+
+/*
+** load_s20_uint32_t_uint32_t_4:
+**	add	(c[0-9]+), c0, w1, uxtw #?2
+**	ldr	s20, \[\1\]
+**	ret
+*/
+LOAD_REG_INDEX (s20, uint32_t, uint32_t, 4)
+
+/*
+** load_s20_uint32_t_uint64_t_4:
+**	add	(c[0-9]+), c0, x1, lsl #?2
+**	ldr	s20, \[\1\]
+**	ret
+*/
+LOAD_REG_INDEX (s20, uint32_t, uint64_t, 4)
+
+/* Check for valid asm, but don't mandate a particular sequence.  */
+LOAD_REG_INDEX (s20, uint32_t, int32_t, 8)
+LOAD_REG_INDEX (s20, uint32_t, uint32_t, 8)
+LOAD_REG_INDEX (s20, uint32_t, uint64_t, 8)
+
+LOAD_REG_INDEX (s20, uint32_t, int32_t, 16)
+LOAD_REG_INDEX (s20, uint32_t, uint32_t, 16)
+LOAD_REG_INDEX (s20, uint32_t, uint64_t, 16)
diff --git a/gcc/testsuite/gcc.target/aarch64/morello/alt-base-store-df-1.c b/gcc/testsuite/gcc.target/aarch64/morello/alt-base-store-df-1.c
new file mode 100644
index 00000000000..257615dda79
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/morello/alt-base-store-df-1.c
@@ -0,0 +1,209 @@
+/* { dg-do assemble } */
+/* { dg-additional-options "-save-temps" } */
+/* { dg-final { check-function-bodies "**" ""  { {-O[123s]} } } } */
+/* { dg-skip-if "" { *-*-* } { "-mabi=purecap" "-mfake-capability" } { "" } }  */
+
+#define ALT_BASE
+#include "load-store-utils.h"
+
+/*
+** store_x10_double_m264:
+**	sub	(c[0-9]+), c0, #264
+**	str	x10, \[\1\]
+**	ret
+*/
+STORE_REG_OFFSET (x10, double, m264)
+
+/*
+** store_x10_double_m257:
+**	sub	(c[0-9]+), c0, #257
+**	str	x10, \[\1\]
+**	ret
+*/
+STORE_REG_OFFSET (x10, double, m257)
+
+/*
+** store_x10_double_m256:
+**	str	x10, \[c0, #?-256\]
+**	ret
+*/
+STORE_REG_OFFSET (x10, double, m256)
+
+/*
+** store_x10_double_m248:
+**	str	x10, \[c0, #?-248\]
+**	ret
+*/
+STORE_REG_OFFSET (x10, double, m248)
+
+/*
+** store_x10_double_m8:
+**	str	x10, \[c0, #?-8\]
+**	ret
+*/
+STORE_REG_OFFSET (x10, double, m8)
+
+/*
+** store_x10_double_m1:
+**	str	x10, \[c0, #?-1\]
+**	ret
+*/
+STORE_REG_OFFSET (x10, double, m1)
+
+/*
+** store_x10_double_1:
+**	str	x10, \[c0, #?1\]
+**	ret
+*/
+STORE_REG_OFFSET (x10, double, 1)
+
+/*
+** store_x10_double_8:
+**	str	x10, \[c0, #?8\]
+**	ret
+*/
+STORE_REG_OFFSET (x10, double, 8)
+
+/*
+** store_x10_double_248:
+**	str	x10, \[c0, #?248\]
+**	ret
+*/
+STORE_REG_OFFSET (x10, double, 248)
+
+/*
+** store_x10_double_255:
+**	str	x10, \[c0, #?255\]
+**	ret
+*/
+STORE_REG_OFFSET (x10, double, 255)
+
+/*
+** store_x10_double_256:
+**	add	(c[0-9]+), c0, #?256
+**	str	x10, \[\1\]
+**	ret
+*/
+STORE_REG_OFFSET (x10, double, 256)
+
+/* Check for valid asm, but don't mandate a particular sequence.  */
+STORE_REG_INDEX (x10, double, int32_t, 1)
+STORE_REG_INDEX (x10, double, uint32_t, 1)
+STORE_REG_INDEX (x10, double, uint64_t, 1)
+
+STORE_REG_INDEX (x10, double, int32_t, 2)
+STORE_REG_INDEX (x10, double, uint32_t, 2)
+STORE_REG_INDEX (x10, double, uint64_t, 2)
+
+STORE_REG_INDEX (x10, double, int32_t, 4)
+STORE_REG_INDEX (x10, double, uint32_t, 4)
+STORE_REG_INDEX (x10, double, uint64_t, 4)
+
+STORE_REG_INDEX (x10, double, int32_t, 8)
+STORE_REG_INDEX (x10, double, uint32_t, 8)
+STORE_REG_INDEX (x10, double, uint64_t, 8)
+
+STORE_REG_INDEX (x10, double, int32_t, 16)
+STORE_REG_INDEX (x10, double, uint32_t, 16)
+STORE_REG_INDEX (x10, double, uint64_t, 16)
+
+/*
+** store_d20_double_m264:
+**	sub	(c[0-9]+), c0, #264
+**	str	d20, \[\1\]
+**	ret
+*/
+STORE_REG_OFFSET (d20, double, m264)
+
+/*
+** store_d20_double_m257:
+**	sub	(c[0-9]+), c0, #257
+**	str	d20, \[\1\]
+**	ret
+*/
+STORE_REG_OFFSET (d20, double, m257)
+
+/*
+** store_d20_double_m256:
+**	str	d20, \[c0, #?-256\]
+**	ret
+*/
+STORE_REG_OFFSET (d20, double, m256)
+
+/*
+** store_d20_double_m248:
+**	str	d20, \[c0, #?-248\]
+**	ret
+*/
+STORE_REG_OFFSET (d20, double, m248)
+
+/*
+** store_d20_double_m8:
+**	str	d20, \[c0, #?-8\]
+**	ret
+*/
+STORE_REG_OFFSET (d20, double, m8)
+
+/*
+** store_d20_double_m1:
+**	str	d20, \[c0, #?-1\]
+**	ret
+*/
+STORE_REG_OFFSET (d20, double, m1)
+
+/*
+** store_d20_double_1:
+**	str	d20, \[c0, #?1\]
+**	ret
+*/
+STORE_REG_OFFSET (d20, double, 1)
+
+/*
+** store_d20_double_8:
+**	str	d20, \[c0, #?8\]
+**	ret
+*/
+STORE_REG_OFFSET (d20, double, 8)
+
+/*
+** store_d20_double_248:
+**	str	d20, \[c0, #?248\]
+**	ret
+*/
+STORE_REG_OFFSET (d20, double, 248)
+
+/*
+** store_d20_double_255:
+**	str	d20, \[c0, #?255\]
+**	ret
+*/
+STORE_REG_OFFSET (d20, double, 255)
+
+/*
+** store_d20_double_256:
+**	add	(c[0-9]+), c0, #?256
+**	str	d20, \[\1\]
+**	ret
+*/
+STORE_REG_OFFSET (d20, double, 256)
+
+/* Check for valid asm, but don't mandate a particular sequence.  */
+STORE_REG_INDEX (d20, double, int32_t, 1)
+STORE_REG_INDEX (d20, double, uint32_t, 1)
+STORE_REG_INDEX (d20, double, uint64_t, 1)
+
+STORE_REG_INDEX (d20, double, int32_t, 2)
+STORE_REG_INDEX (d20, double, uint32_t, 2)
+STORE_REG_INDEX (d20, double, uint64_t, 2)
+
+STORE_REG_INDEX (d20, double, int32_t, 4)
+STORE_REG_INDEX (d20, double, uint32_t, 4)
+STORE_REG_INDEX (d20, double, uint64_t, 4)
+
+STORE_REG_INDEX (d20, double, int32_t, 8)
+STORE_REG_INDEX (d20, double, uint32_t, 8)
+STORE_REG_INDEX (d20, double, uint64_t, 8)
+
+STORE_REG_INDEX (d20, double, int32_t, 16)
+STORE_REG_INDEX (d20, double, uint32_t, 16)
+STORE_REG_INDEX (d20, double, uint64_t, 16)
diff --git a/gcc/testsuite/gcc.target/aarch64/morello/alt-base-store-df-2.c b/gcc/testsuite/gcc.target/aarch64/morello/alt-base-store-df-2.c
new file mode 100644
index 00000000000..04546c251b6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/morello/alt-base-store-df-2.c
@@ -0,0 +1,108 @@
+/* { dg-do assemble } */
+/* { dg-additional-options "-save-temps" } */
+/* { dg-final { check-function-bodies "**" ""  { {-O[123s]} } } } */
+/* { dg-skip-if "" { *-*-* } { "-mabi=purecap" "-mfake-capability" } { "" } }  */
+
+#define ALT_BASE
+#include "load-store-utils.h"
+
+/*
+** store_zero_double_m264:
+**	sub	(c[0-9]+), c0, #264
+**	str	xzr, \[\1\]
+**	ret
+*/
+STORE_ZERO_OFFSET (double, m264)
+
+/*
+** store_zero_double_m257:
+**	sub	(c[0-9]+), c0, #257
+**	str	xzr, \[\1\]
+**	ret
+*/
+STORE_ZERO_OFFSET (double, m257)
+
+/*
+** store_zero_double_m256:
+**	str	xzr, \[c0, #?-256\]
+**	ret
+*/
+STORE_ZERO_OFFSET (double, m256)
+
+/*
+** store_zero_double_m248:
+**	str	xzr, \[c0, #?-248\]
+**	ret
+*/
+STORE_ZERO_OFFSET (double, m248)
+
+/*
+** store_zero_double_m8:
+**	str	xzr, \[c0, #?-8\]
+**	ret
+*/
+STORE_ZERO_OFFSET (double, m8)
+
+/*
+** store_zero_double_m1:
+**	str	xzr, \[c0, #?-1\]
+**	ret
+*/
+STORE_ZERO_OFFSET (double, m1)
+
+/*
+** store_zero_double_1:
+**	str	xzr, \[c0, #?1\]
+**	ret
+*/
+STORE_ZERO_OFFSET (double, 1)
+
+/*
+** store_zero_double_8:
+**	str	xzr, \[c0, #?8\]
+**	ret
+*/
+STORE_ZERO_OFFSET (double, 8)
+
+/*
+** store_zero_double_248:
+**	str	xzr, \[c0, #?248\]
+**	ret
+*/
+STORE_ZERO_OFFSET (double, 248)
+
+/*
+** store_zero_double_255:
+**	str	xzr, \[c0, #?255\]
+**	ret
+*/
+STORE_ZERO_OFFSET (double, 255)
+
+/*
+** store_zero_double_256:
+**	add	(c[0-9]+), c0, #?256
+**	str	xzr, \[\1\]
+**	ret
+*/
+STORE_ZERO_OFFSET (double, 256)
+
+/* Check for valid asm, but don't mandate a particular sequence.  */
+STORE_ZERO_INDEX (double, int32_t, 1)
+STORE_ZERO_INDEX (double, uint32_t, 1)
+STORE_ZERO_INDEX (double, uint64_t, 1)
+
+STORE_ZERO_INDEX (double, int32_t, 2)
+STORE_ZERO_INDEX (double, uint32_t, 2)
+STORE_ZERO_INDEX (double, uint64_t, 2)
+
+STORE_ZERO_INDEX (double, int32_t, 4)
+STORE_ZERO_INDEX (double, uint32_t, 4)
+STORE_ZERO_INDEX (double, uint64_t, 4)
+
+STORE_ZERO_INDEX (double, int32_t, 8)
+STORE_ZERO_INDEX (double, uint32_t, 8)
+STORE_ZERO_INDEX (double, uint64_t, 8)
+
+STORE_ZERO_INDEX (double, int32_t, 16)
+STORE_ZERO_INDEX (double, uint32_t, 16)
+STORE_ZERO_INDEX (double, uint64_t, 16)
diff --git a/gcc/testsuite/gcc.target/aarch64/morello/alt-base-store-di-1.c b/gcc/testsuite/gcc.target/aarch64/morello/alt-base-store-di-1.c
new file mode 100644
index 00000000000..029512d105f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/morello/alt-base-store-di-1.c
@@ -0,0 +1,314 @@
+/* { dg-do assemble } */
+/* { dg-additional-options "-save-temps" } */
+/* { dg-final { check-function-bodies "**" ""  { {-O[123s]} } } } */
+/* { dg-skip-if "" { *-*-* } { "-mabi=purecap" "-mfake-capability" } { "" } }  */
+
+#define ALT_BASE
+#include "load-store-utils.h"
+
+/*
+** store_x10_uint64_t_m264:
+**	sub	(c[0-9]+), c0, #264
+**	str	x10, \[\1\]
+**	ret
+*/
+STORE_REG_OFFSET (x10, uint64_t, m264)
+
+/*
+** store_x10_uint64_t_m257:
+**	sub	(c[0-9]+), c0, #257
+**	str	x10, \[\1\]
+**	ret
+*/
+STORE_REG_OFFSET (x10, uint64_t, m257)
+
+/*
+** store_x10_uint64_t_m256:
+**	str	x10, \[c0, #?-256\]
+**	ret
+*/
+STORE_REG_OFFSET (x10, uint64_t, m256)
+
+/*
+** store_x10_uint64_t_m248:
+**	str	x10, \[c0, #?-248\]
+**	ret
+*/
+STORE_REG_OFFSET (x10, uint64_t, m248)
+
+/*
+** store_x10_uint64_t_m8:
+**	str	x10, \[c0, #?-8\]
+**	ret
+*/
+STORE_REG_OFFSET (x10, uint64_t, m8)
+
+/*
+** store_x10_uint64_t_m1:
+**	str	x10, \[c0, #?-1\]
+**	ret
+*/
+STORE_REG_OFFSET (x10, uint64_t, m1)
+
+/*
+** store_x10_uint64_t_1:
+**	str	x10, \[c0, #?1\]
+**	ret
+*/
+STORE_REG_OFFSET (x10, uint64_t, 1)
+
+/*
+** store_x10_uint64_t_8:
+**	str	x10, \[c0, #?8\]
+**	ret
+*/
+STORE_REG_OFFSET (x10, uint64_t, 8)
+
+/*
+** store_x10_uint64_t_248:
+**	str	x10, \[c0, #?248\]
+**	ret
+*/
+STORE_REG_OFFSET (x10, uint64_t, 248)
+
+/*
+** store_x10_uint64_t_255:
+**	str	x10, \[c0, #?255\]
+**	ret
+*/
+STORE_REG_OFFSET (x10, uint64_t, 255)
+
+/*
+** store_x10_uint64_t_256:
+**	str	x10, \[c0, #?256\]
+**	ret
+*/
+STORE_REG_OFFSET (x10, uint64_t, 256)
+
+/*
+** store_x10_uint64_t_257:
+**	add	(c[0-9]+), c0, #?256
+**	str	x10, \[\1, #?1\]
+**	ret
+*/
+STORE_REG_OFFSET (x10, uint64_t, 257)
+
+/*
+** store_x10_uint64_t_264:
+**	str	x10, \[c0, #?264\]
+**	ret
+*/
+STORE_REG_OFFSET (x10, uint64_t, 264)
+
+/*
+** store_x10_uint64_t_4088:
+**	str	x10, \[c0, #?4088\]
+**	ret
+*/
+STORE_REG_OFFSET (x10, uint64_t, 4088)
+
+/*
+** store_x10_uint64_t_4096:
+**	add	(c[0-9]+), c0, #?4096
+**	str	x10, \[\1\]
+**	ret
+*/
+STORE_REG_OFFSET (x10, uint64_t, 4096)
+
+/*
+** store_x10_uint64_t_int32_t_1:
+**	str	x10, \[c0, w1, sxtw\]
+**	ret
+*/
+STORE_REG_INDEX (x10, uint64_t, int32_t, 1)
+
+/*
+** store_x10_uint64_t_uint32_t_1:
+**	str	x10, \[c0, w1, uxtw\]
+**	ret
+*/
+STORE_REG_INDEX (x10, uint64_t, uint32_t, 1)
+
+/*
+** store_x10_uint64_t_uint64_t_1:
+**	str	x10, \[c0, x1\]
+**	ret
+*/
+STORE_REG_INDEX (x10, uint64_t, uint64_t, 1)
+
+/* Check for valid asm, but don't mandate a particular sequence.  */
+STORE_REG_INDEX (x10, uint64_t, int32_t, 2)
+STORE_REG_INDEX (x10, uint64_t, uint32_t, 2)
+STORE_REG_INDEX (x10, uint64_t, uint64_t, 2)
+
+STORE_REG_INDEX (x10, uint64_t, int32_t, 4)
+STORE_REG_INDEX (x10, uint64_t, uint32_t, 4)
+STORE_REG_INDEX (x10, uint64_t, uint64_t, 4)
+
+/*
+** store_x10_uint64_t_int32_t_8:
+**	str	x10, \[c0, w1, sxtw #?3\]
+**	ret
+*/
+STORE_REG_INDEX (x10, uint64_t, int32_t, 8)
+
+/*
+** store_x10_uint64_t_uint32_t_8:
+**	str	x10, \[c0, w1, uxtw #?3\]
+**	ret
+*/
+STORE_REG_INDEX (x10, uint64_t, uint32_t, 8)
+
+/*
+** store_x10_uint64_t_uint64_t_8:
+**	str	x10, \[c0, x1, lsl #?3\]
+**	ret
+*/
+STORE_REG_INDEX (x10, uint64_t, uint64_t, 8)
+
+/* Check for valid asm, but don't mandate a particular sequence.  */
+STORE_REG_INDEX (x10, uint64_t, int32_t, 16)
+STORE_REG_INDEX (x10, uint64_t, uint32_t, 16)
+STORE_REG_INDEX (x10, uint64_t, uint64_t, 16)
+
+/*
+** store_d20_uint64_t_m264:
+**	sub	(c[0-9]+), c0, #264
+**	str	d20, \[\1\]
+**	ret
+*/
+STORE_REG_OFFSET (d20, uint64_t, m264)
+
+/*
+** store_d20_uint64_t_m257:
+**	sub	(c[0-9]+), c0, #257
+**	str	d20, \[\1\]
+**	ret
+*/
+STORE_REG_OFFSET (d20, uint64_t, m257)
+
+/*
+** store_d20_uint64_t_m256:
+**	str	d20, \[c0, #?-256\]
+**	ret
+*/
+STORE_REG_OFFSET (d20, uint64_t, m256)
+
+/*
+** store_d20_uint64_t_m248:
+**	str	d20, \[c0, #?-248\]
+**	ret
+*/
+STORE_REG_OFFSET (d20, uint64_t, m248)
+
+/*
+** store_d20_uint64_t_m8:
+**	str	d20, \[c0, #?-8\]
+**	ret
+*/
+STORE_REG_OFFSET (d20, uint64_t, m8)
+
+/*
+** store_d20_uint64_t_m1:
+**	str	d20, \[c0, #?-1\]
+**	ret
+*/
+STORE_REG_OFFSET (d20, uint64_t, m1)
+
+/*
+** store_d20_uint64_t_1:
+**	str	d20, \[c0, #?1\]
+**	ret
+*/
+STORE_REG_OFFSET (d20, uint64_t, 1)
+
+/*
+** store_d20_uint64_t_8:
+**	str	d20, \[c0, #?8\]
+**	ret
+*/
+STORE_REG_OFFSET (d20, uint64_t, 8)
+
+/*
+** store_d20_uint64_t_248:
+**	str	d20, \[c0, #?248\]
+**	ret
+*/
+STORE_REG_OFFSET (d20, uint64_t, 248)
+
+/*
+** store_d20_uint64_t_255:
+**	str	d20, \[c0, #?255\]
+**	ret
+*/
+STORE_REG_OFFSET (d20, uint64_t, 255)
+
+/*
+** store_d20_uint64_t_256:
+**	add	(c[0-9]+), c0, #?256
+**	str	d20, \[\1\]
+**	ret
+*/
+STORE_REG_OFFSET (d20, uint64_t, 256)
+
+/*
+** store_d20_uint64_t_int32_t_1:
+**	add	(c[0-9]+), c0, w1, sxtw
+**	str	d20, \[\1\]
+**	ret
+*/
+STORE_REG_INDEX (d20, uint64_t, int32_t, 1)
+
+/*
+** store_d20_uint64_t_uint32_t_1:
+**	add	(c[0-9]+), c0, w1, uxtw
+**	str	d20, \[\1\]
+**	ret
+*/
+STORE_REG_INDEX (d20, uint64_t, uint32_t, 1)
+
+/*
+** store_d20_uint64_t_uint64_t_1:
+**	add	(c[0-9]+), c0, x1
+**	str	d20, \[\1\]
+**	ret
+*/
+STORE_REG_INDEX (d20, uint64_t, uint64_t, 1)
+
+/* Check for valid asm, but don't mandate a particular sequence.  */
+STORE_REG_INDEX (d20, uint64_t, int32_t, 2)
+STORE_REG_INDEX (d20, uint64_t, uint32_t, 2)
+STORE_REG_INDEX (d20, uint64_t, uint64_t, 2)
+
+STORE_REG_INDEX (d20, uint64_t, int32_t, 4)
+STORE_REG_INDEX (d20, uint64_t, uint32_t, 4)
+STORE_REG_INDEX (d20, uint64_t, uint64_t, 4)
+
+/*
+** store_d20_uint64_t_int32_t_8:
+**	add	(c[0-9]+), c0, w1, sxtw #?3
+**	str	d20, \[\1\]
+**	ret
+*/
+STORE_REG_INDEX (d20, uint64_t, int32_t, 8)
+
+/*
+** store_d20_uint64_t_uint32_t_8:
+**	add	(c[0-9]+), c0, w1, uxtw #?3
+**	str	d20, \[\1\]
+**	ret
+*/
+STORE_REG_INDEX (d20, uint64_t, uint32_t, 8)
+
+/*
+** store_d20_uint64_t_uint64_t_8:
+**	add	(c[0-9]+), c0, x1, lsl #?3
+**	str	d20, \[\1\]
+**	ret
+*/
+STORE_REG_INDEX (d20, uint64_t, uint64_t, 8)
+
+/* Check for valid asm, but don't mandate a particular sequence.  */
+STORE_REG_INDEX (d20, uint64_t, int32_t, 16)
+STORE_REG_INDEX (d20, uint64_t, uint32_t, 16)
+STORE_REG_INDEX (d20, uint64_t, uint64_t, 16)
diff --git a/gcc/testsuite/gcc.target/aarch64/morello/alt-base-store-di-2.c b/gcc/testsuite/gcc.target/aarch64/morello/alt-base-store-di-2.c
new file mode 100644
index 00000000000..1e4b75c14ad
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/morello/alt-base-store-di-2.c
@@ -0,0 +1,172 @@
+/* { dg-do assemble } */
+/* { dg-additional-options "-save-temps" } */
+/* { dg-final { check-function-bodies "**" ""  { {-O[123s]} } } } */
+/* { dg-skip-if "" { *-*-* } { "-mabi=purecap" "-mfake-capability" } { "" } }  */
+
+#define ALT_BASE
+#include "load-store-utils.h"
+
+/*
+** store_zero_uint64_t_m264:
+**	sub	(c[0-9]+), c0, #264
+**	str	xzr, \[\1\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint64_t, m264)
+
+/*
+** store_zero_uint64_t_m257:
+**	sub	(c[0-9]+), c0, #257
+**	str	xzr, \[\1\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint64_t, m257)
+
+/*
+** store_zero_uint64_t_m256:
+**	str	xzr, \[c0, #?-256\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint64_t, m256)
+
+/*
+** store_zero_uint64_t_m248:
+**	str	xzr, \[c0, #?-248\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint64_t, m248)
+
+/*
+** store_zero_uint64_t_m8:
+**	str	xzr, \[c0, #?-8\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint64_t, m8)
+
+/*
+** store_zero_uint64_t_m1:
+**	str	xzr, \[c0, #?-1\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint64_t, m1)
+
+/*
+** store_zero_uint64_t_1:
+**	str	xzr, \[c0, #?1\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint64_t, 1)
+
+/*
+** store_zero_uint64_t_8:
+**	str	xzr, \[c0, #?8\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint64_t, 8)
+
+/*
+** store_zero_uint64_t_248:
+**	str	xzr, \[c0, #?248\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint64_t, 248)
+
+/*
+** store_zero_uint64_t_255:
+**	str	xzr, \[c0, #?255\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint64_t, 255)
+
+/*
+** store_zero_uint64_t_256:
+**	str	xzr, \[c0, #?256\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint64_t, 256)
+
+/*
+** store_zero_uint64_t_257:
+**	add	(c[0-9]+), c0, #?256
+**	str	xzr, \[\1, #?1\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint64_t, 257)
+
+/*
+** store_zero_uint64_t_264:
+**	str	xzr, \[c0, #?264\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint64_t, 264)
+
+/*
+** store_zero_uint64_t_4088:
+**	str	xzr, \[c0, #?4088\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint64_t, 4088)
+
+/*
+** store_zero_uint64_t_4096:
+**	add	(c[0-9]+), c0, #?4096
+**	str	xzr, \[\1\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint64_t, 4096)
+
+/*
+** store_zero_uint64_t_int32_t_1:
+**	str	xzr, \[c0, w1, sxtw\]
+**	ret
+*/
+STORE_ZERO_INDEX (uint64_t, int32_t, 1)
+
+/*
+** store_zero_uint64_t_uint32_t_1:
+**	str	xzr, \[c0, w1, uxtw\]
+**	ret
+*/
+STORE_ZERO_INDEX (uint64_t, uint32_t, 1)
+
+/*
+** store_zero_uint64_t_uint64_t_1:
+**	str	xzr, \[c0, x1\]
+**	ret
+*/
+STORE_ZERO_INDEX (uint64_t, uint64_t, 1)
+
+/* Check for valid asm, but don't mandate a particular sequence.  */
+STORE_ZERO_INDEX (uint64_t, int32_t, 2)
+STORE_ZERO_INDEX (uint64_t, uint32_t, 2)
+STORE_ZERO_INDEX (uint64_t, uint64_t, 2)
+
+STORE_ZERO_INDEX (uint64_t, int32_t, 4)
+STORE_ZERO_INDEX (uint64_t, uint32_t, 4)
+STORE_ZERO_INDEX (uint64_t, uint64_t, 4)
+
+/*
+** store_zero_uint64_t_int32_t_8:
+**	str	xzr, \[c0, w1, sxtw #?3\]
+**	ret
+*/
+STORE_ZERO_INDEX (uint64_t, int32_t, 8)
+
+/*
+** store_zero_uint64_t_uint32_t_8:
+**	str	xzr, \[c0, w1, uxtw #?3\]
+**	ret
+*/
+STORE_ZERO_INDEX (uint64_t, uint32_t, 8)
+
+/*
+** store_zero_uint64_t_uint64_t_8:
+**	str	xzr, \[c0, x1, lsl #?3\]
+**	ret
+*/
+STORE_ZERO_INDEX (uint64_t, uint64_t, 8)
+
+/* Check for valid asm, but don't mandate a particular sequence.  */
+STORE_ZERO_INDEX (uint64_t, int32_t, 16)
+STORE_ZERO_INDEX (uint64_t, uint32_t, 16)
+STORE_ZERO_INDEX (uint64_t, uint64_t, 16)
diff --git a/gcc/testsuite/gcc.target/aarch64/morello/alt-base-store-hf-1.c b/gcc/testsuite/gcc.target/aarch64/morello/alt-base-store-hf-1.c
new file mode 100644
index 00000000000..5452854f0a8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/morello/alt-base-store-hf-1.c
@@ -0,0 +1,211 @@
+/* { dg-do assemble } */
+/* { dg-additional-options "-save-temps" } */
+/* { dg-final { check-function-bodies "**" ""  { {-O[123s]} } } } */
+/* { dg-skip-if "" { *-*-* } { "-mabi=purecap" "-mfake-capability" } { "" } }  */
+
+#define ALT_BASE
+#include "load-store-utils.h"
+
+typedef __fp16 fp16;
+
+/*
+** store_w10_fp16_m258:
+**	sub	(c[0-9]+), c0, #258
+**	strh	w10, \[\1\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, fp16, m258)
+
+/*
+** store_w10_fp16_m257:
+**	sub	(c[0-9]+), c0, #257
+**	strh	w10, \[\1\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, fp16, m257)
+
+/*
+** store_w10_fp16_m256:
+**	strh	w10, \[c0, #?-256\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, fp16, m256)
+
+/*
+** store_w10_fp16_m254:
+**	strh	w10, \[c0, #?-254\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, fp16, m254)
+
+/*
+** store_w10_fp16_m2:
+**	strh	w10, \[c0, #?-2\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, fp16, m2)
+
+/*
+** store_w10_fp16_m1:
+**	strh	w10, \[c0, #?-1\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, fp16, m1)
+
+/*
+** store_w10_fp16_1:
+**	strh	w10, \[c0, #?1\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, fp16, 1)
+
+/*
+** store_w10_fp16_2:
+**	strh	w10, \[c0, #?2\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, fp16, 2)
+
+/*
+** store_w10_fp16_254:
+**	strh	w10, \[c0, #?254\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, fp16, 254)
+
+/*
+** store_w10_fp16_255:
+**	strh	w10, \[c0, #?255\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, fp16, 255)
+
+/*
+** store_w10_fp16_256:
+**	add	(c[0-9]+), c0, #?256
+**	strh	w10, \[\1\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, fp16, 256)
+
+/* Check for valid asm, but don't mandate a particular sequence.  */
+STORE_REG_INDEX (w10, fp16, int32_t, 1)
+STORE_REG_INDEX (w10, fp16, uint32_t, 1)
+STORE_REG_INDEX (w10, fp16, uint64_t, 1)
+
+STORE_REG_INDEX (w10, fp16, int32_t, 2)
+STORE_REG_INDEX (w10, fp16, uint32_t, 2)
+STORE_REG_INDEX (w10, fp16, uint64_t, 2)
+
+STORE_REG_INDEX (w10, fp16, int32_t, 4)
+STORE_REG_INDEX (w10, fp16, uint32_t, 4)
+STORE_REG_INDEX (w10, fp16, uint64_t, 4)
+
+STORE_REG_INDEX (w10, fp16, int32_t, 8)
+STORE_REG_INDEX (w10, fp16, uint32_t, 8)
+STORE_REG_INDEX (w10, fp16, uint64_t, 8)
+
+STORE_REG_INDEX (w10, fp16, int32_t, 16)
+STORE_REG_INDEX (w10, fp16, uint32_t, 16)
+STORE_REG_INDEX (w10, fp16, uint64_t, 16)
+
+/*
+** store_h20_fp16_m258:
+**	sub	(c[0-9]+), c0, #258
+**	str	h20, \[\1\]
+**	ret
+*/
+STORE_REG_OFFSET (h20, fp16, m258)
+
+/*
+** store_h20_fp16_m257:
+**	sub	(c[0-9]+), c0, #257
+**	str	h20, \[\1\]
+**	ret
+*/
+STORE_REG_OFFSET (h20, fp16, m257)
+
+/*
+** store_h20_fp16_m256:
+**	str	h20, \[c0, #?-256\]
+**	ret
+*/
+STORE_REG_OFFSET (h20, fp16, m256)
+
+/*
+** store_h20_fp16_m254:
+**	str	h20, \[c0, #?-254\]
+**	ret
+*/
+STORE_REG_OFFSET (h20, fp16, m254)
+
+/*
+** store_h20_fp16_m2:
+**	str	h20, \[c0, #?-2\]
+**	ret
+*/
+STORE_REG_OFFSET (h20, fp16, m2)
+
+/*
+** store_h20_fp16_m1:
+**	str	h20, \[c0, #?-1\]
+**	ret
+*/
+STORE_REG_OFFSET (h20, fp16, m1)
+
+/*
+** store_h20_fp16_1:
+**	str	h20, \[c0, #?1\]
+**	ret
+*/
+STORE_REG_OFFSET (h20, fp16, 1)
+
+/*
+** store_h20_fp16_2:
+**	str	h20, \[c0, #?2\]
+**	ret
+*/
+STORE_REG_OFFSET (h20, fp16, 2)
+
+/*
+** store_h20_fp16_254:
+**	str	h20, \[c0, #?254\]
+**	ret
+*/
+STORE_REG_OFFSET (h20, fp16, 254)
+
+/*
+** store_h20_fp16_255:
+**	str	h20, \[c0, #?255\]
+**	ret
+*/
+STORE_REG_OFFSET (h20, fp16, 255)
+
+/*
+** store_h20_fp16_256:
+**	add	(c[0-9]+), c0, #?256
+**	str	h20, \[\1\]
+**	ret
+*/
+STORE_REG_OFFSET (h20, fp16, 256)
+
+/* Check for valid asm, but don't mandate a particular sequence.  */
+STORE_REG_INDEX (h20, fp16, int32_t, 1)
+STORE_REG_INDEX (h20, fp16, uint32_t, 1)
+STORE_REG_INDEX (h20, fp16, uint64_t, 1)
+
+STORE_REG_INDEX (h20, fp16, int32_t, 2)
+STORE_REG_INDEX (h20, fp16, uint32_t, 2)
+STORE_REG_INDEX (h20, fp16, uint64_t, 2)
+
+STORE_REG_INDEX (h20, fp16, int32_t, 4)
+STORE_REG_INDEX (h20, fp16, uint32_t, 4)
+STORE_REG_INDEX (h20, fp16, uint64_t, 4)
+
+STORE_REG_INDEX (h20, fp16, int32_t, 8)
+STORE_REG_INDEX (h20, fp16, uint32_t, 8)
+STORE_REG_INDEX (h20, fp16, uint64_t, 8)
+
+STORE_REG_INDEX (h20, fp16, int32_t, 16)
+STORE_REG_INDEX (h20, fp16, uint32_t, 16)
+STORE_REG_INDEX (h20, fp16, uint64_t, 16)
diff --git a/gcc/testsuite/gcc.target/aarch64/morello/alt-base-store-hf-2.c b/gcc/testsuite/gcc.target/aarch64/morello/alt-base-store-hf-2.c
new file mode 100644
index 00000000000..44046159209
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/morello/alt-base-store-hf-2.c
@@ -0,0 +1,110 @@
+/* { dg-do assemble } */
+/* { dg-additional-options "-save-temps" } */
+/* { dg-final { check-function-bodies "**" ""  { {-O[123s]} } } } */
+/* { dg-skip-if "" { *-*-* } { "-mabi=purecap" "-mfake-capability" } { "" } }  */
+
+#define ALT_BASE
+#include "load-store-utils.h"
+
+typedef __fp16 fp16;
+
+/*
+** store_zero_fp16_m258:
+**	sub	(c[0-9]+), c0, #258
+**	strh	wzr, \[\1\]
+**	ret
+*/
+STORE_ZERO_OFFSET (fp16, m258)
+
+/*
+** store_zero_fp16_m257:
+**	sub	(c[0-9]+), c0, #257
+**	strh	wzr, \[\1\]
+**	ret
+*/
+STORE_ZERO_OFFSET (fp16, m257)
+
+/*
+** store_zero_fp16_m256:
+**	strh	wzr, \[c0, #?-256\]
+**	ret
+*/
+STORE_ZERO_OFFSET (fp16, m256)
+
+/*
+** store_zero_fp16_m254:
+**	strh	wzr, \[c0, #?-254\]
+**	ret
+*/
+STORE_ZERO_OFFSET (fp16, m254)
+
+/*
+** store_zero_fp16_m2:
+**	strh	wzr, \[c0, #?-2\]
+**	ret
+*/
+STORE_ZERO_OFFSET (fp16, m2)
+
+/*
+** store_zero_fp16_m1:
+**	strh	wzr, \[c0, #?-1\]
+**	ret
+*/
+STORE_ZERO_OFFSET (fp16, m1)
+
+/*
+** store_zero_fp16_1:
+**	strh	wzr, \[c0, #?1\]
+**	ret
+*/
+STORE_ZERO_OFFSET (fp16, 1)
+
+/*
+** store_zero_fp16_2:
+**	strh	wzr, \[c0, #?2\]
+**	ret
+*/
+STORE_ZERO_OFFSET (fp16, 2)
+
+/*
+** store_zero_fp16_254:
+**	strh	wzr, \[c0, #?254\]
+**	ret
+*/
+STORE_ZERO_OFFSET (fp16, 254)
+
+/*
+** store_zero_fp16_255:
+**	strh	wzr, \[c0, #?255\]
+**	ret
+*/
+STORE_ZERO_OFFSET (fp16, 255)
+
+/*
+** store_zero_fp16_256:
+**	add	(c[0-9]+), c0, #?256
+**	strh	wzr, \[\1\]
+**	ret
+*/
+STORE_ZERO_OFFSET (fp16, 256)
+
+/* Check for valid asm, but don't mandate a particular sequence.  */
+STORE_ZERO_INDEX (fp16, int32_t, 1)
+STORE_ZERO_INDEX (fp16, uint32_t, 1)
+STORE_ZERO_INDEX (fp16, uint64_t, 1)
+
+STORE_ZERO_INDEX (fp16, int32_t, 2)
+STORE_ZERO_INDEX (fp16, uint32_t, 2)
+STORE_ZERO_INDEX (fp16, uint64_t, 2)
+
+STORE_ZERO_INDEX (fp16, int32_t, 4)
+STORE_ZERO_INDEX (fp16, uint32_t, 4)
+STORE_ZERO_INDEX (fp16, uint64_t, 4)
+
+STORE_ZERO_INDEX (fp16, int32_t, 8)
+STORE_ZERO_INDEX (fp16, uint32_t, 8)
+STORE_ZERO_INDEX (fp16, uint64_t, 8)
+
+STORE_ZERO_INDEX (fp16, int32_t, 16)
+STORE_ZERO_INDEX (fp16, uint32_t, 16)
+STORE_ZERO_INDEX (fp16, uint64_t, 16)
diff --git a/gcc/testsuite/gcc.target/aarch64/morello/alt-base-store-hi-1.c b/gcc/testsuite/gcc.target/aarch64/morello/alt-base-store-hi-1.c
new file mode 100644
index 00000000000..aa94a0d0891
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/morello/alt-base-store-hi-1.c
@@ -0,0 +1,283 @@
+/* { dg-do assemble } */
+/* { dg-additional-options "-save-temps" } */
+/* { dg-final { check-function-bodies "**" ""  { {-O[123s]} } } } */
+/* { dg-skip-if "" { *-*-* } { "-mabi=purecap" "-mfake-capability" } { "" } }  */
+
+#define ALT_BASE
+#include "load-store-utils.h"
+
+/*
+** store_w10_uint16_t_m258:
+**	sub	(c[0-9]+), c0, #258
+**	strh	w10, \[\1\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, uint16_t, m258)
+
+/*
+** store_w10_uint16_t_m257:
+**	sub	(c[0-9]+), c0, #257
+**	strh	w10, \[\1\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, uint16_t, m257)
+
+/*
+** store_w10_uint16_t_m256:
+**	strh	w10, \[c0, #?-256\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, uint16_t, m256)
+
+/*
+** store_w10_uint16_t_m254:
+**	strh	w10, \[c0, #?-254\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, uint16_t, m254)
+
+/*
+** store_w10_uint16_t_m2:
+**	strh	w10, \[c0, #?-2\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, uint16_t, m2)
+
+/*
+** store_w10_uint16_t_m1:
+**	strh	w10, \[c0, #?-1\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, uint16_t, m1)
+
+/*
+** store_w10_uint16_t_1:
+**	strh	w10, \[c0, #?1\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, uint16_t, 1)
+
+/*
+** store_w10_uint16_t_2:
+**	strh	w10, \[c0, #?2\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, uint16_t, 2)
+
+/*
+** store_w10_uint16_t_254:
+**	strh	w10, \[c0, #?254\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, uint16_t, 254)
+
+/*
+** store_w10_uint16_t_255:
+**	strh	w10, \[c0, #?255\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, uint16_t, 255)
+
+/*
+** store_w10_uint16_t_256:
+**	add	(c[0-9]+), c0, #?256
+**	strh	w10, \[\1\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, uint16_t, 256)
+
+/*
+** store_w10_uint16_t_int32_t_1:
+**	strh	w10, \[c0, w1, sxtw\]
+**	ret
+*/
+STORE_REG_INDEX (w10, uint16_t, int32_t, 1)
+
+/*
+** store_w10_uint16_t_uint32_t_1:
+**	strh	w10, \[c0, w1, uxtw\]
+**	ret
+*/
+STORE_REG_INDEX (w10, uint16_t, uint32_t, 1)
+
+/*
+** store_w10_uint16_t_uint64_t_1:
+**	strh	w10, \[c0, x1\]
+**	ret
+*/
+STORE_REG_INDEX (w10, uint16_t, uint64_t, 1)
+
+/*
+** store_w10_uint16_t_int32_t_2:
+**	strh	w10, \[c0, w1, sxtw #?1\]
+**	ret
+*/
+STORE_REG_INDEX (w10, uint16_t, int32_t, 2)
+
+/*
+** store_w10_uint16_t_uint32_t_2:
+**	strh	w10, \[c0, w1, uxtw #?1\]
+**	ret
+*/
+STORE_REG_INDEX (w10, uint16_t, uint32_t, 2)
+
+/*
+** store_w10_uint16_t_uint64_t_2:
+**	strh	w10, \[c0, x1, lsl #?1\]
+**	ret
+*/
+STORE_REG_INDEX (w10, uint16_t, uint64_t, 2)
+
+/* Check for valid asm, but don't mandate a particular sequence.  */
+STORE_REG_INDEX (w10, uint16_t, int32_t, 4)
+STORE_REG_INDEX (w10, uint16_t, uint32_t, 4)
+STORE_REG_INDEX (w10, uint16_t, uint64_t, 4)
+
+STORE_REG_INDEX (w10, uint16_t, int32_t, 8)
+STORE_REG_INDEX (w10, uint16_t, uint32_t, 8)
+STORE_REG_INDEX (w10, uint16_t, uint64_t, 8)
+
+STORE_REG_INDEX (w10, uint16_t, int32_t, 16)
+STORE_REG_INDEX (w10, uint16_t, uint32_t, 16)
+STORE_REG_INDEX (w10, uint16_t, uint64_t, 16)
+
+/*
+** store_h20_uint16_t_m258:
+**	sub	(c[0-9]+), c0, #258
+**	str	h20, \[\1\]
+**	ret
+*/
+STORE_REG_OFFSET (h20, uint16_t, m258)
+
+/*
+** store_h20_uint16_t_m257:
+**	sub	(c[0-9]+), c0, #257
+**	str	h20, \[\1\]
+**	ret
+*/
+STORE_REG_OFFSET (h20, uint16_t, m257)
+
+/*
+** store_h20_uint16_t_m256:
+**	str	h20, \[c0, #?-256\]
+**	ret
+*/
+STORE_REG_OFFSET (h20, uint16_t, m256)
+
+/*
+** store_h20_uint16_t_m254:
+**	str	h20, \[c0, #?-254\]
+**	ret
+*/
+STORE_REG_OFFSET (h20, uint16_t, m254)
+
+/*
+** store_h20_uint16_t_m2:
+**	str	h20, \[c0, #?-2\]
+**	ret
+*/
+STORE_REG_OFFSET (h20, uint16_t, m2)
+
+/*
+** store_h20_uint16_t_m1:
+**	str	h20, \[c0, #?-1\]
+**	ret
+*/
+STORE_REG_OFFSET (h20, uint16_t, m1)
+
+/*
+** store_h20_uint16_t_1:
+**	str	h20, \[c0, #?1\]
+**	ret
+*/
+STORE_REG_OFFSET (h20, uint16_t, 1)
+
+/*
+** store_h20_uint16_t_2:
+**	str	h20, \[c0, #?2\]
+**	ret
+*/
+STORE_REG_OFFSET (h20, uint16_t, 2)
+
+/*
+** store_h20_uint16_t_254:
+**	str	h20, \[c0, #?254\]
+**	ret
+*/
+STORE_REG_OFFSET (h20, uint16_t, 254)
+
+/*
+** store_h20_uint16_t_255:
+**	str	h20, \[c0, #?255\]
+**	ret
+*/
+STORE_REG_OFFSET (h20, uint16_t, 255)
+
+/*
+** store_h20_uint16_t_256:
+**	add	(c[0-9]+), c0, #?256
+**	str	h20, \[\1\]
+**	ret
+*/
+STORE_REG_OFFSET (h20, uint16_t, 256)
+
+/*
+** store_h20_uint16_t_int32_t_1:
+**	add	(c[0-9]+), c0, w1, sxtw
+**	str	h20, \[\1\]
+**	ret
+*/
+STORE_REG_INDEX (h20, uint16_t, int32_t, 1)
+
+/*
+** store_h20_uint16_t_uint32_t_1:
+**	add	(c[0-9]+), c0, w1, uxtw
+**	str	h20, \[\1\]
+**	ret
+*/
+STORE_REG_INDEX (h20, uint16_t, uint32_t, 1)
+
+/*
+** store_h20_uint16_t_uint64_t_1:
+**	add	(c[0-9]+), c0, x1
+**	str	h20, \[\1\]
+**	ret
+*/
+STORE_REG_INDEX (h20, uint16_t, uint64_t, 1)
+
+/*
+** store_h20_uint16_t_int32_t_2:
+**	add	(c[0-9]+), c0, w1, sxtw #?1
+**	str	h20, \[\1\]
+**	ret
+*/
+STORE_REG_INDEX (h20, uint16_t, int32_t, 2)
+
+/*
+** store_h20_uint16_t_uint32_t_2:
+**	add	(c[0-9]+), c0, w1, uxtw #?1
+**	str	h20, \[\1\]
+**	ret
+*/
+STORE_REG_INDEX (h20, uint16_t, uint32_t, 2)
+
+/*
+** store_h20_uint16_t_uint64_t_2:
+**	add	(c[0-9]+), c0, x1, lsl #?1
+**	str	h20, \[\1\]
+**	ret
+*/
+STORE_REG_INDEX (h20, uint16_t, uint64_t, 2)
+
+/* Check for valid asm, but don't mandate a particular sequence.  */
+STORE_REG_INDEX (h20, uint16_t, int32_t, 4)
+STORE_REG_INDEX (h20, uint16_t, uint32_t, 4)
+STORE_REG_INDEX (h20, uint16_t, uint64_t, 4)
+
+STORE_REG_INDEX (h20, uint16_t, int32_t, 8)
+STORE_REG_INDEX (h20, uint16_t, uint32_t, 8)
+STORE_REG_INDEX (h20, uint16_t, uint64_t, 8)
+
+STORE_REG_INDEX (h20, uint16_t, int32_t, 16)
+STORE_REG_INDEX (h20, uint16_t, uint32_t, 16)
+STORE_REG_INDEX (h20, uint16_t, uint64_t, 16)
diff --git a/gcc/testsuite/gcc.target/aarch64/morello/alt-base-store-hi-2.c b/gcc/testsuite/gcc.target/aarch64/morello/alt-base-store-hi-2.c
new file mode 100644
index 00000000000..dbdce413404
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/morello/alt-base-store-hi-2.c
@@ -0,0 +1,142 @@
+/* { dg-do assemble } */
+/* { dg-additional-options "-save-temps" } */
+/* { dg-final { check-function-bodies "**" ""  { {-O[123s]} } } } */
+/* { dg-skip-if "" { *-*-* } { "-mabi=purecap" "-mfake-capability" } { "" } }  */
+
+#define ALT_BASE
+#include "load-store-utils.h"
+
+/*
+** store_zero_uint16_t_m258:
+**	sub	(c[0-9]+), c0, #258
+**	strh	wzr, \[\1\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint16_t, m258)
+
+/*
+** store_zero_uint16_t_m257:
+**	sub	(c[0-9]+), c0, #257
+**	strh	wzr, \[\1\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint16_t, m257)
+
+/*
+** store_zero_uint16_t_m256:
+**	strh	wzr, \[c0, #?-256\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint16_t, m256)
+
+/*
+** store_zero_uint16_t_m254:
+**	strh	wzr, \[c0, #?-254\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint16_t, m254)
+
+/*
+** store_zero_uint16_t_m2:
+**	strh	wzr, \[c0, #?-2\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint16_t, m2)
+
+/*
+** store_zero_uint16_t_m1:
+**	strh	wzr, \[c0, #?-1\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint16_t, m1)
+
+/*
+** store_zero_uint16_t_1:
+**	strh	wzr, \[c0, #?1\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint16_t, 1)
+
+/*
+** store_zero_uint16_t_2:
+**	strh	wzr, \[c0, #?2\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint16_t, 2)
+
+/*
+** store_zero_uint16_t_254:
+**	strh	wzr, \[c0, #?254\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint16_t, 254)
+
+/*
+** store_zero_uint16_t_255:
+**	strh	wzr, \[c0, #?255\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint16_t, 255)
+
+/*
+** store_zero_uint16_t_256:
+**	add	(c[0-9]+), c0, #?256
+**	strh	wzr, \[\1\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint16_t, 256)
+
+/*
+** store_zero_uint16_t_int32_t_1:
+**	strh	wzr, \[c0, w1, sxtw\]
+**	ret
+*/
+STORE_ZERO_INDEX (uint16_t, int32_t, 1)
+
+/*
+** store_zero_uint16_t_uint32_t_1:
+**	strh	wzr, \[c0, w1, uxtw\]
+**	ret
+*/
+STORE_ZERO_INDEX (uint16_t, uint32_t, 1)
+
+/*
+** store_zero_uint16_t_uint64_t_1:
+**	strh	wzr, \[c0, x1\]
+**	ret
+*/
+STORE_ZERO_INDEX (uint16_t, uint64_t, 1)
+
+/*
+** store_zero_uint16_t_int32_t_2:
+**	strh	wzr, \[c0, w1, sxtw #?1\]
+**	ret
+*/
+STORE_ZERO_INDEX (uint16_t, int32_t, 2)
+
+/*
+** store_zero_uint16_t_uint32_t_2:
+**	strh	wzr, \[c0, w1, uxtw #?1\]
+**	ret
+*/
+STORE_ZERO_INDEX (uint16_t, uint32_t, 2)
+
+/*
+** store_zero_uint16_t_uint64_t_2:
+**	strh	wzr, \[c0, x1, lsl #?1\]
+**	ret
+*/
+STORE_ZERO_INDEX (uint16_t, uint64_t, 2)
+
+/* Check for valid asm, but don't mandate a particular sequence.  */
+STORE_ZERO_INDEX (uint16_t, int32_t, 4)
+STORE_ZERO_INDEX (uint16_t, uint32_t, 4)
+STORE_ZERO_INDEX (uint16_t, uint64_t, 4)
+
+STORE_ZERO_INDEX (uint16_t, int32_t, 8)
+STORE_ZERO_INDEX (uint16_t, uint32_t, 8)
+STORE_ZERO_INDEX (uint16_t, uint64_t, 8)
+
+STORE_ZERO_INDEX (uint16_t, int32_t, 16)
+STORE_ZERO_INDEX (uint16_t, uint32_t, 16)
+STORE_ZERO_INDEX (uint16_t, uint64_t, 16)
diff --git a/gcc/testsuite/gcc.target/aarch64/morello/alt-base-store-qi-1.c b/gcc/testsuite/gcc.target/aarch64/morello/alt-base-store-qi-1.c
new file mode 100644
index 00000000000..b330792ab87
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/morello/alt-base-store-qi-1.c
@@ -0,0 +1,202 @@
+/* { dg-do assemble } */
+/* { dg-additional-options "-save-temps" } */
+/* { dg-final { check-function-bodies "**" ""  { {-O[123s]} } } } */
+/* { dg-skip-if "" { *-*-* } { "-mabi=purecap" "-mfake-capability" } { "" } }  */
+
+#define ALT_BASE
+#include "load-store-utils.h"
+
+/*
+** store_w10_uint8_t_m257:
+**	sub	(c[0-9]+), c0, #257
+**	strb	w10, \[\1\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, uint8_t, m257)
+
+/*
+** store_w10_uint8_t_m256:
+**	strb	w10, \[c0, #?-256\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, uint8_t, m256)
+
+/*
+** store_w10_uint8_t_m255:
+**	strb	w10, \[c0, #?-255\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, uint8_t, m255)
+
+/*
+** store_w10_uint8_t_m1:
+**	strb	w10, \[c0, #?-1\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, uint8_t, m1)
+
+/*
+** store_w10_uint8_t_1:
+**	strb	w10, \[c0, #?1\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, uint8_t, 1)
+
+/*
+** store_w10_uint8_t_255:
+**	strb	w10, \[c0, #?255\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, uint8_t, 255)
+
+/*
+** store_w10_uint8_t_256:
+**	strb	w10, \[c0, #?256\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, uint8_t, 256)
+
+/*
+** store_w10_uint8_t_511:
+**	strb	w10, \[c0, #?511\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, uint8_t, 511)
+
+/*
+** store_w10_uint8_t_512:
+**	add	(c[0-9]+), c0, #?512
+**	strb	w10, \[\1\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, uint8_t, 512)
+
+/*
+** store_w10_uint8_t_int32_t_1:
+**	strb	w10, \[c0, w1, sxtw\]
+**	ret
+*/
+STORE_REG_INDEX (w10, uint8_t, int32_t, 1)
+
+/*
+** store_w10_uint8_t_uint32_t_1:
+**	strb	w10, \[c0, w1, uxtw\]
+**	ret
+*/
+STORE_REG_INDEX (w10, uint8_t, uint32_t, 1)
+
+/*
+** store_w10_uint8_t_uint64_t_1:
+**	strb	w10, \[c0, x1\]
+**	ret
+*/
+STORE_REG_INDEX (w10, uint8_t, uint64_t, 1)
+
+/* Check for valid asm, but don't mandate a particular sequence.  */
+STORE_REG_INDEX (w10, uint8_t, int32_t, 2)
+STORE_REG_INDEX (w10, uint8_t, uint32_t, 2)
+STORE_REG_INDEX (w10, uint8_t, uint64_t, 2)
+
+STORE_REG_INDEX (w10, uint8_t, int32_t, 4)
+STORE_REG_INDEX (w10, uint8_t, uint32_t, 4)
+STORE_REG_INDEX (w10, uint8_t, uint64_t, 4)
+
+STORE_REG_INDEX (w10, uint8_t, int32_t, 8)
+STORE_REG_INDEX (w10, uint8_t, uint32_t, 8)
+STORE_REG_INDEX (w10, uint8_t, uint64_t, 8)
+
+STORE_REG_INDEX (w10, uint8_t, int32_t, 16)
+STORE_REG_INDEX (w10, uint8_t, uint32_t, 16)
+STORE_REG_INDEX (w10, uint8_t, uint64_t, 16)
+
+/*
+** store_b20_uint8_t_m257:
+**	sub	(c[0-9]+), c0, #257
+**	str	b20, \[\1\]
+**	ret
+*/
+STORE_REG_OFFSET (b20, uint8_t, m257)
+
+/*
+** store_b20_uint8_t_m256:
+**	str	b20, \[c0, #?-256\]
+**	ret
+*/
+STORE_REG_OFFSET (b20, uint8_t, m256)
+
+/*
+** store_b20_uint8_t_m255:
+**	str	b20, \[c0, #?-255\]
+**	ret
+*/
+STORE_REG_OFFSET (b20, uint8_t, m255)
+
+/*
+** store_b20_uint8_t_m1:
+**	str	b20, \[c0, #?-1\]
+**	ret
+*/
+STORE_REG_OFFSET (b20, uint8_t, m1)
+
+/*
+** store_b20_uint8_t_1:
+**	str	b20, \[c0, #?1\]
+**	ret
+*/
+STORE_REG_OFFSET (b20, uint8_t, 1)
+
+/*
+** store_b20_uint8_t_255:
+**	str	b20, \[c0, #?255\]
+**	ret
+*/
+STORE_REG_OFFSET (b20, uint8_t, 255)
+
+/*
+** store_b20_uint8_t_256:
+**	add	(c[0-9]+), c0, #?256
+**	str	b20, \[\1\]
+**	ret
+*/
+STORE_REG_OFFSET (b20, uint8_t, 256)
+
+/*
+** store_b20_uint8_t_int32_t_1:
+**	add	(c[0-9]+), c0, w1, sxtw
+**	str	b20, \[\1\]
+**	ret
+*/
+STORE_REG_INDEX (b20, uint8_t, int32_t, 1)
+
+/*
+** store_b20_uint8_t_uint32_t_1:
+**	add	(c[0-9]+), c0, w1, uxtw
+**	str	b20, \[\1\]
+**	ret
+*/
+STORE_REG_INDEX (b20, uint8_t, uint32_t, 1)
+
+/*
+** store_b20_uint8_t_uint64_t_1:
+**	add	(c[0-9]+), c0, x1
+**	str	b20, \[\1\]
+**	ret
+*/
+STORE_REG_INDEX (b20, uint8_t, uint64_t, 1)
+
+/* Check for valid asm, but don't mandate a particular sequence.  */
+STORE_REG_INDEX (b20, uint8_t, int32_t, 2)
+STORE_REG_INDEX (b20, uint8_t, uint32_t, 2)
+STORE_REG_INDEX (b20, uint8_t, uint64_t, 2)
+
+STORE_REG_INDEX (b20, uint8_t, int32_t, 4)
+STORE_REG_INDEX (b20, uint8_t, uint32_t, 4)
+STORE_REG_INDEX (b20, uint8_t, uint64_t, 4)
+
+STORE_REG_INDEX (b20, uint8_t, int32_t, 8)
+STORE_REG_INDEX (b20, uint8_t, uint32_t, 8)
+STORE_REG_INDEX (b20, uint8_t, uint64_t, 8)
+
+STORE_REG_INDEX (b20, uint8_t, int32_t, 16)
+STORE_REG_INDEX (b20, uint8_t, uint32_t, 16)
+STORE_REG_INDEX (b20, uint8_t, uint64_t, 16)
diff --git a/gcc/testsuite/gcc.target/aarch64/morello/alt-base-store-qi-2.c b/gcc/testsuite/gcc.target/aarch64/morello/alt-base-store-qi-2.c
new file mode 100644
index 00000000000..0ddd6f903dd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/morello/alt-base-store-qi-2.c
@@ -0,0 +1,110 @@
+/* { dg-do assemble } */
+/* { dg-additional-options "-save-temps" } */
+/* { dg-final { check-function-bodies "**" ""  { {-O[123s]} } } } */
+/* { dg-skip-if "" { *-*-* } { "-mabi=purecap" "-mfake-capability" } { "" } }  */
+
+#define ALT_BASE
+#include "load-store-utils.h"
+
+/*
+** store_zero_uint8_t_m257:
+**	sub	(c[0-9]+), c0, #257
+**	strb	wzr, \[\1\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint8_t, m257)
+
+/*
+** store_zero_uint8_t_m256:
+**	strb	wzr, \[c0, #?-256\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint8_t, m256)
+
+/*
+** store_zero_uint8_t_m255:
+**	strb	wzr, \[c0, #?-255\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint8_t, m255)
+
+/*
+** store_zero_uint8_t_m1:
+**	strb	wzr, \[c0, #?-1\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint8_t, m1)
+
+/*
+** store_zero_uint8_t_1:
+**	strb	wzr, \[c0, #?1\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint8_t, 1)
+
+/*
+** store_zero_uint8_t_255:
+**	strb	wzr, \[c0, #?255\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint8_t, 255)
+
+/*
+** store_zero_uint8_t_256:
+**	strb	wzr, \[c0, #?256\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint8_t, 256)
+
+/*
+** store_zero_uint8_t_511:
+**	strb	wzr, \[c0, #?511\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint8_t, 511)
+
+/*
+** store_zero_uint8_t_512:
+**	add	(c[0-9]+), c0, #?512
+**	strb	wzr, \[\1\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint8_t, 512)
+
+/*
+** store_zero_uint8_t_int32_t_1:
+**	strb	wzr, \[c0, w1, sxtw\]
+**	ret
+*/
+STORE_ZERO_INDEX (uint8_t, int32_t, 1)
+
+/*
+** store_zero_uint8_t_uint32_t_1:
+**	strb	wzr, \[c0, w1, uxtw\]
+**	ret
+*/
+STORE_ZERO_INDEX (uint8_t, uint32_t, 1)
+
+/*
+** store_zero_uint8_t_uint64_t_1:
+**	strb	wzr, \[c0, x1\]
+**	ret
+*/
+STORE_ZERO_INDEX (uint8_t, uint64_t, 1)
+
+/* Check for valid asm, but don't mandate a particular sequence.  */
+STORE_ZERO_INDEX (uint8_t, int32_t, 2)
+STORE_ZERO_INDEX (uint8_t, uint32_t, 2)
+STORE_ZERO_INDEX (uint8_t, uint64_t, 2)
+
+STORE_ZERO_INDEX (uint8_t, int32_t, 4)
+STORE_ZERO_INDEX (uint8_t, uint32_t, 4)
+STORE_ZERO_INDEX (uint8_t, uint64_t, 4)
+
+STORE_ZERO_INDEX (uint8_t, int32_t, 8)
+STORE_ZERO_INDEX (uint8_t, uint32_t, 8)
+STORE_ZERO_INDEX (uint8_t, uint64_t, 8)
+
+STORE_ZERO_INDEX (uint8_t, int32_t, 16)
+STORE_ZERO_INDEX (uint8_t, uint32_t, 16)
+STORE_ZERO_INDEX (uint8_t, uint64_t, 16)
diff --git a/gcc/testsuite/gcc.target/aarch64/morello/alt-base-store-sf-1.c b/gcc/testsuite/gcc.target/aarch64/morello/alt-base-store-sf-1.c
new file mode 100644
index 00000000000..eb0277212eb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/morello/alt-base-store-sf-1.c
@@ -0,0 +1,209 @@
+/* { dg-do assemble } */
+/* { dg-additional-options "-save-temps" } */
+/* { dg-final { check-function-bodies "**" ""  { {-O[123s]} } } } */
+/* { dg-skip-if "" { *-*-* } { "-mabi=purecap" "-mfake-capability" } { "" } }  */
+
+#define ALT_BASE
+#include "load-store-utils.h"
+
+/*
+** store_w10_float_m260:
+**	sub	(c[0-9]+), c0, #260
+**	str	w10, \[\1\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, float, m260)
+
+/*
+** store_w10_float_m257:
+**	sub	(c[0-9]+), c0, #257
+**	str	w10, \[\1\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, float, m257)
+
+/*
+** store_w10_float_m256:
+**	str	w10, \[c0, #?-256\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, float, m256)
+
+/*
+** store_w10_float_m252:
+**	str	w10, \[c0, #?-252\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, float, m252)
+
+/*
+** store_w10_float_m4:
+**	str	w10, \[c0, #?-4\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, float, m4)
+
+/*
+** store_w10_float_m1:
+**	str	w10, \[c0, #?-1\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, float, m1)
+
+/*
+** store_w10_float_1:
+**	str	w10, \[c0, #?1\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, float, 1)
+
+/*
+** store_w10_float_4:
+**	str	w10, \[c0, #?4\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, float, 4)
+
+/*
+** store_w10_float_252:
+**	str	w10, \[c0, #?252\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, float, 252)
+
+/*
+** store_w10_float_255:
+**	str	w10, \[c0, #?255\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, float, 255)
+
+/*
+** store_w10_float_256:
+**	add	(c[0-9]+), c0, #?256
+**	str	w10, \[\1\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, float, 256)
+
+/* Check for valid asm, but don't mandate a particular sequence.  */
+STORE_REG_INDEX (w10, float, int32_t, 1)
+STORE_REG_INDEX (w10, float, uint32_t, 1)
+STORE_REG_INDEX (w10, float, uint64_t, 1)
+
+STORE_REG_INDEX (w10, float, int32_t, 2)
+STORE_REG_INDEX (w10, float, uint32_t, 2)
+STORE_REG_INDEX (w10, float, uint64_t, 2)
+
+STORE_REG_INDEX (w10, float, int32_t, 4)
+STORE_REG_INDEX (w10, float, uint32_t, 4)
+STORE_REG_INDEX (w10, float, uint64_t, 4)
+
+STORE_REG_INDEX (w10, float, int32_t, 8)
+STORE_REG_INDEX (w10, float, uint32_t, 8)
+STORE_REG_INDEX (w10, float, uint64_t, 8)
+
+STORE_REG_INDEX (w10, float, int32_t, 16)
+STORE_REG_INDEX (w10, float, uint32_t, 16)
+STORE_REG_INDEX (w10, float, uint64_t, 16)
+
+/*
+** store_s20_float_m260:
+**	sub	(c[0-9]+), c0, #260
+**	str	s20, \[\1\]
+**	ret
+*/
+STORE_REG_OFFSET (s20, float, m260)
+
+/*
+** store_s20_float_m257:
+**	sub	(c[0-9]+), c0, #257
+**	str	s20, \[\1\]
+**	ret
+*/
+STORE_REG_OFFSET (s20, float, m257)
+
+/*
+** store_s20_float_m256:
+**	str	s20, \[c0, #?-256\]
+**	ret
+*/
+STORE_REG_OFFSET (s20, float, m256)
+
+/*
+** store_s20_float_m252:
+**	str	s20, \[c0, #?-252\]
+**	ret
+*/
+STORE_REG_OFFSET (s20, float, m252)
+
+/*
+** store_s20_float_m4:
+**	str	s20, \[c0, #?-4\]
+**	ret
+*/
+STORE_REG_OFFSET (s20, float, m4)
+
+/*
+** store_s20_float_m1:
+**	str	s20, \[c0, #?-1\]
+**	ret
+*/
+STORE_REG_OFFSET (s20, float, m1)
+
+/*
+** store_s20_float_1:
+**	str	s20, \[c0, #?1\]
+**	ret
+*/
+STORE_REG_OFFSET (s20, float, 1)
+
+/*
+** store_s20_float_4:
+**	str	s20, \[c0, #?4\]
+**	ret
+*/
+STORE_REG_OFFSET (s20, float, 4)
+
+/*
+** store_s20_float_252:
+**	str	s20, \[c0, #?252\]
+**	ret
+*/
+STORE_REG_OFFSET (s20, float, 252)
+
+/*
+** store_s20_float_255:
+**	str	s20, \[c0, #?255\]
+**	ret
+*/
+STORE_REG_OFFSET (s20, float, 255)
+
+/*
+** store_s20_float_256:
+**	add	(c[0-9]+), c0, #?256
+**	str	s20, \[\1\]
+**	ret
+*/
+STORE_REG_OFFSET (s20, float, 256)
+
+/* Check for valid asm, but don't mandate a particular sequence.  */
+STORE_REG_INDEX (s20, float, int32_t, 1)
+STORE_REG_INDEX (s20, float, uint32_t, 1)
+STORE_REG_INDEX (s20, float, uint64_t, 1)
+
+STORE_REG_INDEX (s20, float, int32_t, 2)
+STORE_REG_INDEX (s20, float, uint32_t, 2)
+STORE_REG_INDEX (s20, float, uint64_t, 2)
+
+STORE_REG_INDEX (s20, float, int32_t, 4)
+STORE_REG_INDEX (s20, float, uint32_t, 4)
+STORE_REG_INDEX (s20, float, uint64_t, 4)
+
+STORE_REG_INDEX (s20, float, int32_t, 8)
+STORE_REG_INDEX (s20, float, uint32_t, 8)
+STORE_REG_INDEX (s20, float, uint64_t, 8)
+
+STORE_REG_INDEX (s20, float, int32_t, 16)
+STORE_REG_INDEX (s20, float, uint32_t, 16)
+STORE_REG_INDEX (s20, float, uint64_t, 16)
diff --git a/gcc/testsuite/gcc.target/aarch64/morello/alt-base-store-sf-2.c b/gcc/testsuite/gcc.target/aarch64/morello/alt-base-store-sf-2.c
new file mode 100644
index 00000000000..ace58f464d0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/morello/alt-base-store-sf-2.c
@@ -0,0 +1,108 @@
+/* { dg-do assemble } */
+/* { dg-additional-options "-save-temps" } */
+/* { dg-final { check-function-bodies "**" ""  { {-O[123s]} } } } */
+/* { dg-skip-if "" { *-*-* } { "-mabi=purecap" "-mfake-capability" } { "" } }  */
+
+#define ALT_BASE
+#include "load-store-utils.h"
+
+/*
+** store_zero_float_m260:
+**	sub	(c[0-9]+), c0, #260
+**	str	wzr, \[\1\]
+**	ret
+*/
+STORE_ZERO_OFFSET (float, m260)
+
+/*
+** store_zero_float_m257:
+**	sub	(c[0-9]+), c0, #257
+**	str	wzr, \[\1\]
+**	ret
+*/
+STORE_ZERO_OFFSET (float, m257)
+
+/*
+** store_zero_float_m256:
+**	str	wzr, \[c0, #?-256\]
+**	ret
+*/
+STORE_ZERO_OFFSET (float, m256)
+
+/*
+** store_zero_float_m252:
+**	str	wzr, \[c0, #?-252\]
+**	ret
+*/
+STORE_ZERO_OFFSET (float, m252)
+
+/*
+** store_zero_float_m4:
+**	str	wzr, \[c0, #?-4\]
+**	ret
+*/
+STORE_ZERO_OFFSET (float, m4)
+
+/*
+** store_zero_float_m1:
+**	str	wzr, \[c0, #?-1\]
+**	ret
+*/
+STORE_ZERO_OFFSET (float, m1)
+
+/*
+** store_zero_float_1:
+**	str	wzr, \[c0, #?1\]
+**	ret
+*/
+STORE_ZERO_OFFSET (float, 1)
+
+/*
+** store_zero_float_4:
+**	str	wzr, \[c0, #?4\]
+**	ret
+*/
+STORE_ZERO_OFFSET (float, 4)
+
+/*
+** store_zero_float_252:
+**	str	wzr, \[c0, #?252\]
+**	ret
+*/
+STORE_ZERO_OFFSET (float, 252)
+
+/*
+** store_zero_float_255:
+**	str	wzr, \[c0, #?255\]
+**	ret
+*/
+STORE_ZERO_OFFSET (float, 255)
+
+/*
+** store_zero_float_256:
+**	add	(c[0-9]+), c0, #?256
+**	str	wzr, \[\1\]
+**	ret
+*/
+STORE_ZERO_OFFSET (float, 256)
+
+/* Check for valid asm, but don't mandate a particular sequence.  */
+STORE_ZERO_INDEX (float, int32_t, 1)
+STORE_ZERO_INDEX (float, uint32_t, 1)
+STORE_ZERO_INDEX (float, uint64_t, 1)
+
+STORE_ZERO_INDEX (float, int32_t, 2)
+STORE_ZERO_INDEX (float, uint32_t, 2)
+STORE_ZERO_INDEX (float, uint64_t, 2)
+
+STORE_ZERO_INDEX (float, int32_t, 4)
+STORE_ZERO_INDEX (float, uint32_t, 4)
+STORE_ZERO_INDEX (float, uint64_t, 4)
+
+STORE_ZERO_INDEX (float, int32_t, 8)
+STORE_ZERO_INDEX (float, uint32_t, 8)
+STORE_ZERO_INDEX (float, uint64_t, 8)
+
+STORE_ZERO_INDEX (float, int32_t, 16)
+STORE_ZERO_INDEX (float, uint32_t, 16)
+STORE_ZERO_INDEX (float, uint64_t, 16)
diff --git a/gcc/testsuite/gcc.target/aarch64/morello/alt-base-store-si-1.c b/gcc/testsuite/gcc.target/aarch64/morello/alt-base-store-si-1.c
new file mode 100644
index 00000000000..1df7b42952a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/morello/alt-base-store-si-1.c
@@ -0,0 +1,314 @@
+/* { dg-do assemble } */
+/* { dg-additional-options "-save-temps" } */
+/* { dg-final { check-function-bodies "**" ""  { {-O[123s]} } } } */
+/* { dg-skip-if "" { *-*-* } { "-mabi=purecap" "-mfake-capability" } { "" } }  */
+
+#define ALT_BASE
+#include "load-store-utils.h"
+
+/*
+** store_w10_uint32_t_m260:
+**	sub	(c[0-9]+), c0, #260
+**	str	w10, \[\1\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, uint32_t, m260)
+
+/*
+** store_w10_uint32_t_m257:
+**	sub	(c[0-9]+), c0, #257
+**	str	w10, \[\1\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, uint32_t, m257)
+
+/*
+** store_w10_uint32_t_m256:
+**	str	w10, \[c0, #?-256\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, uint32_t, m256)
+
+/*
+** store_w10_uint32_t_m252:
+**	str	w10, \[c0, #?-252\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, uint32_t, m252)
+
+/*
+** store_w10_uint32_t_m4:
+**	str	w10, \[c0, #?-4\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, uint32_t, m4)
+
+/*
+** store_w10_uint32_t_m1:
+**	str	w10, \[c0, #?-1\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, uint32_t, m1)
+
+/*
+** store_w10_uint32_t_1:
+**	str	w10, \[c0, #?1\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, uint32_t, 1)
+
+/*
+** store_w10_uint32_t_4:
+**	str	w10, \[c0, #?4\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, uint32_t, 4)
+
+/*
+** store_w10_uint32_t_252:
+**	str	w10, \[c0, #?252\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, uint32_t, 252)
+
+/*
+** store_w10_uint32_t_255:
+**	str	w10, \[c0, #?255\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, uint32_t, 255)
+
+/*
+** store_w10_uint32_t_256:
+**	str	w10, \[c0, #?256\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, uint32_t, 256)
+
+/*
+** store_w10_uint32_t_257:
+**	add	(c[0-9]+), c0, #?256
+**	str	w10, \[\1, #?1\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, uint32_t, 257)
+
+/*
+** store_w10_uint32_t_260:
+**	str	w10, \[c0, #?260\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, uint32_t, 260)
+
+/*
+** store_w10_uint32_t_2044:
+**	str	w10, \[c0, #?2044\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, uint32_t, 2044)
+
+/*
+** store_w10_uint32_t_2048:
+**	add	(c[0-9]+), c0, #?2048
+**	str	w10, \[\1\]
+**	ret
+*/
+STORE_REG_OFFSET (w10, uint32_t, 2048)
+
+/*
+** store_w10_uint32_t_int32_t_1:
+**	str	w10, \[c0, w1, sxtw\]
+**	ret
+*/
+STORE_REG_INDEX (w10, uint32_t, int32_t, 1)
+
+/*
+** store_w10_uint32_t_uint32_t_1:
+**	str	w10, \[c0, w1, uxtw\]
+**	ret
+*/
+STORE_REG_INDEX (w10, uint32_t, uint32_t, 1)
+
+/*
+** store_w10_uint32_t_uint64_t_1:
+**	str	w10, \[c0, x1\]
+**	ret
+*/
+STORE_REG_INDEX (w10, uint32_t, uint64_t, 1)
+
+/* Check for valid asm, but don't mandate a particular sequence.  */
+STORE_REG_INDEX (w10, uint32_t, int32_t, 2)
+STORE_REG_INDEX (w10, uint32_t, uint32_t, 2)
+STORE_REG_INDEX (w10, uint32_t, uint64_t, 2)
+
+/*
+** store_w10_uint32_t_int32_t_4:
+**	str	w10, \[c0, w1, sxtw #?2\]
+**	ret
+*/
+STORE_REG_INDEX (w10, uint32_t, int32_t, 4)
+
+/*
+** store_w10_uint32_t_uint32_t_4:
+**	str	w10, \[c0, w1, uxtw #?2\]
+**	ret
+*/
+STORE_REG_INDEX (w10, uint32_t, uint32_t, 4)
+
+/*
+** store_w10_uint32_t_uint64_t_4:
+**	str	w10, \[c0, x1, lsl #?2\]
+**	ret
+*/
+STORE_REG_INDEX (w10, uint32_t, uint64_t, 4)
+
+/* Check for valid asm, but don't mandate a particular sequence.  */
+STORE_REG_INDEX (w10, uint32_t, int32_t, 8)
+STORE_REG_INDEX (w10, uint32_t, uint32_t, 8)
+STORE_REG_INDEX (w10, uint32_t, uint64_t, 8)
+
+STORE_REG_INDEX (w10, uint32_t, int32_t, 16)
+STORE_REG_INDEX (w10, uint32_t, uint32_t, 16)
+STORE_REG_INDEX (w10, uint32_t, uint64_t, 16)
+
+/*
+** store_s20_uint32_t_m260:
+**	sub	(c[0-9]+), c0, #260
+**	str	s20, \[\1\]
+**	ret
+*/
+STORE_REG_OFFSET (s20, uint32_t, m260)
+
+/*
+** store_s20_uint32_t_m257:
+**	sub	(c[0-9]+), c0, #257
+**	str	s20, \[\1\]
+**	ret
+*/
+STORE_REG_OFFSET (s20, uint32_t, m257)
+
+/*
+** store_s20_uint32_t_m256:
+**	str	s20, \[c0, #?-256\]
+**	ret
+*/
+STORE_REG_OFFSET (s20, uint32_t, m256)
+
+/*
+** store_s20_uint32_t_m252:
+**	str	s20, \[c0, #?-252\]
+**	ret
+*/
+STORE_REG_OFFSET (s20, uint32_t, m252)
+
+/*
+** store_s20_uint32_t_m4:
+**	str	s20, \[c0, #?-4\]
+**	ret
+*/
+STORE_REG_OFFSET (s20, uint32_t, m4)
+
+/*
+** store_s20_uint32_t_m1:
+**	str	s20, \[c0, #?-1\]
+**	ret
+*/
+STORE_REG_OFFSET (s20, uint32_t, m1)
+
+/*
+** store_s20_uint32_t_1:
+**	str	s20, \[c0, #?1\]
+**	ret
+*/
+STORE_REG_OFFSET (s20, uint32_t, 1)
+
+/*
+** store_s20_uint32_t_4:
+**	str	s20, \[c0, #?4\]
+**	ret
+*/
+STORE_REG_OFFSET (s20, uint32_t, 4)
+
+/*
+** store_s20_uint32_t_252:
+**	str	s20, \[c0, #?252\]
+**	ret
+*/
+STORE_REG_OFFSET (s20, uint32_t, 252)
+
+/*
+** store_s20_uint32_t_255:
+**	str	s20, \[c0, #?255\]
+**	ret
+*/
+STORE_REG_OFFSET (s20, uint32_t, 255)
+
+/*
+** store_s20_uint32_t_256:
+**	add	(c[0-9]+), c0, #?256
+**	str	s20, \[\1\]
+**	ret
+*/
+STORE_REG_OFFSET (s20, uint32_t, 256)
+
+/*
+** store_s20_uint32_t_int32_t_1:
+**	add	(c[0-9]+), c0, w1, sxtw
+**	str	s20, \[\1\]
+**	ret
+*/
+STORE_REG_INDEX (s20, uint32_t, int32_t, 1)
+
+/*
+** store_s20_uint32_t_uint32_t_1:
+**	add	(c[0-9]+), c0, w1, uxtw
+**	str	s20, \[\1\]
+**	ret
+*/
+STORE_REG_INDEX (s20, uint32_t, uint32_t, 1)
+
+/*
+** store_s20_uint32_t_uint64_t_1:
+**	add	(c[0-9]+), c0, x1
+**	str	s20, \[\1\]
+**	ret
+*/
+STORE_REG_INDEX (s20, uint32_t, uint64_t, 1)
+
+/* Check for valid asm, but don't mandate a particular sequence.  */
+STORE_REG_INDEX (s20, uint32_t, int32_t, 2)
+STORE_REG_INDEX (s20, uint32_t, uint32_t, 2)
+STORE_REG_INDEX (s20, uint32_t, uint64_t, 2)
+
+/*
+** store_s20_uint32_t_int32_t_4:
+**	add	(c[0-9]+), c0, w1, sxtw #?2
+**	str	s20, \[\1\]
+**	ret
+*/
+STORE_REG_INDEX (s20, uint32_t, int32_t, 4)
+
+/*
+** store_s20_uint32_t_uint32_t_4:
+**	add	(c[0-9]+), c0, w1, uxtw #?2
+**	str	s20, \[\1\]
+**	ret
+*/
+STORE_REG_INDEX (s20, uint32_t, uint32_t, 4)
+
+/*
+** store_s20_uint32_t_uint64_t_4:
+**	add	(c[0-9]+), c0, x1, lsl #?2
+**	str	s20, \[\1\]
+**	ret
+*/
+STORE_REG_INDEX (s20, uint32_t, uint64_t, 4)
+
+/* Check for valid asm, but don't mandate a particular sequence.  */
+STORE_REG_INDEX (s20, uint32_t, int32_t, 8)
+STORE_REG_INDEX (s20, uint32_t, uint32_t, 8)
+STORE_REG_INDEX (s20, uint32_t, uint64_t, 8)
+
+STORE_REG_INDEX (s20, uint32_t, int32_t, 16)
+STORE_REG_INDEX (s20, uint32_t, uint32_t, 16)
+STORE_REG_INDEX (s20, uint32_t, uint64_t, 16)
diff --git a/gcc/testsuite/gcc.target/aarch64/morello/alt-base-store-si-2.c b/gcc/testsuite/gcc.target/aarch64/morello/alt-base-store-si-2.c
new file mode 100644
index 00000000000..5aacf58fc5e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/morello/alt-base-store-si-2.c
@@ -0,0 +1,172 @@
+/* { dg-do assemble } */
+/* { dg-additional-options "-save-temps" } */
+/* { dg-final { check-function-bodies "**" ""  { {-O[123s]} } } } */
+/* { dg-skip-if "" { *-*-* } { "-mabi=purecap" "-mfake-capability" } { "" } }  */
+
+#define ALT_BASE
+#include "load-store-utils.h"
+
+/*
+** store_zero_uint32_t_m260:
+**	sub	(c[0-9]+), c0, #260
+**	str	wzr, \[\1\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint32_t, m260)
+
+/*
+** store_zero_uint32_t_m257:
+**	sub	(c[0-9]+), c0, #257
+**	str	wzr, \[\1\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint32_t, m257)
+
+/*
+** store_zero_uint32_t_m256:
+**	str	wzr, \[c0, #?-256\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint32_t, m256)
+
+/*
+** store_zero_uint32_t_m252:
+**	str	wzr, \[c0, #?-252\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint32_t, m252)
+
+/*
+** store_zero_uint32_t_m4:
+**	str	wzr, \[c0, #?-4\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint32_t, m4)
+
+/*
+** store_zero_uint32_t_m1:
+**	str	wzr, \[c0, #?-1\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint32_t, m1)
+
+/*
+** store_zero_uint32_t_1:
+**	str	wzr, \[c0, #?1\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint32_t, 1)
+
+/*
+** store_zero_uint32_t_4:
+**	str	wzr, \[c0, #?4\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint32_t, 4)
+
+/*
+** store_zero_uint32_t_252:
+**	str	wzr, \[c0, #?252\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint32_t, 252)
+
+/*
+** store_zero_uint32_t_255:
+**	str	wzr, \[c0, #?255\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint32_t, 255)
+
+/*
+** store_zero_uint32_t_256:
+**	str	wzr, \[c0, #?256\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint32_t, 256)
+
+/*
+** store_zero_uint32_t_257:
+**	add	(c[0-9]+), c0, #?256
+**	str	wzr, \[\1, #?1\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint32_t, 257)
+
+/*
+** store_zero_uint32_t_260:
+**	str	wzr, \[c0, #?260\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint32_t, 260)
+
+/*
+** store_zero_uint32_t_2044:
+**	str	wzr, \[c0, #?2044\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint32_t, 2044)
+
+/*
+** store_zero_uint32_t_2048:
+**	add	(c[0-9]+), c0, #?2048
+**	str	wzr, \[\1\]
+**	ret
+*/
+STORE_ZERO_OFFSET (uint32_t, 2048)
+
+/*
+** store_zero_uint32_t_int32_t_1:
+**	str	wzr, \[c0, w1, sxtw\]
+**	ret
+*/
+STORE_ZERO_INDEX (uint32_t, int32_t, 1)
+
+/*
+** store_zero_uint32_t_uint32_t_1:
+**	str	wzr, \[c0, w1, uxtw\]
+**	ret
+*/
+STORE_ZERO_INDEX (uint32_t, uint32_t, 1)
+
+/*
+** store_zero_uint32_t_uint64_t_1:
+**	str	wzr, \[c0, x1\]
+**	ret
+*/
+STORE_ZERO_INDEX (uint32_t, uint64_t, 1)
+
+/* Check for valid asm, but don't mandate a particular sequence.  */
+STORE_ZERO_INDEX (uint32_t, int32_t, 2)
+STORE_ZERO_INDEX (uint32_t, uint32_t, 2)
+STORE_ZERO_INDEX (uint32_t, uint64_t, 2)
+
+/*
+** store_zero_uint32_t_int32_t_4:
+**	str	wzr, \[c0, w1, sxtw #?2\]
+**	ret
+*/
+STORE_ZERO_INDEX (uint32_t, int32_t, 4)
+
+/*
+** store_zero_uint32_t_uint32_t_4:
+**	str	wzr, \[c0, w1, uxtw #?2\]
+**	ret
+*/
+STORE_ZERO_INDEX (uint32_t, uint32_t, 4)
+
+/*
+** store_zero_uint32_t_uint64_t_4:
+**	str	wzr, \[c0, x1, lsl #?2\]
+**	ret
+*/
+STORE_ZERO_INDEX (uint32_t, uint64_t, 4)
+
+/* Check for valid asm, but don't mandate a particular sequence.  */
+STORE_ZERO_INDEX (uint32_t, int32_t, 8)
+STORE_ZERO_INDEX (uint32_t, uint32_t, 8)
+STORE_ZERO_INDEX (uint32_t, uint64_t, 8)
+
+STORE_ZERO_INDEX (uint32_t, int32_t, 16)
+STORE_ZERO_INDEX (uint32_t, uint32_t, 16)
+STORE_ZERO_INDEX (uint32_t, uint64_t, 16)
diff --git a/gcc/testsuite/gcc.target/aarch64/morello/load-store-utils.h b/gcc/testsuite/gcc.target/aarch64/morello/load-store-utils.h
index f15197a4c1f..22dae5e9649 100644
--- a/gcc/testsuite/gcc.target/aarch64/morello/load-store-utils.h
+++ b/gcc/testsuite/gcc.target/aarch64/morello/load-store-utils.h
@@ -1,82 +1,96 @@
 #include <stdint.h>
 #include <stddef.h>
 
+#ifdef ALT_BASE
+#define CAP __capability
+#else
+#define CAP /*nothing*/
+#endif
+
 #define m272 -272
+#define m264 -264
+#define m260 -260
+#define m258 -258
 #define m257 -257
 #define m256 -256
 #define m255 -255
-#define m240 -240
+#define m254 -254
+#define m252 -252
+#define m248 -248
 #define m16 -16
+#define m8 -8
+#define m4 -4
+#define m2 -2
 #define m1 -1
 
 #define LOAD_REG_OFFSET(REG, TYPE, OFFSET)				\
   void									\
-  load_##REG##_##TYPE##_##OFFSET (char *base)				\
+  load_##REG##_##TYPE##_##OFFSET (char *CAP base)			\
   {									\
     register TYPE reg asm (#REG);					\
-    TYPE *ptr = (TYPE *) (base + OFFSET);				\
+    TYPE *CAP ptr = (TYPE *CAP) (base + OFFSET);			\
     asm volatile ("" : "=rw" (reg) : "0" (*ptr));			\
   }
 
 #define LOAD_REG_INDEX(REG, TYPE, INDEX_TYPE, SCALE)			\
   void									\
-  load_##REG##_##TYPE##_##INDEX_TYPE##_##SCALE (char *base,		\
+  load_##REG##_##TYPE##_##INDEX_TYPE##_##SCALE (char *CAP base,		\
 						INDEX_TYPE index)	\
   {									\
     register TYPE reg asm (#REG);					\
     ptrdiff_t byte_index = (ptrdiff_t) index * SCALE;			\
-    TYPE *ptr = (TYPE *) (base + byte_index);				\
+    TYPE *CAP ptr = (TYPE *CAP) (base + byte_index);			\
     asm volatile ("" : "=rw" (reg) : "0" (*ptr));			\
   }
 
 #define STORE_REG_OFFSET(REG, TYPE, OFFSET)				\
   void									\
-  store_##REG##_##TYPE##_##OFFSET (char *base)				\
+  store_##REG##_##TYPE##_##OFFSET (char *CAP base)			\
   {									\
     register TYPE reg asm (#REG);					\
-    TYPE *ptr = (TYPE *) (base + OFFSET);				\
+    TYPE *CAP ptr = (TYPE *CAP) (base + OFFSET);			\
     asm ("" : "=rw" (reg));						\
     *ptr = reg;								\
   }
 
 #define STORE_REG_INDEX(REG, TYPE, INDEX_TYPE, SCALE)			\
   void									\
-  store_##REG##_##TYPE##_##INDEX_TYPE##_##SCALE (char *base,		\
+  store_##REG##_##TYPE##_##INDEX_TYPE##_##SCALE (char *CAP base,	\
 						 INDEX_TYPE index)	\
   {									\
     register TYPE reg asm (#REG);					\
     ptrdiff_t byte_index = (ptrdiff_t) index * SCALE;			\
-    TYPE *ptr = (TYPE *) (base + byte_index);				\
+    TYPE *CAP ptr = (TYPE *CAP) (base + byte_index);			\
     asm ("" : "=rw" (reg));						\
     *ptr = reg;								\
   }
 
 #define STORE_ZERO_OFFSET(TYPE, OFFSET)					\
   void									\
-  store_zero_##TYPE##_##OFFSET (char *base)				\
+  store_zero_##TYPE##_##OFFSET (char *CAP base)				\
   {									\
-    TYPE *ptr = (TYPE *) (base + OFFSET);				\
+    TYPE *CAP ptr = (TYPE *CAP) (base + OFFSET);			\
     *ptr = 0;								\
   }
 
 #define STORE_REG_INDEX(REG, TYPE, INDEX_TYPE, SCALE)			\
   void									\
-  store_##REG##_##TYPE##_##INDEX_TYPE##_##SCALE (char *base,		\
+  store_##REG##_##TYPE##_##INDEX_TYPE##_##SCALE (char *CAP base,	\
 						 INDEX_TYPE index)	\
   {									\
     register TYPE reg asm (#REG);					\
     ptrdiff_t byte_index = (ptrdiff_t) index * SCALE;			\
-    TYPE *ptr = (TYPE *) (base + byte_index);				\
+    TYPE *CAP ptr = (TYPE *CAP) (base + byte_index);			\
     asm ("" : "=rw" (reg));						\
     *ptr = reg;								\
   }
 
 #define STORE_ZERO_INDEX(TYPE, INDEX_TYPE, SCALE)			\
   void									\
-  store_zero_##TYPE##_##INDEX_TYPE##_##SCALE (char *base,		\
+  store_zero_##TYPE##_##INDEX_TYPE##_##SCALE (char *CAP base,		\
 					      INDEX_TYPE index)		\
   {									\
     ptrdiff_t byte_index = (ptrdiff_t) index * SCALE;			\
-    TYPE *ptr = (TYPE *) (base + byte_index);				\
+    TYPE *CAP ptr = (TYPE *CAP) (base + byte_index);			\
     *ptr = 0;								\
   }


^ permalink raw reply	[flat|nested] only message in thread

only message in thread, other threads:[~2022-05-05 12:05 UTC | newest]

Thread overview: (only message) (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-05-05 12:05 [gcc(refs/vendors/ARM/heads/morello)] Fix alternative-base addresses for 8- to 64-bit scalar moves Matthew Malcomson

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).