* [gcc(refs/vendors/ARM/heads/morello)] aarch64: Tighten representation of 128-bit splits
@ 2022-05-05 12:07 Matthew Malcomson
From: Matthew Malcomson @ 2022-05-05 12:07 UTC (permalink / raw)
  To: gcc-cvs

https://gcc.gnu.org/g:dfa7cbd4380f2edf73bf7cf20434b597993526fa

commit dfa7cbd4380f2edf73bf7cf20434b597993526fa
Author: Richard Sandiford <richard.sandiford@arm.com>
Date:   Fri Apr 8 15:36:46 2022 +0100

    aarch64: Tighten representation of 128-bit splits
    
    The code that splits GPR<->FPR moves would sometimes
    create things like zero_extend:TF, but that isn't AFAIK
    valid RTL: zero_extend is an integer operation.
    
    We don't really need TI and TF versions of those patterns
    since the splitter can use TI regardless of the registers'
    original modes.
    
    Doing this allows a follow-on patch to use the same
    routines for vector splits, which in turn helps the
    handling of alternative-base vector memories.

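As a schematic illustration of the RTL shapes involved (not taken from the
patch or from a compiler dump; register numbers and subreg details are
omitted), the low-half GPR->FPR pattern changes from roughly

    (set (reg:TF v0) (zero_extend:TF (reg:DI x0)))   ;; zero_extend applied to a float mode

to

    (set (reg:TI v0) (zero_extend:TI (reg:DI x0)))   ;; integer-only zero_extend, as required

with the splitter converting the TFmode operand to TImode via gen_lowpart
before matching the pattern.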
Diff:
---
 gcc/config/aarch64/aarch64.c  | 18 ++++++++++--------
 gcc/config/aarch64/aarch64.md | 20 ++++++++++----------
 2 files changed, 20 insertions(+), 18 deletions(-)

diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index 4471fb4ff3f..cab081fc1d0 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -3674,20 +3674,22 @@ aarch64_split_128bit_move (rtx dst, rtx src)
       /* Handle FP <-> GP regs.  */
       if (FP_REGNUM_P (dst_regno) && GP_REGNUM_P (src_regno))
 	{
-	  src_lo = gen_lowpart (word_mode, src);
-	  src_hi = gen_highpart (word_mode, src);
+	  src_lo = gen_lowpart (DImode, src);
+	  src_hi = gen_highpart (DImode, src);
+	  dst = gen_lowpart (TImode, dst);
 
-	  emit_insn (gen_aarch64_movlow_di (mode, dst, src_lo));
-	  emit_insn (gen_aarch64_movhigh_di (mode, dst, src_hi));
+	  emit_insn (gen_aarch64_movtilow_di (dst, src_lo));
+	  emit_insn (gen_aarch64_movtihigh_di (dst, src_hi));
 	  return;
 	}
       else if (GP_REGNUM_P (dst_regno) && FP_REGNUM_P (src_regno))
 	{
-	  dst_lo = gen_lowpart (word_mode, dst);
-	  dst_hi = gen_highpart (word_mode, dst);
+	  dst_lo = gen_lowpart (DImode, dst);
+	  dst_hi = gen_highpart (DImode, dst);
+	  src = gen_lowpart (TImode, src);
 
-	  emit_insn (gen_aarch64_movdi_low (mode, dst_lo, src));
-	  emit_insn (gen_aarch64_movdi_high (mode, dst_hi, src));
+	  emit_insn (gen_aarch64_movdi_tilow (dst_lo, src));
+	  emit_insn (gen_aarch64_movdi_tihigh (dst_hi, src));
 	  return;
 	}
     }
diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
index a4ca87f660a..759b0fec974 100644
--- a/gcc/config/aarch64/aarch64.md
+++ b/gcc/config/aarch64/aarch64.md
@@ -6795,9 +6795,9 @@
 ;; after or during reload as we don't want these patterns to start
 ;; kicking in during the combiner.
 
-(define_insn "@aarch64_movdi_<mode>low"
+(define_insn "aarch64_movdi_tilow"
   [(set (match_operand:DI 0 "register_operand" "=r")
-	(zero_extract:DI (match_operand:TX 1 "register_operand" "w")
+	(zero_extract:DI (match_operand:TI 1 "register_operand" "w")
 			 (const_int 64) (const_int 0)))]
   "TARGET_FLOAT && (reload_completed || reload_in_progress)"
   "fmov\\t%x0, %d1"
@@ -6805,9 +6805,9 @@
    (set_attr "length" "4")
   ])
 
-(define_insn "@aarch64_movdi_<mode>high"
+(define_insn "aarch64_movdi_tihigh"
   [(set (match_operand:DI 0 "register_operand" "=r")
-	(zero_extract:DI (match_operand:TX 1 "register_operand" "w")
+	(zero_extract:DI (match_operand:TI 1 "register_operand" "w")
 			 (const_int 64) (const_int 64)))]
   "TARGET_FLOAT && (reload_completed || reload_in_progress)"
   "fmov\\t%x0, %1.d[1]"
@@ -6815,19 +6815,19 @@
    (set_attr "length" "4")
   ])
 
-(define_insn "@aarch64_mov<mode>high_di"
-  [(set (zero_extract:TX (match_operand:TX 0 "register_operand" "+w")
+(define_insn "aarch64_movtihigh_di"
+  [(set (zero_extract:TI (match_operand:TI 0 "register_operand" "+w")
                          (const_int 64) (const_int 64))
-        (zero_extend:TX (match_operand:DI 1 "register_operand" "r")))]
+        (zero_extend:TI (match_operand:DI 1 "register_operand" "r")))]
   "TARGET_FLOAT && (reload_completed || reload_in_progress)"
   "fmov\\t%0.d[1], %x1"
   [(set_attr "type" "f_mcr")
    (set_attr "length" "4")
   ])
 
-(define_insn "@aarch64_mov<mode>low_di"
-  [(set (match_operand:TX 0 "register_operand" "=w")
-        (zero_extend:TX (match_operand:DI 1 "register_operand" "r")))]
+(define_insn "aarch64_movtilow_di"
+  [(set (match_operand:TI 0 "register_operand" "=w")
+        (zero_extend:TI (match_operand:DI 1 "register_operand" "r")))]
   "TARGET_FLOAT && (reload_completed || reload_in_progress)"
   "fmov\\t%d0, %x1"
   [(set_attr "type" "f_mcr")


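As an illustrative example (derived from the insn templates above rather than
from actual compiler output), splitting a 128-bit FPR->GPR move that copies q0
into the x0/x1 pair now goes through the TImode-only patterns and emits:

    fmov    x0, d0          // aarch64_movdi_tilow: low 64 bits
    fmov    x1, v0.d[1]     // aarch64_movdi_tihigh: high 64 bits

The reverse GPR->FPR direction uses aarch64_movtilow_di and
aarch64_movtihigh_di, giving "fmov d0, x0" and "fmov v0.d[1], x1".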