From: Michael Collison <Michael.Collison@arm.com>
To: "gcc-patches@gcc.gnu.org" <gcc-patches@gcc.gnu.org>
Cc: nd <nd@arm.com>, "rth@redhat.com" <rth@redhat.com>,
James Greenhalgh <James.Greenhalgh@arm.com>
Subject: [PATCH][Aarch64] Add support for overflow add and sub operations
Date: Wed, 30 Nov 2016 23:06:00 -0000
Message-ID: <HE1PR0802MB2377F28869A4E53BA8235772958C0@HE1PR0802MB2377.eurprd08.prod.outlook.com>
[-- Attachment #1: Type: text/plain, Size: 1915 bytes --]
Hi,
This patch improves code generation for the builtin arithmetic overflow operations in the aarch64 backend. As an example, for a simple test case such as:
int
f (int x, int y, int *ovf)
{
  int res;
  *ovf = __builtin_sadd_overflow (x, y, &res);
  return res;
}
Current trunk at -O2 generates:
f:
        mov     w3, w0
        mov     w4, 0
        add     w0, w0, w1
        tbnz    w1, #31, .L4
        cmp     w0, w3
        blt     .L3
.L2:
        str     w4, [x2]
        ret
        .p2align 3
.L4:
        cmp     w0, w3
        ble     .L2
.L3:
        mov     w4, 1
        b       .L2
With the patch this now generates:
f:
        adds    w0, w0, w1
        cset    w1, vs
        str     w1, [x2]
        ret
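For comparison, the unsigned case goes through the new uaddv<mode>4 expander and tests the carry flag rather than the overflow flag. A minimal sketch of such a test (illustrative only, not part of the submitted patch or its testsuite) would be:
/* Unsigned counterpart of the test above.  With the patch this is
   expected to become an "adds" followed by a "cset" on the carry
   flag, instead of an explicit compare sequence.  */
unsigned int
g (unsigned int x, unsigned int y, int *ovf)
{
  unsigned int res;
  *ovf = __builtin_uadd_overflow (x, y, &res);
  return res;
}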
Tested on aarch64-linux-gnu with no regressions. Okay for trunk?
2016-11-30 Michael Collison <michael.collison@arm.com>
Richard Henderson <rth@redhat.com>
* config/aarch64/aarch64-modes.def (CC_V): New.
* config/aarch64/aarch64.c (aarch64_select_cc_mode): Test
for signed overflow using CC_Vmode.
(aarch64_get_condition_code_1): Handle CC_Vmode.
* config/aarch64/aarch64.md (addv<GPI>4, uaddv<GPI>4): New.
(addti3): Create simpler code if low part is already known to be 0.
(addvti4, uaddvti4): New.
(*add<GPI>3_compareC_cconly_imm): New.
(*add<GPI>3_compareC_cconly): New.
(*add<GPI>3_compareC_imm): New.
(*add<GPI>3_compareC): Rename from add<GPI>3_compare1; do not
handle constants within this pattern.
(*add<GPI>3_compareV_cconly_imm): New.
(*add<GPI>3_compareV_cconly): New.
(*add<GPI>3_compareV_imm): New.
(add<GPI>3_compareV): New.
(add<GPI>3_carryinC, add<GPI>3_carryinV): New.
(*add<GPI>3_carryinC_zero, *add<GPI>3_carryinV_zero): New.
(*add<GPI>3_carryinC, *add<GPI>3_carryinV): New.
(subv<GPI>4, usubv<GPI>4): New.
(subti3): Handle op1 zero.
(subvti4, usubvti4): New.
(*sub<GPI>3_compare1_imm): New.
(sub<GPI>3_carryinCV): New.
(*sub<GPI>3_carryinCV_z1_z2, *sub<GPI>3_carryinCV_z1): New.
(*sub<GPI>3_carryinCV_z2, *sub<GPI>3_carryinCV): New.
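The new TImode expanders listed above (addvti4, uaddvti4, subvti4, usubvti4) can be exercised with a 128-bit variant of the same test. A minimal sketch (illustrative only, not part of the submitted patch or its testsuite):
/* 128-bit signed add with overflow check.  This is assumed to expand
   through the new addvti4 pattern, i.e. an adds/adcs pair on the two
   halves followed by a branch or cset on the V flag.  */
__int128
h (__int128 x, __int128 y, int *ovf)
{
  __int128 res;
  *ovf = __builtin_add_overflow (x, y, &res);
  return res;
}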
[-- Attachment #2: rth_overflow_ipreview1.patch --]
[-- Type: application/octet-stream, Size: 21909 bytes --]
diff --git a/gcc/config/aarch64/aarch64-modes.def b/gcc/config/aarch64/aarch64-modes.def
index de8227f..71c2069 100644
--- a/gcc/config/aarch64/aarch64-modes.def
+++ b/gcc/config/aarch64/aarch64-modes.def
@@ -24,6 +24,7 @@ CC_MODE (CC_SWP);
CC_MODE (CC_NZ); /* Only N and Z bits of condition flags are valid. */
CC_MODE (CC_Z); /* Only Z bit of condition flags is valid. */
CC_MODE (CC_C); /* Only C bit of condition flags is valid. */
+CC_MODE (CC_V); /* Only V bit of condition flags is valid. */
/* Half-precision floating point for __fp16. */
FLOAT_MODE (HF, 2, 0);
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index 6078b16..e020d24 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -4324,6 +4324,13 @@ aarch64_select_cc_mode (RTX_CODE code, rtx x, rtx y)
&& GET_CODE (y) == ZERO_EXTEND)
return CC_Cmode;
+ /* A test for signed overflow. */
+ if ((GET_MODE (x) == DImode || GET_MODE (x) == TImode)
+ && code == NE
+ && GET_CODE (x) == PLUS
+ && GET_CODE (y) == SIGN_EXTEND)
+ return CC_Vmode;
+
/* For everything else, return CCmode. */
return CCmode;
}
@@ -4430,6 +4437,15 @@ aarch64_get_condition_code_1 (enum machine_mode mode, enum rtx_code comp_code)
}
break;
+ case CC_Vmode:
+ switch (comp_code)
+ {
+ case NE: return AARCH64_VS;
+ case EQ: return AARCH64_VC;
+ default: return -1;
+ }
+ break;
+
default:
return -1;
break;
diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
index 6afaf90..a074341 100644
--- a/gcc/config/aarch64/aarch64.md
+++ b/gcc/config/aarch64/aarch64.md
@@ -1720,25 +1720,155 @@
}
)
+(define_expand "addv<mode>4"
+ [(match_operand:GPI 0 "register_operand")
+ (match_operand:GPI 1 "register_operand")
+ (match_operand:GPI 2 "register_operand")
+ (match_operand 3 "")]
+ ""
+{
+ emit_insn (gen_add<mode>3_compareV (operands[0], operands[1], operands[2]));
+
+ rtx x;
+ x = gen_rtx_NE (VOIDmode, gen_rtx_REG (CC_Vmode, CC_REGNUM), const0_rtx);
+ x = gen_rtx_IF_THEN_ELSE (VOIDmode, x,
+ gen_rtx_LABEL_REF (VOIDmode, operands[3]),
+ pc_rtx);
+ emit_jump_insn (gen_rtx_SET (pc_rtx, x));
+ DONE;
+})
+
+(define_expand "uaddv<mode>4"
+ [(match_operand:GPI 0 "register_operand")
+ (match_operand:GPI 1 "register_operand")
+ (match_operand:GPI 2 "register_operand")
+ (match_operand 3 "")]
+ ""
+{
+ emit_insn (gen_add<mode>3_compareC (operands[0], operands[1], operands[2]));
+
+ rtx x;
+ x = gen_rtx_NE (VOIDmode, gen_rtx_REG (CC_Cmode, CC_REGNUM), const0_rtx);
+ x = gen_rtx_IF_THEN_ELSE (VOIDmode, x,
+ gen_rtx_LABEL_REF (VOIDmode, operands[3]),
+ pc_rtx);
+ emit_jump_insn (gen_rtx_SET (pc_rtx, x));
+ DONE;
+})
+
+
(define_expand "addti3"
[(set (match_operand:TI 0 "register_operand" "")
(plus:TI (match_operand:TI 1 "register_operand" "")
- (match_operand:TI 2 "register_operand" "")))]
+ (match_operand:TI 2 "aarch64_reg_or_imm" "")))]
""
{
- rtx low = gen_reg_rtx (DImode);
- emit_insn (gen_adddi3_compareC (low, gen_lowpart (DImode, operands[1]),
- gen_lowpart (DImode, operands[2])));
+ rtx l0 = gen_reg_rtx (DImode);
+ rtx l1 = gen_lowpart (DImode, operands[1]);
+ rtx l2 = simplify_gen_subreg (DImode, operands[2], TImode,
+ subreg_lowpart_offset (DImode, TImode));
+ rtx h0 = gen_reg_rtx (DImode);
+ rtx h1 = gen_highpart (DImode, operands[1]);
+ rtx h2 = simplify_gen_subreg (DImode, operands[2], TImode,
+ subreg_highpart_offset (DImode, TImode));
- rtx high = gen_reg_rtx (DImode);
- emit_insn (gen_adddi3_carryin (high, gen_highpart (DImode, operands[1]),
- gen_highpart (DImode, operands[2])));
+ if (l2 == const0_rtx)
+ {
+ l0 = l1;
+ if (!aarch64_pluslong_operand (h2, DImode))
+ h2 = force_reg (DImode, h2);
+ emit_insn (gen_adddi3 (h0, h1, h2));
+ }
+ else
+ {
+ emit_insn (gen_adddi3_compareC (l0, l1, force_reg (DImode, l2)));
+ emit_insn (gen_adddi3_carryin (h0, h1, force_reg (DImode, h2)));
+ }
+
+ emit_move_insn (gen_lowpart (DImode, operands[0]), l0);
+ emit_move_insn (gen_highpart (DImode, operands[0]), h0);
- emit_move_insn (gen_lowpart (DImode, operands[0]), low);
- emit_move_insn (gen_highpart (DImode, operands[0]), high);
DONE;
})
+(define_expand "addvti4"
+ [(match_operand:TI 0 "register_operand" "")
+ (match_operand:TI 1 "register_operand" "")
+ (match_operand:TI 2 "aarch64_reg_or_imm" "")
+ (match_operand 3 "")]
+ ""
+{
+ rtx l0 = gen_reg_rtx (DImode);
+ rtx l1 = gen_lowpart (DImode, operands[1]);
+ rtx l2 = simplify_gen_subreg (DImode, operands[2], TImode,
+ subreg_lowpart_offset (DImode, TImode));
+ rtx h0 = gen_reg_rtx (DImode);
+ rtx h1 = gen_highpart (DImode, operands[1]);
+ rtx h2 = simplify_gen_subreg (DImode, operands[2], TImode,
+ subreg_highpart_offset (DImode, TImode));
+
+ if (l2 == const0_rtx)
+ {
+ l0 = l1;
+ emit_insn (gen_adddi3_compareV (h0, h1, force_reg (DImode, h2)));
+ }
+ else
+ {
+ emit_insn (gen_adddi3_compareC (l0, l1, force_reg (DImode, l2)));
+ emit_insn (gen_adddi3_carryinV (h0, h1, force_reg (DImode, h2)));
+ }
+
+ emit_move_insn (gen_lowpart (DImode, operands[0]), l0);
+ emit_move_insn (gen_highpart (DImode, operands[0]), h0);
+
+ rtx x;
+ x = gen_rtx_NE (VOIDmode, gen_rtx_REG (CC_Vmode, CC_REGNUM), const0_rtx);
+ x = gen_rtx_IF_THEN_ELSE (VOIDmode, x,
+ gen_rtx_LABEL_REF (VOIDmode, operands[3]),
+ pc_rtx);
+ emit_jump_insn (gen_rtx_SET (pc_rtx, x));
+ DONE;
+})
+
+(define_expand "uaddvti4"
+ [(match_operand:TI 0 "register_operand" "")
+ (match_operand:TI 1 "register_operand" "")
+ (match_operand:TI 2 "aarch64_reg_or_imm" "")
+ (match_operand 3 "")]
+ ""
+{
+ rtx l0 = gen_reg_rtx (DImode);
+ rtx l1 = gen_lowpart (DImode, operands[1]);
+ rtx l2 = simplify_gen_subreg (DImode, operands[2], TImode,
+ subreg_lowpart_offset (DImode, TImode));
+ rtx h0 = gen_reg_rtx (DImode);
+ rtx h1 = gen_highpart (DImode, operands[1]);
+ rtx h2 = simplify_gen_subreg (DImode, operands[2], TImode,
+ subreg_highpart_offset (DImode, TImode));
+
+ if (l2 == const0_rtx)
+ {
+ l0 = l1;
+ emit_insn (gen_adddi3_compareC (h0, h1, force_reg (DImode, h2)));
+ }
+ else
+ {
+ emit_insn (gen_adddi3_compareC (l0, l1, force_reg (DImode, l2)));
+ emit_insn (gen_adddi3_carryinC (h0, h1, force_reg (DImode, h2)));
+ }
+
+ emit_move_insn (gen_lowpart (DImode, operands[0]), l0);
+ emit_move_insn (gen_highpart (DImode, operands[0]), h0);
+
+ rtx x;
+ x = gen_rtx_NE (VOIDmode, gen_rtx_REG (CC_Cmode, CC_REGNUM), const0_rtx);
+ x = gen_rtx_IF_THEN_ELSE (VOIDmode, x,
+ gen_rtx_LABEL_REF (VOIDmode, operands[3]),
+ pc_rtx);
+ emit_jump_insn (gen_rtx_SET (pc_rtx, x));
+ DONE;
+ })
+
(define_insn "add<mode>3_compare0"
[(set (reg:CC_NZ CC_REGNUM)
(compare:CC_NZ
@@ -1837,6 +1967,66 @@
[(set_attr "type" "alus_sreg")]
)
+;; Note that since we're sign-extending, match the immediate in GPI
+;; rather than in DWI. Since CONST_INT is modeless, this works fine.
+(define_insn "*add<mode>3_compareV_cconly_imm"
+ [(set (reg:CC_V CC_REGNUM)
+ (ne:CC_V
+ (plus:<DWI>
+ (sign_extend:<DWI> (match_operand:GPI 0 "register_operand" "r,r"))
+ (match_operand:GPI 1 "aarch64_plus_immediate" "I,J"))
+ (sign_extend:<DWI> (plus:GPI (match_dup 0) (match_dup 1)))))]
+ ""
+ "@
+ cmn\\t%<w>0, %<w>1
+ cmp\\t%<w>0, #%n1"
+ [(set_attr "type" "alus_imm")]
+)
+
+(define_insn "*add<mode>3_compareV_cconly"
+ [(set (reg:CC_V CC_REGNUM)
+ (ne:CC_V
+ (plus:<DWI>
+ (sign_extend:<DWI> (match_operand:GPI 0 "register_operand" "r"))
+ (sign_extend:<DWI> (match_operand:GPI 1 "register_operand" "r")))
+ (sign_extend:<DWI> (plus:GPI (match_dup 0) (match_dup 1)))))]
+ ""
+ "cmn\\t%<w>0, %<w>1"
+ [(set_attr "type" "alus_sreg")]
+)
+
+(define_insn "*add<mode>3_compareV_imm"
+ [(set (reg:CC_V CC_REGNUM)
+ (ne:CC_V
+ (plus:<DWI>
+ (sign_extend:<DWI>
+ (match_operand:GPI 1 "register_operand" "r,r"))
+ (match_operand:GPI 2 "aarch64_plus_immediate" "I,J"))
+ (sign_extend:<DWI>
+ (plus:GPI (match_dup 1) (match_dup 2)))))
+ (set (match_operand:GPI 0 "register_operand" "=r,r")
+ (plus:GPI (match_dup 1) (match_dup 2)))]
+ ""
+ "@
+ adds\\t%<w>0, %<w>1, %<w>2
+ subs\\t%<w>0, %<w>1, #%n2"
+ [(set_attr "type" "alus_imm,alus_imm")]
+)
+
+(define_insn "add<mode>3_compareV"
+ [(set (reg:CC_V CC_REGNUM)
+ (ne:CC_V
+ (plus:<DWI>
+ (sign_extend:<DWI> (match_operand:GPI 1 "register_operand" "r"))
+ (sign_extend:<DWI> (match_operand:GPI 2 "register_operand" "r")))
+ (sign_extend:<DWI> (plus:GPI (match_dup 1) (match_dup 2)))))
+ (set (match_operand:GPI 0 "register_operand" "=r")
+ (plus:GPI (match_dup 1) (match_dup 2)))]
+ ""
+ "adds\\t%<w>0, %<w>1, %<w>2"
+ [(set_attr "type" "alus_sreg")]
+ )
+
(define_insn "*adds_shift_imm_<mode>"
[(set (reg:CC_NZ CC_REGNUM)
(compare:CC_NZ
@@ -2196,6 +2386,138 @@
[(set_attr "type" "adc_reg")]
)
+(define_expand "add<mode>3_carryinC"
+ [(parallel
+ [(set (match_dup 3)
+ (ne:CC_C
+ (plus:<DWI>
+ (plus:<DWI>
+ (match_dup 4)
+ (zero_extend:<DWI>
+ (match_operand:GPI 1 "register_operand" "r")))
+ (zero_extend:<DWI>
+ (match_operand:GPI 2 "register_operand" "r")))
+ (zero_extend:<DWI>
+ (plus:GPI
+ (plus:GPI (match_dup 5) (match_dup 1))
+ (match_dup 2)))))
+ (set (match_operand:GPI 0 "register_operand")
+ (plus:GPI
+ (plus:GPI (match_dup 5) (match_dup 1))
+ (match_dup 2)))])]
+ ""
+{
+ operands[3] = gen_rtx_REG (CC_Cmode, CC_REGNUM);
+ operands[4] = gen_rtx_NE (<DWI>mode, operands[3], const0_rtx);
+ operands[5] = gen_rtx_NE (<MODE>mode, operands[3], const0_rtx);
+})
+
+(define_insn "*add<mode>3_carryinC_zero"
+ [(set (reg:CC_C CC_REGNUM)
+ (ne:CC_C
+ (plus:<DWI>
+ (match_operand:<DWI> 2 "aarch64_carry_operation" "")
+ (zero_extend:<DWI> (match_operand:GPI 1 "register_operand" "r")))
+ (zero_extend:<DWI>
+ (plus:GPI
+ (match_operand:GPI 3 "aarch64_carry_operation" "")
+ (match_dup 1)))))
+ (set (match_operand:GPI 0 "register_operand")
+ (plus:GPI (match_dup 3) (match_dup 1)))]
+ ""
+ "adcs\\t%<w>0, %<w>1, <w>zr"
+ [(set_attr "type" "adc_reg")]
+)
+
+(define_insn "*add<mode>3_carryinC"
+ [(set (reg:CC_C CC_REGNUM)
+ (ne:CC_C
+ (plus:<DWI>
+ (plus:<DWI>
+ (match_operand:<DWI> 3 "aarch64_carry_operation" "")
+ (zero_extend:<DWI> (match_operand:GPI 1 "register_operand" "r")))
+ (zero_extend:<DWI> (match_operand:GPI 2 "register_operand" "r")))
+ (zero_extend:<DWI>
+ (plus:GPI
+ (plus:GPI
+ (match_operand:GPI 4 "aarch64_carry_operation" "")
+ (match_dup 1))
+ (match_dup 2)))))
+ (set (match_operand:GPI 0 "register_operand")
+ (plus:GPI
+ (plus:GPI (match_dup 4) (match_dup 1))
+ (match_dup 2)))]
+ ""
+ "adcs\\t%<w>0, %<w>1, %<w>2"
+ [(set_attr "type" "adc_reg")]
+)
+
+(define_expand "add<mode>3_carryinV"
+ [(parallel
+ [(set (reg:CC_V CC_REGNUM)
+ (ne:CC_V
+ (plus:<DWI>
+ (plus:<DWI>
+ (match_dup 3)
+ (sign_extend:<DWI>
+ (match_operand:GPI 1 "register_operand" "r")))
+ (sign_extend:<DWI>
+ (match_operand:GPI 2 "register_operand" "r")))
+ (sign_extend:<DWI>
+ (plus:GPI
+ (plus:GPI (match_dup 4) (match_dup 1))
+ (match_dup 2)))))
+ (set (match_operand:GPI 0 "register_operand")
+ (plus:GPI
+ (plus:GPI (match_dup 4) (match_dup 1))
+ (match_dup 2)))])]
+ ""
+{
+ rtx cc = gen_rtx_REG (CC_Cmode, CC_REGNUM);
+ operands[3] = gen_rtx_NE (<DWI>mode, cc, const0_rtx);
+ operands[4] = gen_rtx_NE (<MODE>mode, cc, const0_rtx);
+})
+
+(define_insn "*add<mode>3_carryinV_zero"
+ [(set (reg:CC_V CC_REGNUM)
+ (ne:CC_V
+ (plus:<DWI>
+ (match_operand:<DWI> 2 "aarch64_carry_operation" "")
+ (sign_extend:<DWI> (match_operand:GPI 1 "register_operand" "r")))
+ (sign_extend:<DWI>
+ (plus:GPI
+ (match_operand:GPI 3 "aarch64_carry_operation" "")
+ (match_dup 1)))))
+ (set (match_operand:GPI 0 "register_operand")
+ (plus:GPI (match_dup 3) (match_dup 1)))]
+ ""
+ "adcs\\t%<w>0, %<w>1, <w>zr"
+ [(set_attr "type" "adc_reg")]
+)
+
+(define_insn "*add<mode>3_carryinV"
+ [(set (reg:CC_V CC_REGNUM)
+ (ne:CC_V
+ (plus:<DWI>
+ (plus:<DWI>
+ (match_operand:<DWI> 3 "aarch64_carry_operation" "")
+ (sign_extend:<DWI> (match_operand:GPI 1 "register_operand" "r")))
+ (sign_extend:<DWI> (match_operand:GPI 2 "register_operand" "r")))
+ (sign_extend:<DWI>
+ (plus:GPI
+ (plus:GPI
+ (match_operand:GPI 4 "aarch64_carry_operation" "")
+ (match_dup 1))
+ (match_dup 2)))))
+ (set (match_operand:GPI 0 "register_operand")
+ (plus:GPI
+ (plus:GPI (match_dup 4) (match_dup 1))
+ (match_dup 2)))]
+ ""
+ "adcs\\t%<w>0, %<w>1, %<w>2"
+ [(set_attr "type" "adc_reg")]
+)
+
(define_insn "*add_uxt<mode>_shift2"
[(set (match_operand:GPI 0 "register_operand" "=rk")
(plus:GPI (and:GPI
@@ -2292,22 +2614,158 @@
(set_attr "simd" "*,yes")]
)
+(define_expand "subv<mode>4"
+ [(match_operand:GPI 0 "register_operand")
+ (match_operand:GPI 1 "aarch64_reg_or_zero")
+ (match_operand:GPI 2 "aarch64_reg_or_zero")
+ (match_operand 3 "")]
+ ""
+{
+ emit_insn (gen_sub<mode>3_compare1 (operands[0], operands[1], operands[2]));
+
+ rtx x;
+ x = gen_rtx_NE (VOIDmode, gen_rtx_REG (CC_Vmode, CC_REGNUM), const0_rtx);
+ x = gen_rtx_IF_THEN_ELSE (VOIDmode, x,
+ gen_rtx_LABEL_REF (VOIDmode, operands[3]),
+ pc_rtx);
+ emit_jump_insn (gen_rtx_SET (pc_rtx, x));
+ DONE;
+})
+
+(define_expand "usubv<mode>4"
+ [(match_operand:GPI 0 "register_operand")
+ (match_operand:GPI 1 "aarch64_reg_or_zero")
+ (match_operand:GPI 2 "aarch64_reg_or_zero")
+ (match_operand 3 "")]
+ ""
+{
+ emit_insn (gen_sub<mode>3_compare1 (operands[0], operands[1], operands[2]));
+
+ rtx x;
+ x = gen_rtx_LTU (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM), const0_rtx);
+ x = gen_rtx_IF_THEN_ELSE (VOIDmode, x,
+ gen_rtx_LABEL_REF (VOIDmode, operands[3]),
+ pc_rtx);
+ emit_jump_insn (gen_rtx_SET (pc_rtx, x));
+ DONE;
+})
+
(define_expand "subti3"
[(set (match_operand:TI 0 "register_operand" "")
- (minus:TI (match_operand:TI 1 "register_operand" "")
+ (minus:TI (match_operand:TI 1 "aarch64_reg_or_zero" "")
(match_operand:TI 2 "register_operand" "")))]
""
{
- rtx low = gen_reg_rtx (DImode);
- emit_insn (gen_subdi3_compare1 (low, gen_lowpart (DImode, operands[1]),
- gen_lowpart (DImode, operands[2])));
+ rtx l0 = gen_reg_rtx (DImode);
+ rtx l1 = simplify_gen_subreg (DImode, operands[1], TImode,
+ subreg_lowpart_offset (DImode, TImode));
+ rtx l2 = gen_lowpart (DImode, operands[2]);
+ rtx h0 = gen_reg_rtx (DImode);
+ rtx h1 = simplify_gen_subreg (DImode, operands[1], TImode,
+ subreg_highpart_offset (DImode, TImode));
+ rtx h2 = gen_highpart (DImode, operands[2]);
- rtx high = gen_reg_rtx (DImode);
- emit_insn (gen_subdi3_carryin (high, gen_highpart (DImode, operands[1]),
- gen_highpart (DImode, operands[2])));
+ emit_insn (gen_subdi3_compare1 (l0, l1, l2));
+ emit_insn (gen_subdi3_carryin (h0, h1, h2));
- emit_move_insn (gen_lowpart (DImode, operands[0]), low);
- emit_move_insn (gen_highpart (DImode, operands[0]), high);
+ emit_move_insn (gen_lowpart (DImode, operands[0]), l0);
+ emit_move_insn (gen_highpart (DImode, operands[0]), h0);
+ DONE;
+})
+
+(define_expand "subvti4"
+ [(match_operand:TI 0 "register_operand")
+ (match_operand:TI 1 "aarch64_reg_or_zero")
+ (match_operand:TI 2 "aarch64_reg_or_imm")
+ (match_operand 3 "")]
+ ""
+{
+ rtx l0 = gen_reg_rtx (DImode);
+ rtx l1 = simplify_gen_subreg (DImode, operands[1], TImode,
+ subreg_lowpart_offset (DImode, TImode));
+ rtx l2 = simplify_gen_subreg (DImode, operands[2], TImode,
+ subreg_lowpart_offset (DImode, TImode));
+ rtx h0 = gen_reg_rtx (DImode);
+ rtx h1 = simplify_gen_subreg (DImode, operands[1], TImode,
+ subreg_highpart_offset (DImode, TImode));
+ rtx h2 = simplify_gen_subreg (DImode, operands[2], TImode,
+ subreg_highpart_offset (DImode, TImode));
+
+ if (l2 == const0_rtx)
+ {
+ l0 = l1;
+ emit_insn (gen_subdi3_compare1 (h0, h1, force_reg (DImode, h2)));
+ }
+ else
+ {
+ if (CONST_INT_P (l2))
+ {
+ l2 = force_reg (DImode, GEN_INT (-UINTVAL (l2)));
+ h2 = force_reg (DImode, h2);
+ emit_insn (gen_adddi3_compareC (l0, l1, l2));
+ }
+ else
+ emit_insn (gen_subdi3_compare1 (l0, l1, l2));
+ emit_insn (gen_subdi3_carryinCV (h0, force_reg (DImode, h1), h2));
+ }
+
+ emit_move_insn (gen_lowpart (DImode, operands[0]), l0);
+ emit_move_insn (gen_highpart (DImode, operands[0]), h0);
+
+ rtx x;
+ x = gen_rtx_NE (VOIDmode, gen_rtx_REG (CC_Vmode, CC_REGNUM), const0_rtx);
+ x = gen_rtx_IF_THEN_ELSE (VOIDmode, x,
+ gen_rtx_LABEL_REF (VOIDmode, operands[3]),
+ pc_rtx);
+ emit_jump_insn (gen_rtx_SET (pc_rtx, x));
+ DONE;
+})
+
+(define_expand "usubvti4"
+ [(match_operand:TI 0 "register_operand")
+ (match_operand:TI 1 "aarch64_reg_or_zero")
+ (match_operand:TI 2 "aarch64_reg_or_imm")
+ (match_operand 3 "")]
+ ""
+{
+ rtx l0 = gen_reg_rtx (DImode);
+ rtx l1 = simplify_gen_subreg (DImode, operands[1], TImode,
+ subreg_lowpart_offset (DImode, TImode));
+ rtx l2 = simplify_gen_subreg (DImode, operands[2], TImode,
+ subreg_lowpart_offset (DImode, TImode));
+ rtx h0 = gen_reg_rtx (DImode);
+ rtx h1 = simplify_gen_subreg (DImode, operands[1], TImode,
+ subreg_highpart_offset (DImode, TImode));
+ rtx h2 = simplify_gen_subreg (DImode, operands[2], TImode,
+ subreg_highpart_offset (DImode, TImode));
+
+ if (l2 == const0_rtx)
+ {
+ l0 = l1;
+ emit_insn (gen_subdi3_compare1 (h0, h1, force_reg (DImode, h2)));
+ }
+ else
+ {
+ if (CONST_INT_P (l2))
+ {
+ l2 = force_reg (DImode, GEN_INT (-UINTVAL (l2)));
+ h2 = force_reg (DImode, h2);
+ emit_insn (gen_adddi3_compareC (l0, l1, l2));
+ }
+ else
+ emit_insn (gen_subdi3_compare1 (l0, l1, l2));
+ emit_insn (gen_subdi3_carryinCV (h0, force_reg (DImode, h1), h2));
+ }
+
+ emit_move_insn (gen_lowpart (DImode, operands[0]), l0);
+ emit_move_insn (gen_highpart (DImode, operands[0]), h0);
+
+ rtx x;
+ x = gen_rtx_LTU (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM), const0_rtx);
+ x = gen_rtx_IF_THEN_ELSE (VOIDmode, x,
+ gen_rtx_LABEL_REF (VOIDmode, operands[3]),
+ pc_rtx);
+ emit_jump_insn (gen_rtx_SET (pc_rtx, x));
DONE;
})
@@ -2336,6 +2794,22 @@
[(set_attr "type" "alus_sreg")]
)
+(define_insn "*sub<mode>3_compare1_imm"
+ [(set (reg:CC CC_REGNUM)
+ (compare:CC
+ (match_operand:GPI 1 "aarch64_reg_or_zero" "rZ,rZ")
+ (match_operand:GPI 2 "aarch64_plus_immediate" "I,J")))
+ (set (match_operand:GPI 0 "register_operand" "=r,r")
+ (plus:GPI
+ (match_dup 1)
+ (match_operand:GPI 3 "aarch64_plus_immediate" "J,I")))]
+ "UINTVAL (operands[2]) == -UINTVAL (operands[3])"
+ "@
+ subs\\t%<w>0, %<w>1, %<w>2
+ adds\\t%<w>0, %<w>1, %<w>3"
+ [(set_attr "type" "alus_imm")]
+)
+
(define_insn "sub<mode>3_compare1"
[(set (reg:CC CC_REGNUM)
(compare:CC
@@ -2563,6 +3037,85 @@
[(set_attr "type" "adc_reg")]
)
+(define_expand "sub<mode>3_carryinCV"
+ [(parallel
+ [(set (reg:CC CC_REGNUM)
+ (compare:CC
+ (sign_extend:<DWI>
+ (match_operand:GPI 1 "aarch64_reg_or_zero" "rZ"))
+ (plus:<DWI>
+ (sign_extend:<DWI>
+ (match_operand:GPI 2 "register_operand" "r"))
+ (ltu:<DWI> (reg:CC CC_REGNUM) (const_int 0)))))
+ (set (match_operand:GPI 0 "register_operand" "=r")
+ (minus:GPI
+ (minus:GPI (match_dup 1) (match_dup 2))
+ (ltu:GPI (reg:CC CC_REGNUM) (const_int 0))))])]
+ ""
+)
+
+(define_insn "*sub<mode>3_carryinCV_z1_z2"
+ [(set (reg:CC CC_REGNUM)
+ (compare:CC
+ (const_int 0)
+ (match_operand:<DWI> 2 "aarch64_borrow_operation" "")))
+ (set (match_operand:GPI 0 "register_operand" "=r")
+ (neg:GPI (match_operand:GPI 1 "aarch64_borrow_operation" "")))]
+ ""
+ "sbcs\\t%<w>0, <w>zr, <w>zr"
+ [(set_attr "type" "adc_reg")]
+)
+
+(define_insn "*sub<mode>3_carryinCV_z1"
+ [(set (reg:CC CC_REGNUM)
+ (compare:CC
+ (const_int 0)
+ (plus:<DWI>
+ (sign_extend:<DWI>
+ (match_operand:GPI 1 "register_operand" "r"))
+ (match_operand:<DWI> 2 "aarch64_borrow_operation" ""))))
+ (set (match_operand:GPI 0 "register_operand" "=r")
+ (minus:GPI
+ (neg:GPI (match_dup 1))
+ (match_operand:GPI 3 "aarch64_borrow_operation" "")))]
+ ""
+ "sbcs\\t%<w>0, <w>zr, %<w>1"
+ [(set_attr "type" "adc_reg")]
+)
+
+(define_insn "*sub<mode>3_carryinCV_z2"
+ [(set (reg:CC CC_REGNUM)
+ (compare:CC
+ (sign_extend:<DWI>
+ (match_operand:GPI 1 "register_operand" "r"))
+ (match_operand:<DWI> 2 "aarch64_borrow_operation" "")))
+ (set (match_operand:GPI 0 "register_operand" "=r")
+ (minus:GPI
+ (match_dup 1)
+ (match_operand:GPI 3 "aarch64_borrow_operation" "")))]
+ ""
+ "sbcs\\t%<w>0, %<w>1, <w>zr"
+ [(set_attr "type" "adc_reg")]
+)
+
+(define_insn "*sub<mode>3_carryinCV"
+ [(set (reg:CC CC_REGNUM)
+ (compare:CC
+ (sign_extend:<DWI>
+ (match_operand:GPI 1 "register_operand" "r"))
+ (plus:<DWI>
+ (sign_extend:<DWI>
+ (match_operand:GPI 2 "register_operand" "r"))
+ (match_operand:<DWI> 3 "aarch64_borrow_operation" ""))))
+ (set (match_operand:GPI 0 "register_operand" "=r")
+ (minus:GPI
+ (minus:GPI (match_dup 1) (match_dup 2))
+ (match_operand:GPI 4 "aarch64_borrow_operation" "")))]
+ ""
+ "sbcs\\t%<w>0, %<w>1, %<w>2"
+ [(set_attr "type" "adc_reg")]
+)
+
(define_insn "*sub_uxt<mode>_shift2"
[(set (match_operand:GPI 0 "register_operand" "=rk")
(minus:GPI (match_operand:GPI 4 "register_operand" "rk")
--
1.9.1