public inbox for gcc-patches@gcc.gnu.org
From: Daniel Engel <gnu@danielengel.com>
To: Richard Earnshaw <Richard.Earnshaw@foss.arm.com>,
	gcc-patches@gcc.gnu.org
Cc: Daniel Engel <gnu@danielengel.com>,
	Christophe Lyon <christophe.lyon@linaro.org>
Subject: [PATCH v7 27/34] Import float multiplication from the CM0 library
Date: Mon, 31 Oct 2022 08:45:22 -0700	[thread overview]
Message-ID: <20221031154529.3627576-28-gnu@danielengel.com> (raw)
In-Reply-To: <20221031154529.3627576-1-gnu@danielengel.com>

gcc/libgcc/ChangeLog:
2022-10-09  Daniel Engel  <gnu@danielengel.com>

	* config/arm/eabi/fmul.S (__aeabi_fmul, __mulsf3): New file.
	* config/arm/lib1funcs.S: #include eabi/fmul.S (v6m only).
	* config/arm/t-elf (LIB1ASMFUNCS): Move _arm_mulsf3 to global scope
	(this object was previously blocked on v6m builds).
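
For readers unfamiliar with the algorithm, here is a rough C model of the
normal-path computation that the assembly below implements.  This sketch is
illustrative only: it is not the library code, the name fmul_sketch() is
made up for this description, and it ignores subnormals, overflow/underflow,
rounding, and the exception handling that the assembly defers to the shared
__fp_assemble path:

    #include <stdint.h>

    /* Illustrative binary32 multiplication, normal operands only.
       Truncates instead of rounding to nearest-even.  */
    uint32_t
    fmul_sketch (uint32_t a, uint32_t b)
    {
      uint32_t sign = (a ^ b) & 0x80000000u;
      int32_t exp = (int32_t)((a >> 23) & 0xFF)
                  + (int32_t)((b >> 23) & 0xFF) - 127;

      /* Restore the implicit '1' and left-align each significand.  */
      uint32_t ma = ((a << 9) >> 1) | 0x80000000u;   /* 1.m in bits [31:8] */
      uint32_t mb = ((b << 9) >> 1) | 0x80000000u;

      uint64_t prod = (uint64_t)ma * mb;             /* MSB in bit 62 or 63 */
      if (prod & (1ull << 63))                       /* product >= 2.0 */
        {
          prod >>= 1;
          exp += 1;
        }

      uint32_t frac = (uint32_t)(prod >> 39) & 0x007FFFFFu;
      return sign | ((uint32_t)exp << 23) | frac;
    }

The 32x32 -> 64 bit significand product is the step the assembly performs by
calling __umulsidi3; rounding and packing of the result are then handled by
the shared __fp_assemble routine.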
---
 libgcc/config/arm/eabi/fmul.S | 215 ++++++++++++++++++++++++++++++++++
 libgcc/config/arm/lib1funcs.S |   1 +
 libgcc/config/arm/t-elf       |   3 +-
 3 files changed, 218 insertions(+), 1 deletion(-)
 create mode 100644 libgcc/config/arm/eabi/fmul.S

diff --git a/libgcc/config/arm/eabi/fmul.S b/libgcc/config/arm/eabi/fmul.S
new file mode 100644
index 00000000000..4ebd5a66f47
--- /dev/null
+++ b/libgcc/config/arm/eabi/fmul.S
@@ -0,0 +1,215 @@
+/* fmul.S: Thumb-1 optimized 32-bit float multiplication
+
+   Copyright (C) 2018-2022 Free Software Foundation, Inc.
+   Contributed by Daniel Engel, Senva Inc (gnu@danielengel.com)
+
+   This file is free software; you can redistribute it and/or modify it
+   under the terms of the GNU General Public License as published by the
+   Free Software Foundation; either version 3, or (at your option) any
+   later version.
+
+   This file is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   Under Section 7 of GPL version 3, you are granted additional
+   permissions described in the GCC Runtime Library Exception, version
+   3.1, as published by the Free Software Foundation.
+
+   You should have received a copy of the GNU General Public License and
+   a copy of the GCC Runtime Library Exception along with this program;
+   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+
+#ifdef L_arm_mulsf3
+
+// float __aeabi_fmul(float, float)
+// Returns $r0 after multiplication by $r1.
+// Subsection ordering within fpcore keeps conditional branches within range.
+FUNC_START_SECTION aeabi_fmul .text.sorted.libgcc.fpcore.m.fmul
+FUNC_ALIAS mulsf3 aeabi_fmul
+    CFI_START_FUNCTION
+
+        // Standard registers, compatible with exception handling.
+        push    { rT, lr }
+                .cfi_remember_state
+                .cfi_remember_state
+                .cfi_adjust_cfa_offset 8
+                .cfi_rel_offset rT, 0
+                .cfi_rel_offset lr, 4
+
+        // Save the sign of the result.
+        movs    rT,     r1
+        eors    rT,     r0
+        lsrs    rT,     #31
+        lsls    rT,     #31
+        mov     ip,     rT
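+        // ($ip now holds the result sign in bit [31]; all other bits are clear.)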
+
+        // Set up INF for comparison.
+        movs    rT,     #255
+        lsls    rT,     #24
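+        // ($rT = 0xFF000000: once an operand is shifted left by one to drop
+        //  its sign, any value at or above this has exponent 0xFF, i.e. INF/NAN.)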
+
+        // Check for multiplication by zero.
+        lsls    r2,     r0,     #1
+        beq     LLSYM(__fmul_zero1)
+
+        lsls    r3,     r1,     #1
+        beq     LLSYM(__fmul_zero2)
+
+        // Check for INF/NAN.
+        cmp     r3,     rT
+        bhs     LLSYM(__fmul_special2)
+
+        cmp     r2,     rT
+        bhs     LLSYM(__fmul_special1)
+
+        // Because neither operand is INF/NAN, the result will be finite.
+        // It is now safe to modify the original operand registers.
+        lsls    r0,     #9
+
+        // Isolate the first exponent.  When normal, add back the implicit '1'.
+        // The result is always aligned with the MSB in bit [31].
+        // Subnormal mantissas remain effectively multiplied by 2x relative to
+        //  normals, but this works because the weight of a subnormal is -126.
+        lsrs    r2,     #24
+        beq     LLSYM(__fmul_normalize2)
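+        // (After the ADDS below, the low byte of $r0 is exactly 1, so RORS
+        //  rotates right by one place and moves that '1' into bit [31].)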
+        adds    r0,     #1
+        rors    r0,     r0
+
+    LLSYM(__fmul_normalize2):
+        // IMPORTANT: exp10i() jumps in here!
+        // Repeat for the mantissa of the second operand.
+        // Short-circuit when the mantissa is 1.0, as the
+        //  first mantissa is already prepared in $r0
+        lsls    r1,     #9
+
+        // When normal, add back the implicit '1'.
+        lsrs    r3,     #24
+        beq     LLSYM(__fmul_go)
+        adds    r1,     #1
+        rors    r1,     r1
+
+    LLSYM(__fmul_go):
+        // Calculate the final exponent, relative to bit [30].
+        adds    rT,     r2,     r3
+        subs    rT,     #127
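+        // (The two biased exponents count the bias twice; subtracting 127
+        //  leaves the sum with a single bias of 127.)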
+
+  #if !defined(__OPTIMIZE_SIZE__) || !__OPTIMIZE_SIZE__
+        // Short-circuit on multiplication by powers of 2.
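+        // (The check triggers when the prepared mantissa is exactly
+        //  0x80000000, i.e. only its top bit is set.)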
+        lsls    r3,     r0,     #1
+        beq     LLSYM(__fmul_simple1)
+
+        lsls    r3,     r1,     #1
+        beq     LLSYM(__fmul_simple2)
+  #endif
+
+        // Save $ip across the call.
+        // (Alternatively, could push/pop a separate register, but the four
+        //  instructions here are equally fast without imposing on the stack.)
+        add     rT,     ip
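+        // ($rT survives the call, so the combined sign and exponent can be
+        //  separated again afterwards.)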
+
+        // 32x32 unsigned multiplication, 64 bit result.
+        bl      SYM(__umulsidi3) __PLT__
+
+        // Separate the saved exponent and sign.
+        sxth    r2,     rT
+        subs    rT,     r2
+        mov     ip,     rT
+
+        b       SYM(__fp_assemble)
+
+  #if !defined(__OPTIMIZE_SIZE__) || !__OPTIMIZE_SIZE__
+    LLSYM(__fmul_simple2):
+        // Move the high bits of the result to $r1.
+        movs    r1,     r0
+
+    LLSYM(__fmul_simple1):
+        // Clear the remainder.
+        eors    r0,     r0
+
+        // Adjust mantissa to match the exponent, relative to bit [30].
+        subs    r2,     rT,     #1
+        b       SYM(__fp_assemble)
+  #endif
+
+    LLSYM(__fmul_zero1):
+        // $r0 was equal to 0, set up to check $r1 for INF/NAN.
+        lsls    r2,     r1,     #1
+
+    LLSYM(__fmul_zero2):
+      #if defined(EXCEPTION_CODES) && EXCEPTION_CODES
+        movs    r3,     #(INFINITY_TIMES_ZERO)
+      #endif
+
+        // Check the non-zero operand for INF/NAN.
+        // If NAN, it should be returned.
+        // If INF, the result should be NAN.
+        // Otherwise, the result will be +/-0.
+        cmp     r2,     rT
+        beq     SYM(__fp_exception)
+
+        // If the second operand is finite, the result is 0.
+        blo     SYM(__fp_zero)
+
+      #if defined(STRICT_NANS) && STRICT_NANS
+        // Restore values that got mixed in zero testing, then go back
+        //  to sort out which one is the NAN.
+        lsls    r3,     r1,     #1
+        lsls    r2,     r0,     #1
+      #elif defined(TRAP_NANS) && TRAP_NANS
+        // Return NAN with the sign bit cleared.
+        lsrs    r0,     r2,     #1
+        b       SYM(__fp_check_nan)
+      #else
+        // Return NAN with the sign bit cleared.
+        lsrs    r0,     r2,     #1
+        pop     { rT, pc }
+                .cfi_restore_state
+      #endif
+
+    LLSYM(__fmul_special2):
+        // $r1 is INF/NAN.  In case of INF, check $r0 for NAN.
+        cmp     r2,     rT
+
+      #if defined(TRAP_NANS) && TRAP_NANS
+        // Force swap if $r0 is not NAN.
+        bls     LLSYM(__fmul_swap)
+
+        // $r0 is NAN, keep if $r1 is INF
+        cmp     r3,     rT
+        beq     LLSYM(__fmul_special1)
+
+        // Both are NAN, keep the smaller value (more likely to signal).
+        cmp     r2,     r3
+      #endif
+
+        // Prefer the NAN already in $r0.
+        //  (If TRAP_NANS, this is the smaller NAN).
+        bhi     LLSYM(__fmul_special1)
+
+    LLSYM(__fmul_swap):
+        movs    r0,     r1
+
+    LLSYM(__fmul_special1):
+        // $r0 is either INF or NAN.  $r1 has already been examined.
+        // Flags are already set correctly.
+        lsls    r2,     r0,     #1
+        cmp     r2,     rT
+        beq     SYM(__fp_infinity)
+
+      #if defined(TRAP_NANS) && TRAP_NANS
+        b       SYM(__fp_check_nan)
+      #else
+        pop     { rT, pc }
+                .cfi_restore_state
+      #endif
+
+    CFI_END_FUNCTION
+FUNC_END mulsf3
+FUNC_END aeabi_fmul
+
+#endif /* L_arm_mulsf3 */
+
diff --git a/libgcc/config/arm/lib1funcs.S b/libgcc/config/arm/lib1funcs.S
index bfe3397d892..92245353442 100644
--- a/libgcc/config/arm/lib1funcs.S
+++ b/libgcc/config/arm/lib1funcs.S
@@ -2015,6 +2015,7 @@ LSYM(Lchange_\register):
 #include "eabi/fneg.S"
 #include "eabi/fadd.S"
 #include "eabi/futil.S"
+#include "eabi/fmul.S"
 #endif /* NOT_ISA_TARGET_32BIT */
 #include "eabi/lcmp.S"
 #endif /* !__symbian__ */
diff --git a/libgcc/config/arm/t-elf b/libgcc/config/arm/t-elf
index c57d9ef50ac..682f273a1d2 100644
--- a/libgcc/config/arm/t-elf
+++ b/libgcc/config/arm/t-elf
@@ -10,7 +10,7 @@ THUMB1_ISA:=$(findstring __ARM_ARCH_ISA_THUMB 1,$(shell $(gcc_compile_bare) -dM
 # inclusion create when only multiplication is used, thus avoiding pulling in
 # useless division code.
 ifneq (__ARM_ARCH_ISA_THUMB 1,$(ARM_ISA)$(THUMB1_ISA))
-LIB1ASMFUNCS += _arm_muldf3 _arm_mulsf3
+LIB1ASMFUNCS += _arm_muldf3
 endif
 endif # !__symbian__
 
@@ -26,6 +26,7 @@ LIB1ASMFUNCS += \
 	_ctzsi2 \
 	_paritysi2 \
 	_popcountsi2 \
+	_arm_mulsf3 \
 
 ifeq (__ARM_ARCH_ISA_THUMB 1,$(ARM_ISA)$(THUMB1_ISA))
 # Group 0B: WEAK overridable function objects built for v6m only.
-- 
2.34.1



Thread overview: 36+ messages
2022-10-31 15:44 [PATCH v7 00/34] libgcc: Thumb-1 Floating-Point Assembly for Cortex M0 Daniel Engel
2022-10-31 15:44 ` [PATCH v7 01/34] Add and restructure function declaration macros Daniel Engel
2022-10-31 15:44 ` [PATCH v7 02/34] Rename THUMB_FUNC_START to THUMB_FUNC_ENTRY Daniel Engel
2022-10-31 15:44 ` [PATCH v7 03/34] Fix syntax warnings on conditional instructions Daniel Engel
2022-10-31 15:44 ` [PATCH v7 04/34] Reorganize LIB1ASMFUNCS object wrapper macros Daniel Engel
2022-10-31 15:45 ` [PATCH v7 05/34] Add the __HAVE_FEATURE_IT and IT() macros Daniel Engel
2022-10-31 15:45 ` [PATCH v7 06/34] Refactor 'clz' functions into a new file Daniel Engel
2022-10-31 15:45 ` [PATCH v7 07/34] Refactor 'ctz' " Daniel Engel
2022-10-31 15:45 ` [PATCH v7 08/34] Refactor 64-bit shift " Daniel Engel
2022-10-31 15:45 ` [PATCH v7 09/34] Import 'clz' functions from the CM0 library Daniel Engel
2022-10-31 15:45 ` [PATCH v7 10/34] Import 'ctz' " Daniel Engel
2022-10-31 15:45 ` [PATCH v7 11/34] Import 64-bit shift " Daniel Engel
2022-10-31 15:45 ` [PATCH v7 12/34] Import 'clrsb' " Daniel Engel
2022-10-31 15:45 ` [PATCH v7 13/34] Import 'ffs' " Daniel Engel
2022-10-31 15:45 ` [PATCH v7 14/34] Import 'parity' " Daniel Engel
2022-10-31 15:45 ` [PATCH v7 15/34] Import 'popcnt' " Daniel Engel
2022-10-31 15:45 ` [PATCH v7 16/34] Refactor Thumb-1 64-bit comparison into a new file Daniel Engel
2022-10-31 15:45 ` [PATCH v7 17/34] Import 64-bit comparison from CM0 library Daniel Engel
2022-10-31 15:45 ` [PATCH v7 18/34] Merge Thumb-2 optimizations for 64-bit comparison Daniel Engel
2022-10-31 15:45 ` [PATCH v7 19/34] Import 32-bit division from the CM0 library Daniel Engel
2022-10-31 15:45 ` [PATCH v7 20/34] Refactor Thumb-1 64-bit division into a new file Daniel Engel
2022-10-31 15:45 ` [PATCH v7 21/34] Import 64-bit division from the CM0 library Daniel Engel
2022-10-31 15:45 ` [PATCH v7 22/34] Import integer multiplication " Daniel Engel
2022-10-31 15:45 ` [PATCH v7 23/34] Refactor Thumb-1 float comparison into a new file Daniel Engel
2022-10-31 15:45 ` [PATCH v7 24/34] Import float comparison from the CM0 library Daniel Engel
2022-10-31 15:45 ` [PATCH v7 25/34] Refactor Thumb-1 float subtraction into a new file Daniel Engel
2022-10-31 15:45 ` [PATCH v7 26/34] Import float addition and subtraction from the CM0 library Daniel Engel
2022-10-31 15:45 ` Daniel Engel [this message]
2022-10-31 15:45 ` [PATCH v7 28/34] Import float division " Daniel Engel
2022-10-31 15:45 ` [PATCH v7 29/34] Import integer-to-float conversion " Daniel Engel
2022-10-31 15:45 ` [PATCH v7 30/34] Import float-to-integer " Daniel Engel
2022-10-31 15:45 ` [PATCH v7 31/34] Import float<->double " Daniel Engel
2022-10-31 15:45 ` [PATCH v7 32/34] Import float<->__fp16 " Daniel Engel
2022-10-31 15:45 ` [PATCH v7 33/34] Drop single-precision Thumb-1 soft-float functions Daniel Engel
2022-10-31 15:45 ` [PATCH v7 34/34] Add -mpure-code support to the CM0 functions Daniel Engel
2022-11-15 15:27 ` [PING] Re: [PATCH v7 00/34] libgcc: Thumb-1 Floating-Point Assembly for Cortex M0 Daniel Engel
