public inbox for libc-alpha@sourceware.org
* [PATCH 1/4] aarch64: Add vector implementations of cos routines
@ 2023-06-08 13:39 Joe Ramsay
  2023-06-08 13:39 ` [PATCH 2/4] aarch64: Add vector implementations of sin routines Joe Ramsay
                   ` (4 more replies)
  0 siblings, 5 replies; 11+ messages in thread
From: Joe Ramsay @ 2023-06-08 13:39 UTC (permalink / raw)
  To: libc-alpha; +Cc: Joe Ramsay

Replace the loop-over-scalar placeholder routines with optimised
implementations from Arm Optimized Routines (AOR).

Also add some headers containing utilities for aarch64 libmvec
routines, and update libm-test-ulps.

AOR exposes a config option, WANT_SIMD_EXCEPT, to enable
selective masking (and later fixing up) of invalid lanes, in
order to trigger fp exceptions correctly (AdvSIMD only). This is
tested and maintained in AOR; however, it is configured off at
source level here for performance reasons. We keep the
WANT_SIMD_EXCEPT blocks in routine sources to greatly simplify
the upstreaming process from AOR to glibc.
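
For reference, the new symbols follow the AArch64 vector PCS and are
declared in bits/math-vector.h. A minimal usage sketch (the wrapper
name is hypothetical; the program must be linked against libmvec):

  #include <arm_neon.h>

  __attribute__ ((aarch64_vector_pcs)) float64x2_t _ZGVnN2v_cos (float64x2_t);

  static float64x2_t
  cos_two_lanes (float64x2_t x)
  {
    /* One call evaluates cos for both double-precision lanes of x.  */
    return _ZGVnN2v_cos (x);
  }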
---
 sysdeps/aarch64/fpu/cos_advsimd.c             |  81 ++++++-
 sysdeps/aarch64/fpu/cos_sve.c                 |  73 ++++++-
 sysdeps/aarch64/fpu/cosf_advsimd.c            |  76 ++++++-
 sysdeps/aarch64/fpu/cosf_sve.c                |  70 ++++++-
 sysdeps/aarch64/fpu/sv_math.h                 | 141 +++++++++++++
 sysdeps/aarch64/fpu/sve_utils.h               |  55 -----
 sysdeps/aarch64/fpu/v_math.h                  | 197 ++++++++++++++++++
 .../fpu/{advsimd_utils.h => vecmath_config.h} |  30 ++-
 sysdeps/aarch64/libm-test-ulps                |   2 +-
 9 files changed, 629 insertions(+), 96 deletions(-)
 create mode 100644 sysdeps/aarch64/fpu/sv_math.h
 delete mode 100644 sysdeps/aarch64/fpu/sve_utils.h
 create mode 100644 sysdeps/aarch64/fpu/v_math.h
 rename sysdeps/aarch64/fpu/{advsimd_utils.h => vecmath_config.h} (57%)

diff --git a/sysdeps/aarch64/fpu/cos_advsimd.c b/sysdeps/aarch64/fpu/cos_advsimd.c
index 40831e6b0d..1f7a7023f5 100644
--- a/sysdeps/aarch64/fpu/cos_advsimd.c
+++ b/sysdeps/aarch64/fpu/cos_advsimd.c
@@ -17,13 +17,82 @@
    License along with the GNU C Library; if not, see
    <https://www.gnu.org/licenses/>.  */
 
-#include <math.h>
+#include "v_math.h"
 
-#include "advsimd_utils.h"
+static const volatile struct
+{
+  float64x2_t poly[7];
+  float64x2_t range_val, shift, inv_pi, half_pi, pi_1, pi_2, pi_3;
+} data = {
+  /* Worst-case error is 3.3 ulp in [-pi/2, pi/2].  */
+  .poly = { V2 (-0x1.555555555547bp-3), V2 (0x1.1111111108a4dp-7),
+	    V2 (-0x1.a01a019936f27p-13), V2 (0x1.71de37a97d93ep-19),
+	    V2 (-0x1.ae633919987c6p-26), V2 (0x1.60e277ae07cecp-33),
+	    V2 (-0x1.9e9540300a1p-41) },
+  .inv_pi = V2 (0x1.45f306dc9c883p-2),
+  .half_pi = V2 (0x1.921fb54442d18p+0),
+  .pi_1 = V2 (0x1.921fb54442d18p+1),
+  .pi_2 = V2 (0x1.1a62633145c06p-53),
+  .pi_3 = V2 (0x1.c1cd129024e09p-106),
+  .shift = V2 (0x1.8p52),
+  .range_val = V2 (0x1p23)
+};
+
+#define C(i) data.poly[i]
+
+static float64x2_t VPCS_ATTR NOINLINE
+special_case (float64x2_t x, float64x2_t y, uint64x2_t odd, uint64x2_t cmp)
+{
+  y = vreinterpretq_f64_u64 (veorq_u64 (vreinterpretq_u64_f64 (y), odd));
+  return v_call_f64 (cos, x, y, cmp);
+}
 
-VPCS_ATTR
-float64x2_t
-V_NAME_D1 (cos) (float64x2_t x)
+float64x2_t VPCS_ATTR V_NAME_D1 (cos) (float64x2_t x)
 {
-  return v_call_f64 (cos, x);
+  float64x2_t n, r, r2, r3, r4, t1, t2, t3, y;
+  uint64x2_t odd, cmp;
+
+#if WANT_SIMD_EXCEPT
+  r = vabsq_f64 (x);
+  cmp = vcgeq_u64 (vreinterpretq_u64_f64 (r),
+		   vreinterpretq_u64_f64 (data.range_val));
+  if (unlikely (v_any_u64 (cmp)))
+    /* If fenv exceptions are to be triggered correctly, set any special lanes
+       to 1 (which is neutral w.r.t. fenv). These lanes will be fixed by
+       special-case handler later.  */
+    r = vbslq_f64 (cmp, v_f64 (1.0), r);
+#else
+  cmp = vcageq_f64 (data.range_val, x);
+  cmp = vceqzq_u64 (cmp); /* cmp = ~cmp.  */
+  r = x;
+#endif
+
+  /* n = rint((|x|+pi/2)/pi) - 0.5.  */
+  n = vfmaq_f64 (data.shift, data.inv_pi, vaddq_f64 (r, data.half_pi));
+  odd = vshlq_n_u64 (vreinterpretq_u64_f64 (n), 63);
+  n = vsubq_f64 (n, data.shift);
+  n = vsubq_f64 (n, v_f64 (0.5));
+
+  /* r = |x| - n*pi  (range reduction into -pi/2 .. pi/2).  */
+  r = vfmsq_f64 (r, data.pi_1, n);
+  r = vfmsq_f64 (r, data.pi_2, n);
+  r = vfmsq_f64 (r, data.pi_3, n);
+
+  /* sin(r) poly approx.  */
+  r2 = vmulq_f64 (r, r);
+  r3 = vmulq_f64 (r2, r);
+  r4 = vmulq_f64 (r2, r2);
+
+  t1 = vfmaq_f64 (C (4), C (5), r2);
+  t2 = vfmaq_f64 (C (2), C (3), r2);
+  t3 = vfmaq_f64 (C (0), C (1), r2);
+
+  y = vfmaq_f64 (t1, C (6), r4);
+  y = vfmaq_f64 (t2, y, r4);
+  y = vfmaq_f64 (t3, y, r4);
+  y = vfmaq_f64 (r, y, r3);
+
+  if (unlikely (v_any_u64 (cmp)))
+    return special_case (x, y, odd, cmp);
+  return vreinterpretq_f64_u64 (veorq_u64 (vreinterpretq_u64_f64 (y), odd));
 }
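
(Reduction notes for the AdvSIMD variant above: with
n = rint((|x| + pi/2)/pi) - 0.5 = m - 0.5 and r = |x| - n*pi, we have
cos(|x|) = cos(m*pi - pi/2 + r) = (-1)^m * sin(r) with r in
[-pi/2, pi/2], hence the sin polynomial. The 0x1.8p52 shift forces the
FMA result to round to an integer-valued double whose lowest mantissa
bit is m & 1; shifting that bit into the sign position and XOR-ing it
into the result applies the (-1)^m factor.)
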
diff --git a/sysdeps/aarch64/fpu/cos_sve.c b/sysdeps/aarch64/fpu/cos_sve.c
index 55501e5000..b93de076bb 100644
--- a/sysdeps/aarch64/fpu/cos_sve.c
+++ b/sysdeps/aarch64/fpu/cos_sve.c
@@ -17,12 +17,75 @@
    License along with the GNU C Library; if not, see
    <https://www.gnu.org/licenses/>.  */
 
-#include <math.h>
+#include "sv_math.h"
 
-#include "sve_utils.h"
+static struct
+{
+  double inv_pio2, pio2_1, pio2_2, pio2_3, shift;
+} data = {
+  /* Polynomial coefficients are hardwired in FTMAD instructions.  */
+  .inv_pio2 = 0x1.45f306dc9c882p-1,
+  .pio2_1 = 0x1.921fb50000000p+0,
+  .pio2_2 = 0x1.110b460000000p-26,
+  .pio2_3 = 0x1.1a62633145c07p-54,
+  /* Original shift used in AdvSIMD cos,
+     plus a contribution to set the bit #0 of q
+     as expected by trigonometric instructions.  */
+  .shift = 0x1.8000000000001p52
+};
+
+#define RangeVal 0x4160000000000000 /* asuint64 (0x1p23).  */
+
+static svfloat64_t NOINLINE
+special_case (svfloat64_t x, svfloat64_t y, svbool_t out_of_bounds)
+{
+  return sv_call_f64 (cos, x, y, out_of_bounds);
+}
 
-svfloat64_t
-SV_NAME_D1 (cos) (svfloat64_t x, svbool_t pg)
+/* A fast SVE implementation of cos based on trigonometric
+   instructions (FTMAD, FTSSEL, FTSMUL).
+   Maximum measured error: 2.108 ULPs.
+   SV_NAME_D1 (cos)(0x1.9b0ba158c98f3p+7) got -0x1.fddd4c65c7f07p-3
+					 want -0x1.fddd4c65c7f05p-3.  */
+svfloat64_t SV_NAME_D1 (cos) (svfloat64_t x, const svbool_t pg)
 {
-  return sv_call_f64 (cos, x, svdup_n_f64 (0), pg);
+  svfloat64_t r = svabs_f64_x (pg, x);
+  svbool_t out_of_bounds
+      = svcmpge_n_u64 (pg, svreinterpret_u64_f64 (r), RangeVal);
+
+  /* Load some constants in quad-word chunks to minimise memory access.  */
+  svbool_t ptrue = svptrue_b64 ();
+  svfloat64_t invpio2_and_pio2_1 = svld1rq_f64 (ptrue, &data.inv_pio2);
+  svfloat64_t pio2_23 = svld1rq_f64 (ptrue, &data.pio2_2);
+
+  /* n = rint(|x|/(pi/2)).  */
+  svfloat64_t q
+      = svmla_lane_f64 (sv_f64 (data.shift), r, invpio2_and_pio2_1, 0);
+  svfloat64_t n = svsub_n_f64_x (pg, q, data.shift);
+
+  /* r = |x| - n*(pi/2)  (range reduction into -pi/4 .. pi/4).  */
+  r = svmls_lane_f64 (r, n, invpio2_and_pio2_1, 1);
+  r = svmls_lane_f64 (r, n, pio2_23, 0);
+  r = svmls_lane_f64 (r, n, pio2_23, 1);
+
+  /* cos(r) poly approx.  */
+  svfloat64_t r2 = svtsmul_f64 (r, svreinterpret_u64_f64 (q));
+  svfloat64_t y = sv_f64 (0.0);
+  y = svtmad_f64 (y, r2, 7);
+  y = svtmad_f64 (y, r2, 6);
+  y = svtmad_f64 (y, r2, 5);
+  y = svtmad_f64 (y, r2, 4);
+  y = svtmad_f64 (y, r2, 3);
+  y = svtmad_f64 (y, r2, 2);
+  y = svtmad_f64 (y, r2, 1);
+  y = svtmad_f64 (y, r2, 0);
+
+  /* Final multiplicative factor: 1.0 or x depending on bit #0 of q.  */
+  svfloat64_t f = svtssel_f64 (r, svreinterpret_u64_f64 (q));
+  /* Apply factor.  */
+  y = svmul_f64_x (pg, f, y);
+
+  if (unlikely (svptest_any (pg, out_of_bounds)))
+    return special_case (x, y, out_of_bounds);
+  return y;
 }
diff --git a/sysdeps/aarch64/fpu/cosf_advsimd.c b/sysdeps/aarch64/fpu/cosf_advsimd.c
index 35bb81aead..a5c7437bfb 100644
--- a/sysdeps/aarch64/fpu/cosf_advsimd.c
+++ b/sysdeps/aarch64/fpu/cosf_advsimd.c
@@ -17,13 +17,77 @@
    License along with the GNU C Library; if not, see
    <https://www.gnu.org/licenses/>.  */
 
-#include <math.h>
+#include "v_math.h"
 
-#include "advsimd_utils.h"
+static const volatile struct
+{
+  float32x4_t poly[4];
+  float32x4_t range_val, inv_pi, half_pi, shift, pi_1, pi_2, pi_3;
+} data = {
+  /* 1.886 ulp error.  */
+  .poly = { V4 (-0x1.555548p-3f), V4 (0x1.110df4p-7f), V4 (-0x1.9f42eap-13f),
+	    V4 (0x1.5b2e76p-19f) },
+
+  .pi_1 = V4 (0x1.921fb6p+1f),
+  .pi_2 = V4 (-0x1.777a5cp-24f),
+  .pi_3 = V4 (-0x1.ee59dap-49f),
+
+  .inv_pi = V4 (0x1.45f306p-2f),
+  .shift = V4 (0x1.8p+23f),
+  .half_pi = V4 (0x1.921fb6p0f),
+  .range_val = V4 (0x1p20f)
+};
+
+#define C(i) data.poly[i]
+
+static float32x4_t VPCS_ATTR NOINLINE
+special_case (float32x4_t x, float32x4_t y, uint32x4_t odd, uint32x4_t cmp)
+{
+  /* Fall back to scalar code.  */
+  y = vreinterpretq_f32_u32 (veorq_u32 (vreinterpretq_u32_f32 (y), odd));
+  return v_call_f32 (cosf, x, y, cmp);
+}
 
-VPCS_ATTR
-float32x4_t
-V_NAME_F1 (cos) (float32x4_t x)
+float32x4_t VPCS_ATTR V_NAME_F1 (cos) (float32x4_t x)
 {
-  return v_call_f32 (cosf, x);
+  float32x4_t n, r, r2, r3, y;
+  uint32x4_t odd, cmp;
+
+#if WANT_SIMD_EXCEPT
+  r = vabsq_f32 (x);
+  cmp = vcgeq_u32 (vreinterpretq_u32_f32 (r),
+		   vreinterpretq_u32_f32 (data.range_val));
+  if (unlikely (v_any_u32 (cmp)))
+    /* If fenv exceptions are to be triggered correctly, set any special lanes
+       to 1 (which is neutral w.r.t. fenv). These lanes will be fixed by
+       special-case handler later.  */
+    r = vbslq_f32 (cmp, v_f32 (1.0f), r);
+#else
+  cmp = vcageq_f32 (data.range_val, x);
+  cmp = vceqzq_u32 (cmp); /* cmp = ~cmp.  */
+  r = x;
+#endif
+
+  /* n = rint((|x|+pi/2)/pi) - 0.5.  */
+  n = vfmaq_f32 (data.shift, data.inv_pi, vaddq_f32 (r, data.half_pi));
+  odd = vshlq_n_u32 (vreinterpretq_u32_f32 (n), 31);
+  n = vsubq_f32 (n, data.shift);
+  n = vsubq_f32 (n, v_f32 (0.5f));
+
+  /* r = |x| - n*pi  (range reduction into -pi/2 .. pi/2).  */
+  r = vfmsq_f32 (r, data.pi_1, n);
+  r = vfmsq_f32 (r, data.pi_2, n);
+  r = vfmsq_f32 (r, data.pi_3, n);
+
+  /* y = sin(r).  */
+  r2 = vmulq_f32 (r, r);
+  r3 = vmulq_f32 (r2, r);
+  y = vfmaq_f32 (C (2), C (3), r2);
+  y = vfmaq_f32 (C (1), y, r2);
+  y = vfmaq_f32 (C (0), y, r2);
+  y = vfmaq_f32 (r, y, r3);
+
+  if (unlikely (v_any_u32 (cmp)))
+    return special_case (x, y, odd, cmp);
+  return vreinterpretq_f32_u32 (veorq_u32 (vreinterpretq_u32_f32 (y), odd));
 }
diff --git a/sysdeps/aarch64/fpu/cosf_sve.c b/sysdeps/aarch64/fpu/cosf_sve.c
index 16c68f387b..d7cfc45fc4 100644
--- a/sysdeps/aarch64/fpu/cosf_sve.c
+++ b/sysdeps/aarch64/fpu/cosf_sve.c
@@ -17,12 +17,72 @@
    License along with the GNU C Library; if not, see
    <https://www.gnu.org/licenses/>.  */
 
-#include <math.h>
+#include "sv_math.h"
 
-#include "sve_utils.h"
+static struct
+{
+  float neg_pio2_1, neg_pio2_2, neg_pio2_3, inv_pio2, shift;
+} data = {
+  /* Polynomial coefficients are hard-wired in FTMAD instructions.  */
+  .neg_pio2_1 = -0x1.921fb6p+0f,
+  .neg_pio2_2 = 0x1.777a5cp-25f,
+  .neg_pio2_3 = 0x1.ee59dap-50f,
+  .inv_pio2 = 0x1.45f306p-1f,
+  /* Original shift used in AdvSIMD cosf,
+     plus a contribution to set the bit #0 of q
+     as expected by trigonometric instructions.  */
+  .shift = 0x1.800002p+23f
+};
+
+#define RangeVal 0x49800000 /* asuint32(0x1p20f).  */
+
+static svfloat32_t NOINLINE
+special_case (svfloat32_t x, svfloat32_t y, svbool_t out_of_bounds)
+{
+  return sv_call_f32 (cosf, x, y, out_of_bounds);
+}
 
-svfloat32_t
-SV_NAME_F1 (cos) (svfloat32_t x, svbool_t pg)
+/* A fast SVE implementation of cosf based on trigonometric
+   instructions (FTMAD, FTSSEL, FTSMUL).
+   Maximum measured error: 2.06 ULPs.
+   SV_NAME_F1 (cos)(0x1.dea2f2p+19) got 0x1.fffe7ap-6
+				   want 0x1.fffe76p-6.  */
+svfloat32_t SV_NAME_F1 (cos) (svfloat32_t x, const svbool_t pg)
 {
-  return sv_call_f32 (cosf, x, svdup_n_f32 (0), pg);
+  svfloat32_t r = svabs_f32_x (pg, x);
+  svbool_t out_of_bounds
+    = svcmpge_n_u32 (pg, svreinterpret_u32_f32 (r), RangeVal);
+
+  /* Load some constants in quad-word chunks to minimise memory access.  */
+  svfloat32_t negpio2_and_invpio2
+      = svld1rq_f32 (svptrue_b32 (), &data.neg_pio2_1);
+
+  /* n = rint(|x|/(pi/2)).  */
+  svfloat32_t q
+    = svmla_lane_f32 (sv_f32 (data.shift), r, negpio2_and_invpio2, 3);
+  svfloat32_t n = svsub_n_f32_x (pg, q, data.shift);
+
+  /* r = |x| - n*(pi/2)  (range reduction into -pi/4 .. pi/4).  */
+  r = svmla_lane_f32 (r, n, negpio2_and_invpio2, 0);
+  r = svmla_lane_f32 (r, n, negpio2_and_invpio2, 1);
+  r = svmla_lane_f32 (r, n, negpio2_and_invpio2, 2);
+
+  /* Final multiplicative factor: 1.0 or x depending on bit #0 of q.  */
+  svfloat32_t f = svtssel_f32 (r, svreinterpret_u32_f32 (q));
+
+  /* cos(r) poly approx.  */
+  svfloat32_t r2 = svtsmul_f32 (r, svreinterpret_u32_f32 (q));
+  svfloat32_t y = sv_f32 (0.0f);
+  y = svtmad_f32 (y, r2, 4);
+  y = svtmad_f32 (y, r2, 3);
+  y = svtmad_f32 (y, r2, 2);
+  y = svtmad_f32 (y, r2, 1);
+  y = svtmad_f32 (y, r2, 0);
+
+  /* Apply factor.  */
+  y = svmul_f32_x (pg, f, y);
+
+  if (unlikely (svptest_any (pg, out_of_bounds)))
+    return special_case (x, y, out_of_bounds);
+  return y;
 }
diff --git a/sysdeps/aarch64/fpu/sv_math.h b/sysdeps/aarch64/fpu/sv_math.h
new file mode 100644
index 0000000000..b63a99b24f
--- /dev/null
+++ b/sysdeps/aarch64/fpu/sv_math.h
@@ -0,0 +1,141 @@
+/* Utilities for SVE libmvec routines.
+   Copyright (C) 2023 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#ifndef SV_MATH_H
+#define SV_MATH_H
+
+#include <arm_sve.h>
+#include <stdbool.h>
+
+#include "vecmath_config.h"
+
+#define SV_NAME_F1(fun) _ZGVsMxv_##fun##f
+#define SV_NAME_D1(fun) _ZGVsMxv_##fun
+#define SV_NAME_F2(fun) _ZGVsMxvv_##fun##f
+#define SV_NAME_D2(fun) _ZGVsMxvv_##fun
+
+/* Double precision.  */
+static inline svint64_t
+sv_s64 (int64_t x)
+{
+  return svdup_n_s64 (x);
+}
+
+static inline svuint64_t
+sv_u64 (uint64_t x)
+{
+  return svdup_n_u64 (x);
+}
+
+static inline svfloat64_t
+sv_f64 (double x)
+{
+  return svdup_n_f64 (x);
+}
+
+static inline svfloat64_t
+sv_call_f64 (double (*f) (double), svfloat64_t x, svfloat64_t y, svbool_t cmp)
+{
+  svbool_t p = svpfirst (cmp, svpfalse ());
+  while (svptest_any (cmp, p))
+    {
+      double elem = svclastb_n_f64 (p, 0, x);
+      elem = (*f) (elem);
+      svfloat64_t y2 = svdup_n_f64 (elem);
+      y = svsel_f64 (p, y2, y);
+      p = svpnext_b64 (cmp, p);
+    }
+  return y;
+}
+
+static inline svfloat64_t
+sv_call2_f64 (double (*f) (double, double), svfloat64_t x1, svfloat64_t x2,
+	      svfloat64_t y, svbool_t cmp)
+{
+  svbool_t p = svpfirst (cmp, svpfalse ());
+  while (svptest_any (cmp, p))
+    {
+      double elem1 = svclastb_n_f64 (p, 0, x1);
+      double elem2 = svclastb_n_f64 (p, 0, x2);
+      double ret = (*f) (elem1, elem2);
+      svfloat64_t y2 = svdup_n_f64 (ret);
+      y = svsel_f64 (p, y2, y);
+      p = svpnext_b64 (cmp, p);
+    }
+  return y;
+}
+
+static inline svuint64_t
+sv_mod_n_u64_x (svbool_t pg, svuint64_t x, uint64_t y)
+{
+  svuint64_t q = svdiv_n_u64_x (pg, x, y);
+  return svmls_n_u64_x (pg, x, q, y);
+}
+
+/* Single precision.  */
+static inline svint32_t
+sv_s32 (int32_t x)
+{
+  return svdup_n_s32 (x);
+}
+
+static inline svuint32_t
+sv_u32 (uint32_t x)
+{
+  return svdup_n_u32 (x);
+}
+
+static inline svfloat32_t
+sv_f32 (float x)
+{
+  return svdup_n_f32 (x);
+}
+
+static inline svfloat32_t
+sv_call_f32 (float (*f) (float), svfloat32_t x, svfloat32_t y, svbool_t cmp)
+{
+  svbool_t p = svpfirst (cmp, svpfalse ());
+  while (svptest_any (cmp, p))
+    {
+      float elem = svclastb_n_f32 (p, 0, x);
+      elem = (*f) (elem);
+      svfloat32_t y2 = svdup_n_f32 (elem);
+      y = svsel_f32 (p, y2, y);
+      p = svpnext_b32 (cmp, p);
+    }
+  return y;
+}
+
+static inline svfloat32_t
+sv_call2_f32 (float (*f) (float, float), svfloat32_t x1, svfloat32_t x2,
+	      svfloat32_t y, svbool_t cmp)
+{
+  svbool_t p = svpfirst (cmp, svpfalse ());
+  while (svptest_any (cmp, p))
+    {
+      float elem1 = svclastb_n_f32 (p, 0, x1);
+      float elem2 = svclastb_n_f32 (p, 0, x2);
+      float ret = (*f) (elem1, elem2);
+      svfloat32_t y2 = svdup_n_f32 (ret);
+      y = svsel_f32 (p, y2, y);
+      p = svpnext_b32 (cmp, p);
+    }
+  return y;
+}
+
+#endif
diff --git a/sysdeps/aarch64/fpu/sve_utils.h b/sysdeps/aarch64/fpu/sve_utils.h
deleted file mode 100644
index 5ce3d2e8d6..0000000000
--- a/sysdeps/aarch64/fpu/sve_utils.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/* Helpers for SVE vector math functions.
-
-   Copyright (C) 2023 Free Software Foundation, Inc.
-   This file is part of the GNU C Library.
-
-   The GNU C Library is free software; you can redistribute it and/or
-   modify it under the terms of the GNU Lesser General Public
-   License as published by the Free Software Foundation; either
-   version 2.1 of the License, or (at your option) any later version.
-
-   The GNU C Library is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-   Lesser General Public License for more details.
-
-   You should have received a copy of the GNU Lesser General Public
-   License along with the GNU C Library; if not, see
-   <https://www.gnu.org/licenses/>.  */
-
-#include <arm_sve.h>
-
-#define SV_NAME_F1(fun) _ZGVsMxv_##fun##f
-#define SV_NAME_D1(fun) _ZGVsMxv_##fun
-#define SV_NAME_F2(fun) _ZGVsMxvv_##fun##f
-#define SV_NAME_D2(fun) _ZGVsMxvv_##fun
-
-static __always_inline svfloat32_t
-sv_call_f32 (float (*f) (float), svfloat32_t x, svfloat32_t y, svbool_t cmp)
-{
-  svbool_t p = svpfirst (cmp, svpfalse ());
-  while (svptest_any (cmp, p))
-    {
-      float elem = svclastb_n_f32 (p, 0, x);
-      elem = (*f) (elem);
-      svfloat32_t y2 = svdup_n_f32 (elem);
-      y = svsel_f32 (p, y2, y);
-      p = svpnext_b32 (cmp, p);
-    }
-  return y;
-}
-
-static __always_inline svfloat64_t
-sv_call_f64 (double (*f) (double), svfloat64_t x, svfloat64_t y, svbool_t cmp)
-{
-  svbool_t p = svpfirst (cmp, svpfalse ());
-  while (svptest_any (cmp, p))
-    {
-      double elem = svclastb_n_f64 (p, 0, x);
-      elem = (*f) (elem);
-      svfloat64_t y2 = svdup_n_f64 (elem);
-      y = svsel_f64 (p, y2, y);
-      p = svpnext_b64 (cmp, p);
-    }
-  return y;
-}
diff --git a/sysdeps/aarch64/fpu/v_math.h b/sysdeps/aarch64/fpu/v_math.h
new file mode 100644
index 0000000000..77df815c33
--- /dev/null
+++ b/sysdeps/aarch64/fpu/v_math.h
@@ -0,0 +1,197 @@
+/* Utilities for Advanced SIMD libmvec routines.
+   Copyright (C) 2023 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#ifndef _V_MATH_H
+#define _V_MATH_H
+
+#include <arm_neon.h>
+#include "vecmath_config.h"
+
+#define VPCS_ATTR __attribute__ ((aarch64_vector_pcs))
+
+#define V_NAME_F1(fun) _ZGVnN4v_##fun##f
+#define V_NAME_D1(fun) _ZGVnN2v_##fun
+#define V_NAME_F2(fun) _ZGVnN4vv_##fun##f
+#define V_NAME_D2(fun) _ZGVnN2vv_##fun
+
+/* Shorthand helpers for declaring constants.  */
+#define V2(x)                                                                  \
+  {                                                                            \
+    x, x                                                                       \
+  }
+
+#define V4(x)                                                                  \
+  {                                                                            \
+    x, x, x, x                                                                 \
+  }
+
+static inline int
+v_lanes32 (void)
+{
+  return 4;
+}
+
+static inline float32x4_t
+v_f32 (float x)
+{
+  return (float32x4_t) V4 (x);
+}
+static inline uint32x4_t
+v_u32 (uint32_t x)
+{
+  return (uint32x4_t) V4 (x);
+}
+static inline int32x4_t
+v_s32 (int32_t x)
+{
+  return (int32x4_t) V4 (x);
+}
+
+static inline float
+v_get_f32 (float32x4_t x, int i)
+{
+  return x[i];
+}
+static inline uint32_t
+v_get_u32 (uint32x4_t x, int i)
+{
+  return x[i];
+}
+static inline int32_t
+v_get_s32 (int32x4_t x, int i)
+{
+  return x[i];
+}
+
+static inline void
+v_set_f32 (float32x4_t *x, int i, float v)
+{
+  (*x)[i] = v;
+}
+static inline void
+v_set_u32 (uint32x4_t *x, int i, uint32_t v)
+{
+  (*x)[i] = v;
+}
+static inline void
+v_set_s32 (int32x4_t *x, int i, int32_t v)
+{
+  (*x)[i] = v;
+}
+
+/* true if any element of a vector compare result is non-zero.  */
+static inline int
+v_any_u32 (uint32x4_t x)
+{
+  /* assume elements in x are either 0 or -1u.  */
+  return vpaddd_u64 (vreinterpretq_u64_u32 (x)) != 0;
+}
+static inline float32x4_t
+v_lookup_f32 (const float *tab, uint32x4_t idx)
+{
+  return (float32x4_t){ tab[idx[0]], tab[idx[1]], tab[idx[2]], tab[idx[3]] };
+}
+static inline uint32x4_t
+v_lookup_u32 (const uint32_t *tab, uint32x4_t idx)
+{
+  return (uint32x4_t){ tab[idx[0]], tab[idx[1]], tab[idx[2]], tab[idx[3]] };
+}
+static inline float32x4_t
+v_call_f32 (float (*f) (float), float32x4_t x, float32x4_t y, uint32x4_t p)
+{
+  return (float32x4_t){ p[0] ? f (x[0]) : y[0], p[1] ? f (x[1]) : y[1],
+			p[2] ? f (x[2]) : y[2], p[3] ? f (x[3]) : y[3] };
+}
+static inline float32x4_t
+v_call2_f32 (float (*f) (float, float), float32x4_t x1, float32x4_t x2,
+	     float32x4_t y, uint32x4_t p)
+{
+  return (float32x4_t){ p[0] ? f (x1[0], x2[0]) : y[0],
+			p[1] ? f (x1[1], x2[1]) : y[1],
+			p[2] ? f (x1[2], x2[2]) : y[2],
+			p[3] ? f (x1[3], x2[3]) : y[3] };
+}
+
+static inline int
+v_lanes64 (void)
+{
+  return 2;
+}
+static inline float64x2_t
+v_f64 (double x)
+{
+  return (float64x2_t) V2 (x);
+}
+static inline uint64x2_t
+v_u64 (uint64_t x)
+{
+  return (uint64x2_t) V2 (x);
+}
+static inline int64x2_t
+v_s64 (int64_t x)
+{
+  return (int64x2_t) V2 (x);
+}
+static inline double
+v_get_f64 (float64x2_t x, int i)
+{
+  return x[i];
+}
+static inline void
+v_set_f64 (float64x2_t *x, int i, double v)
+{
+  (*x)[i] = v;
+}
+/* true if any element of a vector compare result is non-zero.  */
+static inline int
+v_any_u64 (uint64x2_t x)
+{
+  /* assume elements in x are either 0 or -1u.  */
+  return vpaddd_u64 (x) != 0;
+}
+/* true if all elements of a vector compare result are non-zero.  */
+static inline int
+v_all_u64 (uint64x2_t x)
+{
+  /* assume elements in x are either 0 or -1u.  */
+  return vpaddd_s64 (vreinterpretq_s64_u64 (x)) == -2;
+}
+static inline float64x2_t
+v_lookup_f64 (const double *tab, uint64x2_t idx)
+{
+  return (float64x2_t){ tab[idx[0]], tab[idx[1]] };
+}
+static inline uint64x2_t
+v_lookup_u64 (const uint64_t *tab, uint64x2_t idx)
+{
+  return (uint64x2_t){ tab[idx[0]], tab[idx[1]] };
+}
+static inline float64x2_t
+v_call_f64 (double (*f) (double), float64x2_t x, float64x2_t y, uint64x2_t p)
+{
+  return (float64x2_t){ p[0] ? f (x[0]) : y[0], p[1] ? f (x[1]) : y[1] };
+}
+static inline float64x2_t
+v_call2_f64 (double (*f) (double, double), float64x2_t x1, float64x2_t x2,
+	     float64x2_t y, uint64x2_t p)
+{
+  return (float64x2_t){ p[0] ? f (x1[0], x2[0]) : y[0],
+			p[1] ? f (x1[1], x2[1]) : y[1] };
+}
+
+#endif
diff --git a/sysdeps/aarch64/fpu/advsimd_utils.h b/sysdeps/aarch64/fpu/vecmath_config.h
similarity index 57%
rename from sysdeps/aarch64/fpu/advsimd_utils.h
rename to sysdeps/aarch64/fpu/vecmath_config.h
index 8a0fcc0e06..c8f45af63b 100644
--- a/sysdeps/aarch64/fpu/advsimd_utils.h
+++ b/sysdeps/aarch64/fpu/vecmath_config.h
@@ -1,5 +1,4 @@
-/* Helpers for Advanced SIMD vector math functions.
-
+/* Configuration for libmvec routines.
    Copyright (C) 2023 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
@@ -17,23 +16,18 @@
    License along with the GNU C Library; if not, see
    <https://www.gnu.org/licenses/>.  */
 
-#include <arm_neon.h>
+#ifndef _VECMATH_CONFIG_H
+#define _VECMATH_CONFIG_H
 
-#define VPCS_ATTR __attribute__ ((aarch64_vector_pcs))
+#include <math.h>
 
-#define V_NAME_F1(fun) _ZGVnN4v_##fun##f
-#define V_NAME_D1(fun) _ZGVnN2v_##fun
-#define V_NAME_F2(fun) _ZGVnN4vv_##fun##f
-#define V_NAME_D2(fun) _ZGVnN2vv_##fun
+#define NOINLINE __attribute__ ((noinline))
+#define likely(x) __glibc_likely (x)
+#define unlikely(x) __glibc_unlikely (x)
 
-static __always_inline float32x4_t
-v_call_f32 (float (*f) (float), float32x4_t x)
-{
-  return (float32x4_t){ f (x[0]), f (x[1]), f (x[2]), f (x[3]) };
-}
+/* Deprecated config option from Arm Optimized Routines which ensures
+   fp exceptions are correctly triggered. This is not intended to be
+   supported in glibc; however, we keep it for ease of development.  */
+#define WANT_SIMD_EXCEPT 0
 
-static __always_inline float64x2_t
-v_call_f64 (double (*f) (double), float64x2_t x)
-{
-  return (float64x2_t){ f (x[0]), f (x[1]) };
-}
+#endif
diff --git a/sysdeps/aarch64/libm-test-ulps b/sysdeps/aarch64/libm-test-ulps
index da7c64942c..07da4ab843 100644
--- a/sysdeps/aarch64/libm-test-ulps
+++ b/sysdeps/aarch64/libm-test-ulps
@@ -642,7 +642,7 @@ float: 1
 ldouble: 2
 
 Function: "cos_advsimd":
-double: 1
+double: 2
 float: 1
 
 Function: "cos_downward":
-- 
2.27.0



* [PATCH 2/4] aarch64: Add vector implementations of sin routines
  2023-06-08 13:39 [PATCH 1/4] aarch64: Add vector implementations of cos routines Joe Ramsay
@ 2023-06-08 13:39 ` Joe Ramsay
  2023-06-13 18:16   ` Adhemerval Zanella Netto
  2023-06-08 13:39 ` [PATCH 3/4] aarch64: Add vector implementations of log routines Joe Ramsay
                   ` (3 subsequent siblings)
  4 siblings, 1 reply; 11+ messages in thread
From: Joe Ramsay @ 2023-06-08 13:39 UTC (permalink / raw)
  To: libc-alpha; +Cc: Joe Ramsay

Optimised implementations for single and double precision, Advanced
SIMD and SVE, copied from Arm Optimized Routines. Also allow
certain tests to be skipped for mathvec routines; for example, both
AdvSIMD algorithms discard the sign of 0.
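
As a concrete illustration of the skipped case (a sketch; the helper
name is hypothetical and the program must be linked against libmvec):

  #include <arm_neon.h>
  #include <math.h>

  __attribute__ ((aarch64_vector_pcs)) float32x4_t _ZGVnN4v_sinf (float32x4_t);

  /* Scalar sinf (-0.0f) returns -0.0f, but the AdvSIMD algorithm returns
     +0.0f, so the sin (-0) tonearest results are tagged no-mathvec, which
     gen-libm-test.py maps to NO_TEST_MATHVEC.  */
  static int
  vector_sin_discards_sign_of_zero (void)
  {
    float32x4_t v = _ZGVnN4v_sinf (vdupq_n_f32 (-0.0f));
    return signbit (sinf (-0.0f)) != signbit (vgetq_lane_f32 (v, 0));
  }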
---
 math/auto-libm-test-out-sin                   |   4 +-
 math/gen-libm-test.py                         |   3 +-
 sysdeps/aarch64/fpu/Makefile                  |   8 +-
 sysdeps/aarch64/fpu/Versions                  |   4 +
 sysdeps/aarch64/fpu/bits/math-vector.h        |   6 ++
 sysdeps/aarch64/fpu/sin_advsimd.c             | 100 ++++++++++++++++++
 sysdeps/aarch64/fpu/sin_sve.c                 |  96 +++++++++++++++++
 sysdeps/aarch64/fpu/sinf_advsimd.c            |  93 ++++++++++++++++
 sysdeps/aarch64/fpu/sinf_sve.c                |  92 ++++++++++++++++
 sysdeps/aarch64/fpu/sv_horner_wrap.h          |  55 ++++++++++
 sysdeps/aarch64/fpu/sv_hornerf.h              |  24 +++++
 .../fpu/test-double-advsimd-wrappers.c        |   1 +
 .../aarch64/fpu/test-double-sve-wrappers.c    |   1 +
 .../aarch64/fpu/test-float-advsimd-wrappers.c |   1 +
 sysdeps/aarch64/fpu/test-float-sve-wrappers.c |   1 +
 sysdeps/aarch64/libm-test-ulps                |   8 ++
 .../unix/sysv/linux/aarch64/libmvec.abilist   |   4 +
 17 files changed, 494 insertions(+), 7 deletions(-)
 create mode 100644 sysdeps/aarch64/fpu/sin_advsimd.c
 create mode 100644 sysdeps/aarch64/fpu/sin_sve.c
 create mode 100644 sysdeps/aarch64/fpu/sinf_advsimd.c
 create mode 100644 sysdeps/aarch64/fpu/sinf_sve.c
 create mode 100644 sysdeps/aarch64/fpu/sv_horner_wrap.h
 create mode 100644 sysdeps/aarch64/fpu/sv_hornerf.h

diff --git a/math/auto-libm-test-out-sin b/math/auto-libm-test-out-sin
index f1d21b179c..27ccaff1aa 100644
--- a/math/auto-libm-test-out-sin
+++ b/math/auto-libm-test-out-sin
@@ -25,11 +25,11 @@ sin 0
 = sin upward ibm128 0x0p+0 : 0x0p+0 : inexact-ok
 sin -0
 = sin downward binary32 -0x0p+0 : -0x0p+0 : inexact-ok
-= sin tonearest binary32 -0x0p+0 : -0x0p+0 : inexact-ok
+= sin tonearest binary32 -0x0p+0 : -0x0p+0 : inexact-ok no-mathvec
 = sin towardzero binary32 -0x0p+0 : -0x0p+0 : inexact-ok
 = sin upward binary32 -0x0p+0 : -0x0p+0 : inexact-ok
 = sin downward binary64 -0x0p+0 : -0x0p+0 : inexact-ok
-= sin tonearest binary64 -0x0p+0 : -0x0p+0 : inexact-ok
+= sin tonearest binary64 -0x0p+0 : -0x0p+0 : inexact-ok no-mathvec
 = sin towardzero binary64 -0x0p+0 : -0x0p+0 : inexact-ok
 = sin upward binary64 -0x0p+0 : -0x0p+0 : inexact-ok
 = sin downward intel96 -0x0p+0 : -0x0p+0 : inexact-ok
diff --git a/math/gen-libm-test.py b/math/gen-libm-test.py
index 6ae78beb01..a573c3b8cb 100755
--- a/math/gen-libm-test.py
+++ b/math/gen-libm-test.py
@@ -93,7 +93,8 @@ BEAUTIFY_MAP = {'minus_zero': '-0',
 
 # Flags in auto-libm-test-out that map directly to C flags.
 FLAGS_SIMPLE = {'ignore-zero-inf-sign': 'IGNORE_ZERO_INF_SIGN',
-                'xfail': 'XFAIL_TEST'}
+                'xfail': 'XFAIL_TEST',
+                'no-mathvec': 'NO_TEST_MATHVEC'}
 
 # Exceptions in auto-libm-test-out, and their corresponding C flags
 # for being required, OK or required to be absent.
diff --git a/sysdeps/aarch64/fpu/Makefile b/sysdeps/aarch64/fpu/Makefile
index 850cfb9012..b3285542ea 100644
--- a/sysdeps/aarch64/fpu/Makefile
+++ b/sysdeps/aarch64/fpu/Makefile
@@ -1,10 +1,10 @@
-float-advsimd-funcs = cos
+float-advsimd-funcs = cos sin
 
-double-advsimd-funcs = cos
+double-advsimd-funcs = cos sin
 
-float-sve-funcs = cos
+float-sve-funcs = cos sin
 
-double-sve-funcs = cos
+double-sve-funcs = cos sin
 
 ifeq ($(subdir),mathvec)
 libmvec-support = $(addsuffix f_advsimd,$(float-advsimd-funcs)) \
diff --git a/sysdeps/aarch64/fpu/Versions b/sysdeps/aarch64/fpu/Versions
index 5222a6f180..d26b3968a9 100644
--- a/sysdeps/aarch64/fpu/Versions
+++ b/sysdeps/aarch64/fpu/Versions
@@ -1,8 +1,12 @@
 libmvec {
   GLIBC_2.38 {
     _ZGVnN2v_cos;
+    _ZGVnN2v_sin;
     _ZGVnN4v_cosf;
+    _ZGVnN4v_sinf;
     _ZGVsMxv_cos;
     _ZGVsMxv_cosf;
+    _ZGVsMxv_sin;
+    _ZGVsMxv_sinf;
   }
 }
diff --git a/sysdeps/aarch64/fpu/bits/math-vector.h b/sysdeps/aarch64/fpu/bits/math-vector.h
index a2f2277591..ad9c9945e8 100644
--- a/sysdeps/aarch64/fpu/bits/math-vector.h
+++ b/sysdeps/aarch64/fpu/bits/math-vector.h
@@ -50,7 +50,10 @@ typedef __SVBool_t __sv_bool_t;
 #  define __vpcs __attribute__ ((__aarch64_vector_pcs__))
 
 __vpcs __f32x4_t _ZGVnN4v_cosf (__f32x4_t);
+__vpcs __f32x4_t _ZGVnN4v_sinf (__f32x4_t);
+
 __vpcs __f64x2_t _ZGVnN2v_cos (__f64x2_t);
+__vpcs __f64x2_t _ZGVnN2v_sin (__f64x2_t);
 
 #  undef __ADVSIMD_VEC_MATH_SUPPORTED
 #endif /* __ADVSIMD_VEC_MATH_SUPPORTED */
@@ -58,7 +61,10 @@ __vpcs __f64x2_t _ZGVnN2v_cos (__f64x2_t);
 #ifdef __SVE_VEC_MATH_SUPPORTED
 
 __sv_f32_t _ZGVsMxv_cosf (__sv_f32_t, __sv_bool_t);
+__sv_f32_t _ZGVsMxv_sinf (__sv_f32_t, __sv_bool_t);
+
 __sv_f64_t _ZGVsMxv_cos (__sv_f64_t, __sv_bool_t);
+__sv_f64_t _ZGVsMxv_sin (__sv_f64_t, __sv_bool_t);
 
 #  undef __SVE_VEC_MATH_SUPPORTED
 #endif /* __SVE_VEC_MATH_SUPPORTED */
diff --git a/sysdeps/aarch64/fpu/sin_advsimd.c b/sysdeps/aarch64/fpu/sin_advsimd.c
new file mode 100644
index 0000000000..1206a5d760
--- /dev/null
+++ b/sysdeps/aarch64/fpu/sin_advsimd.c
@@ -0,0 +1,100 @@
+/* Double-precision vector (Advanced SIMD) sin function.
+
+   Copyright (C) 2023 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include "v_math.h"
+
+static const volatile struct
+{
+  float64x2_t poly[7];
+  float64x2_t range_val, inv_pi, shift, pi_1, pi_2, pi_3;
+} data = {
+  /* Worst-case error is 2.8 ulp in [-pi/2, pi/2].  */
+  .poly = { V2 (-0x1.555555555547bp-3), V2 (0x1.1111111108a4dp-7),
+	    V2 (-0x1.a01a019936f27p-13), V2 (0x1.71de37a97d93ep-19),
+	    V2 (-0x1.ae633919987c6p-26), V2 (0x1.60e277ae07cecp-33),
+	    V2 (-0x1.9e9540300a1p-41) },
+
+  .range_val = V2 (0x1p23),
+  .inv_pi = V2 (0x1.45f306dc9c883p-2),
+  .pi_1 = V2 (0x1.921fb54442d18p+1),
+  .pi_2 = V2 (0x1.1a62633145c06p-53),
+  .pi_3 = V2 (0x1.c1cd129024e09p-106),
+  .shift = V2 (0x1.8p52),
+};
+
+#if WANT_SIMD_EXCEPT
+# define TinyBound v_u64 (0x3000000000000000) /* asuint64 (0x1p-255).  */
+# define Thresh v_u64 (0x1160000000000000)    /* RangeVal - TinyBound.  */
+#endif
+
+#define C(i) data.poly[i]
+
+static float64x2_t VPCS_ATTR NOINLINE
+special_case (float64x2_t x, float64x2_t y, uint64x2_t odd, uint64x2_t cmp)
+{
+  y = vreinterpretq_f64_u64 (veorq_u64 (vreinterpretq_u64_f64 (y), odd));
+  return v_call_f64 (sin, x, y, cmp);
+}
+
+float64x2_t VPCS_ATTR V_NAME_D1 (sin) (float64x2_t x)
+{
+  float64x2_t n, r, r2, r3, r4, y, t1, t2, t3;
+  uint64x2_t odd, cmp;
+
+#if WANT_SIMD_EXCEPT
+  /* Detect |x| <= TinyBound or |x| >= RangeVal. If fenv exceptions are to be
+     triggered correctly, set any special lanes to 1 (which is neutral w.r.t.
+     fenv). These lanes will be fixed by special-case handler later.  */
+  uint64x2_t ir = vreinterpretq_u64_f64 (vabsq_f64 (x));
+  cmp = vcgeq_u64 (vsubq_u64 (ir, TinyBound), Thresh);
+  r = vbslq_f64 (cmp, vreinterpretq_f64_u64 (cmp), x);
+#else
+  r = x;
+  cmp = vcageq_f64 (data.range_val, x);
+  cmp = vceqzq_u64 (cmp); /* cmp = ~cmp.  */
+#endif
+
+  /* n = rint(|x|/pi).  */
+  n = vfmaq_f64 (data.shift, data.inv_pi, r);
+  odd = vshlq_n_u64 (vreinterpretq_u64_f64 (n), 63);
+  n = vsubq_f64 (n, data.shift);
+
+  /* r = |x| - n*pi  (range reduction into -pi/2 .. pi/2).  */
+  r = vfmsq_f64 (r, data.pi_1, n);
+  r = vfmsq_f64 (r, data.pi_2, n);
+  r = vfmsq_f64 (r, data.pi_3, n);
+
+  /* sin(r) poly approx.  */
+  r2 = vmulq_f64 (r, r);
+  r3 = vmulq_f64 (r2, r);
+  r4 = vmulq_f64 (r2, r2);
+
+  t1 = vfmaq_f64 (C (4), C (5), r2);
+  t2 = vfmaq_f64 (C (2), C (3), r2);
+  t3 = vfmaq_f64 (C (0), C (1), r2);
+
+  y = vfmaq_f64 (t1, C (6), r4);
+  y = vfmaq_f64 (t2, y, r4);
+  y = vfmaq_f64 (t3, y, r4);
+  y = vfmaq_f64 (r, y, r3);
+
+  if (unlikely (v_any_u64 (cmp)))
+    return special_case (x, y, odd, cmp);
+  return vreinterpretq_f64_u64 (veorq_u64 (vreinterpretq_u64_f64 (y), odd));
+}
diff --git a/sysdeps/aarch64/fpu/sin_sve.c b/sysdeps/aarch64/fpu/sin_sve.c
new file mode 100644
index 0000000000..3750700759
--- /dev/null
+++ b/sysdeps/aarch64/fpu/sin_sve.c
@@ -0,0 +1,96 @@
+/* Double-precision vector (SVE) sin function.
+
+   Copyright (C) 2023 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include "sv_math.h"
+
+static struct
+{
+  double inv_pi, half_pi, inv_pi_over_2, pi_over_2_1, pi_over_2_2, pi_over_2_3,
+      shift;
+} data = {
+  /* Polynomial coefficients are hard-wired in the FTMAD instruction.  */
+  .inv_pi = 0x1.45f306dc9c883p-2,
+  .half_pi = 0x1.921fb54442d18p+0,
+  .inv_pi_over_2 = 0x1.45f306dc9c882p-1,
+  .pi_over_2_1 = 0x1.921fb50000000p+0,
+  .pi_over_2_2 = 0x1.110b460000000p-26,
+  .pi_over_2_3 = 0x1.1a62633145c07p-54,
+  .shift = 0x1.8p52
+};
+
+#define RangeVal 0x4160000000000000 /* asuint64 (0x1p23).  */
+
+static svfloat64_t NOINLINE
+special_case (svfloat64_t x, svfloat64_t y, svbool_t cmp)
+{
+  return sv_call_f64 (sin, x, y, cmp);
+}
+
+/* A fast SVE implementation of sin based on trigonometric
+   instructions (FTMAD, FTSSEL, FTSMUL).
+   Maximum observed error is 2.52 ULP:
+   SV_NAME_D1 (sin)(0x1.2d2b00df69661p+19) got 0x1.10ace8f3e786bp-40
+					  want 0x1.10ace8f3e7868p-40.  */
+svfloat64_t SV_NAME_D1 (sin) (svfloat64_t x, const svbool_t pg)
+{
+  svfloat64_t r = svabs_f64_x (pg, x);
+  svuint64_t sign
+      = sveor_u64_x (pg, svreinterpret_u64_f64 (x), svreinterpret_u64_f64 (r));
+  svbool_t cmp = svcmpge_n_u64 (pg, svreinterpret_u64_f64 (r), RangeVal);
+
+  /* Load first two pio2-related constants to one vector.  */
+  svfloat64_t invpio2_and_pio2_1
+      = svld1rq_f64 (svptrue_b64 (), &data.inv_pi_over_2);
+
+  /* n = rint(|x|/(pi/2)).  */
+  svfloat64_t q
+      = svmla_lane_f64 (sv_f64 (data.shift), r, invpio2_and_pio2_1, 0);
+  svfloat64_t n = svsub_n_f64_x (pg, q, data.shift);
+
+  /* r = |x| - n*(pi/2)  (range reduction into -pi/4 .. pi/4).  */
+  r = svmls_lane_f64 (r, n, invpio2_and_pio2_1, 1);
+  r = svmls_n_f64_x (pg, r, n, data.pi_over_2_2);
+  r = svmls_n_f64_x (pg, r, n, data.pi_over_2_3);
+
+  /* Final multiplicative factor: 1.0 or x depending on bit #0 of q.  */
+  svfloat64_t f = svtssel_f64 (r, svreinterpret_u64_f64 (q));
+
+  /* sin(r) poly approx.  */
+  svfloat64_t r2 = svtsmul_f64 (r, svreinterpret_u64_f64 (q));
+  svfloat64_t y = sv_f64 (0.0);
+  y = svtmad_f64 (y, r2, 7);
+  y = svtmad_f64 (y, r2, 6);
+  y = svtmad_f64 (y, r2, 5);
+  y = svtmad_f64 (y, r2, 4);
+  y = svtmad_f64 (y, r2, 3);
+  y = svtmad_f64 (y, r2, 2);
+  y = svtmad_f64 (y, r2, 1);
+  y = svtmad_f64 (y, r2, 0);
+
+  /* Apply factor.  */
+  y = svmul_f64_x (pg, f, y);
+
+  /* sign = y^sign.  */
+  y = svreinterpret_f64_u64 (
+      sveor_u64_x (pg, svreinterpret_u64_f64 (y), sign));
+
+  if (unlikely (svptest_any (pg, cmp)))
+    return special_case (x, y, cmp);
+  return y;
+}
diff --git a/sysdeps/aarch64/fpu/sinf_advsimd.c b/sysdeps/aarch64/fpu/sinf_advsimd.c
new file mode 100644
index 0000000000..6267594000
--- /dev/null
+++ b/sysdeps/aarch64/fpu/sinf_advsimd.c
@@ -0,0 +1,93 @@
+/* Single-precision vector (Advanced SIMD) sin function.
+
+   Copyright (C) 2023 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include "v_math.h"
+
+static const volatile struct
+{
+  float32x4_t poly[4];
+  float32x4_t range_val, inv_pi, shift, pi_1, pi_2, pi_3;
+} data = {
+  /* 1.886 ulp error.  */
+  .poly = { V4 (-0x1.555548p-3f), V4 (0x1.110df4p-7f), V4 (-0x1.9f42eap-13f),
+	    V4 (0x1.5b2e76p-19f) },
+
+  .pi_1 = V4 (0x1.921fb6p+1f),
+  .pi_2 = V4 (-0x1.777a5cp-24f),
+  .pi_3 = V4 (-0x1.ee59dap-49f),
+
+  .inv_pi = V4 (0x1.45f306p-2f),
+  .shift = V4 (0x1.8p+23f),
+  .range_val = V4 (0x1p20f)
+};
+
+#if WANT_SIMD_EXCEPT
+# define TinyBound v_u32 (0x21000000) /* asuint32(0x1p-61f).  */
+# define Thresh v_u32 (0x28800000)    /* RangeVal - TinyBound.  */
+#endif
+
+#define C(i) data.poly[i]
+
+static float32x4_t VPCS_ATTR NOINLINE
+special_case (float32x4_t x, float32x4_t y, uint32x4_t odd, uint32x4_t cmp)
+{
+  /* Fall back to scalar code.  */
+  y = vreinterpretq_f32_u32 (veorq_u32 (vreinterpretq_u32_f32 (y), odd));
+  return v_call_f32 (sinf, x, y, cmp);
+}
+
+float32x4_t VPCS_ATTR V_NAME_F1 (sin) (float32x4_t x)
+{
+  float32x4_t n, r, r2, y;
+  uint32x4_t odd, cmp;
+
+#if WANT_SIMD_EXCEPT
+  uint32x4_t ir = vreinterpretq_u32_f32 (vabsq_f32 (x));
+  cmp = vcgeq_u32 (vsubq_u32 (ir, TinyBound), Thresh);
+  /* If fenv exceptions are to be triggered correctly, set any special lanes
+     to 1 (which is neutral w.r.t. fenv). These lanes will be fixed by
+     special-case handler later.  */
+  r = vbslq_f32 (cmp, vreinterpretq_f32_u32 (cmp), x);
+#else
+  r = x;
+  cmp = vcageq_f32 (data.range_val, x);
+  cmp = vceqzq_u32 (cmp); /* cmp = ~cmp.  */
+#endif
+
+  /* n = rint(|x|/pi) */
+  n = vfmaq_f32 (data.shift, data.inv_pi, r);
+  odd = vshlq_n_u32 (vreinterpretq_u32_f32 (n), 31);
+  n = vsubq_f32 (n, data.shift);
+
+  /* r = |x| - n*pi  (range reduction into -pi/2 .. pi/2) */
+  r = vfmsq_f32 (r, data.pi_1, n);
+  r = vfmsq_f32 (r, data.pi_2, n);
+  r = vfmsq_f32 (r, data.pi_3, n);
+
+  /* y = sin(r) */
+  r2 = vmulq_f32 (r, r);
+  y = vfmaq_f32 (C (2), C (3), r2);
+  y = vfmaq_f32 (C (1), y, r2);
+  y = vfmaq_f32 (C (0), y, r2);
+  y = vfmaq_f32 (r, vmulq_f32 (y, r2), r);
+
+  if (unlikely (v_any_u32 (cmp)))
+    return special_case (x, y, odd, cmp);
+  return vreinterpretq_f32_u32 (veorq_u32 (vreinterpretq_u32_f32 (y), odd));
+}
diff --git a/sysdeps/aarch64/fpu/sinf_sve.c b/sysdeps/aarch64/fpu/sinf_sve.c
new file mode 100644
index 0000000000..4159d90534
--- /dev/null
+++ b/sysdeps/aarch64/fpu/sinf_sve.c
@@ -0,0 +1,92 @@
+/* Single-precision vector (SVE) sin function.
+
+   Copyright (C) 2023 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include "sv_math.h"
+#include "sv_hornerf.h"
+
+static struct
+{
+  float poly[4];
+  /* Pi-related values to be loaded as one quad-word and used with
+     svmla_lane_f32.  */
+  float negpi1, negpi2, negpi3, invpi;
+  float shift;
+} data = {
+  .poly = {
+    /* Non-zero coefficients from the degree 9 Taylor series expansion of
+       sin.  */
+    -0x1.555548p-3f, 0x1.110df4p-7f, -0x1.9f42eap-13f, 0x1.5b2e76p-19f
+  },
+  .negpi1 = -0x1.921fb6p+1f,
+  .negpi2 = 0x1.777a5cp-24f,
+  .negpi3 = 0x1.ee59dap-49f,
+  .invpi = 0x1.45f306p-2f,
+  .shift = 0x1.8p+23f
+};
+
+#define RangeVal 0x49800000 /* asuint32 (0x1p20f).  */
+#define C(i) data.poly[i]
+
+static svfloat32_t NOINLINE
+special_case (svfloat32_t x, svfloat32_t y, svbool_t cmp)
+{
+  return sv_call_f32 (sinf, x, y, cmp);
+}
+
+/* A fast SVE implementation of sinf.
+   Maximum error: 1.89 ULPs.
+   This maximum error is achieved at multiple values in [-2^18, 2^18]
+   but one example is:
+   SV_NAME_F1 (sin)(0x1.9247a4p+0) got 0x1.fffff6p-1 want 0x1.fffffap-1.  */
+svfloat32_t SV_NAME_F1 (sin) (svfloat32_t x, const svbool_t pg)
+{
+  svfloat32_t ax = svabs_f32_x (pg, x);
+  svuint32_t sign = sveor_u32_x (pg, svreinterpret_u32_f32 (x),
+				 svreinterpret_u32_f32 (ax));
+  svbool_t cmp = svcmpge_n_u32 (pg, svreinterpret_u32_f32 (ax), RangeVal);
+
+  /* pi_vals are a quad-word of helper values - the first 3 elements contain
+     -pi in extended precision, the last contains 1 / pi.  */
+  svfloat32_t pi_vals = svld1rq_f32 (svptrue_b32 (), &data.negpi1);
+
+  /* n = rint(|x|/pi).  */
+  svfloat32_t n = svmla_lane_f32 (sv_f32 (data.shift), ax, pi_vals, 3);
+  svuint32_t odd = svlsl_n_u32_x (pg, svreinterpret_u32_f32 (n), 31);
+  n = svsub_n_f32_x (pg, n, data.shift);
+
+  /* r = |x| - n*pi  (range reduction into -pi/2 .. pi/2).  */
+  svfloat32_t r;
+  r = svmla_lane_f32 (ax, n, pi_vals, 0);
+  r = svmla_lane_f32 (r, n, pi_vals, 1);
+  r = svmla_lane_f32 (r, n, pi_vals, 2);
+
+  /* sin(r) approx using a degree 9 polynomial from the Taylor series
+     expansion. Note that only the odd terms of this are non-zero.  */
+  svfloat32_t r2 = svmul_f32_x (pg, r, r);
+  svfloat32_t y = HORNER_3 (pg, r2, C);
+  y = svmla_f32_x (pg, r, r, svmul_f32_x (pg, y, r2));
+
+  /* sign = y^sign^odd.  */
+  y = svreinterpret_f32_u32 (sveor_u32_x (pg, svreinterpret_u32_f32 (y),
+					  sveor_u32_x (pg, sign, odd)));
+
+  if (unlikely (svptest_any (pg, cmp)))
+    return special_case (x, y, cmp);
+  return y;
+}
diff --git a/sysdeps/aarch64/fpu/sv_horner_wrap.h b/sysdeps/aarch64/fpu/sv_horner_wrap.h
new file mode 100644
index 0000000000..142a06d5c4
--- /dev/null
+++ b/sysdeps/aarch64/fpu/sv_horner_wrap.h
@@ -0,0 +1,55 @@
+/* Helper macros for Horner polynomial evaluation in SVE routines.
+
+   Copyright (C) 2023 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#define HORNER_1_(pg, x, c, i) FMA (pg, VECTOR (c (i + 1)), x, VECTOR (c (i)))
+#define HORNER_2_(pg, x, c, i)                                                \
+  FMA (pg, HORNER_1_ (pg, x, c, i + 1), x, VECTOR (c (i)))
+#define HORNER_3_(pg, x, c, i)                                                \
+  FMA (pg, HORNER_2_ (pg, x, c, i + 1), x, VECTOR (c (i)))
+#define HORNER_4_(pg, x, c, i)                                                \
+  FMA (pg, HORNER_3_ (pg, x, c, i + 1), x, VECTOR (c (i)))
+#define HORNER_5_(pg, x, c, i)                                                \
+  FMA (pg, HORNER_4_ (pg, x, c, i + 1), x, VECTOR (c (i)))
+#define HORNER_6_(pg, x, c, i)                                                \
+  FMA (pg, HORNER_5_ (pg, x, c, i + 1), x, VECTOR (c (i)))
+#define HORNER_7_(pg, x, c, i)                                                \
+  FMA (pg, HORNER_6_ (pg, x, c, i + 1), x, VECTOR (c (i)))
+#define HORNER_8_(pg, x, c, i)                                                \
+  FMA (pg, HORNER_7_ (pg, x, c, i + 1), x, VECTOR (c (i)))
+#define HORNER_9_(pg, x, c, i)                                                \
+  FMA (pg, HORNER_8_ (pg, x, c, i + 1), x, VECTOR (c (i)))
+#define HORNER_10_(pg, x, c, i)                                               \
+  FMA (pg, HORNER_9_ (pg, x, c, i + 1), x, VECTOR (c (i)))
+#define HORNER_11_(pg, x, c, i)                                               \
+  FMA (pg, HORNER_10_ (pg, x, c, i + 1), x, VECTOR (c (i)))
+#define HORNER_12_(pg, x, c, i)                                               \
+  FMA (pg, HORNER_11_ (pg, x, c, i + 1), x, VECTOR (c (i)))
+
+#define HORNER_1(pg, x, c) HORNER_1_ (pg, x, c, 0)
+#define HORNER_2(pg, x, c) HORNER_2_ (pg, x, c, 0)
+#define HORNER_3(pg, x, c) HORNER_3_ (pg, x, c, 0)
+#define HORNER_4(pg, x, c) HORNER_4_ (pg, x, c, 0)
+#define HORNER_5(pg, x, c) HORNER_5_ (pg, x, c, 0)
+#define HORNER_6(pg, x, c) HORNER_6_ (pg, x, c, 0)
+#define HORNER_7(pg, x, c) HORNER_7_ (pg, x, c, 0)
+#define HORNER_8(pg, x, c) HORNER_8_ (pg, x, c, 0)
+#define HORNER_9(pg, x, c) HORNER_9_ (pg, x, c, 0)
+#define HORNER_10(pg, x, c) HORNER_10_ (pg, x, c, 0)
+#define HORNER_11(pg, x, c) HORNER_11_ (pg, x, c, 0)
+#define HORNER_12(pg, x, c) HORNER_12_ (pg, x, c, 0)
diff --git a/sysdeps/aarch64/fpu/sv_hornerf.h b/sysdeps/aarch64/fpu/sv_hornerf.h
new file mode 100644
index 0000000000..146c117019
--- /dev/null
+++ b/sysdeps/aarch64/fpu/sv_hornerf.h
@@ -0,0 +1,24 @@
+/* Helper macros for single-precision Horner polynomial evaluation
+   in SVE routines.
+
+   Copyright (C) 2023 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#define FMA(pg, x, y, z) svmla_f32_x (pg, z, x, y)
+#define VECTOR sv_f32
+
+#include "sv_horner_wrap.h"
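
For clarity, with the single-precision FMA and VECTOR definitions
above, the HORNER_3 (pg, r2, C) call in sinf_sve.c expands to a chain
of predicated fused multiply-adds equivalent to this sketch:

  /* y = C (0) + r2 * (C (1) + r2 * (C (2) + r2 * C (3))).  */
  svfloat32_t t = svmla_f32_x (pg, sv_f32 (C (2)), sv_f32 (C (3)), r2);
  t = svmla_f32_x (pg, sv_f32 (C (1)), t, r2);
  svfloat32_t y = svmla_f32_x (pg, sv_f32 (C (0)), t, r2);
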
diff --git a/sysdeps/aarch64/fpu/test-double-advsimd-wrappers.c b/sysdeps/aarch64/fpu/test-double-advsimd-wrappers.c
index cb45fd3298..4af97a25a2 100644
--- a/sysdeps/aarch64/fpu/test-double-advsimd-wrappers.c
+++ b/sysdeps/aarch64/fpu/test-double-advsimd-wrappers.c
@@ -24,3 +24,4 @@
 #define VEC_TYPE float64x2_t
 
 VPCS_VECTOR_WRAPPER (cos_advsimd, _ZGVnN2v_cos)
+VPCS_VECTOR_WRAPPER (sin_advsimd, _ZGVnN2v_sin)
diff --git a/sysdeps/aarch64/fpu/test-double-sve-wrappers.c b/sysdeps/aarch64/fpu/test-double-sve-wrappers.c
index cf72ef83b7..64c790adc5 100644
--- a/sysdeps/aarch64/fpu/test-double-sve-wrappers.c
+++ b/sysdeps/aarch64/fpu/test-double-sve-wrappers.c
@@ -33,3 +33,4 @@
   }
 
 SVE_VECTOR_WRAPPER (cos_sve, _ZGVsMxv_cos)
+SVE_VECTOR_WRAPPER (sin_sve, _ZGVsMxv_sin)
diff --git a/sysdeps/aarch64/fpu/test-float-advsimd-wrappers.c b/sysdeps/aarch64/fpu/test-float-advsimd-wrappers.c
index fa146862b0..50e776b952 100644
--- a/sysdeps/aarch64/fpu/test-float-advsimd-wrappers.c
+++ b/sysdeps/aarch64/fpu/test-float-advsimd-wrappers.c
@@ -24,3 +24,4 @@
 #define VEC_TYPE float32x4_t
 
 VPCS_VECTOR_WRAPPER (cosf_advsimd, _ZGVnN4v_cosf)
+VPCS_VECTOR_WRAPPER (sinf_advsimd, _ZGVnN4v_sinf)
diff --git a/sysdeps/aarch64/fpu/test-float-sve-wrappers.c b/sysdeps/aarch64/fpu/test-float-sve-wrappers.c
index bc26558c62..7355032929 100644
--- a/sysdeps/aarch64/fpu/test-float-sve-wrappers.c
+++ b/sysdeps/aarch64/fpu/test-float-sve-wrappers.c
@@ -33,3 +33,4 @@
   }
 
 SVE_VECTOR_WRAPPER (cosf_sve, _ZGVsMxv_cosf)
+SVE_VECTOR_WRAPPER (sinf_sve, _ZGVsMxv_sinf)
diff --git a/sysdeps/aarch64/libm-test-ulps b/sysdeps/aarch64/libm-test-ulps
index 07da4ab843..4145662b2d 100644
--- a/sysdeps/aarch64/libm-test-ulps
+++ b/sysdeps/aarch64/libm-test-ulps
@@ -1257,11 +1257,19 @@ double: 1
 float: 1
 ldouble: 2
 
+Function: "sin_advsimd":
+double: 2
+float: 1
+
 Function: "sin_downward":
 double: 1
 float: 1
 ldouble: 3
 
+Function: "sin_sve":
+double: 2
+float: 1
+
 Function: "sin_towardzero":
 double: 1
 float: 1
diff --git a/sysdeps/unix/sysv/linux/aarch64/libmvec.abilist b/sysdeps/unix/sysv/linux/aarch64/libmvec.abilist
index 13af421af2..a4c564859c 100644
--- a/sysdeps/unix/sysv/linux/aarch64/libmvec.abilist
+++ b/sysdeps/unix/sysv/linux/aarch64/libmvec.abilist
@@ -1,4 +1,8 @@
 GLIBC_2.38 _ZGVnN2v_cos F
+GLIBC_2.38 _ZGVnN2v_sin F
 GLIBC_2.38 _ZGVnN4v_cosf F
+GLIBC_2.38 _ZGVnN4v_sinf F
 GLIBC_2.38 _ZGVsMxv_cos F
 GLIBC_2.38 _ZGVsMxv_cosf F
+GLIBC_2.38 _ZGVsMxv_sin F
+GLIBC_2.38 _ZGVsMxv_sinf F
-- 
2.27.0



* [PATCH 3/4] aarch64: Add vector implementations of log routines
  2023-06-08 13:39 [PATCH 1/4] aarch64: Add vector implementations of cos routines Joe Ramsay
  2023-06-08 13:39 ` [PATCH 2/4] aarch64: Add vector implementations of sin routines Joe Ramsay
@ 2023-06-08 13:39 ` Joe Ramsay
  2023-06-14 13:27   ` Adhemerval Zanella Netto
  2023-06-08 13:39 ` [PATCH 4/4] aarch64: Add vector implementations of exp routines Joe Ramsay
                   ` (2 subsequent siblings)
  4 siblings, 1 reply; 11+ messages in thread
From: Joe Ramsay @ 2023-06-08 13:39 UTC (permalink / raw)
  To: libc-alpha; +Cc: Joe Ramsay

Optimised implementations for single and double precision, Advanced
SIMD and SVE, copied from Arm Optimized Routines. The log lookup
table is added as a HIDDEN symbol so that it can be shared between
the AdvSIMD and SVE variants.
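
The SVE entry points take an extra predicate argument selecting the
active lanes; a minimal usage sketch (the wrapper name is
hypothetical; compile with SVE enabled and link against libmvec):

  #include <arm_sve.h>

  svfloat64_t _ZGVsMxv_log (svfloat64_t, svbool_t);

  static svfloat64_t
  log_all_lanes (svfloat64_t x)
  {
    /* Compute log for every lane of x.  */
    return _ZGVsMxv_log (x, svptrue_b64 ());
  }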
---
 sysdeps/aarch64/fpu/Makefile                  |  11 +-
 sysdeps/aarch64/fpu/Versions                  |   4 +
 sysdeps/aarch64/fpu/bits/math-vector.h        |   4 +
 sysdeps/aarch64/fpu/log_advsimd.c             | 104 +++++++++++
 sysdeps/aarch64/fpu/log_sve.c                 |  80 ++++++++
 sysdeps/aarch64/fpu/logf_advsimd.c            |  80 ++++++++
 sysdeps/aarch64/fpu/logf_sve.c                |  85 +++++++++
 .../fpu/test-double-advsimd-wrappers.c        |   1 +
 .../aarch64/fpu/test-double-sve-wrappers.c    |   1 +
 .../aarch64/fpu/test-float-advsimd-wrappers.c |   1 +
 sysdeps/aarch64/fpu/test-float-sve-wrappers.c |   1 +
 sysdeps/aarch64/fpu/v_log_data.c              | 173 ++++++++++++++++++
 sysdeps/aarch64/fpu/vecmath_config.h          |  11 ++
 sysdeps/aarch64/libm-test-ulps                |   8 +
 .../unix/sysv/linux/aarch64/libmvec.abilist   |   4 +
 15 files changed, 563 insertions(+), 5 deletions(-)
 create mode 100644 sysdeps/aarch64/fpu/log_advsimd.c
 create mode 100644 sysdeps/aarch64/fpu/log_sve.c
 create mode 100644 sysdeps/aarch64/fpu/logf_advsimd.c
 create mode 100644 sysdeps/aarch64/fpu/logf_sve.c
 create mode 100644 sysdeps/aarch64/fpu/v_log_data.c

diff --git a/sysdeps/aarch64/fpu/Makefile b/sysdeps/aarch64/fpu/Makefile
index b3285542ea..3f9cd2d000 100644
--- a/sysdeps/aarch64/fpu/Makefile
+++ b/sysdeps/aarch64/fpu/Makefile
@@ -1,16 +1,17 @@
-float-advsimd-funcs = cos sin
+float-advsimd-funcs = cos sin log
 
-double-advsimd-funcs = cos sin
+double-advsimd-funcs = cos sin log
 
-float-sve-funcs = cos sin
+float-sve-funcs = cos sin log
 
-double-sve-funcs = cos sin
+double-sve-funcs = cos sin log
 
 ifeq ($(subdir),mathvec)
 libmvec-support = $(addsuffix f_advsimd,$(float-advsimd-funcs)) \
                   $(addsuffix _advsimd,$(double-advsimd-funcs)) \
                   $(addsuffix f_sve,$(float-sve-funcs)) \
-                  $(addsuffix _sve,$(double-sve-funcs))
+                  $(addsuffix _sve,$(double-sve-funcs)) \
+                  v_log_data
 endif
 
 sve-cflags = -march=armv8-a+sve
diff --git a/sysdeps/aarch64/fpu/Versions b/sysdeps/aarch64/fpu/Versions
index d26b3968a9..902446f40d 100644
--- a/sysdeps/aarch64/fpu/Versions
+++ b/sysdeps/aarch64/fpu/Versions
@@ -1,11 +1,15 @@
 libmvec {
   GLIBC_2.38 {
     _ZGVnN2v_cos;
+    _ZGVnN2v_log;
     _ZGVnN2v_sin;
     _ZGVnN4v_cosf;
+    _ZGVnN4v_logf;
     _ZGVnN4v_sinf;
     _ZGVsMxv_cos;
     _ZGVsMxv_cosf;
+    _ZGVsMxv_log;
+    _ZGVsMxv_logf;
     _ZGVsMxv_sin;
     _ZGVsMxv_sinf;
   }
diff --git a/sysdeps/aarch64/fpu/bits/math-vector.h b/sysdeps/aarch64/fpu/bits/math-vector.h
index ad9c9945e8..70c737338e 100644
--- a/sysdeps/aarch64/fpu/bits/math-vector.h
+++ b/sysdeps/aarch64/fpu/bits/math-vector.h
@@ -50,9 +50,11 @@ typedef __SVBool_t __sv_bool_t;
 #  define __vpcs __attribute__ ((__aarch64_vector_pcs__))
 
 __vpcs __f32x4_t _ZGVnN4v_cosf (__f32x4_t);
+__vpcs __f32x4_t _ZGVnN4v_logf (__f32x4_t);
 __vpcs __f32x4_t _ZGVnN4v_sinf (__f32x4_t);
 
 __vpcs __f64x2_t _ZGVnN2v_cos (__f64x2_t);
+__vpcs __f64x2_t _ZGVnN2v_log (__f64x2_t);
 __vpcs __f64x2_t _ZGVnN2v_sin (__f64x2_t);
 
 #  undef __ADVSIMD_VEC_MATH_SUPPORTED
@@ -61,9 +63,11 @@ __vpcs __f64x2_t _ZGVnN2v_sin (__f64x2_t);
 #ifdef __SVE_VEC_MATH_SUPPORTED
 
 __sv_f32_t _ZGVsMxv_cosf (__sv_f32_t, __sv_bool_t);
+__sv_f32_t _ZGVsMxv_logf (__sv_f32_t, __sv_bool_t);
 __sv_f32_t _ZGVsMxv_sinf (__sv_f32_t, __sv_bool_t);
 
 __sv_f64_t _ZGVsMxv_cos (__sv_f64_t, __sv_bool_t);
+__sv_f64_t _ZGVsMxv_log (__sv_f64_t, __sv_bool_t);
 __sv_f64_t _ZGVsMxv_sin (__sv_f64_t, __sv_bool_t);
 
 #  undef __SVE_VEC_MATH_SUPPORTED
diff --git a/sysdeps/aarch64/fpu/log_advsimd.c b/sysdeps/aarch64/fpu/log_advsimd.c
new file mode 100644
index 0000000000..b8f10efe35
--- /dev/null
+++ b/sysdeps/aarch64/fpu/log_advsimd.c
@@ -0,0 +1,104 @@
+/* Double-precision vector (Advanced SIMD) log function.
+
+   Copyright (C) 2023 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include "v_math.h"
+
+static const volatile struct
+{
+  float64x2_t poly[5];
+  float64x2_t ln2;
+  uint64x2_t min_norm, special_bound, sign_exp_mask;
+} data = {
+  /* Worst-case error: 1.17 + 0.5 ulp.
+     Rel error: 0x1.6272e588p-56 in [ -0x1.fc1p-9 0x1.009p-8 ].  */
+  .poly = { V2 (-0x1.ffffffffffff7p-2), V2 (0x1.55555555170d4p-2),
+	    V2 (-0x1.0000000399c27p-2), V2 (0x1.999b2e90e94cap-3),
+	    V2 (-0x1.554e550bd501ep-3) },
+  .ln2 = V2 (0x1.62e42fefa39efp-1),
+  .min_norm = V2 (0x0010000000000000),
+  .special_bound = V2 (0x7fe0000000000000), /* asuint64(inf) - min_norm.  */
+  .sign_exp_mask = V2 (0xfff0000000000000)
+};
+
+#define A(i) data.poly[i]
+#define N (1 << V_LOG_TABLE_BITS)
+#define IndexMask (N - 1)
+#define Off v_u64 (0x3fe6900900000000)
+
+struct entry
+{
+  float64x2_t invc;
+  float64x2_t logc;
+};
+
+static inline struct entry
+lookup (uint64x2_t i)
+{
+  /* Since N is a power of 2, n % N = n & (N - 1).  */
+  struct entry e;
+  e.invc[0] = __v_log_data.invc[i[0] & IndexMask];
+  e.logc[0] = __v_log_data.logc[i[0] & IndexMask];
+  e.invc[1] = __v_log_data.invc[i[1] & IndexMask];
+  e.logc[1] = __v_log_data.logc[i[1] & IndexMask];
+  return e;
+}
+
+static float64x2_t VPCS_ATTR NOINLINE
+special_case (float64x2_t x, float64x2_t y, uint64x2_t cmp)
+{
+  return v_call_f64 (log, x, y, cmp);
+}
+
+float64x2_t VPCS_ATTR V_NAME_D1 (log) (float64x2_t x)
+{
+  float64x2_t z, r, r2, p, y, kd, hi;
+  uint64x2_t ix, iz, tmp, cmp;
+  int64x2_t k;
+  struct entry e;
+
+  ix = vreinterpretq_u64_f64 (x);
+  cmp = vcgeq_u64 (vsubq_u64 (ix, data.min_norm), data.special_bound);
+
+  /* x = 2^k z; where z is in range [Off,2*Off) and exact.
+     The range is split into N subintervals.
+     The ith subinterval contains z and c is near its center.  */
+  tmp = vsubq_u64 (ix, Off);
+  k = vshrq_n_s64 (vreinterpretq_s64_u64 (tmp), 52); /* arithmetic shift.  */
+  iz = vsubq_u64 (ix, vandq_u64 (tmp, data.sign_exp_mask));
+  z = vreinterpretq_f64_u64 (iz);
+  e = lookup (vshrq_n_u64 (tmp, 52 - V_LOG_TABLE_BITS));
+
+  /* log(x) = log1p(z/c-1) + log(c) + k*Ln2.  */
+  r = vfmaq_f64 (v_f64 (-1.0), z, e.invc);
+  kd = vcvtq_f64_s64 (k);
+
+  /* hi = r + log(c) + k*Ln2.  */
+  hi = vfmaq_f64 (vaddq_f64 (e.logc, r), kd, data.ln2);
+  /* y = r2*(A0 + r*A1 + r2*(A2 + r*A3 + r2*A4)) + hi.  */
+  r2 = vmulq_f64 (r, r);
+  y = vfmaq_f64 (A (2), A (3), r);
+  p = vfmaq_f64 (A (0), A (1), r);
+  y = vfmaq_f64 (y, A (4), r2);
+  y = vfmaq_f64 (p, y, r2);
+  y = vfmaq_f64 (hi, y, r2);
+
+  if (unlikely (v_any_u64 (cmp)))
+    return special_case (x, y, cmp);
+  return y;
+}
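
The key step above is the bit-level split x = 2^k * z, with the table index
taken from the top mantissa bits of z. A scalar sketch of that decomposition
(illustrative only, not part of the patch, assuming the same Off constant and
table size):

    #include <stdint.h>
    #include <string.h>

    #define TABLE_BITS 7
    #define OFF 0x3fe6900900000000ULL

    /* Split x into x = 2^k * z with z in [Off, 2*Off), and take the table
       index i from the top TABLE_BITS mantissa bits of z.  */
    static inline void
    log_decompose (double x, double *z, int64_t *k, unsigned *i)
    {
      uint64_t ix, iz;
      memcpy (&ix, &x, sizeof ix);
      uint64_t tmp = ix - OFF;
      *k = (int64_t) tmp >> 52;                  /* Arithmetic shift.  */
      *i = (tmp >> (52 - TABLE_BITS)) & ((1 << TABLE_BITS) - 1);
      iz = ix - (tmp & 0xfff0000000000000ULL);   /* Remove 2^k from the exponent.  */
      memcpy (z, &iz, sizeof *z);
    }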
diff --git a/sysdeps/aarch64/fpu/log_sve.c b/sysdeps/aarch64/fpu/log_sve.c
new file mode 100644
index 0000000000..eedc5679e7
--- /dev/null
+++ b/sysdeps/aarch64/fpu/log_sve.c
@@ -0,0 +1,80 @@
+/* Double-precision vector (SVE) log function.
+
+   Copyright (C) 2023 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include "sv_math.h"
+
+#define P(i) sv_f64 (__v_log_data.poly[i])
+#define N (1 << V_LOG_TABLE_BITS)
+#define Off (0x3fe6900900000000)
+#define MaxTop (0x7ff)
+#define MinTop (0x001)
+#define ThreshTop (0x7fe) /* MaxTop - MinTop.  */
+
+static svfloat64_t NOINLINE
+special_case (svfloat64_t x, svfloat64_t y, svbool_t cmp)
+{
+  return sv_call_f64 (log, x, y, cmp);
+}
+
+/* SVE port of AdvSIMD log algorithm.
+   Maximum measured error is 2.17 ulp:
+   SV_NAME_D1 (log)(0x1.a6129884398a3p+0) got 0x1.ffffff1cca043p-2
+					 want 0x1.ffffff1cca045p-2.  */
+svfloat64_t SV_NAME_D1 (log) (svfloat64_t x, const svbool_t pg)
+{
+  svuint64_t ix = svreinterpret_u64_f64 (x);
+  svuint64_t top = svlsr_n_u64_x (pg, ix, 52);
+  svbool_t cmp
+      = svcmpge_u64 (pg, svsub_n_u64_x (pg, top, MinTop), sv_u64 (ThreshTop));
+
+  /* x = 2^k z; where z is in range [Off,2*Off) and exact.
+     The range is split into N subintervals.
+     The ith subinterval contains z and c is near its center.  */
+  svuint64_t tmp = svsub_n_u64_x (pg, ix, Off);
+  /* Equivalent to (tmp >> (52 - V_LOG_TABLE_BITS)) % N, since N is a power
+     of 2.  */
+  svuint64_t i = svand_n_u64_x (
+      pg, svlsr_n_u64_x (pg, tmp, (52 - V_LOG_TABLE_BITS)), N - 1);
+  svint64_t k = svasr_n_s64_x (pg, svreinterpret_s64_u64 (tmp),
+			       52); /* Arithmetic shift.  */
+  svuint64_t iz
+      = svsub_u64_x (pg, ix, svand_n_u64_x (pg, tmp, 0xfffULL << 52));
+  svfloat64_t z = svreinterpret_f64_u64 (iz);
+  /* Lookup in 2 global lists (length N).  */
+  svfloat64_t invc = svld1_gather_u64index_f64 (pg, __v_log_data.invc, i);
+  svfloat64_t logc = svld1_gather_u64index_f64 (pg, __v_log_data.logc, i);
+
+  /* log(x) = log1p(z/c-1) + log(c) + k*Ln2.  */
+  svfloat64_t r = svmad_n_f64_x (pg, invc, z, -1);
+  svfloat64_t kd = svcvt_f64_s64_x (pg, k);
+  /* hi = r + log(c) + k*Ln2.  */
+  svfloat64_t hi
+      = svmla_n_f64_x (pg, svadd_f64_x (pg, logc, r), kd, __v_log_data.ln2);
+  /* y = r2*(A0 + r*A1 + r2*(A2 + r*A3 + r2*A4)) + hi.  */
+  svfloat64_t r2 = svmul_f64_x (pg, r, r);
+  svfloat64_t y = svmla_f64_x (pg, P (2), r, P (3));
+  svfloat64_t p = svmla_f64_x (pg, P (0), r, P (1));
+  y = svmla_f64_x (pg, y, r2, P (4));
+  y = svmla_f64_x (pg, p, r2, y);
+  y = svmla_f64_x (pg, hi, r2, y);
+
+  if (unlikely (svptest_any (pg, cmp)))
+    return special_case (x, y, cmp);
+  return y;
+}
diff --git a/sysdeps/aarch64/fpu/logf_advsimd.c b/sysdeps/aarch64/fpu/logf_advsimd.c
new file mode 100644
index 0000000000..5ca56ab27c
--- /dev/null
+++ b/sysdeps/aarch64/fpu/logf_advsimd.c
@@ -0,0 +1,80 @@
+/* Single-precision vector (Advanced SIMD) log function.
+
+   Copyright (C) 2023 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include "v_math.h"
+
+static const volatile struct
+{
+  float32x4_t poly[7];
+  float32x4_t ln2, tiny_bound;
+  uint32x4_t min_norm, special_bound, off, mantissa_mask;
+} data = {
+  /* 3.34 ulp error.  */
+  .poly = { V4 (-0x1.3e737cp-3f), V4 (0x1.5a9aa2p-3f), V4 (-0x1.4f9934p-3f),
+	    V4 (0x1.961348p-3f), V4 (-0x1.00187cp-2f), V4 (0x1.555d7cp-2f),
+	    V4 (-0x1.ffffc8p-2f) },
+  .ln2 = V4 (0x1.62e43p-1f),
+  .tiny_bound = V4 (0x1p-126),
+  .min_norm = V4 (0x00800000),
+  .special_bound = V4 (0x7f000000), /* asuint32(inf) - min_norm.  */
+  .off = V4 (0x3f2aaaab),	    /* 0.666667.  */
+  .mantissa_mask = V4 (0x007fffff)
+};
+
+#define P(i) data.poly[7 - i]
+
+static float32x4_t VPCS_ATTR NOINLINE
+special_case (float32x4_t x, float32x4_t y, uint32x4_t cmp)
+{
+  /* Fall back to scalar code.  */
+  return v_call_f32 (logf, x, y, cmp);
+}
+
+float32x4_t VPCS_ATTR V_NAME_F1 (log) (float32x4_t x)
+{
+  float32x4_t n, p, q, r, r2, y;
+  uint32x4_t u, cmp;
+
+  u = vreinterpretq_u32_f32 (x);
+  cmp = vcgeq_u32 (vsubq_u32 (u, data.min_norm), data.special_bound);
+
+  /* x = 2^n * (1+r), where 2/3 < 1+r < 4/3.  */
+  u = vsubq_u32 (u, data.off);
+  n = vcvtq_f32_s32 (
+      vshrq_n_s32 (vreinterpretq_s32_u32 (u), 23)); /* Sign-extend.  */
+  u = vandq_u32 (u, data.mantissa_mask);
+  u = vaddq_u32 (u, data.off);
+  r = vsubq_f32 (vreinterpretq_f32_u32 (u), v_f32 (1.0f));
+
+  /* y = log(1+r) + n*ln2.  */
+  r2 = vmulq_f32 (r, r);
+  /* n*ln2 + r + r2*(P1 + r*P2 + r2*(P3 + r*P4 + r2*(P5 + r*P6 + r2*P7))).  */
+  p = vfmaq_f32 (P (5), P (6), r);
+  q = vfmaq_f32 (P (3), P (4), r);
+  y = vfmaq_f32 (P (1), P (2), r);
+  p = vfmaq_f32 (p, P (7), r2);
+  q = vfmaq_f32 (q, p, r2);
+  y = vfmaq_f32 (y, q, r2);
+  p = vfmaq_f32 (r, data.ln2, n);
+  y = vfmaq_f32 (p, y, r2);
+
+  if (unlikely (v_any_u32 (cmp)))
+    return special_case (x, y, cmp);
+  return y;
+}
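
The same reduction in scalar form, for reference (illustrative only, not part
of the patch, reusing the off and mantissa-mask constants from above):

    #include <stdint.h>
    #include <string.h>

    /* Pick n so that x = 2^n * (1 + r) with 1 + r in [2/3, 4/3).  */
    static inline float
    logf_reduce (float x, int *n)
    {
      uint32_t u;
      float z;
      memcpy (&u, &x, sizeof u);
      u -= 0x3f2aaaab;                 /* off ~= asuint (2/3).  */
      *n = (int32_t) u >> 23;          /* Sign-extending shift.  */
      u = (u & 0x007fffff) + 0x3f2aaaab;
      memcpy (&z, &u, sizeof z);
      return z - 1.0f;                 /* r, with |r| < 1/3.  */
    }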
diff --git a/sysdeps/aarch64/fpu/logf_sve.c b/sysdeps/aarch64/fpu/logf_sve.c
new file mode 100644
index 0000000000..446a3ba3cf
--- /dev/null
+++ b/sysdeps/aarch64/fpu/logf_sve.c
@@ -0,0 +1,85 @@
+/* Single-precision vector (SVE) log function.
+
+   Copyright (C) 2023 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include "sv_math.h"
+
+static struct
+{
+  float poly_0135[4];
+  float poly_246[3];
+  float ln2;
+} data = {
+  .poly_0135 = {
+    /* Coefficients copied from the AdvSIMD routine in math/, then rearranged so
+       that coeffs 0, 1, 3 and 5 can be loaded as a single quad-word, hence used
+       with _lane variant of MLA intrinsic.  */
+    -0x1.3e737cp-3f, 0x1.5a9aa2p-3f, 0x1.961348p-3f, 0x1.555d7cp-2f
+  },
+  .poly_246 = { -0x1.4f9934p-3f, -0x1.00187cp-2f, -0x1.ffffc8p-2f },
+  .ln2 = 0x1.62e43p-1f
+};
+
+#define Min (0x00800000)
+#define Max (0x7f800000)
+#define Thresh (0x7f000000) /* Max - Min.  */
+#define Mask (0x007fffff)
+#define Off (0x3f2aaaab) /* 0.666667.  */
+
+static svfloat32_t NOINLINE
+special_case (svfloat32_t x, svfloat32_t y, svbool_t cmp)
+{
+  return sv_call_f32 (logf, x, y, cmp);
+}
+
+/* Optimised implementation of SVE logf, using the same algorithm and
+   polynomial as the AdvSIMD routine. Maximum error is 3.34 ULPs:
+   SV_NAME_F1 (log)(0x1.557298p+0) got 0x1.26edecp-2
+				  want 0x1.26ede6p-2.  */
+svfloat32_t SV_NAME_F1 (log) (svfloat32_t x, const svbool_t pg)
+{
+  svuint32_t u = svreinterpret_u32_f32 (x);
+  svbool_t cmp = svcmpge_n_u32 (pg, svsub_n_u32_x (pg, u, Min), Thresh);
+
+  /* x = 2^n * (1+r), where 2/3 < 1+r < 4/3.  */
+  u = svsub_n_u32_x (pg, u, Off);
+  svfloat32_t n
+      = svcvt_f32_s32_x (pg, svasr_n_s32_x (pg, svreinterpret_s32_u32 (u),
+					    23)); /* Sign-extend.  */
+  u = svand_n_u32_x (pg, u, Mask);
+  u = svadd_n_u32_x (pg, u, Off);
+  svfloat32_t r = svsub_n_f32_x (pg, svreinterpret_f32_u32 (u), 1.0f);
+
+  /* y = log(1+r) + n*ln2.  */
+  svfloat32_t r2 = svmul_f32_x (pg, r, r);
+  /* n*ln2 + r + r2*(P6 + r*P5 + r2*(P4 + r*P3 + r2*(P2 + r*P1 + r2*P0))).  */
+  svfloat32_t p_0135 = svld1rq_f32 (svptrue_b32 (), &data.poly_0135[0]);
+  svfloat32_t p = svmla_lane_f32 (sv_f32 (data.poly_246[0]), r, p_0135, 1);
+  svfloat32_t q = svmla_lane_f32 (sv_f32 (data.poly_246[1]), r, p_0135, 2);
+  svfloat32_t y = svmla_lane_f32 (sv_f32 (data.poly_246[2]), r, p_0135, 3);
+  p = svmla_lane_f32 (p, r2, p_0135, 0);
+
+  q = svmla_f32_x (pg, q, r2, p);
+  y = svmla_f32_x (pg, y, r2, q);
+  p = svmla_n_f32_x (pg, r, n, data.ln2);
+  y = svmla_f32_x (pg, p, r2, y);
+
+  if (unlikely (svptest_any (pg, cmp)))
+    return special_case (x, y, cmp);
+  return y;
+}
diff --git a/sysdeps/aarch64/fpu/test-double-advsimd-wrappers.c b/sysdeps/aarch64/fpu/test-double-advsimd-wrappers.c
index 4af97a25a2..c5f6fcd7c4 100644
--- a/sysdeps/aarch64/fpu/test-double-advsimd-wrappers.c
+++ b/sysdeps/aarch64/fpu/test-double-advsimd-wrappers.c
@@ -24,4 +24,5 @@
 #define VEC_TYPE float64x2_t
 
 VPCS_VECTOR_WRAPPER (cos_advsimd, _ZGVnN2v_cos)
+VPCS_VECTOR_WRAPPER (log_advsimd, _ZGVnN2v_log)
 VPCS_VECTOR_WRAPPER (sin_advsimd, _ZGVnN2v_sin)
diff --git a/sysdeps/aarch64/fpu/test-double-sve-wrappers.c b/sysdeps/aarch64/fpu/test-double-sve-wrappers.c
index 64c790adc5..d5e2ec6dc5 100644
--- a/sysdeps/aarch64/fpu/test-double-sve-wrappers.c
+++ b/sysdeps/aarch64/fpu/test-double-sve-wrappers.c
@@ -33,4 +33,5 @@
   }
 
 SVE_VECTOR_WRAPPER (cos_sve, _ZGVsMxv_cos)
+SVE_VECTOR_WRAPPER (log_sve, _ZGVsMxv_log)
 SVE_VECTOR_WRAPPER (sin_sve, _ZGVsMxv_sin)
diff --git a/sysdeps/aarch64/fpu/test-float-advsimd-wrappers.c b/sysdeps/aarch64/fpu/test-float-advsimd-wrappers.c
index 50e776b952..c240738837 100644
--- a/sysdeps/aarch64/fpu/test-float-advsimd-wrappers.c
+++ b/sysdeps/aarch64/fpu/test-float-advsimd-wrappers.c
@@ -24,4 +24,5 @@
 #define VEC_TYPE float32x4_t
 
 VPCS_VECTOR_WRAPPER (cosf_advsimd, _ZGVnN4v_cosf)
+VPCS_VECTOR_WRAPPER (logf_advsimd, _ZGVnN4v_logf)
 VPCS_VECTOR_WRAPPER (sinf_advsimd, _ZGVnN4v_sinf)
diff --git a/sysdeps/aarch64/fpu/test-float-sve-wrappers.c b/sysdeps/aarch64/fpu/test-float-sve-wrappers.c
index 7355032929..5a06b75857 100644
--- a/sysdeps/aarch64/fpu/test-float-sve-wrappers.c
+++ b/sysdeps/aarch64/fpu/test-float-sve-wrappers.c
@@ -33,4 +33,5 @@
   }
 
 SVE_VECTOR_WRAPPER (cosf_sve, _ZGVsMxv_cosf)
+SVE_VECTOR_WRAPPER (logf_sve, _ZGVsMxv_logf)
 SVE_VECTOR_WRAPPER (sinf_sve, _ZGVsMxv_sinf)
diff --git a/sysdeps/aarch64/fpu/v_log_data.c b/sysdeps/aarch64/fpu/v_log_data.c
new file mode 100644
index 0000000000..6fd6f43695
--- /dev/null
+++ b/sysdeps/aarch64/fpu/v_log_data.c
@@ -0,0 +1,173 @@
+/* Lookup table for double-precision log(x) vector function.
+
+   Copyright (C) 2023 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include "vecmath_config.h"
+
+const struct v_log_data __v_log_data = {
+  /* Worst-case error: 1.17 + 0.5 ulp.
+     Rel error: 0x1.6272e588p-56 in [ -0x1.fc1p-9 0x1.009p-8 ].  */
+  .poly = { -0x1.ffffffffffff7p-2, 0x1.55555555170d4p-2, -0x1.0000000399c27p-2,
+	    0x1.999b2e90e94cap-3, -0x1.554e550bd501ep-3 },
+  .ln2 = 0x1.62e42fefa39efp-1,
+  /* Algorithm:
+
+	x = 2^k z
+	log(x) = k ln2 + log(c) + poly(z/c - 1)
+
+     where z is in [a;2a) which is split into N subintervals (a=0x1.69009p-1,
+     N=128) and log(c) and 1/c for the ith subinterval comes from two lookup
+     tables:
+
+	invc[i] = 1/c
+	logc[i] = (double)log(c)
+
+     where c is near the center of the subinterval and is chosen by trying
+     several floating point invc candidates around 1/center and selecting one
+     for which the error in (double)log(c) is minimized (< 0x1p-74), except the
+     subinterval that contains 1 and the previous one got tweaked to avoid
+     cancellation.  */
+  .invc = { 0x1.6a133d0dec120p+0, 0x1.6815f2f3e42edp+0,
+	    0x1.661e39be1ac9ep+0, 0x1.642bfa30ac371p+0,
+	    0x1.623f1d916f323p+0, 0x1.60578da220f65p+0,
+	    0x1.5e75349dea571p+0, 0x1.5c97fd387a75ap+0,
+	    0x1.5abfd2981f200p+0, 0x1.58eca051dc99cp+0,
+	    0x1.571e526d9df12p+0, 0x1.5554d555b3fcbp+0,
+	    0x1.539015e2a20cdp+0, 0x1.51d0014ee0164p+0,
+	    0x1.50148538cd9eep+0, 0x1.4e5d8f9f698a1p+0,
+	    0x1.4cab0edca66bep+0, 0x1.4afcf1a9db874p+0,
+	    0x1.495327136e16fp+0, 0x1.47ad9e84af28fp+0,
+	    0x1.460c47b39ae15p+0, 0x1.446f12b278001p+0,
+	    0x1.42d5efdd720ecp+0, 0x1.4140cfe001a0fp+0,
+	    0x1.3fafa3b421f69p+0, 0x1.3e225c9c8ece5p+0,
+	    0x1.3c98ec29a211ap+0, 0x1.3b13442a413fep+0,
+	    0x1.399156baa3c54p+0, 0x1.38131639b4cdbp+0,
+	    0x1.36987540fbf53p+0, 0x1.352166b648f61p+0,
+	    0x1.33adddb3eb575p+0, 0x1.323dcd99fc1d3p+0,
+	    0x1.30d129fefc7d2p+0, 0x1.2f67e6b72fe7dp+0,
+	    0x1.2e01f7cf8b187p+0, 0x1.2c9f518ddc86ep+0,
+	    0x1.2b3fe86e5f413p+0, 0x1.29e3b1211b25cp+0,
+	    0x1.288aa08b373cfp+0, 0x1.2734abcaa8467p+0,
+	    0x1.25e1c82459b81p+0, 0x1.2491eb1ad59c5p+0,
+	    0x1.23450a54048b5p+0, 0x1.21fb1bb09e578p+0,
+	    0x1.20b415346d8f7p+0, 0x1.1f6fed179a1acp+0,
+	    0x1.1e2e99b93c7b3p+0, 0x1.1cf011a7a882ap+0,
+	    0x1.1bb44b97dba5ap+0, 0x1.1a7b3e66cdd4fp+0,
+	    0x1.1944e11dc56cdp+0, 0x1.18112aebb1a6ep+0,
+	    0x1.16e013231b7e9p+0, 0x1.15b1913f156cfp+0,
+	    0x1.14859cdedde13p+0, 0x1.135c2dc68cfa4p+0,
+	    0x1.12353bdb01684p+0, 0x1.1110bf25b85b4p+0,
+	    0x1.0feeafd2f8577p+0, 0x1.0ecf062c51c3bp+0,
+	    0x1.0db1baa076c8bp+0, 0x1.0c96c5bb3048ep+0,
+	    0x1.0b7e20263e070p+0, 0x1.0a67c2acd0ce3p+0,
+	    0x1.0953a6391e982p+0, 0x1.0841c3caea380p+0,
+	    0x1.07321489b13eap+0, 0x1.062491aee9904p+0,
+	    0x1.05193497a7cc5p+0, 0x1.040ff6b5f5e9fp+0,
+	    0x1.0308d19aa6127p+0, 0x1.0203beedb0c67p+0,
+	    0x1.010037d38bcc2p+0, 1.0,
+	    0x1.fc06d493cca10p-1, 0x1.f81e6ac3b918fp-1,
+	    0x1.f44546ef18996p-1, 0x1.f07b10382c84bp-1,
+	    0x1.ecbf7070e59d4p-1, 0x1.e91213f715939p-1,
+	    0x1.e572a9a75f7b7p-1, 0x1.e1e0e2c530207p-1,
+	    0x1.de5c72d8a8be3p-1, 0x1.dae50fa5658ccp-1,
+	    0x1.d77a71145a2dap-1, 0x1.d41c51166623ep-1,
+	    0x1.d0ca6ba0bb29fp-1, 0x1.cd847e8e59681p-1,
+	    0x1.ca4a499693e00p-1, 0x1.c71b8e399e821p-1,
+	    0x1.c3f80faf19077p-1, 0x1.c0df92dc2b0ecp-1,
+	    0x1.bdd1de3cbb542p-1, 0x1.baceb9e1007a3p-1,
+	    0x1.b7d5ef543e55ep-1, 0x1.b4e749977d953p-1,
+	    0x1.b20295155478ep-1, 0x1.af279f8e82be2p-1,
+	    0x1.ac5638197fdf3p-1, 0x1.a98e2f102e087p-1,
+	    0x1.a6cf5606d05c1p-1, 0x1.a4197fc04d746p-1,
+	    0x1.a16c80293dc01p-1, 0x1.9ec82c4dc5bc9p-1,
+	    0x1.9c2c5a491f534p-1, 0x1.9998e1480b618p-1,
+	    0x1.970d9977c6c2dp-1, 0x1.948a5c023d212p-1,
+	    0x1.920f0303d6809p-1, 0x1.8f9b698a98b45p-1,
+	    0x1.8d2f6b81726f6p-1, 0x1.8acae5bb55badp-1,
+	    0x1.886db5d9275b8p-1, 0x1.8617ba567c13cp-1,
+	    0x1.83c8d27487800p-1, 0x1.8180de3c5dbe7p-1,
+	    0x1.7f3fbe71cdb71p-1, 0x1.7d055498071c1p-1,
+	    0x1.7ad182e54f65ap-1, 0x1.78a42c3c90125p-1,
+	    0x1.767d342f76944p-1, 0x1.745c7ef26b00ap-1,
+	    0x1.7241f15769d0fp-1, 0x1.702d70d396e41p-1,
+	    0x1.6e1ee3700cd11p-1, 0x1.6c162fc9cbe02p-1 },
+  .logc = { -0x1.62fe995eb963ap-2, -0x1.5d5a48dad6b67p-2,
+	    -0x1.57bde257d2769p-2, -0x1.52294fbf2af55p-2,
+	    -0x1.4c9c7b598aa38p-2, -0x1.47174fc5ff560p-2,
+	    -0x1.4199b7fa7b5cap-2, -0x1.3c239f48cfb99p-2,
+	    -0x1.36b4f154d2aebp-2, -0x1.314d9a0ff32fbp-2,
+	    -0x1.2bed85cca3cffp-2, -0x1.2694a11421af9p-2,
+	    -0x1.2142d8d014fb2p-2, -0x1.1bf81a2c77776p-2,
+	    -0x1.16b452a39c6a4p-2, -0x1.11776ffa6c67ep-2,
+	    -0x1.0c416035020e0p-2, -0x1.071211aa10fdap-2,
+	    -0x1.01e972e293b1bp-2, -0x1.f98ee587fd434p-3,
+	    -0x1.ef5800ad716fbp-3, -0x1.e52e160484698p-3,
+	    -0x1.db1104b19352ep-3, -0x1.d100ac59e0bd6p-3,
+	    -0x1.c6fced287c3bdp-3, -0x1.bd05a7b317c29p-3,
+	    -0x1.b31abd229164fp-3, -0x1.a93c0edadb0a3p-3,
+	    -0x1.9f697ee30d7ddp-3, -0x1.95a2efa9aa40ap-3,
+	    -0x1.8be843d796044p-3, -0x1.82395ecc477edp-3,
+	    -0x1.7896240966422p-3, -0x1.6efe77aca8c55p-3,
+	    -0x1.65723e117ec5cp-3, -0x1.5bf15c0955706p-3,
+	    -0x1.527bb6c111da1p-3, -0x1.491133c939f8fp-3,
+	    -0x1.3fb1b90c7fc58p-3, -0x1.365d2cc485f8dp-3,
+	    -0x1.2d13758970de7p-3, -0x1.23d47a721fd47p-3,
+	    -0x1.1aa0229f25ec2p-3, -0x1.117655ddebc3bp-3,
+	    -0x1.0856fbf83ab6bp-3, -0x1.fe83fabbaa106p-4,
+	    -0x1.ec6e8507a56cdp-4, -0x1.da6d68c7cc2eap-4,
+	    -0x1.c88078462be0cp-4, -0x1.b6a786a423565p-4,
+	    -0x1.a4e2676ac7f85p-4, -0x1.9330eea777e76p-4,
+	    -0x1.8192f134d5ad9p-4, -0x1.70084464f0538p-4,
+	    -0x1.5e90bdec5cb1fp-4, -0x1.4d2c3433c5536p-4,
+	    -0x1.3bda7e219879ap-4, -0x1.2a9b732d27194p-4,
+	    -0x1.196eeb2b10807p-4, -0x1.0854be8ef8a7ep-4,
+	    -0x1.ee998cb277432p-5, -0x1.ccadb79919fb9p-5,
+	    -0x1.aae5b1d8618b0p-5, -0x1.89413015d7442p-5,
+	    -0x1.67bfe7bf158dep-5, -0x1.46618f83941bep-5,
+	    -0x1.2525df1b0618ap-5, -0x1.040c8e2f77c6ap-5,
+	    -0x1.c62aad39f738ap-6, -0x1.847fe3bdead9cp-6,
+	    -0x1.43183683400acp-6, -0x1.01f31c4e1d544p-6,
+	    -0x1.82201d1e6b69ap-7, -0x1.00dd0f3e1bfd6p-7,
+	    -0x1.ff6fe1feb4e53p-9, 0.0,
+	    0x1.fe91885ec8e20p-8,  0x1.fc516f716296dp-7,
+	    0x1.7bb4dd70a015bp-6,  0x1.f84c99b34b674p-6,
+	    0x1.39f9ce4fb2d71p-5,  0x1.7756c0fd22e78p-5,
+	    0x1.b43ee82db8f3ap-5,  0x1.f0b3fced60034p-5,
+	    0x1.165bd78d4878ep-4,  0x1.3425d2715ebe6p-4,
+	    0x1.51b8bd91b7915p-4,  0x1.6f15632c76a47p-4,
+	    0x1.8c3c88ecbe503p-4,  0x1.a92ef077625dap-4,
+	    0x1.c5ed5745fa006p-4,  0x1.e27876de1c993p-4,
+	    0x1.fed104fce4cdcp-4,  0x1.0d7bd9c17d78bp-3,
+	    0x1.1b76986cef97bp-3,  0x1.295913d24f750p-3,
+	    0x1.37239fa295d17p-3,  0x1.44d68dd78714bp-3,
+	    0x1.52722ebe5d780p-3,  0x1.5ff6d12671f98p-3,
+	    0x1.6d64c2389484bp-3,  0x1.7abc4da40fddap-3,
+	    0x1.87fdbda1e8452p-3,  0x1.95295b06a5f37p-3,
+	    0x1.a23f6d34abbc5p-3,  0x1.af403a28e04f2p-3,
+	    0x1.bc2c06a85721ap-3,  0x1.c903161240163p-3,
+	    0x1.d5c5aa93287ebp-3,  0x1.e274051823fa9p-3,
+	    0x1.ef0e656300c16p-3,  0x1.fb9509f05aa2ap-3,
+	    0x1.04041821f37afp-2,  0x1.0a340a49b3029p-2,
+	    0x1.105a7918a126dp-2,  0x1.1677819812b84p-2,
+	    0x1.1c8b405b40c0ep-2,  0x1.2295d16cfa6b1p-2,
+	    0x1.28975066318a2p-2,  0x1.2e8fd855d86fcp-2,
+	    0x1.347f83d605e59p-2,  0x1.3a666d1244588p-2,
+	    0x1.4044adb6f8ec4p-2,  0x1.461a5f077558cp-2,
+	    0x1.4be799e20b9c8p-2,  0x1.51ac76a6b79dfp-2,
+	    0x1.57690d5744a45p-2,  0x1.5d1d758e45217p-2 }
+};
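
Putting the pieces together (a restatement of the comment above, not new
math), the vector routines assemble the result from these tables as

    r      = z * invc[i] - 1,          with x = 2^k * z
    log(x) ~= k*ln2 + logc[i] + r
              + r^2 * (poly[0] + poly[1]*r + poly[2]*r^2 + poly[3]*r^3 + poly[4]*r^4)

where |r| stays small because invc[i] is close to 1/z, so only a short
polynomial in r is needed.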
diff --git a/sysdeps/aarch64/fpu/vecmath_config.h b/sysdeps/aarch64/fpu/vecmath_config.h
index c8f45af63b..84cf5b2eef 100644
--- a/sysdeps/aarch64/fpu/vecmath_config.h
+++ b/sysdeps/aarch64/fpu/vecmath_config.h
@@ -21,6 +21,7 @@
 
 #include <math.h>
 
+#define HIDDEN attribute_hidden
 #define NOINLINE __attribute__ ((noinline))
 #define likely(x) __glibc_likely (x)
 #define unlikely(x) __glibc_unlikely (x)
@@ -30,4 +31,14 @@
    supported in GLIBC, however we keep it for ease of development.  */
 #define WANT_SIMD_EXCEPT 0
 
+#define V_LOG_POLY_ORDER 6
+#define V_LOG_TABLE_BITS 7
+extern const struct v_log_data
+{
+  /* Shared data for vector log and log-derived routines (e.g. asinh).  */
+  double poly[V_LOG_POLY_ORDER - 1];
+  double ln2;
+  double invc[1 << V_LOG_TABLE_BITS];
+  double logc[1 << V_LOG_TABLE_BITS];
+} __v_log_data HIDDEN;
 #endif
diff --git a/sysdeps/aarch64/libm-test-ulps b/sysdeps/aarch64/libm-test-ulps
index 4145662b2d..7bdbf4614d 100644
--- a/sysdeps/aarch64/libm-test-ulps
+++ b/sysdeps/aarch64/libm-test-ulps
@@ -1219,10 +1219,18 @@ double: 3
 float: 3
 ldouble: 1
 
+Function: "log_advsimd":
+double: 1
+float: 3
+
 Function: "log_downward":
 float: 2
 ldouble: 1
 
+Function: "log_sve":
+double: 1
+float: 3
+
 Function: "log_towardzero":
 float: 2
 ldouble: 2
diff --git a/sysdeps/unix/sysv/linux/aarch64/libmvec.abilist b/sysdeps/unix/sysv/linux/aarch64/libmvec.abilist
index a4c564859c..1922191886 100644
--- a/sysdeps/unix/sysv/linux/aarch64/libmvec.abilist
+++ b/sysdeps/unix/sysv/linux/aarch64/libmvec.abilist
@@ -1,8 +1,12 @@
 GLIBC_2.38 _ZGVnN2v_cos F
+GLIBC_2.38 _ZGVnN2v_log F
 GLIBC_2.38 _ZGVnN2v_sin F
 GLIBC_2.38 _ZGVnN4v_cosf F
+GLIBC_2.38 _ZGVnN4v_logf F
 GLIBC_2.38 _ZGVnN4v_sinf F
 GLIBC_2.38 _ZGVsMxv_cos F
 GLIBC_2.38 _ZGVsMxv_cosf F
+GLIBC_2.38 _ZGVsMxv_log F
+GLIBC_2.38 _ZGVsMxv_logf F
 GLIBC_2.38 _ZGVsMxv_sin F
 GLIBC_2.38 _ZGVsMxv_sinf F
-- 
2.27.0


^ permalink raw reply	[flat|nested] 11+ messages in thread

* [PATCH 4/4] aarch64: Add vector implementations of exp routines
  2023-06-08 13:39 [PATCH 1/4] aarch64: Add vector implementations of cos routines Joe Ramsay
  2023-06-08 13:39 ` [PATCH 2/4] aarch64: Add vector implementations of sin routines Joe Ramsay
  2023-06-08 13:39 ` [PATCH 3/4] aarch64: Add vector implementations of log routines Joe Ramsay
@ 2023-06-08 13:39 ` Joe Ramsay
  2023-06-13 17:29 ` [PATCH 1/4] aarch64: Add vector implementations of cos routines Adhemerval Zanella Netto
  2023-06-13 19:56 ` Adhemerval Zanella Netto
  4 siblings, 0 replies; 11+ messages in thread
From: Joe Ramsay @ 2023-06-08 13:39 UTC (permalink / raw)
  To: libc-alpha; +Cc: Joe Ramsay

Optimised implementations for single and double precision, Advanced
SIMD and SVE, copied from Arm Optimized Routines.
---
 sysdeps/aarch64/fpu/Makefile                  |  11 +-
 sysdeps/aarch64/fpu/Versions                  |   4 +
 sysdeps/aarch64/fpu/bits/math-vector.h        |   4 +
 sysdeps/aarch64/fpu/exp_advsimd.c             | 136 +++++++++++++++++
 sysdeps/aarch64/fpu/exp_sve.c                 | 138 ++++++++++++++++++
 sysdeps/aarch64/fpu/expf_advsimd.c            | 132 +++++++++++++++++
 sysdeps/aarch64/fpu/expf_sve.c                |  89 +++++++++++
 sysdeps/aarch64/fpu/sv_estrin.h               |  23 +++
 sysdeps/aarch64/fpu/sv_estrin_wrap.h          |  89 +++++++++++
 sysdeps/aarch64/fpu/sv_estrinf.h              |  23 +++
 .../fpu/test-double-advsimd-wrappers.c        |   1 +
 .../aarch64/fpu/test-double-sve-wrappers.c    |   1 +
 .../aarch64/fpu/test-float-advsimd-wrappers.c |   1 +
 sysdeps/aarch64/fpu/test-float-sve-wrappers.c |   1 +
 sysdeps/aarch64/fpu/v_exp_data.c              |  66 +++++++++
 sysdeps/aarch64/fpu/vecmath_config.h          |   3 +
 sysdeps/aarch64/libm-test-ulps                |   8 +
 .../unix/sysv/linux/aarch64/libmvec.abilist   |   4 +
 18 files changed, 729 insertions(+), 5 deletions(-)
 create mode 100644 sysdeps/aarch64/fpu/exp_advsimd.c
 create mode 100644 sysdeps/aarch64/fpu/exp_sve.c
 create mode 100644 sysdeps/aarch64/fpu/expf_advsimd.c
 create mode 100644 sysdeps/aarch64/fpu/expf_sve.c
 create mode 100644 sysdeps/aarch64/fpu/sv_estrin.h
 create mode 100644 sysdeps/aarch64/fpu/sv_estrin_wrap.h
 create mode 100644 sysdeps/aarch64/fpu/sv_estrinf.h
 create mode 100644 sysdeps/aarch64/fpu/v_exp_data.c

diff --git a/sysdeps/aarch64/fpu/Makefile b/sysdeps/aarch64/fpu/Makefile
index 3f9cd2d000..8853ee0b80 100644
--- a/sysdeps/aarch64/fpu/Makefile
+++ b/sysdeps/aarch64/fpu/Makefile
@@ -1,17 +1,18 @@
-float-advsimd-funcs = cos sin log
+float-advsimd-funcs = cos sin log exp
 
-double-advsimd-funcs = cos sin log
+double-advsimd-funcs = cos sin log exp
 
-float-sve-funcs = cos sin log
+float-sve-funcs = cos sin log exp
 
-double-sve-funcs = cos sin log
+double-sve-funcs = cos sin log exp
 
 ifeq ($(subdir),mathvec)
 libmvec-support = $(addsuffix f_advsimd,$(float-advsimd-funcs)) \
                   $(addsuffix _advsimd,$(double-advsimd-funcs)) \
                   $(addsuffix f_sve,$(float-sve-funcs)) \
                   $(addsuffix _sve,$(double-sve-funcs)) \
-                  v_log_data
+                  v_log_data \
+                  v_exp_data
 endif
 
 sve-cflags = -march=armv8-a+sve
diff --git a/sysdeps/aarch64/fpu/Versions b/sysdeps/aarch64/fpu/Versions
index 902446f40d..c85c0f3efb 100644
--- a/sysdeps/aarch64/fpu/Versions
+++ b/sysdeps/aarch64/fpu/Versions
@@ -1,13 +1,17 @@
 libmvec {
   GLIBC_2.38 {
     _ZGVnN2v_cos;
+    _ZGVnN2v_exp;
     _ZGVnN2v_log;
     _ZGVnN2v_sin;
     _ZGVnN4v_cosf;
+    _ZGVnN4v_expf;
     _ZGVnN4v_logf;
     _ZGVnN4v_sinf;
     _ZGVsMxv_cos;
     _ZGVsMxv_cosf;
+    _ZGVsMxv_exp;
+    _ZGVsMxv_expf;
     _ZGVsMxv_log;
     _ZGVsMxv_logf;
     _ZGVsMxv_sin;
diff --git a/sysdeps/aarch64/fpu/bits/math-vector.h b/sysdeps/aarch64/fpu/bits/math-vector.h
index 70c737338e..7c200599c1 100644
--- a/sysdeps/aarch64/fpu/bits/math-vector.h
+++ b/sysdeps/aarch64/fpu/bits/math-vector.h
@@ -50,10 +50,12 @@ typedef __SVBool_t __sv_bool_t;
 #  define __vpcs __attribute__ ((__aarch64_vector_pcs__))
 
 __vpcs __f32x4_t _ZGVnN4v_cosf (__f32x4_t);
+__vpcs __f32x4_t _ZGVnN4v_expf (__f32x4_t);
 __vpcs __f32x4_t _ZGVnN4v_logf (__f32x4_t);
 __vpcs __f32x4_t _ZGVnN4v_sinf (__f32x4_t);
 
 __vpcs __f64x2_t _ZGVnN2v_cos (__f64x2_t);
+__vpcs __f64x2_t _ZGVnN2v_exp (__f64x2_t);
 __vpcs __f64x2_t _ZGVnN2v_log (__f64x2_t);
 __vpcs __f64x2_t _ZGVnN2v_sin (__f64x2_t);
 
@@ -63,10 +65,12 @@ __vpcs __f64x2_t _ZGVnN2v_sin (__f64x2_t);
 #ifdef __SVE_VEC_MATH_SUPPORTED
 
 __sv_f32_t _ZGVsMxv_cosf (__sv_f32_t, __sv_bool_t);
+__sv_f32_t _ZGVsMxv_expf (__sv_f32_t, __sv_bool_t);
 __sv_f32_t _ZGVsMxv_logf (__sv_f32_t, __sv_bool_t);
 __sv_f32_t _ZGVsMxv_sinf (__sv_f32_t, __sv_bool_t);
 
 __sv_f64_t _ZGVsMxv_cos (__sv_f64_t, __sv_bool_t);
+__sv_f64_t _ZGVsMxv_exp (__sv_f64_t, __sv_bool_t);
 __sv_f64_t _ZGVsMxv_log (__sv_f64_t, __sv_bool_t);
 __sv_f64_t _ZGVsMxv_sin (__sv_f64_t, __sv_bool_t);
 
diff --git a/sysdeps/aarch64/fpu/exp_advsimd.c b/sysdeps/aarch64/fpu/exp_advsimd.c
new file mode 100644
index 0000000000..5c9f3e99f3
--- /dev/null
+++ b/sysdeps/aarch64/fpu/exp_advsimd.c
@@ -0,0 +1,136 @@
+/* Double-precision vector (Advanced SIMD) exp function.
+
+   Copyright (C) 2023 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include "v_math.h"
+
+#define N (1 << V_EXP_TABLE_BITS)
+#define IndexMask (N - 1)
+
+const static volatile struct
+{
+  float64x2_t poly[3];
+  float64x2_t inv_ln2, ln2_hi, ln2_lo, shift;
+#if !WANT_SIMD_EXCEPT
+  float64x2_t special_bound, scale_thresh;
+#endif
+} data = {
+  /* maxerr: 1.88 +0.5 ulp
+     rel error: 1.4337*2^-53
+     abs error: 1.4299*2^-53 in [ -ln2/256, ln2/256 ].  */
+  .poly = { V2 (0x1.ffffffffffd43p-2), V2 (0x1.55555c75adbb2p-3),
+	    V2 (0x1.55555da646206p-5) },
+#if !WANT_SIMD_EXCEPT
+  .scale_thresh = V2 (163840.0), /* 1280.0 * N.  */
+  .special_bound = V2 (704.0),
+#endif
+  .inv_ln2 = V2 (0x1.71547652b82fep7), /* N/ln2.  */
+  .ln2_hi = V2 (0x1.62e42fefa39efp-8), /* ln2/N.  */
+  .ln2_lo = V2 (0x1.abc9e3b39803f3p-63),
+  .shift = V2 (0x1.8p+52)
+};
+
+#define C(i) data.poly[i]
+#define Tab __v_exp_data
+
+#if WANT_SIMD_EXCEPT
+
+# define TinyBound v_u64 (0x2000000000000000) /* asuint64 (0x1p-511).  */
+# define BigBound v_u64 (0x4080000000000000) /* asuint64 (0x1p9).  */
+# define SpecialBound v_u64 (0x2080000000000000) /* BigBound - TinyBound.  */
+
+static inline float64x2_t VPCS_ATTR
+special_case (float64x2_t x, float64x2_t y, uint64x2_t cmp)
+{
+  /* If fenv exceptions are to be triggered correctly, fall back to the scalar
+     routine for special lanes.  */
+  return v_call_f64 (exp, x, y, cmp);
+}
+
+#else
+
+# define SpecialOffset v_u64 (0x6000000000000000) /* 0x1p513.  */
+/* SpecialBias1 - SpecialBias2 = asuint(1.0).  */
+# define SpecialBias1 v_u64 (0x7000000000000000) /* 0x1p769.  */
+# define SpecialBias2 v_u64 (0x3010000000000000) /* 0x1p-254.  */
+
+static float64x2_t VPCS_ATTR NOINLINE
+special_case (float64x2_t s, float64x2_t y, float64x2_t n)
+{
+  /* 2^(n/N) may overflow, break it up into s1*s2.  */
+  uint64x2_t b = vandq_u64 (vcltzq_f64 (n), SpecialOffset);
+  float64x2_t s1 = vreinterpretq_f64_u64 (vsubq_u64 (SpecialBias1, b));
+  float64x2_t s2 = vreinterpretq_f64_u64 (
+      vaddq_u64 (vsubq_u64 (vreinterpretq_u64_f64 (s), SpecialBias2), b));
+  uint64x2_t cmp = vcagtq_f64 (n, data.scale_thresh);
+  float64x2_t r1 = vmulq_f64 (s1, s1);
+  float64x2_t r0 = vmulq_f64 (vfmaq_f64 (s2, y, s2), s1);
+  return vbslq_f64 (cmp, r1, r0);
+}
+
+#endif
+
+float64x2_t VPCS_ATTR V_NAME_D1 (exp) (float64x2_t x)
+{
+  float64x2_t n, r, r2, s, y, z;
+  uint64x2_t cmp, u, e;
+
+#if WANT_SIMD_EXCEPT
+  /* If any lanes are special, mask them with 1 and retain a copy of x to allow
+     special_case to fix special lanes later. This is only necessary if fenv
+     exceptions are to be triggered correctly.  */
+  float64x2_t xm = x;
+  uint64x2_t iax = vreinterpretq_u64_f64 (vabsq_f64 (x));
+  cmp = vcgeq_u64 (vsubq_u64 (iax, TinyBound), SpecialBound);
+  if (unlikely (v_any_u64 (cmp)))
+    x = vbslq_f64 (cmp, v_f64 (1), x);
+#else
+  cmp = vcagtq_f64 (x, data.special_bound);
+#endif
+
+  /* n = round(x/(ln2/N)).  */
+  z = vfmaq_f64 (data.shift, x, data.inv_ln2);
+  u = vreinterpretq_u64_f64 (z);
+  n = vsubq_f64 (z, data.shift);
+
+  /* r = x - n*ln2/N.  */
+  r = x;
+  r = vfmsq_f64 (r, data.ln2_hi, n);
+  r = vfmsq_f64 (r, data.ln2_lo, n);
+
+  e = vshlq_n_u64 (u, 52 - V_EXP_TABLE_BITS);
+
+  /* y = exp(r) - 1 ~= r + C0 r^2 + C1 r^3 + C2 r^4.  */
+  r2 = vmulq_f64 (r, r);
+  y = vfmaq_f64 (C (0), C (1), r);
+  y = vfmaq_f64 (y, C (2), r2);
+  y = vfmaq_f64 (r, y, r2);
+
+  /* s = 2^(n/N).  */
+  u = (uint64x2_t){ Tab[u[0] & IndexMask], Tab[u[1] & IndexMask] };
+  s = vreinterpretq_f64_u64 (vaddq_u64 (u, e));
+
+  if (unlikely (v_any_u64 (cmp)))
+#if WANT_SIMD_EXCEPT
+    return special_case (xm, vfmaq_f64 (s, y, s), cmp);
+#else
+    return special_case (s, y, n);
+#endif
+
+  return vfmaq_f64 (s, y, s);
+}
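
Spelled out, the special-case scale split above works as follows (a
restatement of the constants, not new behaviour). For n >= 0 (so b = 0):

    s1 = asdouble (SpecialBias1)                 = 0x1p769
    s2 = asdouble (asuint64 (s) - SpecialBias2)  = s * 0x1p-769
    s1 * (s2 + s2*y)                             = s * (1 + y)

so the potentially overflowing scale is only reassembled in the final
multiply; for n < 0, b = SpecialOffset makes s1 tiny (0x1p-767) and scales s2
up instead. The r1 = s1*s1 result is selected once |n| exceeds scale_thresh,
so that the overflow to infinity (or underflow to zero) happens in a
controlled way.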
diff --git a/sysdeps/aarch64/fpu/exp_sve.c b/sysdeps/aarch64/fpu/exp_sve.c
new file mode 100644
index 0000000000..1840895e21
--- /dev/null
+++ b/sysdeps/aarch64/fpu/exp_sve.c
@@ -0,0 +1,138 @@
+/* Double-precision vector (SVE) exp function.
+
+   Copyright (C) 2023 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include "sv_math.h"
+#include "sv_estrin.h"
+
+static struct
+{
+  double poly[4];
+  double ln2_hi, ln2_lo, inv_ln2, shift, thres;
+} data = {
+  .poly = { /* ulp error: 0.53.  */
+	    0x1.fffffffffdbcdp-2, 0x1.555555555444cp-3, 0x1.555573c6a9f7dp-5,
+	    0x1.1111266d28935p-7 },
+  .ln2_hi = 0x1.62e42fefa3800p-1,
+  .ln2_lo = 0x1.ef35793c76730p-45,
+  /* 1/ln2.  */
+  .inv_ln2 = 0x1.71547652b82fep+0,
+  /* 1.5*2^46+1023. This value is further explained below.  */
+  .shift = 0x1.800000000ffc0p+46,
+  .thres = 704.0,
+};
+
+#define C(i) sv_f64 (data.poly[i])
+#define SpecialOffset 0x6000000000000000 /* 0x1p513.  */
+/* SpecialBias1 - SpecialBias2 = asuint(1.0).  */
+#define SpecialBias1 0x7000000000000000 /* 0x1p769.  */
+#define SpecialBias2 0x3010000000000000 /* 0x1p-254.  */
+
+/* Update of both special and non-special cases, if any special case is
+   detected.  */
+static inline svfloat64_t
+special_case (svbool_t pg, svfloat64_t s, svfloat64_t y, svfloat64_t n)
+{
+  /* s=2^n may overflow, break it up into s=s1*s2,
+     such that exp = s + s*y can be computed as s1*(s2+s2*y)
+     and s1*s1 overflows only if n>0.  */
+
+  /* If n<=0 then set b to 0x6, 0 otherwise.  */
+  svbool_t p_sign = svcmple_n_f64 (pg, n, 0.0); /* n <= 0.  */
+  svuint64_t b
+      = svdup_n_u64_z (p_sign, SpecialOffset); /* Inactive lanes set to 0.  */
+
+  /* Set s1 to generate overflow depending on sign of exponent n.  */
+  svfloat64_t s1 = svreinterpret_f64_u64 (
+      svsubr_n_u64_x (pg, b, SpecialBias1)); /* 0x70...0 - b.  */
+  /* Offset s to avoid overflow in final result if n is below threshold.  */
+  svfloat64_t s2 = svreinterpret_f64_u64 (svadd_u64_x (
+      pg, svsub_n_u64_x (pg, svreinterpret_u64_f64 (s), SpecialBias2),
+      b)); /* as_u64 (s) - 0x3010...0 + b.  */
+
+  /* |n| > 1280 => 2^(n) overflows.  */
+  svbool_t p_cmp = svacgt_n_f64 (pg, n, 1280.0);
+
+  svfloat64_t r1 = svmul_f64_x (pg, s1, s1);
+  svfloat64_t r2 = svmla_f64_x (pg, s2, s2, y);
+  svfloat64_t r0 = svmul_f64_x (pg, r2, s1);
+
+  return svsel_f64 (p_cmp, r1, r0);
+}
+
+/* SVE exp algorithm. Maximum measured error is 1.01ulps:
+   SV_NAME_D1 (exp)(0x1.4619d7b04da41p+6) got 0x1.885d9acc41da7p+117
+					 want 0x1.885d9acc41da6p+117.  */
+svfloat64_t SV_NAME_D1 (exp) (svfloat64_t x, const svbool_t pg)
+{
+  svbool_t special = svacgt_n_f64 (pg, x, data.thres);
+
+  /* Use a modified version of the shift used for flooring, such that x/ln2 is
+     rounded to a multiple of 2^-6=1/64, shift = 1.5 * 2^52 * 2^-6 = 1.5 *
+     2^46.
+
+     n is not an integer but can be written as n = m + i/64, with i and m
+     integer, 0 <= i < 64 and m <= n.
+
+     Bits 5:0 of z will be zero every time x/ln2 reaches a new integer value
+     (n=m, i=0), and are incremented every time z (or n) is incremented by 1/64.
+     FEXPA expects i in bits 5:0 of the input so it can be used as index into
+     FEXPA hardwired table T[i] = 2^(i/64) for i = 0:63, that will in turn
+     populate the mantissa of the output. Therefore, we use u=asuint(z) as
+     input to FEXPA.
+
+     We add 1023 to the modified shift value in order to set bits 16:6 of u to
+     1, such that once these bits are moved to the exponent of the output of
+     FEXPA, we get the exponent of 2^n right, i.e. we get 2^m.  */
+  svfloat64_t z = svmla_n_f64_x (pg, sv_f64 (data.shift), x, data.inv_ln2);
+  svuint64_t u = svreinterpret_u64_f64 (z);
+  svfloat64_t n = svsub_n_f64_x (pg, z, data.shift);
+
+  /* r = x - n * ln2, r is in [-ln2/(2N), ln2/(2N)].  */
+  svfloat64_t ln2 = svld1rq_f64 (svptrue_b64 (), &data.ln2_hi);
+  svfloat64_t r = svmls_lane_f64 (x, n, ln2, 0);
+  r = svmls_lane_f64 (r, n, ln2, 1);
+
+  /* y = exp(r) - 1 ~= r + C0 r^2 + C1 r^3 + C2 r^4 + C3 r^5.  */
+  svfloat64_t r2 = svmul_f64_x (pg, r, r);
+  svfloat64_t y = svmla_f64_x (pg, r, ESTRIN_3 (pg, r, r2, C), r2);
+
+  /* s = 2^n, computed using FEXPA. FEXPA does not propagate NaNs, so for
+     consistent NaN handling we have to manually propagate them. This comes at
+     significant performance cost.  */
+  svfloat64_t s = svexpa_f64 (u);
+
+  /* Assemble result as exp(x) = 2^n * exp(r).  If |x| > Thresh the
+     multiplication may overflow, so use special case routine.  */
+
+  if (unlikely (svptest_any (pg, special)))
+    {
+      /* FEXPA zeroes the sign bit, however the sign is meaningful to the
+	 special case function so needs to be copied.
+	 e = sign bit of u << 46.  */
+      svuint64_t e
+	  = svand_n_u64_x (pg, svlsl_n_u64_x (pg, u, 46), 0x8000000000000000);
+      /* Copy sign to s.  */
+      s = svreinterpret_f64_u64 (
+	  svadd_u64_x (pg, e, svreinterpret_u64_f64 (s)));
+      return special_case (pg, s, y, n);
+    }
+
+  /* No special case.  */
+  return svmla_f64_x (pg, s, s, y);
+}
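
The effect of the modified shift is easiest to see on the bit level: after
adding the shift, bits 5:0 of asuint64 (z) hold i (the FEXPA table index) and
bits 16:6 hold m + 1023, i.e. the biased exponent of 2^m. A standalone sketch
(illustrative only, not part of the patch; the input value is arbitrary):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int
    main (void)
    {
      double shift = 0x1.800000000ffc0p+46;    /* 1.5*2^46 + 1023.  */
      double inv_ln2 = 0x1.71547652b82fep+0;
      double x = 2.5;
      double z = shift + x * inv_ln2;          /* x/ln2 rounded to a multiple of 1/64.  */
      uint64_t u;
      memcpy (&u, &z, sizeof u);
      unsigned i = u & 0x3f;                   /* Fractional 64ths of n: FEXPA index.  */
      int m = (int) ((u >> 6) & 0x7ff) - 1023; /* Integer part of n.  */
      printf ("n ~= %d + %u/64\n", m, i);
      return 0;
    }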
diff --git a/sysdeps/aarch64/fpu/expf_advsimd.c b/sysdeps/aarch64/fpu/expf_advsimd.c
new file mode 100644
index 0000000000..9d765ee81e
--- /dev/null
+++ b/sysdeps/aarch64/fpu/expf_advsimd.c
@@ -0,0 +1,132 @@
+/* Single-precision vector (Advanced SIMD) exp function.
+
+   Copyright (C) 2023 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include "v_math.h"
+
+static const volatile struct
+{
+  float32x4_t poly[5];
+  float32x4_t shift, inv_ln2, ln2_hi, ln2_lo;
+  uint32x4_t exponent_bias;
+#if !WANT_SIMD_EXCEPT
+  float32x4_t special_bound, scale_thresh;
+#endif
+} data = {
+  /* maxerr: 1.45358 +0.5 ulp.  */
+  .poly = { V4 (0x1.0e4020p-7f), V4 (0x1.573e2ep-5f), V4 (0x1.555e66p-3f),
+	    V4 (0x1.fffdb6p-2f), V4 (0x1.ffffecp-1f) },
+  .shift = V4 (0x1.8p23f),
+  .inv_ln2 = V4 (0x1.715476p+0f),
+  .ln2_hi = V4 (0x1.62e4p-1f),
+  .ln2_lo = V4 (0x1.7f7d1cp-20f),
+  .exponent_bias = V4 (0x3f800000),
+#if !WANT_SIMD_EXCEPT
+  .special_bound = V4 (126.0f),
+  .scale_thresh = V4 (192.0f),
+#endif
+};
+
+#define C(i) data.poly[i]
+
+#if WANT_SIMD_EXCEPT
+
+# define TinyBound v_u32 (0x20000000)	/* asuint (0x1p-63).  */
+# define BigBound v_u32 (0x42800000)	/* asuint (0x1p6).  */
+# define SpecialBound v_u32 (0x22800000) /* BigBound - TinyBound.  */
+
+static float32x4_t VPCS_ATTR NOINLINE
+special_case (float32x4_t x, float32x4_t y, uint32x4_t cmp)
+{
+  /* If fenv exceptions are to be triggered correctly, fall back to the scalar
+     routine for special lanes.  */
+  return v_call_f32 (expf, x, y, cmp);
+}
+
+#else
+
+# define SpecialOffset v_u32 (0x82000000)
+# define SpecialBias v_u32 (0x7f000000)
+
+static float32x4_t VPCS_ATTR NOINLINE
+special_case (float32x4_t poly, float32x4_t n, uint32x4_t e, uint32x4_t cmp1,
+	      float32x4_t scale)
+{
+  /* 2^n may overflow, break it up into s1*s2.  */
+  uint32x4_t b = vandq_u32 (vclezq_f32 (n), SpecialOffset);
+  float32x4_t s1 = vreinterpretq_f32_u32 (vaddq_u32 (b, SpecialBias));
+  float32x4_t s2 = vreinterpretq_f32_u32 (vsubq_u32 (e, b));
+  uint32x4_t cmp2 = vcagtq_f32 (n, data.scale_thresh);
+  float32x4_t r2 = vmulq_f32 (s1, s1);
+  float32x4_t r1 = vmulq_f32 (vfmaq_f32 (s2, poly, s2), s1);
+  /* Similar to r1 but avoids double rounding in the subnormal range.  */
+  float32x4_t r0 = vfmaq_f32 (scale, poly, scale);
+  float32x4_t r = vbslq_f32 (cmp1, r1, r0);
+  return vbslq_f32 (cmp2, r2, r);
+}
+
+#endif
+
+float32x4_t VPCS_ATTR V_NAME_F1 (exp) (float32x4_t x)
+{
+  float32x4_t n, r, r2, scale, p, q, poly, z;
+  uint32x4_t cmp, e;
+
+#if WANT_SIMD_EXCEPT
+  /* asuint(x) - TinyBound >= BigBound - TinyBound.  */
+  cmp = vcgeq_u32 (
+      vsubq_u32 (vandq_u32 (vreinterpretq_u32_f32 (x), v_u32 (0x7fffffff)),
+		 TinyBound),
+      SpecialBound);
+  float32x4_t xm = x;
+  /* If any lanes are special, mask them with 1 and retain a copy of x to allow
+     special case handler to fix special lanes later. This is only necessary if
+     fenv exceptions are to be triggered correctly.  */
+  if (unlikely (v_any_u32 (cmp)))
+    x = vbslq_f32 (cmp, v_f32 (1), x);
+#endif
+
+  /* exp(x) = 2^n (1 + poly(r)), with 1 + poly(r) in [1/sqrt(2),sqrt(2)]
+     x = ln2*n + r, with r in [-ln2/2, ln2/2].  */
+  z = vfmaq_f32 (data.shift, x, data.inv_ln2);
+  n = vsubq_f32 (z, data.shift);
+  r = vfmsq_f32 (x, n, data.ln2_hi);
+  r = vfmsq_f32 (r, n, data.ln2_lo);
+  e = vshlq_n_u32 (vreinterpretq_u32_f32 (z), 23);
+  scale = vreinterpretq_f32_u32 (vaddq_u32 (e, data.exponent_bias));
+
+#if !WANT_SIMD_EXCEPT
+  cmp = vcagtq_f32 (n, data.special_bound);
+#endif
+
+  r2 = vmulq_f32 (r, r);
+  p = vfmaq_f32 (C (1), C (0), r);
+  q = vfmaq_f32 (C (3), C (2), r);
+  q = vfmaq_f32 (q, p, r2);
+  p = vmulq_f32 (C (4), r);
+  poly = vfmaq_f32 (p, q, r2);
+
+  if (unlikely (v_any_u32 (cmp)))
+#if WANT_SIMD_EXCEPT
+    return special_case (xm, vfmaq_f32 (scale, poly, scale), cmp);
+#else
+    return special_case (poly, n, e, cmp, scale);
+#endif
+
+  return vfmaq_f32 (scale, poly, scale);
+}
diff --git a/sysdeps/aarch64/fpu/expf_sve.c b/sysdeps/aarch64/fpu/expf_sve.c
new file mode 100644
index 0000000000..5fae525d78
--- /dev/null
+++ b/sysdeps/aarch64/fpu/expf_sve.c
@@ -0,0 +1,89 @@
+/* Single-precision vector (SVE) exp function.
+
+   Copyright (C) 2023 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include "sv_math.h"
+#include "sv_estrinf.h"
+
+static struct
+{
+  float poly[5];
+  float inv_ln2, ln2_hi, ln2_lo, shift, thres;
+} data = {
+  /* Coefficients copied from the polynomial in AdvSIMD variant, reversed for
+     compatibility with polynomial helpers.  */
+  .poly = { 0x1.ffffecp-1f, 0x1.fffdb6p-2f, 0x1.555e66p-3f, 0x1.573e2ep-5f,
+	    0x1.0e4020p-7f },
+  .inv_ln2 = 0x1.715476p+0f,
+  .ln2_hi = 0x1.62e4p-1f,
+  .ln2_lo = 0x1.7f7d1cp-20f,
+  /* 1.5*2^17 + 127.  */
+  .shift = 0x1.903f8p17f,
+  /* Roughly 87.3. For x < -Thres, the result is subnormal and not handled
+     correctly by FEXPA.  */
+  .thres = 0x1.5d5e2ap+6f,
+};
+
+#define C(i) sv_f32 (data.poly[i])
+#define ExponentBias 0x3f800000
+
+static svfloat32_t NOINLINE
+special_case (svfloat32_t x, svfloat32_t y, svbool_t special)
+{
+  return sv_call_f32 (expf, x, y, special);
+}
+
+/* Optimised single-precision SVE exp function.
+   Worst-case error is 1.04 ulp:
+   SV_NAME_F1 (exp)(0x1.a8eda4p+1) got 0x1.ba74bcp+4
+				  want 0x1.ba74bap+4.  */
+svfloat32_t SV_NAME_F1 (exp) (svfloat32_t x, const svbool_t pg)
+{
+  /* exp(x) = 2^n (1 + poly(r)), with 1 + poly(r) in [1/sqrt(2),sqrt(2)]
+     x = ln2*n + r, with r in [-ln2/2, ln2/2].  */
+
+  /* Load some constants in quad-word chunks to minimise memory access (last
+     lane is wasted).  */
+  svfloat32_t invln2_and_ln2 = svld1rq_f32 (svptrue_b32 (), &data.inv_ln2);
+
+  /* n = round(x/(ln2/N)).  */
+  svfloat32_t z = svmla_lane_f32 (sv_f32 (data.shift), x, invln2_and_ln2, 0);
+  svfloat32_t n = svsub_n_f32_x (pg, z, data.shift);
+
+  /* r = x - n*ln2/N.  */
+  svfloat32_t r = svmls_lane_f32 (x, n, invln2_and_ln2, 1);
+  r = svmls_lane_f32 (r, n, invln2_and_ln2, 2);
+
+  /* scale = 2^(n/N).  */
+  svbool_t is_special_case = svacgt_n_f32 (pg, x, data.thres);
+  svfloat32_t scale = svexpa_f32 (svreinterpret_u32_f32 (z));
+
+  /* y = exp(r) - 1 ~= r + C0 r^2 + C1 r^3 + C2 r^4 + C3 r^5 + C4 r^6.  */
+  svfloat32_t r2 = svmul_f32_x (pg, r, r);
+  /* Evaluate the polynomial using a hybrid scheme - offset variant of the
+     ESTRIN macro for coefficients 1 to 4, applying the most significant
+     coefficient directly.  */
+  svfloat32_t p14 = ESTRIN_3_ (pg, r, r2, C, 1);
+  svfloat32_t p0 = svmul_f32_x (pg, r, C (0));
+  svfloat32_t poly = svmla_f32_x (pg, p0, r2, p14);
+
+  if (unlikely (svptest_any (pg, is_special_case)))
+    return special_case (x, svmla_f32_x (pg, scale, scale, poly),
+			 is_special_case);
+
+  return svmla_f32_x (pg, scale, scale, poly);
+}
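
For reference, the quad-word constant load above relies purely on the struct
layout (no new information): svld1rq_f32 starting at &data.inv_ln2 reads four
consecutive floats, so lane 0 = inv_ln2, lane 1 = ln2_hi, lane 2 = ln2_lo and
lane 3 = shift (the wasted lane), and the subsequent svmla_lane/svmls_lane
calls select the constant by lane index instead of issuing three separate
broadcast loads.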
diff --git a/sysdeps/aarch64/fpu/sv_estrin.h b/sysdeps/aarch64/fpu/sv_estrin.h
new file mode 100644
index 0000000000..10d7df0929
--- /dev/null
+++ b/sysdeps/aarch64/fpu/sv_estrin.h
@@ -0,0 +1,23 @@
+/* Helper macros for double-precision Estrin polynomial evaluation
+   in SVE routines.
+
+   Copyright (C) 2023 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#define FMA(pg, x, y, z) svmla_f64_x (pg, z, x, y)
+
+#include "sv_estrin_wrap.h"
diff --git a/sysdeps/aarch64/fpu/sv_estrin_wrap.h b/sysdeps/aarch64/fpu/sv_estrin_wrap.h
new file mode 100644
index 0000000000..03702678dc
--- /dev/null
+++ b/sysdeps/aarch64/fpu/sv_estrin_wrap.h
@@ -0,0 +1,89 @@
+/* Helper macros for Estrin polynomial evaluation in SVE routines.
+
+   Copyright (C) 2023 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#define ESTRIN_1_(pg, x, c, i) FMA (pg, c (1 + i), x, c (i))
+#define ESTRIN_2_(pg, x, x2, c, i)                                            \
+  FMA (pg, c (2 + i), x2, ESTRIN_1_ (pg, x, c, i))
+#define ESTRIN_3_(pg, x, x2, c, i)                                            \
+  FMA (pg, ESTRIN_1_ (pg, x, c, 2 + i), x2, ESTRIN_1_ (pg, x, c, i))
+#define ESTRIN_4_(pg, x, x2, x4, c, i)                                        \
+  FMA (pg, c (4 + i), x4, ESTRIN_3_ (pg, x, x2, c, i))
+#define ESTRIN_5_(pg, x, x2, x4, c, i)                                        \
+  FMA (pg, ESTRIN_1_ (pg, x, c, 4 + i), x4, ESTRIN_3_ (pg, x, x2, c, i))
+#define ESTRIN_6_(pg, x, x2, x4, c, i)                                        \
+  FMA (pg, ESTRIN_2_ (pg, x, x2, c, 4 + i), x4, ESTRIN_3_ (pg, x, x2, c, i))
+#define ESTRIN_7_(pg, x, x2, x4, c, i)                                        \
+  FMA (pg, ESTRIN_3_ (pg, x, x2, c, 4 + i), x4, ESTRIN_3_ (pg, x, x2, c, i))
+#define ESTRIN_8_(pg, x, x2, x4, x8, c, i)                                    \
+  FMA (pg, c (8 + i), x8, ESTRIN_7_ (pg, x, x2, x4, c, i))
+#define ESTRIN_9_(pg, x, x2, x4, x8, c, i)                                    \
+  FMA (pg, ESTRIN_1_ (pg, x, c, 8 + i), x8, ESTRIN_7_ (pg, x, x2, x4, c, i))
+#define ESTRIN_10_(pg, x, x2, x4, x8, c, i)                                   \
+  FMA (pg, ESTRIN_2_ (pg, x, x2, c, 8 + i), x8,                               \
+       ESTRIN_7_ (pg, x, x2, x4, c, i))
+#define ESTRIN_11_(pg, x, x2, x4, x8, c, i)                                   \
+  FMA (pg, ESTRIN_3_ (pg, x, x2, c, 8 + i), x8,                               \
+       ESTRIN_7_ (pg, x, x2, x4, c, i))
+#define ESTRIN_12_(pg, x, x2, x4, x8, c, i)                                   \
+  FMA (pg, ESTRIN_4_ (pg, x, x2, x4, c, 8 + i), x8,                           \
+       ESTRIN_7_ (pg, x, x2, x4, c, i))
+#define ESTRIN_13_(pg, x, x2, x4, x8, c, i)                                   \
+  FMA (pg, ESTRIN_5_ (pg, x, x2, x4, c, 8 + i), x8,                           \
+       ESTRIN_7_ (pg, x, x2, x4, c, i))
+#define ESTRIN_14_(pg, x, x2, x4, x8, c, i)                                   \
+  FMA (pg, ESTRIN_6_ (pg, x, x2, x4, c, 8 + i), x8,                           \
+       ESTRIN_7_ (pg, x, x2, x4, c, i))
+#define ESTRIN_15_(pg, x, x2, x4, x8, c, i)                                   \
+  FMA (pg, ESTRIN_7_ (pg, x, x2, x4, c, 8 + i), x8,                           \
+       ESTRIN_7_ (pg, x, x2, x4, c, i))
+#define ESTRIN_16_(pg, x, x2, x4, x8, x16, c, i)                              \
+  FMA (pg, c (16 + i), x16, ESTRIN_15_ (pg, x, x2, x4, x8, c, i))
+#define ESTRIN_17_(pg, x, x2, x4, x8, x16, c, i)                              \
+  FMA (pg, ESTRIN_1_ (pg, x, c, 16 + i), x16,                                 \
+       ESTRIN_15_ (pg, x, x2, x4, x8, c, i))
+#define ESTRIN_18_(pg, x, x2, x4, x8, x16, c, i)                              \
+  FMA (pg, ESTRIN_2_ (pg, x, x2, c, 16 + i), x16,                             \
+       ESTRIN_15_ (pg, x, x2, x4, x8, c, i))
+#define ESTRIN_19_(pg, x, x2, x4, x8, x16, c, i)                              \
+  FMA (pg, ESTRIN_3_ (pg, x, x2, c, 16 + i), x16,                             \
+       ESTRIN_15_ (pg, x, x2, x4, x8, c, i))
+
+#define ESTRIN_1(pg, x, c) ESTRIN_1_ (pg, x, c, 0)
+#define ESTRIN_2(pg, x, x2, c) ESTRIN_2_ (pg, x, x2, c, 0)
+#define ESTRIN_3(pg, x, x2, c) ESTRIN_3_ (pg, x, x2, c, 0)
+#define ESTRIN_4(pg, x, x2, x4, c) ESTRIN_4_ (pg, x, x2, x4, c, 0)
+#define ESTRIN_5(pg, x, x2, x4, c) ESTRIN_5_ (pg, x, x2, x4, c, 0)
+#define ESTRIN_6(pg, x, x2, x4, c) ESTRIN_6_ (pg, x, x2, x4, c, 0)
+#define ESTRIN_7(pg, x, x2, x4, c) ESTRIN_7_ (pg, x, x2, x4, c, 0)
+#define ESTRIN_8(pg, x, x2, x4, x8, c) ESTRIN_8_ (pg, x, x2, x4, x8, c, 0)
+#define ESTRIN_9(pg, x, x2, x4, x8, c) ESTRIN_9_ (pg, x, x2, x4, x8, c, 0)
+#define ESTRIN_10(pg, x, x2, x4, x8, c) ESTRIN_10_ (pg, x, x2, x4, x8, c, 0)
+#define ESTRIN_11(pg, x, x2, x4, x8, c) ESTRIN_11_ (pg, x, x2, x4, x8, c, 0)
+#define ESTRIN_12(pg, x, x2, x4, x8, c) ESTRIN_12_ (pg, x, x2, x4, x8, c, 0)
+#define ESTRIN_13(pg, x, x2, x4, x8, c) ESTRIN_13_ (pg, x, x2, x4, x8, c, 0)
+#define ESTRIN_14(pg, x, x2, x4, x8, c) ESTRIN_14_ (pg, x, x2, x4, x8, c, 0)
+#define ESTRIN_15(pg, x, x2, x4, x8, c) ESTRIN_15_ (pg, x, x2, x4, x8, c, 0)
+#define ESTRIN_16(pg, x, x2, x4, x8, x16, c)                                  \
+  ESTRIN_16_ (pg, x, x2, x4, x8, x16, c, 0)
+#define ESTRIN_17(pg, x, x2, x4, x8, x16, c)                                  \
+  ESTRIN_17_ (pg, x, x2, x4, x8, x16, c, 0)
+#define ESTRIN_18(pg, x, x2, x4, x8, x16, c)                                  \
+  ESTRIN_18_ (pg, x, x2, x4, x8, x16, c, 0)
+#define ESTRIN_19(pg, x, x2, x4, x8, x16, c)                                  \
+  ESTRIN_19_ (pg, x, x2, x4, x8, x16, c, 0)
diff --git a/sysdeps/aarch64/fpu/sv_estrinf.h b/sysdeps/aarch64/fpu/sv_estrinf.h
new file mode 100644
index 0000000000..02f45884d4
--- /dev/null
+++ b/sysdeps/aarch64/fpu/sv_estrinf.h
@@ -0,0 +1,23 @@
+/* Helper macros for single-precision Estrin polynomial evaluation
+   in SVE routines.
+
+   Copyright (C) 2023 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#define FMA(pg, x, y, z) svmla_f32_x (pg, z, x, y)
+
+#include "sv_estrin_wrap.h"
diff --git a/sysdeps/aarch64/fpu/test-double-advsimd-wrappers.c b/sysdeps/aarch64/fpu/test-double-advsimd-wrappers.c
index c5f6fcd7c4..3b6b1e343d 100644
--- a/sysdeps/aarch64/fpu/test-double-advsimd-wrappers.c
+++ b/sysdeps/aarch64/fpu/test-double-advsimd-wrappers.c
@@ -24,5 +24,6 @@
 #define VEC_TYPE float64x2_t
 
 VPCS_VECTOR_WRAPPER (cos_advsimd, _ZGVnN2v_cos)
+VPCS_VECTOR_WRAPPER (exp_advsimd, _ZGVnN2v_exp)
 VPCS_VECTOR_WRAPPER (log_advsimd, _ZGVnN2v_log)
 VPCS_VECTOR_WRAPPER (sin_advsimd, _ZGVnN2v_sin)
diff --git a/sysdeps/aarch64/fpu/test-double-sve-wrappers.c b/sysdeps/aarch64/fpu/test-double-sve-wrappers.c
index d5e2ec6dc5..d7ac47ca22 100644
--- a/sysdeps/aarch64/fpu/test-double-sve-wrappers.c
+++ b/sysdeps/aarch64/fpu/test-double-sve-wrappers.c
@@ -33,5 +33,6 @@
   }
 
 SVE_VECTOR_WRAPPER (cos_sve, _ZGVsMxv_cos)
+SVE_VECTOR_WRAPPER (exp_sve, _ZGVsMxv_exp)
 SVE_VECTOR_WRAPPER (log_sve, _ZGVsMxv_log)
 SVE_VECTOR_WRAPPER (sin_sve, _ZGVsMxv_sin)
diff --git a/sysdeps/aarch64/fpu/test-float-advsimd-wrappers.c b/sysdeps/aarch64/fpu/test-float-advsimd-wrappers.c
index c240738837..d4a9ac7154 100644
--- a/sysdeps/aarch64/fpu/test-float-advsimd-wrappers.c
+++ b/sysdeps/aarch64/fpu/test-float-advsimd-wrappers.c
@@ -24,5 +24,6 @@
 #define VEC_TYPE float32x4_t
 
 VPCS_VECTOR_WRAPPER (cosf_advsimd, _ZGVnN4v_cosf)
+VPCS_VECTOR_WRAPPER (expf_advsimd, _ZGVnN4v_expf)
 VPCS_VECTOR_WRAPPER (logf_advsimd, _ZGVnN4v_logf)
 VPCS_VECTOR_WRAPPER (sinf_advsimd, _ZGVnN4v_sinf)
diff --git a/sysdeps/aarch64/fpu/test-float-sve-wrappers.c b/sysdeps/aarch64/fpu/test-float-sve-wrappers.c
index 5a06b75857..d44033eab0 100644
--- a/sysdeps/aarch64/fpu/test-float-sve-wrappers.c
+++ b/sysdeps/aarch64/fpu/test-float-sve-wrappers.c
@@ -33,5 +33,6 @@
   }
 
 SVE_VECTOR_WRAPPER (cosf_sve, _ZGVsMxv_cosf)
+SVE_VECTOR_WRAPPER (expf_sve, _ZGVsMxv_expf)
 SVE_VECTOR_WRAPPER (logf_sve, _ZGVsMxv_logf)
 SVE_VECTOR_WRAPPER (sinf_sve, _ZGVsMxv_sinf)
diff --git a/sysdeps/aarch64/fpu/v_exp_data.c b/sysdeps/aarch64/fpu/v_exp_data.c
new file mode 100644
index 0000000000..b857062f78
--- /dev/null
+++ b/sysdeps/aarch64/fpu/v_exp_data.c
@@ -0,0 +1,66 @@
+/* Scale values for vector exp and exp2
+
+   Copyright (C) 2023 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include "vecmath_config.h"
+
+const uint64_t __v_exp_data[] = {
+  0x3ff0000000000000, 0x3feff63da9fb3335, 0x3fefec9a3e778061,
+  0x3fefe315e86e7f85, 0x3fefd9b0d3158574, 0x3fefd06b29ddf6de,
+  0x3fefc74518759bc8, 0x3fefbe3ecac6f383, 0x3fefb5586cf9890f,
+  0x3fefac922b7247f7, 0x3fefa3ec32d3d1a2, 0x3fef9b66affed31b,
+  0x3fef9301d0125b51, 0x3fef8abdc06c31cc, 0x3fef829aaea92de0,
+  0x3fef7a98c8a58e51, 0x3fef72b83c7d517b, 0x3fef6af9388c8dea,
+  0x3fef635beb6fcb75, 0x3fef5be084045cd4, 0x3fef54873168b9aa,
+  0x3fef4d5022fcd91d, 0x3fef463b88628cd6, 0x3fef3f49917ddc96,
+  0x3fef387a6e756238, 0x3fef31ce4fb2a63f, 0x3fef2b4565e27cdd,
+  0x3fef24dfe1f56381, 0x3fef1e9df51fdee1, 0x3fef187fd0dad990,
+  0x3fef1285a6e4030b, 0x3fef0cafa93e2f56, 0x3fef06fe0a31b715,
+  0x3fef0170fc4cd831, 0x3feefc08b26416ff, 0x3feef6c55f929ff1,
+  0x3feef1a7373aa9cb, 0x3feeecae6d05d866, 0x3feee7db34e59ff7,
+  0x3feee32dc313a8e5, 0x3feedea64c123422, 0x3feeda4504ac801c,
+  0x3feed60a21f72e2a, 0x3feed1f5d950a897, 0x3feece086061892d,
+  0x3feeca41ed1d0057, 0x3feec6a2b5c13cd0, 0x3feec32af0d7d3de,
+  0x3feebfdad5362a27, 0x3feebcb299fddd0d, 0x3feeb9b2769d2ca7,
+  0x3feeb6daa2cf6642, 0x3feeb42b569d4f82, 0x3feeb1a4ca5d920f,
+  0x3feeaf4736b527da, 0x3feead12d497c7fd, 0x3feeab07dd485429,
+  0x3feea9268a5946b7, 0x3feea76f15ad2148, 0x3feea5e1b976dc09,
+  0x3feea47eb03a5585, 0x3feea34634ccc320, 0x3feea23882552225,
+  0x3feea155d44ca973, 0x3feea09e667f3bcd, 0x3feea012750bdabf,
+  0x3fee9fb23c651a2f, 0x3fee9f7df9519484, 0x3fee9f75e8ec5f74,
+  0x3fee9f9a48a58174, 0x3fee9feb564267c9, 0x3feea0694fde5d3f,
+  0x3feea11473eb0187, 0x3feea1ed0130c132, 0x3feea2f336cf4e62,
+  0x3feea427543e1a12, 0x3feea589994cce13, 0x3feea71a4623c7ad,
+  0x3feea8d99b4492ed, 0x3feeaac7d98a6699, 0x3feeace5422aa0db,
+  0x3feeaf3216b5448c, 0x3feeb1ae99157736, 0x3feeb45b0b91ffc6,
+  0x3feeb737b0cdc5e5, 0x3feeba44cbc8520f, 0x3feebd829fde4e50,
+  0x3feec0f170ca07ba, 0x3feec49182a3f090, 0x3feec86319e32323,
+  0x3feecc667b5de565, 0x3feed09bec4a2d33, 0x3feed503b23e255d,
+  0x3feed99e1330b358, 0x3feede6b5579fdbf, 0x3feee36bbfd3f37a,
+  0x3feee89f995ad3ad, 0x3feeee07298db666, 0x3feef3a2b84f15fb,
+  0x3feef9728de5593a, 0x3feeff76f2fb5e47, 0x3fef05b030a1064a,
+  0x3fef0c1e904bc1d2, 0x3fef12c25bd71e09, 0x3fef199bdd85529c,
+  0x3fef20ab5fffd07a, 0x3fef27f12e57d14b, 0x3fef2f6d9406e7b5,
+  0x3fef3720dcef9069, 0x3fef3f0b555dc3fa, 0x3fef472d4a07897c,
+  0x3fef4f87080d89f2, 0x3fef5818dcfba487, 0x3fef60e316c98398,
+  0x3fef69e603db3285, 0x3fef7321f301b460, 0x3fef7c97337b9b5f,
+  0x3fef864614f5a129, 0x3fef902ee78b3ff6, 0x3fef9a51fbc74c83,
+  0x3fefa4afa2a490da, 0x3fefaf482d8e67f1, 0x3fefba1bee615a27,
+  0x3fefc52b376bba97, 0x3fefd0765b6e4540, 0x3fefdbfdad9cbe14,
+  0x3fefe7c1819e90d8, 0x3feff3c22b8f71f1,
+};
diff --git a/sysdeps/aarch64/fpu/vecmath_config.h b/sysdeps/aarch64/fpu/vecmath_config.h
index 84cf5b2eef..f82287273e 100644
--- a/sysdeps/aarch64/fpu/vecmath_config.h
+++ b/sysdeps/aarch64/fpu/vecmath_config.h
@@ -41,4 +41,7 @@ extern const struct v_log_data
   double invc[1 << V_LOG_TABLE_BITS];
   double logc[1 << V_LOG_TABLE_BITS];
 } __v_log_data HIDDEN;
+
+#define V_EXP_TABLE_BITS 7
+extern const uint64_t __v_exp_data[1 << V_EXP_TABLE_BITS] HIDDEN;
 #endif
diff --git a/sysdeps/aarch64/libm-test-ulps b/sysdeps/aarch64/libm-test-ulps
index 7bdbf4614d..6b25ed43e0 100644
--- a/sysdeps/aarch64/libm-test-ulps
+++ b/sysdeps/aarch64/libm-test-ulps
@@ -1005,10 +1005,18 @@ double: 1
 float: 1
 ldouble: 2
 
+Function: "exp_advsimd":
+double: 1
+float: 1
+
 Function: "exp_downward":
 double: 1
 float: 1
 
+Function: "exp_sve":
+double: 1
+float: 1
+
 Function: "exp_towardzero":
 double: 1
 float: 1
diff --git a/sysdeps/unix/sysv/linux/aarch64/libmvec.abilist b/sysdeps/unix/sysv/linux/aarch64/libmvec.abilist
index 1922191886..ae46ef8c34 100644
--- a/sysdeps/unix/sysv/linux/aarch64/libmvec.abilist
+++ b/sysdeps/unix/sysv/linux/aarch64/libmvec.abilist
@@ -1,11 +1,15 @@
 GLIBC_2.38 _ZGVnN2v_cos F
+GLIBC_2.38 _ZGVnN2v_exp F
 GLIBC_2.38 _ZGVnN2v_log F
 GLIBC_2.38 _ZGVnN2v_sin F
 GLIBC_2.38 _ZGVnN4v_cosf F
+GLIBC_2.38 _ZGVnN4v_expf F
 GLIBC_2.38 _ZGVnN4v_logf F
 GLIBC_2.38 _ZGVnN4v_sinf F
 GLIBC_2.38 _ZGVsMxv_cos F
 GLIBC_2.38 _ZGVsMxv_cosf F
+GLIBC_2.38 _ZGVsMxv_exp F
+GLIBC_2.38 _ZGVsMxv_expf F
 GLIBC_2.38 _ZGVsMxv_log F
 GLIBC_2.38 _ZGVsMxv_logf F
 GLIBC_2.38 _ZGVsMxv_sin F
-- 
2.27.0


^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH 1/4] aarch64: Add vector implementations of cos routines
  2023-06-08 13:39 [PATCH 1/4] aarch64: Add vector implementations of cos routines Joe Ramsay
                   ` (2 preceding siblings ...)
  2023-06-08 13:39 ` [PATCH 4/4] aarch64: Add vector implementations of exp routines Joe Ramsay
@ 2023-06-13 17:29 ` Adhemerval Zanella Netto
  2023-06-15 14:43   ` Joe Ramsay
  2023-06-13 19:56 ` Adhemerval Zanella Netto
  4 siblings, 1 reply; 11+ messages in thread
From: Adhemerval Zanella Netto @ 2023-06-13 17:29 UTC (permalink / raw)
  To: Joe Ramsay, libc-alpha



On 08/06/23 10:39, Joe Ramsay via Libc-alpha wrote:
> Replace the loop-over-scalar placeholder routines with optimised
> implementations from Arm Optimized Routines (AOR).
> 
> Also add some headers containing utilities for aarch64 libmvec
> routines, and update libm-test-ulps.
> 
> AOR exposes a config option, WANT_SIMD_EXCEPT, to enable
> selective masking (and later fixing up) of invalid lanes, in
> order to trigger fp exceptions correctly (AdvSIMD only). This is
> tested and maintained in AOR, however it is configured off at
> source level here for performance reasons. We keep the
> WANT_SIMD_EXCEPT blocks in routine sources to greatly simplify
> the upstreaming process from AOR to glibc.
> ---
>  sysdeps/aarch64/fpu/cos_advsimd.c             |  81 ++++++-
>  sysdeps/aarch64/fpu/cos_sve.c                 |  73 ++++++-
>  sysdeps/aarch64/fpu/cosf_advsimd.c            |  76 ++++++-
>  sysdeps/aarch64/fpu/cosf_sve.c                |  70 ++++++-
>  sysdeps/aarch64/fpu/sv_math.h                 | 141 +++++++++++++
>  sysdeps/aarch64/fpu/sve_utils.h               |  55 -----
>  sysdeps/aarch64/fpu/v_math.h                  | 197 ++++++++++++++++++
>  .../fpu/{advsimd_utils.h => vecmath_config.h} |  30 ++-
>  sysdeps/aarch64/libm-test-ulps                |   2 +-
>  9 files changed, 629 insertions(+), 96 deletions(-)
>  create mode 100644 sysdeps/aarch64/fpu/sv_math.h
>  delete mode 100644 sysdeps/aarch64/fpu/sve_utils.h
>  create mode 100644 sysdeps/aarch64/fpu/v_math.h
>  rename sysdeps/aarch64/fpu/{advsimd_utils.h => vecmath_config.h} (57%)
> 
> diff --git a/sysdeps/aarch64/fpu/cos_advsimd.c b/sysdeps/aarch64/fpu/cos_advsimd.c
> index 40831e6b0d..1f7a7023f5 100644
> --- a/sysdeps/aarch64/fpu/cos_advsimd.c
> +++ b/sysdeps/aarch64/fpu/cos_advsimd.c
> @@ -17,13 +17,82 @@
>     License along with the GNU C Library; if not, see
>     <https://www.gnu.org/licenses/>.  */
>  
> -#include <math.h>
> +#include "v_math.h"
>  
> -#include "advsimd_utils.h"
> +static const volatile struct

Why do you need volatile here?

> +{
> +  float64x2_t poly[7];
> +  float64x2_t range_val, shift, inv_pi, half_pi, pi_1, pi_2, pi_3;
> +} data = {
> +  /* Worst-case error is 3.3 ulp in [-pi/2, pi/2].  */
> +  .poly = { V2 (-0x1.555555555547bp-3), V2 (0x1.1111111108a4dp-7),
> +	    V2 (-0x1.a01a019936f27p-13), V2 (0x1.71de37a97d93ep-19),
> +	    V2 (-0x1.ae633919987c6p-26), V2 (0x1.60e277ae07cecp-33),
> +	    V2 (-0x1.9e9540300a1p-41) },
> +  .inv_pi = V2 (0x1.45f306dc9c883p-2),
> +  .half_pi = V2 (0x1.921fb54442d18p+0),
> +  .pi_1 = V2 (0x1.921fb54442d18p+1),
> +  .pi_2 = V2 (0x1.1a62633145c06p-53),
> +  .pi_3 = V2 (0x1.c1cd129024e09p-106),
> +  .shift = V2 (0x1.8p52),
> +  .range_val = V2 (0x1p23)
> +};
> +
> +#define C(i) data.poly[i]
> +
> +static float64x2_t VPCS_ATTR NOINLINE
> +special_case (float64x2_t x, float64x2_t y, uint64x2_t odd, uint64x2_t cmp)
> +{
> +  y = vreinterpretq_f64_u64 (veorq_u64 (vreinterpretq_u64_f64 (y), odd));
> +  return v_call_f64 (cos, x, y, cmp);
> +}
>  
> -VPCS_ATTR
> -float64x2_t
> -V_NAME_D1 (cos) (float64x2_t x)
> +float64x2_t VPCS_ATTR V_NAME_D1 (cos) (float64x2_t x)
>  {
> -  return v_call_f64 (cos, x);
> +  float64x2_t n, r, r2, r3, r4, t1, t2, t3, y;
> +  uint64x2_t odd, cmp;
> +
> +#if WANT_SIMD_EXCEPT
> +  r = vabsq_f64 (x);
> +  cmp = vcgeq_u64 (vreinterpretq_u64_f64 (r),
> +		   vreinterpretq_u64_f64 (data.range_val));
> +  if (unlikely (v_any_u64 (cmp)))
> +    /* If fenv exceptions are to be triggered correctly, set any special lanes
> +       to 1 (which is neutral w.r.t. fenv). These lanes will be fixed by
> +       special-case handler later.  */
> +    r = vbslq_f64 (cmp, v_f64 (1.0), r);
> +#else
> +  cmp = vcageq_f64 (data.range_val, x);
> +  cmp = vceqzq_u64 (cmp); /* cmp = ~cmp.  */
> +  r = x;
> +#endif
> +
> +  /* n = rint((|x|+pi/2)/pi) - 0.5.  */
> +  n = vfmaq_f64 (data.shift, data.inv_pi, vaddq_f64 (r, data.half_pi));
> +  odd = vshlq_n_u64 (vreinterpretq_u64_f64 (n), 63);
> +  n = vsubq_f64 (n, data.shift);
> +  n = vsubq_f64 (n, v_f64 (0.5));
> +
> +  /* r = |x| - n*pi  (range reduction into -pi/2 .. pi/2).  */
> +  r = vfmsq_f64 (r, data.pi_1, n);
> +  r = vfmsq_f64 (r, data.pi_2, n);
> +  r = vfmsq_f64 (r, data.pi_3, n);
> +
> +  /* sin(r) poly approx.  */
> +  r2 = vmulq_f64 (r, r);
> +  r3 = vmulq_f64 (r2, r);
> +  r4 = vmulq_f64 (r2, r2);
> +
> +  t1 = vfmaq_f64 (C (4), C (5), r2);
> +  t2 = vfmaq_f64 (C (2), C (3), r2);
> +  t3 = vfmaq_f64 (C (0), C (1), r2);
> +
> +  y = vfmaq_f64 (t1, C (6), r4);
> +  y = vfmaq_f64 (t2, y, r4);
> +  y = vfmaq_f64 (t3, y, r4);
> +  y = vfmaq_f64 (r, y, r3);
> +
> +  if (unlikely (v_any_u64 (cmp)))
> +    return special_case (x, y, odd, cmp);
> +  return vreinterpretq_f64_u64 (veorq_u64 (vreinterpretq_u64_f64 (y), odd));
>  }
> diff --git a/sysdeps/aarch64/fpu/cos_sve.c b/sysdeps/aarch64/fpu/cos_sve.c
> index 55501e5000..b93de076bb 100644
> --- a/sysdeps/aarch64/fpu/cos_sve.c
> +++ b/sysdeps/aarch64/fpu/cos_sve.c
> @@ -17,12 +17,75 @@
>     License along with the GNU C Library; if not, see
>     <https://www.gnu.org/licenses/>.  */
>  
> -#include <math.h>
> +#include "sv_math.h"
>  
> -#include "sve_utils.h"
> +static struct

These seem to be used as constants in the code, so I think we should use
'const' here.
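For instance, a minimal sketch of what I mean (only the first field shown,
name taken from the hunk below):

  static const struct
  {
    double inv_pio2;
  } data = {
    .inv_pio2 = 0x1.45f306dc9c882p-1,
  };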

> +{
> +  double inv_pio2, pio2_1, pio2_2, pio2_3, shift;
> +} data = {
> +  /* Polynomial coefficients are hardwired in FTMAD instructions.  */
> +  .inv_pio2 = 0x1.45f306dc9c882p-1,
> +  .pio2_1 = 0x1.921fb50000000p+0,
> +  .pio2_2 = 0x1.110b460000000p-26,
> +  .pio2_3 = 0x1.1a62633145c07p-54,
> +  /* Original shift used in AdvSIMD cos,
> +     plus a contribution to set the bit #0 of q
> +     as expected by trigonometric instructions.  */
> +  .shift = 0x1.8000000000001p52
> +};
> +
> +#define RangeVal 0x4160000000000000 /* asuint64 (0x1p23).  */
> +
> +static svfloat64_t NOINLINE
> +special_case (svfloat64_t x, svfloat64_t y, svbool_t out_of_bounds)
> +{
> +  return sv_call_f64 (cos, x, y, out_of_bounds);
> +}
>  
> -svfloat64_t
> -SV_NAME_D1 (cos) (svfloat64_t x, svbool_t pg)
> +/* A fast SVE implementation of cos based on trigonometric
> +   instructions (FTMAD, FTSSEL, FTSMUL).
> +   Maximum measured error: 2.108 ULPs.
> +   SV_NAME_D1 (cos)(0x1.9b0ba158c98f3p+7) got -0x1.fddd4c65c7f07p-3
> +					 want -0x1.fddd4c65c7f05p-3.  */
> +svfloat64_t SV_NAME_D1 (cos) (svfloat64_t x, const svbool_t pg)
>  {
> -  return sv_call_f64 (cos, x, svdup_n_f64 (0), pg);
> +  svfloat64_t r = svabs_f64_x (pg, x);
> +  svbool_t out_of_bounds
> +      = svcmpge_n_u64 (pg, svreinterpret_u64_f64 (r), RangeVal);
> +
> +  /* Load some constants in quad-word chunks to minimise memory access.  */
> +  svbool_t ptrue = svptrue_b64 ();
> +  svfloat64_t invpio2_and_pio2_1 = svld1rq_f64 (ptrue, &data.inv_pio2);
> +  svfloat64_t pio2_23 = svld1rq_f64 (ptrue, &data.pio2_2);
> +
> +  /* n = rint(|x|/(pi/2)).  */
> +  svfloat64_t q
> +      = svmla_lane_f64 (sv_f64 (data.shift), r, invpio2_and_pio2_1, 0);
> +  svfloat64_t n = svsub_n_f64_x (pg, q, data.shift);
> +
> +  /* r = |x| - n*(pi/2)  (range reduction into -pi/4 .. pi/4).  */
> +  r = svmls_lane_f64 (r, n, invpio2_and_pio2_1, 1);
> +  r = svmls_lane_f64 (r, n, pio2_23, 0);
> +  r = svmls_lane_f64 (r, n, pio2_23, 1);
> +
> +  /* cos(r) poly approx.  */
> +  svfloat64_t r2 = svtsmul_f64 (r, svreinterpret_u64_f64 (q));
> +  svfloat64_t y = sv_f64 (0.0);
> +  y = svtmad_f64 (y, r2, 7);
> +  y = svtmad_f64 (y, r2, 6);
> +  y = svtmad_f64 (y, r2, 5);
> +  y = svtmad_f64 (y, r2, 4);
> +  y = svtmad_f64 (y, r2, 3);
> +  y = svtmad_f64 (y, r2, 2);
> +  y = svtmad_f64 (y, r2, 1);
> +  y = svtmad_f64 (y, r2, 0);
> +
> +  /* Final multiplicative factor: 1.0 or x depending on bit #0 of q.  */
> +  svfloat64_t f = svtssel_f64 (r, svreinterpret_u64_f64 (q));
> +  /* Apply factor.  */
> +  y = svmul_f64_x (pg, f, y);
> +
> +  if (unlikely (svptest_any (pg, out_of_bounds)))
> +    return special_case (x, y, out_of_bounds);
> +  return y;
>  }
> diff --git a/sysdeps/aarch64/fpu/cosf_advsimd.c b/sysdeps/aarch64/fpu/cosf_advsimd.c
> index 35bb81aead..a5c7437bfb 100644
> --- a/sysdeps/aarch64/fpu/cosf_advsimd.c
> +++ b/sysdeps/aarch64/fpu/cosf_advsimd.c
> @@ -17,13 +17,77 @@
>     License along with the GNU C Library; if not, see
>     <https://www.gnu.org/licenses/>.  */
>  
> -#include <math.h>
> +#include "v_math.h"
>  
> -#include "advsimd_utils.h"
> +static const volatile struct

Same as before about volatile.

> +{
> +  float32x4_t poly[4];
> +  float32x4_t range_val, inv_pi, half_pi, shift, pi_1, pi_2, pi_3;
> +} data = {
> +  /* 1.886 ulp error.  */
> +  .poly = { V4 (-0x1.555548p-3f), V4 (0x1.110df4p-7f), V4 (-0x1.9f42eap-13f),
> +	    V4 (0x1.5b2e76p-19f) },
> +
> +  .pi_1 = V4 (0x1.921fb6p+1f),
> +  .pi_2 = V4 (-0x1.777a5cp-24f),
> +  .pi_3 = V4 (-0x1.ee59dap-49f),
> +
> +  .inv_pi = V4 (0x1.45f306p-2f),
> +  .shift = V4 (0x1.8p+23f),
> +  .half_pi = V4 (0x1.921fb6p0f),
> +  .range_val = V4 (0x1p20f)
> +};
> +
> +#define C(i) data.poly[i]
> +
> +static float32x4_t VPCS_ATTR NOINLINE
> +special_case (float32x4_t x, float32x4_t y, uint32x4_t odd, uint32x4_t cmp)
> +{
> +  /* Fall back to scalar code.  */
> +  y = vreinterpretq_f32_u32 (veorq_u32 (vreinterpretq_u32_f32 (y), odd));
> +  return v_call_f32 (cosf, x, y, cmp);
> +}
>  
> -VPCS_ATTR
> -float32x4_t
> -V_NAME_F1 (cos) (float32x4_t x)
> +float32x4_t VPCS_ATTR V_NAME_F1 (cos) (float32x4_t x)
>  {
> -  return v_call_f32 (cosf, x);
> +  float32x4_t n, r, r2, r3, y;
> +  uint32x4_t odd, cmp;
> +
> +#if WANT_SIMD_EXCEPT
> +  r = vabsq_f32 (x);
> +  cmp = vcgeq_u32 (vreinterpretq_u32_f32 (r),
> +		   vreinterpretq_u32_f32 (data.range_val));
> +  if (unlikely (v_any_u32 (cmp)))
> +    /* If fenv exceptions are to be triggered correctly, set any special lanes
> +       to 1 (which is neutral w.r.t. fenv). These lanes will be fixed by
> +       special-case handler later.  */
> +    r = vbslq_f32 (cmp, v_f32 (1.0f), r);
> +#else
> +  cmp = vcageq_f32 (data.range_val, x);
> +  cmp = vceqzq_u32 (cmp); /* cmp = ~cmp.  */
> +  r = x;
> +#endif
> +
> +  /* n = rint((|x|+pi/2)/pi) - 0.5.  */
> +  n = vfmaq_f32 (data.shift, data.inv_pi, vaddq_f32 (r, data.half_pi));
> +  odd = vshlq_n_u32 (vreinterpretq_u32_f32 (n), 31);
> +  n = vsubq_f32 (n, data.shift);
> +  n = vsubq_f32 (n, v_f32 (0.5f));
> +
> +  /* r = |x| - n*pi  (range reduction into -pi/2 .. pi/2).  */
> +  r = vfmsq_f32 (r, data.pi_1, n);
> +  r = vfmsq_f32 (r, data.pi_2, n);
> +  r = vfmsq_f32 (r, data.pi_3, n);
> +
> +  /* y = sin(r).  */
> +  r2 = vmulq_f32 (r, r);
> +  r3 = vmulq_f32 (r2, r);
> +  y = vfmaq_f32 (C (2), C (3), r2);
> +  y = vfmaq_f32 (C (1), y, r2);
> +  y = vfmaq_f32 (C (0), y, r2);
> +  y = vfmaq_f32 (r, y, r3);
> +
> +  if (unlikely (v_any_u32 (cmp)))
> +    return special_case (x, y, odd, cmp);
> +  return vreinterpretq_f32_u32 (veorq_u32 (vreinterpretq_u32_f32 (y), odd));
>  }
> diff --git a/sysdeps/aarch64/fpu/cosf_sve.c b/sysdeps/aarch64/fpu/cosf_sve.c
> index 16c68f387b..d7cfc45fc4 100644
> --- a/sysdeps/aarch64/fpu/cosf_sve.c
> +++ b/sysdeps/aarch64/fpu/cosf_sve.c
> @@ -17,12 +17,72 @@
>     License along with the GNU C Library; if not, see
>     <https://www.gnu.org/licenses/>.  */
>  
> -#include <math.h>
> +#include "sv_math.h"
>  
> -#include "sve_utils.h"
> +static struct

Same as before, I think this should be 'const'.

> +{
> +  float neg_pio2_1, neg_pio2_2, neg_pio2_3, inv_pio2, shift;
> +} data = {
> +  /* Polynomial coefficients are hard-wired in FTMAD instructions.  */
> +  .neg_pio2_1 = -0x1.921fb6p+0f,
> +  .neg_pio2_2 = 0x1.777a5cp-25f,
> +  .neg_pio2_3 = 0x1.ee59dap-50f,
> +  .inv_pio2 = 0x1.45f306p-1f,
> +  /* Original shift used in AdvSIMD cosf,
> +     plus a contribution to set the bit #0 of q
> +     as expected by trigonometric instructions.  */
> +  .shift = 0x1.800002p+23f
> +};
> +
> +#define RangeVal 0x49800000 /* asuint32(0x1p20f).  */
> +
> +static svfloat32_t NOINLINE
> +special_case (svfloat32_t x, svfloat32_t y, svbool_t out_of_bounds)
> +{
> +  return sv_call_f32 (cosf, x, y, out_of_bounds);
> +}
>  
> -svfloat32_t
> -SV_NAME_F1 (cos) (svfloat32_t x, svbool_t pg)
> +/* A fast SVE implementation of cosf based on trigonometric
> +   instructions (FTMAD, FTSSEL, FTSMUL).
> +   Maximum measured error: 2.06 ULPs.
> +   SV_NAME_F1 (cos)(0x1.dea2f2p+19) got 0x1.fffe7ap-6
> +				   want 0x1.fffe76p-6.  */
> +svfloat32_t SV_NAME_F1 (cos) (svfloat32_t x, const svbool_t pg)
>  {
> -  return sv_call_f32 (cosf, x, svdup_n_f32 (0), pg);
> +  svfloat32_t r = svabs_f32_x (pg, x);
> +  svbool_t out_of_bounds
> +    = svcmpge_n_u32 (pg, svreinterpret_u32_f32 (r), RangeVal);
> +
> +  /* Load some constants in quad-word chunks to minimise memory access.  */
> +  svfloat32_t negpio2_and_invpio2
> +      = svld1rq_f32 (svptrue_b32 (), &data.neg_pio2_1);
> +
> +  /* n = rint(|x|/(pi/2)).  */
> +  svfloat32_t q
> +    = svmla_lane_f32 (sv_f32 (data.shift), r, negpio2_and_invpio2, 3);
> +  svfloat32_t n = svsub_n_f32_x (pg, q, data.shift);
> +
> +  /* r = |x| - n*(pi/2)  (range reduction into -pi/4 .. pi/4).  */
> +  r = svmla_lane_f32 (r, n, negpio2_and_invpio2, 0);
> +  r = svmla_lane_f32 (r, n, negpio2_and_invpio2, 1);
> +  r = svmla_lane_f32 (r, n, negpio2_and_invpio2, 2);
> +
> +  /* Final multiplicative factor: 1.0 or x depending on bit #0 of q.  */
> +  svfloat32_t f = svtssel_f32 (r, svreinterpret_u32_f32 (q));
> +
> +  /* cos(r) poly approx.  */
> +  svfloat32_t r2 = svtsmul_f32 (r, svreinterpret_u32_f32 (q));
> +  svfloat32_t y = sv_f32 (0.0f);
> +  y = svtmad_f32 (y, r2, 4);
> +  y = svtmad_f32 (y, r2, 3);
> +  y = svtmad_f32 (y, r2, 2);
> +  y = svtmad_f32 (y, r2, 1);
> +  y = svtmad_f32 (y, r2, 0);
> +
> +  /* Apply factor.  */
> +  y = svmul_f32_x (pg, f, y);
> +
> +  if (unlikely (svptest_any (pg, out_of_bounds)))
> +    return special_case (x, y, out_of_bounds);
> +  return y;
>  }
> diff --git a/sysdeps/aarch64/fpu/sv_math.h b/sysdeps/aarch64/fpu/sv_math.h
> new file mode 100644
> index 0000000000..b63a99b24f
> --- /dev/null
> +++ b/sysdeps/aarch64/fpu/sv_math.h
> @@ -0,0 +1,141 @@
> +/* Utilities for SVE libmvec routines.
> +   Copyright (C) 2023 Free Software Foundation, Inc.
> +   This file is part of the GNU C Library.
> +
> +   The GNU C Library is free software; you can redistribute it and/or
> +   modify it under the terms of the GNU Lesser General Public
> +   License as published by the Free Software Foundation; either
> +   version 2.1 of the License, or (at your option) any later version.
> +
> +   The GNU C Library is distributed in the hope that it will be useful,
> +   but WITHOUT ANY WARRANTY; without even the implied warranty of
> +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> +   Lesser General Public License for more details.
> +
> +   You should have received a copy of the GNU Lesser General Public
> +   License along with the GNU C Library; if not, see
> +   <https://www.gnu.org/licenses/>.  */
> +
> +#ifndef SV_MATH_H
> +#define SV_MATH_H
> +
> +#include <arm_sve.h>
> +#include <stdbool.h>
> +
> +#include "vecmath_config.h"
> +
> +#define SV_NAME_F1(fun) _ZGVsMxv_##fun##f
> +#define SV_NAME_D1(fun) _ZGVsMxv_##fun
> +#define SV_NAME_F2(fun) _ZGVsMxvv_##fun##f
> +#define SV_NAME_D2(fun) _ZGVsMxvv_##fun
> +
> +/* Double precision.  */
> +static inline svint64_t
> +sv_s64 (int64_t x)
> +{
> +  return svdup_n_s64 (x);
> +}
> +

It should not really matter for glibc: we use -std=gnu11 and
-fgnu89-inline, glibc does not really support building without optimization,
and I think it is unlikely that these functions will use anything that would
prevent them from being inlined (such as alloca, as noted in the gcc
documentation [1]); but for static inline functions used as macros we tend to
use __always_inline.

And you seem to have removed __always_inline from sve_utils.h.

[1] https://gcc.gnu.org/onlinedocs/gcc/Inline.html
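To illustrate, a minimal sketch using sv_f64 from this header (assuming
__always_inline as provided by <sys/cdefs.h> in glibc):

  #include <arm_sve.h>
  #include <sys/cdefs.h>

  /* Force inlining regardless of optimisation level, as the old
     sve_utils.h helpers did.  */
  static __always_inline svfloat64_t
  sv_f64 (double x)
  {
    return svdup_n_f64 (x);
  }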

> +static inline svuint64_t
> +sv_u64 (uint64_t x)
> +{
> +  return svdup_n_u64 (x);
> +}
> +
> +static inline svfloat64_t
> +sv_f64 (double x)
> +{
> +  return svdup_n_f64 (x);
> +}
> +
> +static inline svfloat64_t
> +sv_call_f64 (double (*f) (double), svfloat64_t x, svfloat64_t y, svbool_t cmp)
> +{
> +  svbool_t p = svpfirst (cmp, svpfalse ());
> +  while (svptest_any (cmp, p))
> +    {
> +      double elem = svclastb_n_f64 (p, 0, x);
> +      elem = (*f) (elem);

Not really required, but you do not need to dereference a function pointer
with gnu11 (you can call it as a normal function).
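For example (a trivial standalone sketch, not specific to this patch):

  /* The function pointer can be called directly.  */
  static double
  call_scalar (double (*f) (double), double x)
  {
    return f (x); /* rather than (*f) (x).  */
  }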

> +      svfloat64_t y2 = svdup_n_f64 (elem);
> +      y = svsel_f64 (p, y2, y);
> +      p = svpnext_b64 (cmp, p);
> +    }
> +  return y;
> +}
> +
> +static inline svfloat64_t
> +sv_call2_f64 (double (*f) (double, double), svfloat64_t x1, svfloat64_t x2,
> +	      svfloat64_t y, svbool_t cmp)
> +{
> +  svbool_t p = svpfirst (cmp, svpfalse ());
> +  while (svptest_any (cmp, p))
> +    {
> +      double elem1 = svclastb_n_f64 (p, 0, x1);
> +      double elem2 = svclastb_n_f64 (p, 0, x2);
> +      double ret = (*f) (elem1, elem2);
> +      svfloat64_t y2 = svdup_n_f64 (ret);
> +      y = svsel_f64 (p, y2, y);
> +      p = svpnext_b64 (cmp, p);
> +    }
> +  return y;
> +}
> +
> +static inline svuint64_t
> +sv_mod_n_u64_x (svbool_t pg, svuint64_t x, uint64_t y)
> +{
> +  svuint64_t q = svdiv_n_u64_x (pg, x, y);
> +  return svmls_n_u64_x (pg, x, q, y);
> +}
> +
> +/* Single precision.  */
> +static inline svint32_t
> +sv_s32 (int32_t x)
> +{
> +  return svdup_n_s32 (x);
> +}
> +
> +static inline svuint32_t
> +sv_u32 (uint32_t x)
> +{
> +  return svdup_n_u32 (x);
> +}
> +
> +static inline svfloat32_t
> +sv_f32 (float x)
> +{
> +  return svdup_n_f32 (x);
> +}
> +
> +static inline svfloat32_t
> +sv_call_f32 (float (*f) (float), svfloat32_t x, svfloat32_t y, svbool_t cmp)
> +{
> +  svbool_t p = svpfirst (cmp, svpfalse ());
> +  while (svptest_any (cmp, p))
> +    {
> +      float elem = svclastb_n_f32 (p, 0, x);
> +      elem = (*f) (elem);
> +      svfloat32_t y2 = svdup_n_f32 (elem);
> +      y = svsel_f32 (p, y2, y);
> +      p = svpnext_b32 (cmp, p);
> +    }
> +  return y;
> +}
> +
> +static inline svfloat32_t
> +sv_call2_f32 (float (*f) (float, float), svfloat32_t x1, svfloat32_t x2,
> +	      svfloat32_t y, svbool_t cmp)
> +{
> +  svbool_t p = svpfirst (cmp, svpfalse ());
> +  while (svptest_any (cmp, p))
> +    {
> +      float elem1 = svclastb_n_f32 (p, 0, x1);
> +      float elem2 = svclastb_n_f32 (p, 0, x2);
> +      float ret = (*f) (elem1, elem2);
> +      svfloat32_t y2 = svdup_n_f32 (ret);
> +      y = svsel_f32 (p, y2, y);
> +      p = svpnext_b32 (cmp, p);
> +    }
> +  return y;
> +}
> +
> +#endif
> diff --git a/sysdeps/aarch64/fpu/sve_utils.h b/sysdeps/aarch64/fpu/sve_utils.h
> deleted file mode 100644
> index 5ce3d2e8d6..0000000000
> --- a/sysdeps/aarch64/fpu/sve_utils.h
> +++ /dev/null
> @@ -1,55 +0,0 @@
> -/* Helpers for SVE vector math functions.
> -
> -   Copyright (C) 2023 Free Software Foundation, Inc.
> -   This file is part of the GNU C Library.
> -
> -   The GNU C Library is free software; you can redistribute it and/or
> -   modify it under the terms of the GNU Lesser General Public
> -   License as published by the Free Software Foundation; either
> -   version 2.1 of the License, or (at your option) any later version.
> -
> -   The GNU C Library is distributed in the hope that it will be useful,
> -   but WITHOUT ANY WARRANTY; without even the implied warranty of
> -   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> -   Lesser General Public License for more details.
> -
> -   You should have received a copy of the GNU Lesser General Public
> -   License along with the GNU C Library; if not, see
> -   <https://www.gnu.org/licenses/>.  */
> -
> -#include <arm_sve.h>
> -
> -#define SV_NAME_F1(fun) _ZGVsMxv_##fun##f
> -#define SV_NAME_D1(fun) _ZGVsMxv_##fun
> -#define SV_NAME_F2(fun) _ZGVsMxvv_##fun##f
> -#define SV_NAME_D2(fun) _ZGVsMxvv_##fun
> -
> -static __always_inline svfloat32_t
> -sv_call_f32 (float (*f) (float), svfloat32_t x, svfloat32_t y, svbool_t cmp)
> -{
> -  svbool_t p = svpfirst (cmp, svpfalse ());
> -  while (svptest_any (cmp, p))
> -    {
> -      float elem = svclastb_n_f32 (p, 0, x);
> -      elem = (*f) (elem);
> -      svfloat32_t y2 = svdup_n_f32 (elem);
> -      y = svsel_f32 (p, y2, y);
> -      p = svpnext_b32 (cmp, p);
> -    }
> -  return y;
> -}
> -
> -static __always_inline svfloat64_t
> -sv_call_f64 (double (*f) (double), svfloat64_t x, svfloat64_t y, svbool_t cmp)
> -{
> -  svbool_t p = svpfirst (cmp, svpfalse ());
> -  while (svptest_any (cmp, p))
> -    {
> -      double elem = svclastb_n_f64 (p, 0, x);
> -      elem = (*f) (elem);
> -      svfloat64_t y2 = svdup_n_f64 (elem);
> -      y = svsel_f64 (p, y2, y);
> -      p = svpnext_b64 (cmp, p);
> -    }
> -  return y;
> -}
> diff --git a/sysdeps/aarch64/fpu/v_math.h b/sysdeps/aarch64/fpu/v_math.h
> new file mode 100644
> index 0000000000..77df815c33
> --- /dev/null
> +++ b/sysdeps/aarch64/fpu/v_math.h
> @@ -0,0 +1,197 @@
> +/* Utilities for Advanced SIMD libmvec routines.
> +   Copyright (C) 2023 Free Software Foundation, Inc.
> +   This file is part of the GNU C Library.
> +
> +   The GNU C Library is free software; you can redistribute it and/or
> +   modify it under the terms of the GNU Lesser General Public
> +   License as published by the Free Software Foundation; either
> +   version 2.1 of the License, or (at your option) any later version.
> +
> +   The GNU C Library is distributed in the hope that it will be useful,
> +   but WITHOUT ANY WARRANTY; without even the implied warranty of
> +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> +   Lesser General Public License for more details.
> +
> +   You should have received a copy of the GNU Lesser General Public
> +   License along with the GNU C Library; if not, see
> +   <https://www.gnu.org/licenses/>.  */
> +
> +#ifndef _V_MATH_H
> +#define _V_MATH_H
> +
> +#include <arm_neon.h>
> +#include "vecmath_config.h"
> +
> +#define VPCS_ATTR __attribute__ ((aarch64_vector_pcs))
> +
> +#define V_NAME_F1(fun) _ZGVnN4v_##fun##f
> +#define V_NAME_D1(fun) _ZGVnN2v_##fun
> +#define V_NAME_F2(fun) _ZGVnN4vv_##fun##f
> +#define V_NAME_D2(fun) _ZGVnN2vv_##fun
> +
> +/* Shorthand helpers for declaring constants.  */
> +#define V2(x)                                                                  \
> +  {                                                                            \
> +    x, x                                                                       \
> +  }
> +
> +#define V4(x)                                                                  \
> +  {                                                                            \
> +    x, x, x, x                                                                 \
> +  }
> +
> +static inline int
> +v_lanes32 (void)
> +{
> +  return 4;
> +}
> +
> +static inline float32x4_t
> +v_f32 (float x)
> +{
> +  return (float32x4_t) V4 (x);
> +}
> +static inline uint32x4_t
> +v_u32 (uint32_t x)
> +{
> +  return (uint32x4_t) V4 (x);
> +}
> +static inline int32x4_t
> +v_s32 (int32_t x)
> +{
> +  return (int32x4_t) V4 (x);
> +}
> +
> +static inline float
> +v_get_f32 (float32x4_t x, int i)
> +{
> +  return x[i];
> +}
> +static inline uint32_t
> +v_get_u32 (uint32x4_t x, int i)
> +{
> +  return x[i];
> +}
> +static inline int32_t
> +v_get_s32 (int32x4_t x, int i)
> +{
> +  return x[i];
> +}
> +
> +static inline void
> +v_set_f32 (float32x4_t *x, int i, float v)
> +{
> +  (*x)[i] = v;
> +}
> +static inline void
> +v_set_u32 (uint32x4_t *x, int i, uint32_t v)
> +{
> +  (*x)[i] = v;
> +}
> +static inline void
> +v_set_s32 (int32x4_t *x, int i, int32_t v)
> +{
> +  (*x)[i] = v;
> +}
> +
> +/* true if any elements of a vector compare result is non-zero.  */
> +static inline int
> +v_any_u32 (uint32x4_t x)
> +{
> +  /* assume elements in x are either 0 or -1u.  */
> +  return vpaddd_u64 (vreinterpretq_u64_u32 (x)) != 0;
> +}
> +static inline float32x4_t
> +v_lookup_f32 (const float *tab, uint32x4_t idx)
> +{
> +  return (float32x4_t){ tab[idx[0]], tab[idx[1]], tab[idx[2]], tab[idx[3]] };
> +}
> +static inline uint32x4_t
> +v_lookup_u32 (const uint32_t *tab, uint32x4_t idx)
> +{
> +  return (uint32x4_t){ tab[idx[0]], tab[idx[1]], tab[idx[2]], tab[idx[3]] };
> +}
> +static inline float32x4_t
> +v_call_f32 (float (*f) (float), float32x4_t x, float32x4_t y, uint32x4_t p)
> +{
> +  return (float32x4_t){ p[0] ? f (x[0]) : y[0], p[1] ? f (x[1]) : y[1],
> +			p[2] ? f (x[2]) : y[2], p[3] ? f (x[3]) : y[3] };
> +}
> +static inline float32x4_t
> +v_call2_f32 (float (*f) (float, float), float32x4_t x1, float32x4_t x2,
> +	     float32x4_t y, uint32x4_t p)
> +{
> +  return (float32x4_t){ p[0] ? f (x1[0], x2[0]) : y[0],
> +			p[1] ? f (x1[1], x2[1]) : y[1],
> +			p[2] ? f (x1[2], x2[2]) : y[2],
> +			p[3] ? f (x1[3], x2[3]) : y[3] };
> +}
> +
> +static inline int
> +v_lanes64 (void)
> +{
> +  return 2;
> +}
> +static inline float64x2_t
> +v_f64 (double x)
> +{
> +  return (float64x2_t) V2 (x);
> +}
> +static inline uint64x2_t
> +v_u64 (uint64_t x)
> +{
> +  return (uint64x2_t) V2 (x);
> +}
> +static inline int64x2_t
> +v_s64 (int64_t x)
> +{
> +  return (int64x2_t) V2 (x);
> +}
> +static inline double
> +v_get_f64 (float64x2_t x, int i)
> +{
> +  return x[i];
> +}
> +static inline void
> +v_set_f64 (float64x2_t *x, int i, double v)
> +{
> +  (*x)[i] = v;
> +}
> +/* true if any elements of a vector compare result is non-zero.  */
> +static inline int
> +v_any_u64 (uint64x2_t x)
> +{
> +  /* assume elements in x are either 0 or -1u.  */
> +  return vpaddd_u64 (x) != 0;
> +}
> +/* true if all elements of a vector compare result is 1.  */
> +static inline int
> +v_all_u64 (uint64x2_t x)
> +{
> +  /* assume elements in x are either 0 or -1u.  */
> +  return vpaddd_s64 (vreinterpretq_s64_u64 (x)) == -2;
> +}
> +static inline float64x2_t
> +v_lookup_f64 (const double *tab, uint64x2_t idx)
> +{
> +  return (float64x2_t){ tab[idx[0]], tab[idx[1]] };
> +}
> +static inline uint64x2_t
> +v_lookup_u64 (const uint64_t *tab, uint64x2_t idx)
> +{
> +  return (uint64x2_t){ tab[idx[0]], tab[idx[1]] };
> +}
> +static inline float64x2_t
> +v_call_f64 (double (*f) (double), float64x2_t x, float64x2_t y, uint64x2_t p)
> +{
> +  return (float64x2_t){ p[0] ? f (x[0]) : y[0], p[1] ? f (x[1]) : y[1] };
> +}
> +static inline float64x2_t
> +v_call2_f64 (double (*f) (double, double), float64x2_t x1, float64x2_t x2,
> +	     float64x2_t y, uint64x2_t p)
> +{
> +  return (float64x2_t){ p[0] ? f (x1[0], x2[0]) : y[0],
> +			p[1] ? f (x1[1], x2[1]) : y[1] };
> +}
> +
> +#endif
> diff --git a/sysdeps/aarch64/fpu/advsimd_utils.h b/sysdeps/aarch64/fpu/vecmath_config.h
> similarity index 57%
> rename from sysdeps/aarch64/fpu/advsimd_utils.h
> rename to sysdeps/aarch64/fpu/vecmath_config.h
> index 8a0fcc0e06..c8f45af63b 100644
> --- a/sysdeps/aarch64/fpu/advsimd_utils.h
> +++ b/sysdeps/aarch64/fpu/vecmath_config.h
> @@ -1,5 +1,4 @@
> -/* Helpers for Advanced SIMD vector math functions.
> -
> +/* Configuration for libmvec routines.
>     Copyright (C) 2023 Free Software Foundation, Inc.
>     This file is part of the GNU C Library.
>  
> @@ -17,23 +16,18 @@
>     License along with the GNU C Library; if not, see
>     <https://www.gnu.org/licenses/>.  */
>  
> -#include <arm_neon.h>
> +#ifndef _VECMATH_CONFIG_H
> +#define _VECMATH_CONFIG_H
>  
> -#define VPCS_ATTR __attribute__ ((aarch64_vector_pcs))
> +#include <math.h>
>  
> -#define V_NAME_F1(fun) _ZGVnN4v_##fun##f
> -#define V_NAME_D1(fun) _ZGVnN2v_##fun
> -#define V_NAME_F2(fun) _ZGVnN4vv_##fun##f
> -#define V_NAME_D2(fun) _ZGVnN2vv_##fun
> +#define NOINLINE __attribute__ ((noinline))
> +#define likely(x) __glibc_likely (x)
> +#define unlikely(x) __glibc_unlikely (x)

Do we really need to replicate these macros in different headers? Even in AOR
they are replicated in multiple places.

>  
> -static __always_inline float32x4_t
> -v_call_f32 (float (*f) (float), float32x4_t x)
> -{
> -  return (float32x4_t){ f (x[0]), f (x[1]), f (x[2]), f (x[3]) };
> -}
> +/* Deprecated config option from Arm Optimized Routines which ensures
> +   fp exceptions are correctly triggered. This is not intended to be
> +   supported in GLIBC, however we keep it for ease of development.  */
> +#define WANT_SIMD_EXCEPT 0
>  
> -static __always_inline float64x2_t
> -v_call_f64 (double (*f) (double), float64x2_t x)
> -{
> -  return (float64x2_t){ f (x[0]), f (x[1]) };
> -}
> +#endif
> diff --git a/sysdeps/aarch64/libm-test-ulps b/sysdeps/aarch64/libm-test-ulps
> index da7c64942c..07da4ab843 100644
> --- a/sysdeps/aarch64/libm-test-ulps
> +++ b/sysdeps/aarch64/libm-test-ulps
> @@ -642,7 +642,7 @@ float: 1
>  ldouble: 2
>  
>  Function: "cos_advsimd":
> -double: 1
> +double: 2
>  float: 1
>  
>  Function: "cos_downward":

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH 2/4] aarch64: Add vector implementations of sin routines
  2023-06-08 13:39 ` [PATCH 2/4] aarch64: Add vector implementations of sin routines Joe Ramsay
@ 2023-06-13 18:16   ` Adhemerval Zanella Netto
  0 siblings, 0 replies; 11+ messages in thread
From: Adhemerval Zanella Netto @ 2023-06-13 18:16 UTC (permalink / raw)
  To: libc-alpha



On 08/06/23 10:39, Joe Ramsay via Libc-alpha wrote:
> Optimised implementations for single and double precision, Advanced
> SIMD and SVE, copied from Arm Optimized Routines. Also allow
> certain tests to be skipped for mathvec routines, for example both
> AdvSIMD algorithms discard the sign of 0.
> ---
>  math/auto-libm-test-out-sin                   |   4 +-
>  math/gen-libm-test.py                         |   3 +-
>  sysdeps/aarch64/fpu/Makefile                  |   8 +-
>  sysdeps/aarch64/fpu/Versions                  |   4 +
>  sysdeps/aarch64/fpu/bits/math-vector.h        |   6 ++
>  sysdeps/aarch64/fpu/sin_advsimd.c             | 100 ++++++++++++++++++
>  sysdeps/aarch64/fpu/sin_sve.c                 |  96 +++++++++++++++++
>  sysdeps/aarch64/fpu/sinf_advsimd.c            |  93 ++++++++++++++++
>  sysdeps/aarch64/fpu/sinf_sve.c                |  92 ++++++++++++++++
>  sysdeps/aarch64/fpu/sv_horner_wrap.h          |  55 ++++++++++
>  sysdeps/aarch64/fpu/sv_hornerf.h              |  24 +++++
>  .../fpu/test-double-advsimd-wrappers.c        |   1 +
>  .../aarch64/fpu/test-double-sve-wrappers.c    |   1 +
>  .../aarch64/fpu/test-float-advsimd-wrappers.c |   1 +
>  sysdeps/aarch64/fpu/test-float-sve-wrappers.c |   1 +
>  sysdeps/aarch64/libm-test-ulps                |   8 ++
>  .../unix/sysv/linux/aarch64/libmvec.abilist   |   4 +
>  17 files changed, 494 insertions(+), 7 deletions(-)
>  create mode 100644 sysdeps/aarch64/fpu/sin_advsimd.c
>  create mode 100644 sysdeps/aarch64/fpu/sin_sve.c
>  create mode 100644 sysdeps/aarch64/fpu/sinf_advsimd.c
>  create mode 100644 sysdeps/aarch64/fpu/sinf_sve.c
>  create mode 100644 sysdeps/aarch64/fpu/sv_horner_wrap.h
>  create mode 100644 sysdeps/aarch64/fpu/sv_hornerf.h
> 
> diff --git a/math/auto-libm-test-out-sin b/math/auto-libm-test-out-sin
> index f1d21b179c..27ccaff1aa 100644
> --- a/math/auto-libm-test-out-sin
> +++ b/math/auto-libm-test-out-sin
> @@ -25,11 +25,11 @@ sin 0
>  = sin upward ibm128 0x0p+0 : 0x0p+0 : inexact-ok
>  sin -0
>  = sin downward binary32 -0x0p+0 : -0x0p+0 : inexact-ok
> -= sin tonearest binary32 -0x0p+0 : -0x0p+0 : inexact-ok
> += sin tonearest binary32 -0x0p+0 : -0x0p+0 : inexact-ok no-mathvec
>  = sin towardzero binary32 -0x0p+0 : -0x0p+0 : inexact-ok
>  = sin upward binary32 -0x0p+0 : -0x0p+0 : inexact-ok
>  = sin downward binary64 -0x0p+0 : -0x0p+0 : inexact-ok
> -= sin tonearest binary64 -0x0p+0 : -0x0p+0 : inexact-ok
> += sin tonearest binary64 -0x0p+0 : -0x0p+0 : inexact-ok no-mathvec
>  = sin towardzero binary64 -0x0p+0 : -0x0p+0 : inexact-ok
>  = sin upward binary64 -0x0p+0 : -0x0p+0 : inexact-ok
>  = sin downward intel96 -0x0p+0 : -0x0p+0 : inexact-ok
> diff --git a/math/gen-libm-test.py b/math/gen-libm-test.py
> index 6ae78beb01..a573c3b8cb 100755
> --- a/math/gen-libm-test.py
> +++ b/math/gen-libm-test.py
> @@ -93,7 +93,8 @@ BEAUTIFY_MAP = {'minus_zero': '-0',
>  
>  # Flags in auto-libm-test-out that map directly to C flags.
>  FLAGS_SIMPLE = {'ignore-zero-inf-sign': 'IGNORE_ZERO_INF_SIGN',
> -                'xfail': 'XFAIL_TEST'}
> +                'xfail': 'XFAIL_TEST',
> +                'no-mathvec': 'NO_TEST_MATHVEC'}
>  
>  # Exceptions in auto-libm-test-out, and their corresponding C flags
>  # for being required, OK or required to be absent.
> diff --git a/sysdeps/aarch64/fpu/Makefile b/sysdeps/aarch64/fpu/Makefile
> index 850cfb9012..b3285542ea 100644
> --- a/sysdeps/aarch64/fpu/Makefile
> +++ b/sysdeps/aarch64/fpu/Makefile
> @@ -1,10 +1,10 @@
> -float-advsimd-funcs = cos
> +float-advsimd-funcs = cos sin

I think it would be good to follow the current practice of one target per
line:

float-advsimd-funcs = \
  cos \
  sin \
  # float-advsimd-funcs

>  
> -double-advsimd-funcs = cos
> +double-advsimd-funcs = cos sin
>  
> -float-sve-funcs = cos
> +float-sve-funcs = cos sin
>  
> -double-sve-funcs = cos
> +double-sve-funcs = cos sin
>  
>  ifeq ($(subdir),mathvec)
>  libmvec-support = $(addsuffix f_advsimd,$(float-advsimd-funcs)) \
> diff --git a/sysdeps/aarch64/fpu/Versions b/sysdeps/aarch64/fpu/Versions
> index 5222a6f180..d26b3968a9 100644
> --- a/sysdeps/aarch64/fpu/Versions
> +++ b/sysdeps/aarch64/fpu/Versions
> @@ -1,8 +1,12 @@
>  libmvec {
>    GLIBC_2.38 {
>      _ZGVnN2v_cos;
> +    _ZGVnN2v_sin;
>      _ZGVnN4v_cosf;
> +    _ZGVnN4v_sinf;
>      _ZGVsMxv_cos;
>      _ZGVsMxv_cosf;
> +    _ZGVsMxv_sin;
> +    _ZGVsMxv_sinf;
>    }
>  }
> diff --git a/sysdeps/aarch64/fpu/bits/math-vector.h b/sysdeps/aarch64/fpu/bits/math-vector.h
> index a2f2277591..ad9c9945e8 100644
> --- a/sysdeps/aarch64/fpu/bits/math-vector.h
> +++ b/sysdeps/aarch64/fpu/bits/math-vector.h
> @@ -50,7 +50,10 @@ typedef __SVBool_t __sv_bool_t;
>  #  define __vpcs __attribute__ ((__aarch64_vector_pcs__))
>  
>  __vpcs __f32x4_t _ZGVnN4v_cosf (__f32x4_t);
> +__vpcs __f32x4_t _ZGVnN4v_sinf (__f32x4_t);
> +
>  __vpcs __f64x2_t _ZGVnN2v_cos (__f64x2_t);
> +__vpcs __f64x2_t _ZGVnN2v_sin (__f64x2_t);
>  
>  #  undef __ADVSIMD_VEC_MATH_SUPPORTED
>  #endif /* __ADVSIMD_VEC_MATH_SUPPORTED */
> @@ -58,7 +61,10 @@ __vpcs __f64x2_t _ZGVnN2v_cos (__f64x2_t);
>  #ifdef __SVE_VEC_MATH_SUPPORTED
>  
>  __sv_f32_t _ZGVsMxv_cosf (__sv_f32_t, __sv_bool_t);
> +__sv_f32_t _ZGVsMxv_sinf (__sv_f32_t, __sv_bool_t);
> +
>  __sv_f64_t _ZGVsMxv_cos (__sv_f64_t, __sv_bool_t);
> +__sv_f64_t _ZGVsMxv_sin (__sv_f64_t, __sv_bool_t);
>  
>  #  undef __SVE_VEC_MATH_SUPPORTED
>  #endif /* __SVE_VEC_MATH_SUPPORTED */
> diff --git a/sysdeps/aarch64/fpu/sin_advsimd.c b/sysdeps/aarch64/fpu/sin_advsimd.c
> new file mode 100644
> index 0000000000..1206a5d760
> --- /dev/null
> +++ b/sysdeps/aarch64/fpu/sin_advsimd.c
> @@ -0,0 +1,100 @@
> +/* Double-precision vector (Advanced SIMD) sin function.
> +
> +   Copyright (C) 2023 Free Software Foundation, Inc.
> +   This file is part of the GNU C Library.
> +
> +   The GNU C Library is free software; you can redistribute it and/or
> +   modify it under the terms of the GNU Lesser General Public
> +   License as published by the Free Software Foundation; either
> +   version 2.1 of the License, or (at your option) any later version.
> +
> +   The GNU C Library is distributed in the hope that it will be useful,
> +   but WITHOUT ANY WARRANTY; without even the implied warranty of
> +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> +   Lesser General Public License for more details.
> +
> +   You should have received a copy of the GNU Lesser General Public
> +   License along with the GNU C Library; if not, see
> +   <https://www.gnu.org/licenses/>.  */
> +
> +#include "v_math.h"
> +
> +static const volatile struct

Why do you need volatile here?

> +{
> +  float64x2_t poly[7];
> +  float64x2_t range_val, inv_pi, shift, pi_1, pi_2, pi_3;
> +} data = {
> +  /* Worst-case error is 2.8 ulp in [-pi/2, pi/2].  */
> +  .poly = { V2 (-0x1.555555555547bp-3), V2 (0x1.1111111108a4dp-7),
> +	    V2 (-0x1.a01a019936f27p-13), V2 (0x1.71de37a97d93ep-19),
> +	    V2 (-0x1.ae633919987c6p-26), V2 (0x1.60e277ae07cecp-33),
> +	    V2 (-0x1.9e9540300a1p-41) },
> +
> +  .range_val = V2 (0x1p23),
> +  .inv_pi = V2 (0x1.45f306dc9c883p-2),
> +  .pi_1 = V2 (0x1.921fb54442d18p+1),
> +  .pi_2 = V2 (0x1.1a62633145c06p-53),
> +  .pi_3 = V2 (0x1.c1cd129024e09p-106),
> +  .shift = V2 (0x1.8p52),
> +};
> +
> +#if WANT_SIMD_EXCEPT
> +# define TinyBound v_u64 (0x3000000000000000) /* asuint64 (0x1p-255).  */
> +# define Thresh v_u64 (0x1160000000000000)    /* RangeVal - TinyBound.  */
> +#endif
> +
> +#define C(i) data.poly[i]
> +
> +static float64x2_t VPCS_ATTR NOINLINE
> +special_case (float64x2_t x, float64x2_t y, uint64x2_t odd, uint64x2_t cmp)
> +{
> +  y = vreinterpretq_f64_u64 (veorq_u64 (vreinterpretq_u64_f64 (y), odd));
> +  return v_call_f64 (sin, x, y, cmp);
> +}
> +
> +float64x2_t VPCS_ATTR V_NAME_D1 (sin) (float64x2_t x)
> +{
> +  float64x2_t n, r, r2, r3, r4, y, t1, t2, t3;
> +  uint64x2_t odd, cmp;
> +
> +#if WANT_SIMD_EXCEPT
> +  /* Detect |x| <= TinyBound or |x| >= RangeVal. If fenv exceptions are to be
> +     triggered correctly, set any special lanes to 1 (which is neutral w.r.t.
> +     fenv). These lanes will be fixed by special-case handler later.  */
> +  uint64x2_t ir = vreinterpretq_u64_f64 (vabsq_f64 (x));
> +  cmp = vcgeq_u64 (vsubq_u64 (ir, TinyBound), Thresh);
> +  r = vbslq_f64 (cmp, vreinterpretq_f64_u64 (cmp), x);
> +#else
> +  r = x;
> +  cmp = vcageq_f64 (data.range_val, x);
> +  cmp = vceqzq_u64 (cmp); /* cmp = ~cmp.  */
> +#endif
> +
> +  /* n = rint(|x|/pi).  */
> +  n = vfmaq_f64 (data.shift, data.inv_pi, r);
> +  odd = vshlq_n_u64 (vreinterpretq_u64_f64 (n), 63);
> +  n = vsubq_f64 (n, data.shift);
> +
> +  /* r = |x| - n*pi  (range reduction into -pi/2 .. pi/2).  */
> +  r = vfmsq_f64 (r, data.pi_1, n);
> +  r = vfmsq_f64 (r, data.pi_2, n);
> +  r = vfmsq_f64 (r, data.pi_3, n);
> +
> +  /* sin(r) poly approx.  */
> +  r2 = vmulq_f64 (r, r);
> +  r3 = vmulq_f64 (r2, r);
> +  r4 = vmulq_f64 (r2, r2);
> +
> +  t1 = vfmaq_f64 (C (4), C (5), r2);
> +  t2 = vfmaq_f64 (C (2), C (3), r2);
> +  t3 = vfmaq_f64 (C (0), C (1), r2);
> +
> +  y = vfmaq_f64 (t1, C (6), r4);
> +  y = vfmaq_f64 (t2, y, r4);
> +  y = vfmaq_f64 (t3, y, r4);
> +  y = vfmaq_f64 (r, y, r3);
> +
> +  if (unlikely (v_any_u64 (cmp)))
> +    return special_case (x, y, odd, cmp);
> +  return vreinterpretq_f64_u64 (veorq_u64 (vreinterpretq_u64_f64 (y), odd));
> +}
> diff --git a/sysdeps/aarch64/fpu/sin_sve.c b/sysdeps/aarch64/fpu/sin_sve.c
> new file mode 100644
> index 0000000000..3750700759
> --- /dev/null
> +++ b/sysdeps/aarch64/fpu/sin_sve.c
> @@ -0,0 +1,96 @@
> +/* Double-precision vector (SVE) sin function.
> +
> +   Copyright (C) 2023 Free Software Foundation, Inc.
> +   This file is part of the GNU C Library.
> +
> +   The GNU C Library is free software; you can redistribute it and/or
> +   modify it under the terms of the GNU Lesser General Public
> +   License as published by the Free Software Foundation; either
> +   version 2.1 of the License, or (at your option) any later version.
> +
> +   The GNU C Library is distributed in the hope that it will be useful,
> +   but WITHOUT ANY WARRANTY; without even the implied warranty of
> +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> +   Lesser General Public License for more details.
> +
> +   You should have received a copy of the GNU Lesser General Public
> +   License along with the GNU C Library; if not, see
> +   <https://www.gnu.org/licenses/>.  */
> +
> +#include "sv_math.h"
> +
> +static struct

Add const here.

> +{
> +  double inv_pi, half_pi, inv_pi_over_2, pi_over_2_1, pi_over_2_2, pi_over_2_3,
> +      shift;
> +} data = {
> +  /* Polynomial coefficients are hard-wired in the FTMAD instruction.  */
> +  .inv_pi = 0x1.45f306dc9c883p-2,
> +  .half_pi = 0x1.921fb54442d18p+0,
> +  .inv_pi_over_2 = 0x1.45f306dc9c882p-1,
> +  .pi_over_2_1 = 0x1.921fb50000000p+0,
> +  .pi_over_2_2 = 0x1.110b460000000p-26,
> +  .pi_over_2_3 = 0x1.1a62633145c07p-54,
> +  .shift = 0x1.8p52
> +};
> +
> +#define RangeVal 0x4160000000000000 /* asuint64 (0x1p23).  */
> +
> +static svfloat64_t NOINLINE
> +special_case (svfloat64_t x, svfloat64_t y, svbool_t cmp)
> +{
> +  return sv_call_f64 (sin, x, y, cmp);
> +}
> +
> +/* A fast SVE implementation of sin based on trigonometric
> +   instructions (FTMAD, FTSSEL, FTSMUL).
> +   Maximum observed error in 2.52 ULP:
> +   SV_NAME_D1 (sin)(0x1.2d2b00df69661p+19) got 0x1.10ace8f3e786bp-40
> +					  want 0x1.10ace8f3e7868p-40.  */
> +svfloat64_t SV_NAME_D1 (sin) (svfloat64_t x, const svbool_t pg)
> +{
> +  svfloat64_t r = svabs_f64_x (pg, x);
> +  svuint64_t sign
> +      = sveor_u64_x (pg, svreinterpret_u64_f64 (x), svreinterpret_u64_f64 (r));
> +  svbool_t cmp = svcmpge_n_u64 (pg, svreinterpret_u64_f64 (r), RangeVal);
> +
> +  /* Load first two pio2-related constants to one vector.  */
> +  svfloat64_t invpio2_and_pio2_1
> +      = svld1rq_f64 (svptrue_b64 (), &data.inv_pi_over_2);
> +
> +  /* n = rint(|x|/(pi/2)).  */
> +  svfloat64_t q
> +      = svmla_lane_f64 (sv_f64 (data.shift), r, invpio2_and_pio2_1, 0);
> +  svfloat64_t n = svsub_n_f64_x (pg, q, data.shift);
> +
> +  /* r = |x| - n*(pi/2)  (range reduction into -pi/4 .. pi/4).  */
> +  r = svmls_lane_f64 (r, n, invpio2_and_pio2_1, 1);
> +  r = svmls_n_f64_x (pg, r, n, data.pi_over_2_2);
> +  r = svmls_n_f64_x (pg, r, n, data.pi_over_2_3);
> +
> +  /* Final multiplicative factor: 1.0 or x depending on bit #0 of q.  */
> +  svfloat64_t f = svtssel_f64 (r, svreinterpret_u64_f64 (q));
> +
> +  /* sin(r) poly approx.  */
> +  svfloat64_t r2 = svtsmul_f64 (r, svreinterpret_u64_f64 (q));
> +  svfloat64_t y = sv_f64 (0.0);
> +  y = svtmad_f64 (y, r2, 7);
> +  y = svtmad_f64 (y, r2, 6);
> +  y = svtmad_f64 (y, r2, 5);
> +  y = svtmad_f64 (y, r2, 4);
> +  y = svtmad_f64 (y, r2, 3);
> +  y = svtmad_f64 (y, r2, 2);
> +  y = svtmad_f64 (y, r2, 1);
> +  y = svtmad_f64 (y, r2, 0);
> +
> +  /* Apply factor.  */
> +  y = svmul_f64_x (pg, f, y);
> +
> +  /* sign = y^sign.  */
> +  y = svreinterpret_f64_u64 (
> +      sveor_u64_x (pg, svreinterpret_u64_f64 (y), sign));
> +
> +  if (unlikely (svptest_any (pg, cmp)))
> +    return special_case (x, y, cmp);
> +  return y;
> +}
> diff --git a/sysdeps/aarch64/fpu/sinf_advsimd.c b/sysdeps/aarch64/fpu/sinf_advsimd.c
> new file mode 100644
> index 0000000000..6267594000
> --- /dev/null
> +++ b/sysdeps/aarch64/fpu/sinf_advsimd.c
> @@ -0,0 +1,93 @@
> +/* Single-precision vector (Advanced SIMD) sin function.
> +
> +   Copyright (C) 2023 Free Software Foundation, Inc.
> +   This file is part of the GNU C Library.
> +
> +   The GNU C Library is free software; you can redistribute it and/or
> +   modify it under the terms of the GNU Lesser General Public
> +   License as published by the Free Software Foundation; either
> +   version 2.1 of the License, or (at your option) any later version.
> +
> +   The GNU C Library is distributed in the hope that it will be useful,
> +   but WITHOUT ANY WARRANTY; without even the implied warranty of
> +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> +   Lesser General Public License for more details.
> +
> +   You should have received a copy of the GNU Lesser General Public
> +   License along with the GNU C Library; if not, see
> +   <https://www.gnu.org/licenses/>.  */
> +
> +#include "v_math.h"
> +
> +static const volatile struct

Ditto.

> +{
> +  float32x4_t poly[4];
> +  float32x4_t range_val, inv_pi, shift, pi_1, pi_2, pi_3;
> +} data = {
> +  /* 1.886 ulp error.  */
> +  .poly = { V4 (-0x1.555548p-3f), V4 (0x1.110df4p-7f), V4 (-0x1.9f42eap-13f),
> +	    V4 (0x1.5b2e76p-19f) },
> +
> +  .pi_1 = V4 (0x1.921fb6p+1f),
> +  .pi_2 = V4 (-0x1.777a5cp-24f),
> +  .pi_3 = V4 (-0x1.ee59dap-49f),
> +
> +  .inv_pi = V4 (0x1.45f306p-2f),
> +  .shift = V4 (0x1.8p+23f),
> +  .range_val = V4 (0x1p20f)
> +};
> +
> +#if WANT_SIMD_EXCEPT
> +# define TinyBound v_u32 (0x21000000) /* asuint32(0x1p-61f).  */
> +# define Thresh v_u32 (0x28800000)    /* RangeVal - TinyBound.  */
> +#endif
> +
> +#define C(i) data.poly[i]
> +
> +static float32x4_t VPCS_ATTR NOINLINE
> +special_case (float32x4_t x, float32x4_t y, uint32x4_t odd, uint32x4_t cmp)
> +{
> +  /* Fall back to scalar code.  */
> +  y = vreinterpretq_f32_u32 (veorq_u32 (vreinterpretq_u32_f32 (y), odd));
> +  return v_call_f32 (sinf, x, y, cmp);
> +}
> +
> +float32x4_t VPCS_ATTR V_NAME_F1 (sin) (float32x4_t x)
> +{
> +  float32x4_t n, r, r2, y;
> +  uint32x4_t odd, cmp;
> +
> +#if WANT_SIMD_EXCEPT
> +  uint32x4_t ir = vreinterpretq_u32_f32 (vabsq_f32 (x));
> +  cmp = vcgeq_u32 (vsubq_u32 (ir, TinyBound), Thresh);
> +  /* If fenv exceptions are to be triggered correctly, set any special lanes
> +     to 1 (which is neutral w.r.t. fenv). These lanes will be fixed by
> +     special-case handler later.  */
> +  r = vbslq_f32 (cmp, vreinterpretq_f32_u32 (cmp), x);
> +#else
> +  r = x;
> +  cmp = vcageq_f32 (data.range_val, x);
> +  cmp = vceqzq_u32 (cmp); /* cmp = ~cmp.  */
> +#endif
> +
> +  /* n = rint(|x|/pi) */
> +  n = vfmaq_f32 (data.shift, data.inv_pi, r);
> +  odd = vshlq_n_u32 (vreinterpretq_u32_f32 (n), 31);
> +  n = vsubq_f32 (n, data.shift);
> +
> +  /* r = |x| - n*pi  (range reduction into -pi/2 .. pi/2) */
> +  r = vfmsq_f32 (r, data.pi_1, n);
> +  r = vfmsq_f32 (r, data.pi_2, n);
> +  r = vfmsq_f32 (r, data.pi_3, n);
> +
> +  /* y = sin(r) */
> +  r2 = vmulq_f32 (r, r);
> +  y = vfmaq_f32 (C (2), C (3), r2);
> +  y = vfmaq_f32 (C (1), y, r2);
> +  y = vfmaq_f32 (C (0), y, r2);
> +  y = vfmaq_f32 (r, vmulq_f32 (y, r2), r);
> +
> +  if (unlikely (v_any_u32 (cmp)))
> +    return special_case (x, y, odd, cmp);
> +  return vreinterpretq_f32_u32 (veorq_u32 (vreinterpretq_u32_f32 (y), odd));
> +}
> diff --git a/sysdeps/aarch64/fpu/sinf_sve.c b/sysdeps/aarch64/fpu/sinf_sve.c
> new file mode 100644
> index 0000000000..4159d90534
> --- /dev/null
> +++ b/sysdeps/aarch64/fpu/sinf_sve.c
> @@ -0,0 +1,92 @@
> +/* Single-precision vector (SVE) sin function.
> +
> +   Copyright (C) 2023 Free Software Foundation, Inc.
> +   This file is part of the GNU C Library.
> +
> +   The GNU C Library is free software; you can redistribute it and/or
> +   modify it under the terms of the GNU Lesser General Public
> +   License as published by the Free Software Foundation; either
> +   version 2.1 of the License, or (at your option) any later version.
> +
> +   The GNU C Library is distributed in the hope that it will be useful,
> +   but WITHOUT ANY WARRANTY; without even the implied warranty of
> +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> +   Lesser General Public License for more details.
> +
> +   You should have received a copy of the GNU Lesser General Public
> +   License along with the GNU C Library; if not, see
> +   <https://www.gnu.org/licenses/>.  */
> +
> +#include "sv_math.h"
> +#include "sv_hornerf.h"
> +
> +static struct

Add const here.

> +{
> +  float poly[4];
> +  /* Pi-related values to be loaded as one quad-word and used with
> +     svmla_lane_f32.  */
> +  float negpi1, negpi2, negpi3, invpi;
> +  float shift;
> +} data = {
> +  .poly = {
> +    /* Non-zero coefficients from the degree 9 Taylor series expansion of
> +       sin.  */
> +    -0x1.555548p-3f, 0x1.110df4p-7f, -0x1.9f42eap-13f, 0x1.5b2e76p-19f
> +  },
> +  .negpi1 = -0x1.921fb6p+1f,
> +  .negpi2 = 0x1.777a5cp-24f,
> +  .negpi3 = 0x1.ee59dap-49f,
> +  .invpi = 0x1.45f306p-2f,
> +  .shift = 0x1.8p+23f
> +};
> +
> +#define RangeVal 0x49800000 /* asuint32 (0x1p20f).  */
> +#define C(i) data.poly[i]
> +
> +static svfloat32_t NOINLINE
> +special_case (svfloat32_t x, svfloat32_t y, svbool_t cmp)
> +{
> +  return sv_call_f32 (sinf, x, y, cmp);
> +}
> +
> +/* A fast SVE implementation of sinf.
> +   Maximum error: 1.89 ULPs.
> +   This maximum error is achieved at multiple values in [-2^18, 2^18]
> +   but one example is:
> +   SV_NAME_F1 (sin)(0x1.9247a4p+0) got 0x1.fffff6p-1 want 0x1.fffffap-1.  */
> +svfloat32_t SV_NAME_F1 (sin) (svfloat32_t x, const svbool_t pg)
> +{
> +  svfloat32_t ax = svabs_f32_x (pg, x);
> +  svuint32_t sign = sveor_u32_x (pg, svreinterpret_u32_f32 (x),
> +				 svreinterpret_u32_f32 (ax));
> +  svbool_t cmp = svcmpge_n_u32 (pg, svreinterpret_u32_f32 (ax), RangeVal);
> +
> +  /* pi_vals are a quad-word of helper values - the first 3 elements contain
> +     -pi in extended precision, the last contains 1 / pi.  */
> +  svfloat32_t pi_vals = svld1rq_f32 (svptrue_b32 (), &data.negpi1);
> +
> +  /* n = rint(|x|/pi).  */
> +  svfloat32_t n = svmla_lane_f32 (sv_f32 (data.shift), ax, pi_vals, 3);
> +  svuint32_t odd = svlsl_n_u32_x (pg, svreinterpret_u32_f32 (n), 31);
> +  n = svsub_n_f32_x (pg, n, data.shift);
> +
> +  /* r = |x| - n*pi  (range reduction into -pi/2 .. pi/2).  */
> +  svfloat32_t r;
> +  r = svmla_lane_f32 (ax, n, pi_vals, 0);
> +  r = svmla_lane_f32 (r, n, pi_vals, 1);
> +  r = svmla_lane_f32 (r, n, pi_vals, 2);
> +
> +  /* sin(r) approx using a degree 9 polynomial from the Taylor series
> +     expansion. Note that only the odd terms of this are non-zero.  */
> +  svfloat32_t r2 = svmul_f32_x (pg, r, r);
> +  svfloat32_t y = HORNER_3 (pg, r2, C);
> +  y = svmla_f32_x (pg, r, r, svmul_f32_x (pg, y, r2));
> +
> +  /* sign = y^sign^odd.  */
> +  y = svreinterpret_f32_u32 (sveor_u32_x (pg, svreinterpret_u32_f32 (y),
> +					  sveor_u32_x (pg, sign, odd)));
> +
> +  if (unlikely (svptest_any (pg, cmp)))
> +    return special_case (x, y, cmp);
> +  return y;
> +}
> diff --git a/sysdeps/aarch64/fpu/sv_horner_wrap.h b/sysdeps/aarch64/fpu/sv_horner_wrap.h
> new file mode 100644
> index 0000000000..142a06d5c4
> --- /dev/null
> +++ b/sysdeps/aarch64/fpu/sv_horner_wrap.h
> @@ -0,0 +1,55 @@
> +/* Helper macros for Horner polynomial evaluation in SVE routines.
> +
> +   Copyright (C) 2023 Free Software Foundation, Inc.
> +   This file is part of the GNU C Library.
> +
> +   The GNU C Library is free software; you can redistribute it and/or
> +   modify it under the terms of the GNU Lesser General Public
> +   License as published by the Free Software Foundation; either
> +   version 2.1 of the License, or (at your option) any later version.
> +
> +   The GNU C Library is distributed in the hope that it will be useful,
> +   but WITHOUT ANY WARRANTY; without even the implied warranty of
> +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> +   Lesser General Public License for more details.
> +
> +   You should have received a copy of the GNU Lesser General Public
> +   License along with the GNU C Library; if not, see
> +   <https://www.gnu.org/licenses/>.  */
> +
> +#define HORNER_1_(pg, x, c, i) FMA (pg, VECTOR (c (i + 1)), x, VECTOR (c (i)))
> +#define HORNER_2_(pg, x, c, i)                                                \
> +  FMA (pg, HORNER_1_ (pg, x, c, i + 1), x, VECTOR (c (i)))
> +#define HORNER_3_(pg, x, c, i)                                                \
> +  FMA (pg, HORNER_2_ (pg, x, c, i + 1), x, VECTOR (c (i)))
> +#define HORNER_4_(pg, x, c, i)                                                \
> +  FMA (pg, HORNER_3_ (pg, x, c, i + 1), x, VECTOR (c (i)))
> +#define HORNER_5_(pg, x, c, i)                                                \
> +  FMA (pg, HORNER_4_ (pg, x, c, i + 1), x, VECTOR (c (i)))
> +#define HORNER_6_(pg, x, c, i)                                                \
> +  FMA (pg, HORNER_5_ (pg, x, c, i + 1), x, VECTOR (c (i)))
> +#define HORNER_7_(pg, x, c, i)                                                \
> +  FMA (pg, HORNER_6_ (pg, x, c, i + 1), x, VECTOR (c (i)))
> +#define HORNER_8_(pg, x, c, i)                                                \
> +  FMA (pg, HORNER_7_ (pg, x, c, i + 1), x, VECTOR (c (i)))
> +#define HORNER_9_(pg, x, c, i)                                                \
> +  FMA (pg, HORNER_8_ (pg, x, c, i + 1), x, VECTOR (c (i)))
> +#define HORNER_10_(pg, x, c, i)                                               \
> +  FMA (pg, HORNER_9_ (pg, x, c, i + 1), x, VECTOR (c (i)))
> +#define HORNER_11_(pg, x, c, i)                                               \
> +  FMA (pg, HORNER_10_ (pg, x, c, i + 1), x, VECTOR (c (i)))
> +#define HORNER_12_(pg, x, c, i)                                               \
> +  FMA (pg, HORNER_11_ (pg, x, c, i + 1), x, VECTOR (c (i)))
> +
> +#define HORNER_1(pg, x, c) HORNER_1_ (pg, x, c, 0)
> +#define HORNER_2(pg, x, c) HORNER_2_ (pg, x, c, 0)
> +#define HORNER_3(pg, x, c) HORNER_3_ (pg, x, c, 0)
> +#define HORNER_4(pg, x, c) HORNER_4_ (pg, x, c, 0)
> +#define HORNER_5(pg, x, c) HORNER_5_ (pg, x, c, 0)
> +#define HORNER_6(pg, x, c) HORNER_6_ (pg, x, c, 0)
> +#define HORNER_7(pg, x, c) HORNER_7_ (pg, x, c, 0)
> +#define HORNER_8(pg, x, c) HORNER_8_ (pg, x, c, 0)
> +#define HORNER_9(pg, x, c) HORNER_9_ (pg, x, c, 0)
> +#define HORNER_10(pg, x, c) HORNER_10_ (pg, x, c, 0)
> +#define HORNER_11(pg, x, c) HORNER_11_ (pg, x, c, 0)
> +#define HORNER_12(pg, x, c) HORNER_12_ (pg, x, c, 0)
> diff --git a/sysdeps/aarch64/fpu/sv_hornerf.h b/sysdeps/aarch64/fpu/sv_hornerf.h
> new file mode 100644
> index 0000000000..146c117019
> --- /dev/null
> +++ b/sysdeps/aarch64/fpu/sv_hornerf.h
> @@ -0,0 +1,24 @@
> +/* Helper macros for single-precision Horner polynomial evaluation
> +   in SVE routines.
> +
> +   Copyright (C) 2023 Free Software Foundation, Inc.
> +   This file is part of the GNU C Library.
> +
> +   The GNU C Library is free software; you can redistribute it and/or
> +   modify it under the terms of the GNU Lesser General Public
> +   License as published by the Free Software Foundation; either
> +   version 2.1 of the License, or (at your option) any later version.
> +
> +   The GNU C Library is distributed in the hope that it will be useful,
> +   but WITHOUT ANY WARRANTY; without even the implied warranty of
> +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> +   Lesser General Public License for more details.
> +
> +   You should have received a copy of the GNU Lesser General Public
> +   License along with the GNU C Library; if not, see
> +   <https://www.gnu.org/licenses/>.  */
> +
> +#define FMA(pg, x, y, z) svmla_f32_x (pg, z, x, y)
> +#define VECTOR sv_f32
> +
> +#include "sv_horner_wrap.h"
> diff --git a/sysdeps/aarch64/fpu/test-double-advsimd-wrappers.c b/sysdeps/aarch64/fpu/test-double-advsimd-wrappers.c
> index cb45fd3298..4af97a25a2 100644
> --- a/sysdeps/aarch64/fpu/test-double-advsimd-wrappers.c
> +++ b/sysdeps/aarch64/fpu/test-double-advsimd-wrappers.c
> @@ -24,3 +24,4 @@
>  #define VEC_TYPE float64x2_t
>  
>  VPCS_VECTOR_WRAPPER (cos_advsimd, _ZGVnN2v_cos)
> +VPCS_VECTOR_WRAPPER (sin_advsimd, _ZGVnN2v_sin)
> diff --git a/sysdeps/aarch64/fpu/test-double-sve-wrappers.c b/sysdeps/aarch64/fpu/test-double-sve-wrappers.c
> index cf72ef83b7..64c790adc5 100644
> --- a/sysdeps/aarch64/fpu/test-double-sve-wrappers.c
> +++ b/sysdeps/aarch64/fpu/test-double-sve-wrappers.c
> @@ -33,3 +33,4 @@
>    }
>  
>  SVE_VECTOR_WRAPPER (cos_sve, _ZGVsMxv_cos)
> +SVE_VECTOR_WRAPPER (sin_sve, _ZGVsMxv_sin)
> diff --git a/sysdeps/aarch64/fpu/test-float-advsimd-wrappers.c b/sysdeps/aarch64/fpu/test-float-advsimd-wrappers.c
> index fa146862b0..50e776b952 100644
> --- a/sysdeps/aarch64/fpu/test-float-advsimd-wrappers.c
> +++ b/sysdeps/aarch64/fpu/test-float-advsimd-wrappers.c
> @@ -24,3 +24,4 @@
>  #define VEC_TYPE float32x4_t
>  
>  VPCS_VECTOR_WRAPPER (cosf_advsimd, _ZGVnN4v_cosf)
> +VPCS_VECTOR_WRAPPER (sinf_advsimd, _ZGVnN4v_sinf)
> diff --git a/sysdeps/aarch64/fpu/test-float-sve-wrappers.c b/sysdeps/aarch64/fpu/test-float-sve-wrappers.c
> index bc26558c62..7355032929 100644
> --- a/sysdeps/aarch64/fpu/test-float-sve-wrappers.c
> +++ b/sysdeps/aarch64/fpu/test-float-sve-wrappers.c
> @@ -33,3 +33,4 @@
>    }
>  
>  SVE_VECTOR_WRAPPER (cosf_sve, _ZGVsMxv_cosf)
> +SVE_VECTOR_WRAPPER (sinf_sve, _ZGVsMxv_sinf)
> diff --git a/sysdeps/aarch64/libm-test-ulps b/sysdeps/aarch64/libm-test-ulps
> index 07da4ab843..4145662b2d 100644
> --- a/sysdeps/aarch64/libm-test-ulps
> +++ b/sysdeps/aarch64/libm-test-ulps
> @@ -1257,11 +1257,19 @@ double: 1
>  float: 1
>  ldouble: 2
>  
> +Function: "sin_advsimd":
> +double: 2
> +float: 1
> +
>  Function: "sin_downward":
>  double: 1
>  float: 1
>  ldouble: 3
>  
> +Function: "sin_sve":
> +double: 2
> +float: 1
> +
>  Function: "sin_towardzero":
>  double: 1
>  float: 1
> diff --git a/sysdeps/unix/sysv/linux/aarch64/libmvec.abilist b/sysdeps/unix/sysv/linux/aarch64/libmvec.abilist
> index 13af421af2..a4c564859c 100644
> --- a/sysdeps/unix/sysv/linux/aarch64/libmvec.abilist
> +++ b/sysdeps/unix/sysv/linux/aarch64/libmvec.abilist
> @@ -1,4 +1,8 @@
>  GLIBC_2.38 _ZGVnN2v_cos F
> +GLIBC_2.38 _ZGVnN2v_sin F
>  GLIBC_2.38 _ZGVnN4v_cosf F
> +GLIBC_2.38 _ZGVnN4v_sinf F
>  GLIBC_2.38 _ZGVsMxv_cos F
>  GLIBC_2.38 _ZGVsMxv_cosf F
> +GLIBC_2.38 _ZGVsMxv_sin F
> +GLIBC_2.38 _ZGVsMxv_sinf F

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH 1/4] aarch64: Add vector implementations of cos routines
  2023-06-08 13:39 [PATCH 1/4] aarch64: Add vector implementations of cos routines Joe Ramsay
                   ` (3 preceding siblings ...)
  2023-06-13 17:29 ` [PATCH 1/4] aarch64: Add vector implementations of cos routines Adhemerval Zanella Netto
@ 2023-06-13 19:56 ` Adhemerval Zanella Netto
  4 siblings, 0 replies; 11+ messages in thread
From: Adhemerval Zanella Netto @ 2023-06-13 19:56 UTC (permalink / raw)
  To: Joe Ramsay, libc-alpha



On 08/06/23 10:39, Joe Ramsay via Libc-alpha wrote:
> Replace the loop-over-scalar placeholder routines with optimised
> implementations from Arm Optimized Routines (AOR).
> 
> Also add some headers containing utilities for aarch64 libmvec
> routines, and update libm-test-ulps.
> 
> AOR exposes a config option, WANT_SIMD_EXCEPT, to enable
> selective masking (and later fixing up) of invalid lanes, in
> order to trigger fp exceptions correctly (AdvSIMD only). This is
> tested and maintained in AOR, however it is configured off at
> source level here for performance reasons. We keep the
> WANT_SIMD_EXCEPT blocks in routine sources to greatly simplify
> the upstreaming process from AOR to glibc.
> ---
>  sysdeps/aarch64/fpu/cos_advsimd.c             |  81 ++++++-
>  sysdeps/aarch64/fpu/cos_sve.c                 |  73 ++++++-
>  sysdeps/aarch64/fpu/cosf_advsimd.c            |  76 ++++++-
>  sysdeps/aarch64/fpu/cosf_sve.c                |  70 ++++++-
>  sysdeps/aarch64/fpu/sv_math.h                 | 141 +++++++++++++
>  sysdeps/aarch64/fpu/sve_utils.h               |  55 -----
>  sysdeps/aarch64/fpu/v_math.h                  | 197 ++++++++++++++++++
>  .../fpu/{advsimd_utils.h => vecmath_config.h} |  30 ++-
>  sysdeps/aarch64/libm-test-ulps                |   2 +-
>  9 files changed, 629 insertions(+), 96 deletions(-)
>  create mode 100644 sysdeps/aarch64/fpu/sv_math.h
>  delete mode 100644 sysdeps/aarch64/fpu/sve_utils.h
>  create mode 100644 sysdeps/aarch64/fpu/v_math.h
>  rename sysdeps/aarch64/fpu/{advsimd_utils.h => vecmath_config.h} (57%)
> 
> diff --git a/sysdeps/aarch64/fpu/cos_advsimd.c b/sysdeps/aarch64/fpu/cos_advsimd.c
> index 40831e6b0d..1f7a7023f5 100644
> --- a/sysdeps/aarch64/fpu/cos_advsimd.c
> +++ b/sysdeps/aarch64/fpu/cos_advsimd.c
> @@ -17,13 +17,82 @@
>     License along with the GNU C Library; if not, see
>     <https://www.gnu.org/licenses/>.  */
>  
> -#include <math.h>
> +#include "v_math.h"
>  
> -#include "advsimd_utils.h"
> +static const volatile struct
> +{
> +  float64x2_t poly[7];
> +  float64x2_t range_val, shift, inv_pi, half_pi, pi_1, pi_2, pi_3;
> +} data = {
> +  /* Worst-case error is 3.3 ulp in [-pi/2, pi/2].  */
> +  .poly = { V2 (-0x1.555555555547bp-3), V2 (0x1.1111111108a4dp-7),
> +	    V2 (-0x1.a01a019936f27p-13), V2 (0x1.71de37a97d93ep-19),
> +	    V2 (-0x1.ae633919987c6p-26), V2 (0x1.60e277ae07cecp-33),
> +	    V2 (-0x1.9e9540300a1p-41) },
> +  .inv_pi = V2 (0x1.45f306dc9c883p-2),
> +  .half_pi = V2 (0x1.921fb54442d18p+0),
> +  .pi_1 = V2 (0x1.921fb54442d18p+1),
> +  .pi_2 = V2 (0x1.1a62633145c06p-53),
> +  .pi_3 = V2 (0x1.c1cd129024e09p-106),
> +  .shift = V2 (0x1.8p52),
> +  .range_val = V2 (0x1p23)
> +};
> +
> +#define C(i) data.poly[i]
> +
> +static float64x2_t VPCS_ATTR NOINLINE

Why does it need NOINLINE here?  Are you trying to optimize for code size?
With stack protector I do see a small code size increase which does not 
happen without stack protector.

Otherwise, I don't think you will get much regarding code reorganization.

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH 3/4] aarch64: Add vector implementations of log routines
  2023-06-08 13:39 ` [PATCH 3/4] aarch64: Add vector implementations of log routines Joe Ramsay
@ 2023-06-14 13:27   ` Adhemerval Zanella Netto
  0 siblings, 0 replies; 11+ messages in thread
From: Adhemerval Zanella Netto @ 2023-06-14 13:27 UTC (permalink / raw)
  To: Joe Ramsay, libc-alpha



On 08/06/23 10:39, Joe Ramsay via Libc-alpha wrote:
> Optimised implementations for single and double precision, Advanced
> SIMD and SVE, copied from Arm Optimized Routines. Log lookup table
> added as HIDDEN symbol to allow it to be shared between AdvSIMD and
> SVE variants.
> ---
>  sysdeps/aarch64/fpu/Makefile                  |  11 +-
>  sysdeps/aarch64/fpu/Versions                  |   4 +
>  sysdeps/aarch64/fpu/bits/math-vector.h        |   4 +
>  sysdeps/aarch64/fpu/log_advsimd.c             | 104 +++++++++++
>  sysdeps/aarch64/fpu/log_sve.c                 |  80 ++++++++
>  sysdeps/aarch64/fpu/logf_advsimd.c            |  80 ++++++++
>  sysdeps/aarch64/fpu/logf_sve.c                |  85 +++++++++
>  .../fpu/test-double-advsimd-wrappers.c        |   1 +
>  .../aarch64/fpu/test-double-sve-wrappers.c    |   1 +
>  .../aarch64/fpu/test-float-advsimd-wrappers.c |   1 +
>  sysdeps/aarch64/fpu/test-float-sve-wrappers.c |   1 +
>  sysdeps/aarch64/fpu/v_log_data.c              | 173 ++++++++++++++++++
>  sysdeps/aarch64/fpu/vecmath_config.h          |  11 ++
>  sysdeps/aarch64/libm-test-ulps                |   8 +
>  .../unix/sysv/linux/aarch64/libmvec.abilist   |   4 +
>  15 files changed, 563 insertions(+), 5 deletions(-)
>  create mode 100644 sysdeps/aarch64/fpu/log_advsimd.c
>  create mode 100644 sysdeps/aarch64/fpu/log_sve.c
>  create mode 100644 sysdeps/aarch64/fpu/logf_advsimd.c
>  create mode 100644 sysdeps/aarch64/fpu/logf_sve.c
>  create mode 100644 sysdeps/aarch64/fpu/v_log_data.c
> 
> diff --git a/sysdeps/aarch64/fpu/Makefile b/sysdeps/aarch64/fpu/Makefile
> index b3285542ea..3f9cd2d000 100644
> --- a/sysdeps/aarch64/fpu/Makefile
> +++ b/sysdeps/aarch64/fpu/Makefile
> @@ -1,16 +1,17 @@
> -float-advsimd-funcs = cos sin
> +float-advsimd-funcs = cos sin log
>  
> -double-advsimd-funcs = cos sin
> +double-advsimd-funcs = cos sin log
>  
> -float-sve-funcs = cos sin
> +float-sve-funcs = cos sin log
>  
> -double-sve-funcs = cos sin
> +double-sve-funcs = cos sin log
>  
>  ifeq ($(subdir),mathvec)
>  libmvec-support = $(addsuffix f_advsimd,$(float-advsimd-funcs)) \
>                    $(addsuffix _advsimd,$(double-advsimd-funcs)) \
>                    $(addsuffix f_sve,$(float-sve-funcs)) \
> -                  $(addsuffix _sve,$(double-sve-funcs))
> +                  $(addsuffix _sve,$(double-sve-funcs)) \
> +                  v_log_data
>  endif
>  
>  sve-cflags = -march=armv8-a+sve
> diff --git a/sysdeps/aarch64/fpu/Versions b/sysdeps/aarch64/fpu/Versions
> index d26b3968a9..902446f40d 100644
> --- a/sysdeps/aarch64/fpu/Versions
> +++ b/sysdeps/aarch64/fpu/Versions
> @@ -1,11 +1,15 @@
>  libmvec {
>    GLIBC_2.38 {
>      _ZGVnN2v_cos;
> +    _ZGVnN2v_log;
>      _ZGVnN2v_sin;
>      _ZGVnN4v_cosf;
> +    _ZGVnN4v_logf;
>      _ZGVnN4v_sinf;
>      _ZGVsMxv_cos;
>      _ZGVsMxv_cosf;
> +    _ZGVsMxv_log;
> +    _ZGVsMxv_logf;
>      _ZGVsMxv_sin;
>      _ZGVsMxv_sinf;
>    }
> diff --git a/sysdeps/aarch64/fpu/bits/math-vector.h b/sysdeps/aarch64/fpu/bits/math-vector.h
> index ad9c9945e8..70c737338e 100644
> --- a/sysdeps/aarch64/fpu/bits/math-vector.h
> +++ b/sysdeps/aarch64/fpu/bits/math-vector.h
> @@ -50,9 +50,11 @@ typedef __SVBool_t __sv_bool_t;
>  #  define __vpcs __attribute__ ((__aarch64_vector_pcs__))
>  
>  __vpcs __f32x4_t _ZGVnN4v_cosf (__f32x4_t);
> +__vpcs __f32x4_t _ZGVnN4v_logf (__f32x4_t);
>  __vpcs __f32x4_t _ZGVnN4v_sinf (__f32x4_t);
>  
>  __vpcs __f64x2_t _ZGVnN2v_cos (__f64x2_t);
> +__vpcs __f64x2_t _ZGVnN2v_log (__f64x2_t);
>  __vpcs __f64x2_t _ZGVnN2v_sin (__f64x2_t);
>  
>  #  undef __ADVSIMD_VEC_MATH_SUPPORTED
> @@ -61,9 +63,11 @@ __vpcs __f64x2_t _ZGVnN2v_sin (__f64x2_t);
>  #ifdef __SVE_VEC_MATH_SUPPORTED
>  
>  __sv_f32_t _ZGVsMxv_cosf (__sv_f32_t, __sv_bool_t);
> +__sv_f32_t _ZGVsMxv_logf (__sv_f32_t, __sv_bool_t);
>  __sv_f32_t _ZGVsMxv_sinf (__sv_f32_t, __sv_bool_t);
>  
>  __sv_f64_t _ZGVsMxv_cos (__sv_f64_t, __sv_bool_t);
> +__sv_f64_t _ZGVsMxv_log (__sv_f64_t, __sv_bool_t);
>  __sv_f64_t _ZGVsMxv_sin (__sv_f64_t, __sv_bool_t);
>  
>  #  undef __SVE_VEC_MATH_SUPPORTED
> diff --git a/sysdeps/aarch64/fpu/log_advsimd.c b/sysdeps/aarch64/fpu/log_advsimd.c
> new file mode 100644
> index 0000000000..b8f10efe35
> --- /dev/null
> +++ b/sysdeps/aarch64/fpu/log_advsimd.c
> @@ -0,0 +1,104 @@
> +/* Double-precision vector (Advanced SIMD) log function.
> +
> +   Copyright (C) 2023 Free Software Foundation, Inc.
> +   This file is part of the GNU C Library.
> +
> +   The GNU C Library is free software; you can redistribute it and/or
> +   modify it under the terms of the GNU Lesser General Public
> +   License as published by the Free Software Foundation; either
> +   version 2.1 of the License, or (at your option) any later version.
> +
> +   The GNU C Library is distributed in the hope that it will be useful,
> +   but WITHOUT ANY WARRANTY; without even the implied warranty of
> +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> +   Lesser General Public License for more details.
> +
> +   You should have received a copy of the GNU Lesser General Public
> +   License along with the GNU C Library; if not, see
> +   <https://www.gnu.org/licenses/>.  */
> +
> +#include "v_math.h"
> +
> +static const volatile struct

Same as other patches, I think there is no need to add volatile here.

> +{
> +  float64x2_t poly[5];
> +  float64x2_t ln2;
> +  uint64x2_t min_norm, special_bound, sign_exp_mask;
> +} data = {
> +  /* Worst-case error: 1.17 + 0.5 ulp.
> +     Rel error: 0x1.6272e588p-56 in [ -0x1.fc1p-9 0x1.009p-8 ].  */
> +  .poly = { V2 (-0x1.ffffffffffff7p-2), V2 (0x1.55555555170d4p-2),
> +	    V2 (-0x1.0000000399c27p-2), V2 (0x1.999b2e90e94cap-3),
> +	    V2 (-0x1.554e550bd501ep-3) },
> +  .ln2 = V2 (0x1.62e42fefa39efp-1),
> +  .min_norm = V2 (0x0010000000000000),
> +  .special_bound = V2 (0x7fe0000000000000), /* asuint64(inf) - min_norm.  */
> +  .sign_exp_mask = V2 (0xfff0000000000000)
> +};
> +
> +#define A(i) data.poly[i]
> +#define N (1 << V_LOG_TABLE_BITS)
> +#define IndexMask (N - 1)
> +#define Off v_u64 (0x3fe6900900000000)
> +
> +struct entry
> +{
> +  float64x2_t invc;
> +  float64x2_t logc;
> +};
> +
> +static inline struct entry
> +lookup (uint64x2_t i)
> +{
> +  /* Since N is a power of 2, n % N = n & (N - 1).  */
> +  struct entry e;
> +  e.invc[0] = __v_log_data.invc[i[0] & IndexMask];
> +  e.logc[0] = __v_log_data.logc[i[0] & IndexMask];
> +  e.invc[1] = __v_log_data.invc[i[1] & IndexMask];
> +  e.logc[1] = __v_log_data.logc[i[1] & IndexMask];
> +  return e;
> +}
> +
> +static float64x2_t VPCS_ATTR NOINLINE

As before, what exactly are you trying to accomplish here with NOINLINE?

> +special_case (float64x2_t x, float64x2_t y, uint64x2_t cmp)
> +{
> +  return v_call_f64 (log, x, y, cmp);
> +}
> +
> +float64x2_t VPCS_ATTR V_NAME_D1 (log) (float64x2_t x)
> +{
> +  float64x2_t z, r, r2, p, y, kd, hi;
> +  uint64x2_t ix, iz, tmp, cmp;
> +  int64x2_t k;
> +  struct entry e;
> +
> +  ix = vreinterpretq_u64_f64 (x);
> +  cmp = vcgeq_u64 (vsubq_u64 (ix, data.min_norm), data.special_bound);
> +
> +  /* x = 2^k z; where z is in range [Off,2*Off) and exact.
> +     The range is split into N subintervals.
> +     The ith subinterval contains z and c is near its center.  */
> +  tmp = vsubq_u64 (ix, Off);
> +  k = vshrq_n_s64 (vreinterpretq_s64_u64 (tmp), 52); /* arithmetic shift.  */
> +  iz = vsubq_u64 (ix, vandq_u64 (tmp, data.sign_exp_mask));
> +  z = vreinterpretq_f64_u64 (iz);
> +  e = lookup (vshrq_n_u64 (tmp, 52 - V_LOG_TABLE_BITS));
> +
> +  /* log(x) = log1p(z/c-1) + log(c) + k*Ln2.  */
> +  r = vfmaq_f64 (v_f64 (-1.0), z, e.invc);
> +  kd = vcvtq_f64_s64 (k);
> +
> +  /* hi = r + log(c) + k*Ln2.  */
> +  hi = vfmaq_f64 (vaddq_f64 (e.logc, r), kd, data.ln2);
> +  /* y = r2*(A0 + r*A1 + r2*(A2 + r*A3 + r2*A4)) + hi.  */
> +  r2 = vmulq_f64 (r, r);
> +  y = vfmaq_f64 (A (2), A (3), r);
> +  p = vfmaq_f64 (A (0), A (1), r);
> +  y = vfmaq_f64 (y, A (4), r2);
> +  y = vfmaq_f64 (p, y, r2);
> +  y = vfmaq_f64 (hi, y, r2);
> +
> +  if (unlikely (v_any_u64 (cmp)))
> +    return special_case (x, y, cmp);
> +  return y;
> +}
> diff --git a/sysdeps/aarch64/fpu/log_sve.c b/sysdeps/aarch64/fpu/log_sve.c
> new file mode 100644
> index 0000000000..eedc5679e7
> --- /dev/null
> +++ b/sysdeps/aarch64/fpu/log_sve.c
> @@ -0,0 +1,80 @@
> +/* Double-precision vector (SVE) log function.
> +
> +   Copyright (C) 2023 Free Software Foundation, Inc.
> +   This file is part of the GNU C Library.
> +
> +   The GNU C Library is free software; you can redistribute it and/or
> +   modify it under the terms of the GNU Lesser General Public
> +   License as published by the Free Software Foundation; either
> +   version 2.1 of the License, or (at your option) any later version.
> +
> +   The GNU C Library is distributed in the hope that it will be useful,
> +   but WITHOUT ANY WARRANTY; without even the implied warranty of
> +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> +   Lesser General Public License for more details.
> +
> +   You should have received a copy of the GNU Lesser General Public
> +   License along with the GNU C Library; if not, see
> +   <https://www.gnu.org/licenses/>.  */
> +
> +#include "sv_math.h"
> +
> +#define P(i) sv_f64 (__v_log_data.poly[i])
> +#define N (1 << V_LOG_TABLE_BITS)
> +#define Off (0x3fe6900900000000)
> +#define MaxTop (0x7ff)
> +#define MinTop (0x001)
> +#define ThreshTop (0x7fe) /* MaxTop - MinTop.  */
> +
> +static svfloat64_t NOINLINE
> +special_case (svfloat64_t x, svfloat64_t y, svbool_t cmp)
> +{
> +  return sv_call_f64 (log, x, y, cmp);
> +}
> +
> +/* SVE port of AdvSIMD log algorithm.
> +   Maximum measured error is 2.17 ulp:
> +   SV_NAME_D1 (log)(0x1.a6129884398a3p+0) got 0x1.ffffff1cca043p-2
> +					 want 0x1.ffffff1cca045p-2.  */
> +svfloat64_t SV_NAME_D1 (log) (svfloat64_t x, const svbool_t pg)
> +{
> +  svuint64_t ix = svreinterpret_u64_f64 (x);
> +  svuint64_t top = svlsr_n_u64_x (pg, ix, 52);
> +  svbool_t cmp
> +      = svcmpge_u64 (pg, svsub_n_u64_x (pg, top, MinTop), sv_u64 (ThreshTop));
> +
> +  /* x = 2^k z; where z is in range [Off,2*Off) and exact.
> +     The range is split into N subintervals.
> +     The ith subinterval contains z and c is near its center.  */
> +  svuint64_t tmp = svsub_n_u64_x (pg, ix, Off);
> +  /* Equivalent to (tmp >> (52 - V_LOG_TABLE_BITS)) % N, since N is a power
> +     of 2.  */
> +  svuint64_t i = svand_n_u64_x (
> +      pg, svlsr_n_u64_x (pg, tmp, (52 - V_LOG_TABLE_BITS)), N - 1);
> +  svint64_t k = svasr_n_s64_x (pg, svreinterpret_s64_u64 (tmp),
> +			       52); /* Arithmetic shift.  */
> +  svuint64_t iz
> +      = svsub_u64_x (pg, ix, svand_n_u64_x (pg, tmp, 0xfffULL << 52));
> +  svfloat64_t z = svreinterpret_f64_u64 (iz);
> +  /* Lookup in 2 global lists (length N).  */
> +  svfloat64_t invc = svld1_gather_u64index_f64 (pg, __v_log_data.invc, i);
> +  svfloat64_t logc = svld1_gather_u64index_f64 (pg, __v_log_data.logc, i);
> +
> +  /* log(x) = log1p(z/c-1) + log(c) + k*Ln2.  */
> +  svfloat64_t r = svmad_n_f64_x (pg, invc, z, -1);
> +  svfloat64_t kd = svcvt_f64_s64_x (pg, k);
> +  /* hi = r + log(c) + k*Ln2.  */
> +  svfloat64_t hi
> +      = svmla_n_f64_x (pg, svadd_f64_x (pg, logc, r), kd, __v_log_data.ln2);
> +  /* y = r2*(A0 + r*A1 + r2*(A2 + r*A3 + r2*A4)) + hi.  */
> +  svfloat64_t r2 = svmul_f64_x (pg, r, r);
> +  svfloat64_t y = svmla_f64_x (pg, P (2), r, P (3));
> +  svfloat64_t p = svmla_f64_x (pg, P (0), r, P (1));
> +  y = svmla_f64_x (pg, y, r2, P (4));
> +  y = svmla_f64_x (pg, p, r2, y);
> +  y = svmla_f64_x (pg, hi, r2, y);
> +
> +  if (unlikely (svptest_any (pg, cmp)))
> +    return special_case (x, y, cmp);
> +  return y;
> +}
> diff --git a/sysdeps/aarch64/fpu/logf_advsimd.c b/sysdeps/aarch64/fpu/logf_advsimd.c
> new file mode 100644
> index 0000000000..5ca56ab27c
> --- /dev/null
> +++ b/sysdeps/aarch64/fpu/logf_advsimd.c
> @@ -0,0 +1,80 @@
> +/* Single-precision vector (Advanced SIMD) log function.
> +
> +   Copyright (C) 2023 Free Software Foundation, Inc.
> +   This file is part of the GNU C Library.
> +
> +   The GNU C Library is free software; you can redistribute it and/or
> +   modify it under the terms of the GNU Lesser General Public
> +   License as published by the Free Software Foundation; either
> +   version 2.1 of the License, or (at your option) any later version.
> +
> +   The GNU C Library is distributed in the hope that it will be useful,
> +   but WITHOUT ANY WARRANTY; without even the implied warranty of
> +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> +   Lesser General Public License for more details.
> +
> +   You should have received a copy of the GNU Lesser General Public
> +   License along with the GNU C Library; if not, see
> +   <https://www.gnu.org/licenses/>.  */
> +
> +#include "v_math.h"
> +
> +static const volatile struct

Ditto.

> +{
> +  float32x4_t poly[7];
> +  float32x4_t ln2, tiny_bound;
> +  uint32x4_t min_norm, special_bound, off, mantissa_mask;
> +} data = {
> +  /* 3.34 ulp error.  */
> +  .poly = { V4 (-0x1.3e737cp-3f), V4 (0x1.5a9aa2p-3f), V4 (-0x1.4f9934p-3f),
> +	    V4 (0x1.961348p-3f), V4 (-0x1.00187cp-2f), V4 (0x1.555d7cp-2f),
> +	    V4 (-0x1.ffffc8p-2f) },
> +  .ln2 = V4 (0x1.62e43p-1f),
> +  .tiny_bound = V4 (0x1p-126),
> +  .min_norm = V4 (0x00800000),
> +  .special_bound = V4 (0x7f000000), /* asuint32(inf) - min_norm.  */
> +  .off = V4 (0x3f2aaaab),	    /* 0.666667.  */
> +  .mantissa_mask = V4 (0x007fffff)
> +};
> +
> +#define P(i) data.poly[7 - i]
> +
> +static float32x4_t VPCS_ATTR NOINLINE
> +special_case (float32x4_t x, float32x4_t y, uint32x4_t cmp)
> +{
> +  /* Fall back to scalar code.  */
> +  return v_call_f32 (logf, x, y, cmp);
> +}
> +
> +float32x4_t VPCS_ATTR V_NAME_F1 (log) (float32x4_t x)
> +{
> +  float32x4_t n, p, q, r, r2, y;
> +  uint32x4_t u, cmp;
> +
> +  u = vreinterpretq_u32_f32 (x);
> +  cmp = vcgeq_u32 (vsubq_u32 (u, data.min_norm), data.special_bound);
> +
> +  /* x = 2^n * (1+r), where 2/3 < 1+r < 4/3.  */
> +  u = vsubq_u32 (u, data.off);
> +  n = vcvtq_f32_s32 (
> +      vshrq_n_s32 (vreinterpretq_s32_u32 (u), 23)); /* signextend.  */
> +  u = vandq_u32 (u, data.mantissa_mask);
> +  u = vaddq_u32 (u, data.off);
> +  r = vsubq_f32 (vreinterpretq_f32_u32 (u), v_f32 (1.0f));
> +
> +  /* y = log(1+r) + n*ln2.  */
> +  r2 = vmulq_f32 (r, r);
> +  /* n*ln2 + r + r2*(P1 + r*P2 + r2*(P3 + r*P4 + r2*(P5 + r*P6 + r2*P7))).  */
> +  p = vfmaq_f32 (P (5), P (6), r);
> +  q = vfmaq_f32 (P (3), P (4), r);
> +  y = vfmaq_f32 (P (1), P (2), r);
> +  p = vfmaq_f32 (p, P (7), r2);
> +  q = vfmaq_f32 (q, p, r2);
> +  y = vfmaq_f32 (y, q, r2);
> +  p = vfmaq_f32 (r, data.ln2, n);
> +  y = vfmaq_f32 (p, y, r2);
> +
> +  if (unlikely (v_any_u32 (cmp)))
> +    return special_case (x, y, cmp);
> +  return y;
> +}
> diff --git a/sysdeps/aarch64/fpu/logf_sve.c b/sysdeps/aarch64/fpu/logf_sve.c
> new file mode 100644
> index 0000000000..446a3ba3cf
> --- /dev/null
> +++ b/sysdeps/aarch64/fpu/logf_sve.c
> @@ -0,0 +1,85 @@
> +/* Single-precision vector (SVE) log function.
> +
> +   Copyright (C) 2023 Free Software Foundation, Inc.
> +   This file is part of the GNU C Library.
> +
> +   The GNU C Library is free software; you can redistribute it and/or
> +   modify it under the terms of the GNU Lesser General Public
> +   License as published by the Free Software Foundation; either
> +   version 2.1 of the License, or (at your option) any later version.
> +
> +   The GNU C Library is distributed in the hope that it will be useful,
> +   but WITHOUT ANY WARRANTY; without even the implied warranty of
> +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> +   Lesser General Public License for more details.
> +
> +   You should have received a copy of the GNU Lesser General Public
> +   License along with the GNU C Library; if not, see
> +   <https://www.gnu.org/licenses/>.  */
> +
> +#include "sv_math.h"
> +
> +static struct

Use const here.

> +{
> +  float poly_0135[4];
> +  float poly_246[3];
> +  float ln2;
> +} data = {
> +  .poly_0135 = {
> +    /* Coefficients copied from the AdvSIMD routine in math/, then rearranged so
> +       that coeffs 0, 1, 3 and 5 can be loaded as a single quad-word, hence used
> +       with _lane variant of MLA intrinsic.  */
> +    -0x1.3e737cp-3f, 0x1.5a9aa2p-3f, 0x1.961348p-3f, 0x1.555d7cp-2f
> +  },
> +  .poly_246 = { -0x1.4f9934p-3f, -0x1.00187cp-2f, -0x1.ffffc8p-2f },
> +  .ln2 = 0x1.62e43p-1f
> +};
> +
> +#define Min (0x00800000)
> +#define Max (0x7f800000)
> +#define Thresh (0x7f000000) /* Max - Min.  */
> +#define Mask (0x007fffff)
> +#define Off (0x3f2aaaab) /* 0.666667.  */
> +
> +static svfloat32_t NOINLINE
> +special_case (svfloat32_t x, svfloat32_t y, svbool_t cmp)
> +{
> +  return sv_call_f32 (logf, x, y, cmp);
> +}
> +
> +/* Optimised implementation of SVE logf, using the same algorithm and
> +   polynomial as the AdvSIMD routine. Maximum error is 3.34 ULPs:
> +   SV_NAME_F1 (log)(0x1.557298p+0) got 0x1.26edecp-2
> +				  want 0x1.26ede6p-2.  */
> +svfloat32_t SV_NAME_F1 (log) (svfloat32_t x, const svbool_t pg)
> +{
> +  svuint32_t u = svreinterpret_u32_f32 (x);
> +  svbool_t cmp = svcmpge_n_u32 (pg, svsub_n_u32_x (pg, u, Min), Thresh);
> +
> +  /* x = 2^n * (1+r), where 2/3 < 1+r < 4/3.  */
> +  u = svsub_n_u32_x (pg, u, Off);
> +  svfloat32_t n
> +      = svcvt_f32_s32_x (pg, svasr_n_s32_x (pg, svreinterpret_s32_u32 (u),
> +					    23)); /* Sign-extend.  */
> +  u = svand_n_u32_x (pg, u, Mask);
> +  u = svadd_n_u32_x (pg, u, Off);
> +  svfloat32_t r = svsub_n_f32_x (pg, svreinterpret_f32_u32 (u), 1.0f);
> +
> +  /* y = log(1+r) + n*ln2.  */
> +  svfloat32_t r2 = svmul_f32_x (pg, r, r);
> +  /* n*ln2 + r + r2*(P6 + r*P5 + r2*(P4 + r*P3 + r2*(P2 + r*P1 + r2*P0))).  */
> +  svfloat32_t p_0135 = svld1rq_f32 (svptrue_b32 (), &data.poly_0135[0]);
> +  svfloat32_t p = svmla_lane_f32 (sv_f32 (data.poly_246[0]), r, p_0135, 1);
> +  svfloat32_t q = svmla_lane_f32 (sv_f32 (data.poly_246[1]), r, p_0135, 2);
> +  svfloat32_t y = svmla_lane_f32 (sv_f32 (data.poly_246[2]), r, p_0135, 3);
> +  p = svmla_lane_f32 (p, r2, p_0135, 0);
> +
> +  q = svmla_f32_x (pg, q, r2, p);
> +  y = svmla_f32_x (pg, y, r2, q);
> +  p = svmla_n_f32_x (pg, r, n, data.ln2);
> +  y = svmla_f32_x (pg, p, r2, y);
> +
> +  if (unlikely (svptest_any (pg, cmp)))
> +    return special_case (x, y, cmp);
> +  return y;
> +}
> diff --git a/sysdeps/aarch64/fpu/test-double-advsimd-wrappers.c b/sysdeps/aarch64/fpu/test-double-advsimd-wrappers.c
> index 4af97a25a2..c5f6fcd7c4 100644
> --- a/sysdeps/aarch64/fpu/test-double-advsimd-wrappers.c
> +++ b/sysdeps/aarch64/fpu/test-double-advsimd-wrappers.c
> @@ -24,4 +24,5 @@
>  #define VEC_TYPE float64x2_t
>  
>  VPCS_VECTOR_WRAPPER (cos_advsimd, _ZGVnN2v_cos)
> +VPCS_VECTOR_WRAPPER (log_advsimd, _ZGVnN2v_log)
>  VPCS_VECTOR_WRAPPER (sin_advsimd, _ZGVnN2v_sin)
> diff --git a/sysdeps/aarch64/fpu/test-double-sve-wrappers.c b/sysdeps/aarch64/fpu/test-double-sve-wrappers.c
> index 64c790adc5..d5e2ec6dc5 100644
> --- a/sysdeps/aarch64/fpu/test-double-sve-wrappers.c
> +++ b/sysdeps/aarch64/fpu/test-double-sve-wrappers.c
> @@ -33,4 +33,5 @@
>    }
>  
>  SVE_VECTOR_WRAPPER (cos_sve, _ZGVsMxv_cos)
> +SVE_VECTOR_WRAPPER (log_sve, _ZGVsMxv_log)
>  SVE_VECTOR_WRAPPER (sin_sve, _ZGVsMxv_sin)
> diff --git a/sysdeps/aarch64/fpu/test-float-advsimd-wrappers.c b/sysdeps/aarch64/fpu/test-float-advsimd-wrappers.c
> index 50e776b952..c240738837 100644
> --- a/sysdeps/aarch64/fpu/test-float-advsimd-wrappers.c
> +++ b/sysdeps/aarch64/fpu/test-float-advsimd-wrappers.c
> @@ -24,4 +24,5 @@
>  #define VEC_TYPE float32x4_t
>  
>  VPCS_VECTOR_WRAPPER (cosf_advsimd, _ZGVnN4v_cosf)
> +VPCS_VECTOR_WRAPPER (logf_advsimd, _ZGVnN4v_logf)
>  VPCS_VECTOR_WRAPPER (sinf_advsimd, _ZGVnN4v_sinf)
> diff --git a/sysdeps/aarch64/fpu/test-float-sve-wrappers.c b/sysdeps/aarch64/fpu/test-float-sve-wrappers.c
> index 7355032929..5a06b75857 100644
> --- a/sysdeps/aarch64/fpu/test-float-sve-wrappers.c
> +++ b/sysdeps/aarch64/fpu/test-float-sve-wrappers.c
> @@ -33,4 +33,5 @@
>    }
>  
>  SVE_VECTOR_WRAPPER (cosf_sve, _ZGVsMxv_cosf)
> +SVE_VECTOR_WRAPPER (logf_sve, _ZGVsMxv_logf)
>  SVE_VECTOR_WRAPPER (sinf_sve, _ZGVsMxv_sinf)
> diff --git a/sysdeps/aarch64/fpu/v_log_data.c b/sysdeps/aarch64/fpu/v_log_data.c
> new file mode 100644
> index 0000000000..6fd6f43695
> --- /dev/null
> +++ b/sysdeps/aarch64/fpu/v_log_data.c
> @@ -0,0 +1,173 @@
> +/* Lookup table for double-precision log(x) vector function.
> +
> +   Copyright (C) 2023 Free Software Foundation, Inc.
> +   This file is part of the GNU C Library.
> +
> +   The GNU C Library is free software; you can redistribute it and/or
> +   modify it under the terms of the GNU Lesser General Public
> +   License as published by the Free Software Foundation; either
> +   version 2.1 of the License, or (at your option) any later version.
> +
> +   The GNU C Library is distributed in the hope that it will be useful,
> +   but WITHOUT ANY WARRANTY; without even the implied warranty of
> +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> +   Lesser General Public License for more details.
> +
> +   You should have received a copy of the GNU Lesser General Public
> +   License along with the GNU C Library; if not, see
> +   <https://www.gnu.org/licenses/>.  */
> +
> +#include "vecmath_config.h"
> +
> +const struct v_log_data __v_log_data = {
> +  /* Worst-case error: 1.17 + 0.5 ulp.
> +     Rel error: 0x1.6272e588p-56 in [ -0x1.fc1p-9 0x1.009p-8 ].  */
> +  .poly = { -0x1.ffffffffffff7p-2, 0x1.55555555170d4p-2, -0x1.0000000399c27p-2,
> +	    0x1.999b2e90e94cap-3, -0x1.554e550bd501ep-3 },
> +  .ln2 = 0x1.62e42fefa39efp-1,
> +  /* Algorithm:
> +
> +	x = 2^k z
> +	log(x) = k ln2 + log(c) + poly(z/c - 1)
> +
> +     where z is in [a;2a) which is split into N subintervals (a=0x1.69009p-1,
> +     N=128) and log(c) and 1/c for the ith subinterval comes from two lookup
> +     tables:
> +
> +	invc[i] = 1/c
> +	logc[i] = (double)log(c)
> +
> +     where c is near the center of the subinterval and is chosen by trying
> +     several floating point invc candidates around 1/center and selecting one
> +     for which the error in (double)log(c) is minimized (< 0x1p-74), except the
> +     subinterval that contains 1 and the previous one got tweaked to avoid
> +     cancellation.  */
> +  .invc = { 0x1.6a133d0dec120p+0, 0x1.6815f2f3e42edp+0,
> +	    0x1.661e39be1ac9ep+0, 0x1.642bfa30ac371p+0,
> +	    0x1.623f1d916f323p+0, 0x1.60578da220f65p+0,
> +	    0x1.5e75349dea571p+0, 0x1.5c97fd387a75ap+0,
> +	    0x1.5abfd2981f200p+0, 0x1.58eca051dc99cp+0,
> +	    0x1.571e526d9df12p+0, 0x1.5554d555b3fcbp+0,
> +	    0x1.539015e2a20cdp+0, 0x1.51d0014ee0164p+0,
> +	    0x1.50148538cd9eep+0, 0x1.4e5d8f9f698a1p+0,
> +	    0x1.4cab0edca66bep+0, 0x1.4afcf1a9db874p+0,
> +	    0x1.495327136e16fp+0, 0x1.47ad9e84af28fp+0,
> +	    0x1.460c47b39ae15p+0, 0x1.446f12b278001p+0,
> +	    0x1.42d5efdd720ecp+0, 0x1.4140cfe001a0fp+0,
> +	    0x1.3fafa3b421f69p+0, 0x1.3e225c9c8ece5p+0,
> +	    0x1.3c98ec29a211ap+0, 0x1.3b13442a413fep+0,
> +	    0x1.399156baa3c54p+0, 0x1.38131639b4cdbp+0,
> +	    0x1.36987540fbf53p+0, 0x1.352166b648f61p+0,
> +	    0x1.33adddb3eb575p+0, 0x1.323dcd99fc1d3p+0,
> +	    0x1.30d129fefc7d2p+0, 0x1.2f67e6b72fe7dp+0,
> +	    0x1.2e01f7cf8b187p+0, 0x1.2c9f518ddc86ep+0,
> +	    0x1.2b3fe86e5f413p+0, 0x1.29e3b1211b25cp+0,
> +	    0x1.288aa08b373cfp+0, 0x1.2734abcaa8467p+0,
> +	    0x1.25e1c82459b81p+0, 0x1.2491eb1ad59c5p+0,
> +	    0x1.23450a54048b5p+0, 0x1.21fb1bb09e578p+0,
> +	    0x1.20b415346d8f7p+0, 0x1.1f6fed179a1acp+0,
> +	    0x1.1e2e99b93c7b3p+0, 0x1.1cf011a7a882ap+0,
> +	    0x1.1bb44b97dba5ap+0, 0x1.1a7b3e66cdd4fp+0,
> +	    0x1.1944e11dc56cdp+0, 0x1.18112aebb1a6ep+0,
> +	    0x1.16e013231b7e9p+0, 0x1.15b1913f156cfp+0,
> +	    0x1.14859cdedde13p+0, 0x1.135c2dc68cfa4p+0,
> +	    0x1.12353bdb01684p+0, 0x1.1110bf25b85b4p+0,
> +	    0x1.0feeafd2f8577p+0, 0x1.0ecf062c51c3bp+0,
> +	    0x1.0db1baa076c8bp+0, 0x1.0c96c5bb3048ep+0,
> +	    0x1.0b7e20263e070p+0, 0x1.0a67c2acd0ce3p+0,
> +	    0x1.0953a6391e982p+0, 0x1.0841c3caea380p+0,
> +	    0x1.07321489b13eap+0, 0x1.062491aee9904p+0,
> +	    0x1.05193497a7cc5p+0, 0x1.040ff6b5f5e9fp+0,
> +	    0x1.0308d19aa6127p+0, 0x1.0203beedb0c67p+0,
> +	    0x1.010037d38bcc2p+0, 1.0,
> +	    0x1.fc06d493cca10p-1, 0x1.f81e6ac3b918fp-1,
> +	    0x1.f44546ef18996p-1, 0x1.f07b10382c84bp-1,
> +	    0x1.ecbf7070e59d4p-1, 0x1.e91213f715939p-1,
> +	    0x1.e572a9a75f7b7p-1, 0x1.e1e0e2c530207p-1,
> +	    0x1.de5c72d8a8be3p-1, 0x1.dae50fa5658ccp-1,
> +	    0x1.d77a71145a2dap-1, 0x1.d41c51166623ep-1,
> +	    0x1.d0ca6ba0bb29fp-1, 0x1.cd847e8e59681p-1,
> +	    0x1.ca4a499693e00p-1, 0x1.c71b8e399e821p-1,
> +	    0x1.c3f80faf19077p-1, 0x1.c0df92dc2b0ecp-1,
> +	    0x1.bdd1de3cbb542p-1, 0x1.baceb9e1007a3p-1,
> +	    0x1.b7d5ef543e55ep-1, 0x1.b4e749977d953p-1,
> +	    0x1.b20295155478ep-1, 0x1.af279f8e82be2p-1,
> +	    0x1.ac5638197fdf3p-1, 0x1.a98e2f102e087p-1,
> +	    0x1.a6cf5606d05c1p-1, 0x1.a4197fc04d746p-1,
> +	    0x1.a16c80293dc01p-1, 0x1.9ec82c4dc5bc9p-1,
> +	    0x1.9c2c5a491f534p-1, 0x1.9998e1480b618p-1,
> +	    0x1.970d9977c6c2dp-1, 0x1.948a5c023d212p-1,
> +	    0x1.920f0303d6809p-1, 0x1.8f9b698a98b45p-1,
> +	    0x1.8d2f6b81726f6p-1, 0x1.8acae5bb55badp-1,
> +	    0x1.886db5d9275b8p-1, 0x1.8617ba567c13cp-1,
> +	    0x1.83c8d27487800p-1, 0x1.8180de3c5dbe7p-1,
> +	    0x1.7f3fbe71cdb71p-1, 0x1.7d055498071c1p-1,
> +	    0x1.7ad182e54f65ap-1, 0x1.78a42c3c90125p-1,
> +	    0x1.767d342f76944p-1, 0x1.745c7ef26b00ap-1,
> +	    0x1.7241f15769d0fp-1, 0x1.702d70d396e41p-1,
> +	    0x1.6e1ee3700cd11p-1, 0x1.6c162fc9cbe02p-1 },
> +  .logc = { -0x1.62fe995eb963ap-2, -0x1.5d5a48dad6b67p-2,
> +	    -0x1.57bde257d2769p-2, -0x1.52294fbf2af55p-2,
> +	    -0x1.4c9c7b598aa38p-2, -0x1.47174fc5ff560p-2,
> +	    -0x1.4199b7fa7b5cap-2, -0x1.3c239f48cfb99p-2,
> +	    -0x1.36b4f154d2aebp-2, -0x1.314d9a0ff32fbp-2,
> +	    -0x1.2bed85cca3cffp-2, -0x1.2694a11421af9p-2,
> +	    -0x1.2142d8d014fb2p-2, -0x1.1bf81a2c77776p-2,
> +	    -0x1.16b452a39c6a4p-2, -0x1.11776ffa6c67ep-2,
> +	    -0x1.0c416035020e0p-2, -0x1.071211aa10fdap-2,
> +	    -0x1.01e972e293b1bp-2, -0x1.f98ee587fd434p-3,
> +	    -0x1.ef5800ad716fbp-3, -0x1.e52e160484698p-3,
> +	    -0x1.db1104b19352ep-3, -0x1.d100ac59e0bd6p-3,
> +	    -0x1.c6fced287c3bdp-3, -0x1.bd05a7b317c29p-3,
> +	    -0x1.b31abd229164fp-3, -0x1.a93c0edadb0a3p-3,
> +	    -0x1.9f697ee30d7ddp-3, -0x1.95a2efa9aa40ap-3,
> +	    -0x1.8be843d796044p-3, -0x1.82395ecc477edp-3,
> +	    -0x1.7896240966422p-3, -0x1.6efe77aca8c55p-3,
> +	    -0x1.65723e117ec5cp-3, -0x1.5bf15c0955706p-3,
> +	    -0x1.527bb6c111da1p-3, -0x1.491133c939f8fp-3,
> +	    -0x1.3fb1b90c7fc58p-3, -0x1.365d2cc485f8dp-3,
> +	    -0x1.2d13758970de7p-3, -0x1.23d47a721fd47p-3,
> +	    -0x1.1aa0229f25ec2p-3, -0x1.117655ddebc3bp-3,
> +	    -0x1.0856fbf83ab6bp-3, -0x1.fe83fabbaa106p-4,
> +	    -0x1.ec6e8507a56cdp-4, -0x1.da6d68c7cc2eap-4,
> +	    -0x1.c88078462be0cp-4, -0x1.b6a786a423565p-4,
> +	    -0x1.a4e2676ac7f85p-4, -0x1.9330eea777e76p-4,
> +	    -0x1.8192f134d5ad9p-4, -0x1.70084464f0538p-4,
> +	    -0x1.5e90bdec5cb1fp-4, -0x1.4d2c3433c5536p-4,
> +	    -0x1.3bda7e219879ap-4, -0x1.2a9b732d27194p-4,
> +	    -0x1.196eeb2b10807p-4, -0x1.0854be8ef8a7ep-4,
> +	    -0x1.ee998cb277432p-5, -0x1.ccadb79919fb9p-5,
> +	    -0x1.aae5b1d8618b0p-5, -0x1.89413015d7442p-5,
> +	    -0x1.67bfe7bf158dep-5, -0x1.46618f83941bep-5,
> +	    -0x1.2525df1b0618ap-5, -0x1.040c8e2f77c6ap-5,
> +	    -0x1.c62aad39f738ap-6, -0x1.847fe3bdead9cp-6,
> +	    -0x1.43183683400acp-6, -0x1.01f31c4e1d544p-6,
> +	    -0x1.82201d1e6b69ap-7, -0x1.00dd0f3e1bfd6p-7,
> +	    -0x1.ff6fe1feb4e53p-9, 0.0,
> +	    0x1.fe91885ec8e20p-8,  0x1.fc516f716296dp-7,
> +	    0x1.7bb4dd70a015bp-6,  0x1.f84c99b34b674p-6,
> +	    0x1.39f9ce4fb2d71p-5,  0x1.7756c0fd22e78p-5,
> +	    0x1.b43ee82db8f3ap-5,  0x1.f0b3fced60034p-5,
> +	    0x1.165bd78d4878ep-4,  0x1.3425d2715ebe6p-4,
> +	    0x1.51b8bd91b7915p-4,  0x1.6f15632c76a47p-4,
> +	    0x1.8c3c88ecbe503p-4,  0x1.a92ef077625dap-4,
> +	    0x1.c5ed5745fa006p-4,  0x1.e27876de1c993p-4,
> +	    0x1.fed104fce4cdcp-4,  0x1.0d7bd9c17d78bp-3,
> +	    0x1.1b76986cef97bp-3,  0x1.295913d24f750p-3,
> +	    0x1.37239fa295d17p-3,  0x1.44d68dd78714bp-3,
> +	    0x1.52722ebe5d780p-3,  0x1.5ff6d12671f98p-3,
> +	    0x1.6d64c2389484bp-3,  0x1.7abc4da40fddap-3,
> +	    0x1.87fdbda1e8452p-3,  0x1.95295b06a5f37p-3,
> +	    0x1.a23f6d34abbc5p-3,  0x1.af403a28e04f2p-3,
> +	    0x1.bc2c06a85721ap-3,  0x1.c903161240163p-3,
> +	    0x1.d5c5aa93287ebp-3,  0x1.e274051823fa9p-3,
> +	    0x1.ef0e656300c16p-3,  0x1.fb9509f05aa2ap-3,
> +	    0x1.04041821f37afp-2,  0x1.0a340a49b3029p-2,
> +	    0x1.105a7918a126dp-2,  0x1.1677819812b84p-2,
> +	    0x1.1c8b405b40c0ep-2,  0x1.2295d16cfa6b1p-2,
> +	    0x1.28975066318a2p-2,  0x1.2e8fd855d86fcp-2,
> +	    0x1.347f83d605e59p-2,  0x1.3a666d1244588p-2,
> +	    0x1.4044adb6f8ec4p-2,  0x1.461a5f077558cp-2,
> +	    0x1.4be799e20b9c8p-2,  0x1.51ac76a6b79dfp-2,
> +	    0x1.57690d5744a45p-2,  0x1.5d1d758e45217p-2 }
> +};
> diff --git a/sysdeps/aarch64/fpu/vecmath_config.h b/sysdeps/aarch64/fpu/vecmath_config.h
> index c8f45af63b..84cf5b2eef 100644
> --- a/sysdeps/aarch64/fpu/vecmath_config.h
> +++ b/sysdeps/aarch64/fpu/vecmath_config.h
> @@ -21,6 +21,7 @@
>  
>  #include <math.h>
>  
> +#define HIDDEN attribute_hidden
>  #define NOINLINE __attribute__ ((noinline))
>  #define likely(x) __glibc_likely (x)
>  #define unlikely(x) __glibc_unlikely (x)
> @@ -30,4 +31,14 @@
>     supported in GLIBC, however we keep it for ease of development.  */
>  #define WANT_SIMD_EXCEPT 0
>  
> +#define V_LOG_POLY_ORDER 6
> +#define V_LOG_TABLE_BITS 7
> +extern const struct v_log_data
> +{
> +  /* Shared data for vector log and log-derived routines (e.g. asinh).  */
> +  double poly[V_LOG_POLY_ORDER - 1];
> +  double ln2;
> +  double invc[1 << V_LOG_TABLE_BITS];
> +  double logc[1 << V_LOG_TABLE_BITS];
> +} __v_log_data HIDDEN;
>  #endif
> diff --git a/sysdeps/aarch64/libm-test-ulps b/sysdeps/aarch64/libm-test-ulps
> index 4145662b2d..7bdbf4614d 100644
> --- a/sysdeps/aarch64/libm-test-ulps
> +++ b/sysdeps/aarch64/libm-test-ulps
> @@ -1219,10 +1219,18 @@ double: 3
>  float: 3
>  ldouble: 1
>  
> +Function: "log_advsimd":
> +double: 1
> +float: 3
> +
>  Function: "log_downward":
>  float: 2
>  ldouble: 1
>  
> +Function: "log_sve":
> +double: 1
> +float: 3
> +
>  Function: "log_towardzero":
>  float: 2
>  ldouble: 2
> diff --git a/sysdeps/unix/sysv/linux/aarch64/libmvec.abilist b/sysdeps/unix/sysv/linux/aarch64/libmvec.abilist
> index a4c564859c..1922191886 100644
> --- a/sysdeps/unix/sysv/linux/aarch64/libmvec.abilist
> +++ b/sysdeps/unix/sysv/linux/aarch64/libmvec.abilist
> @@ -1,8 +1,12 @@
>  GLIBC_2.38 _ZGVnN2v_cos F
> +GLIBC_2.38 _ZGVnN2v_log F
>  GLIBC_2.38 _ZGVnN2v_sin F
>  GLIBC_2.38 _ZGVnN4v_cosf F
> +GLIBC_2.38 _ZGVnN4v_logf F
>  GLIBC_2.38 _ZGVnN4v_sinf F
>  GLIBC_2.38 _ZGVsMxv_cos F
>  GLIBC_2.38 _ZGVsMxv_cosf F
> +GLIBC_2.38 _ZGVsMxv_log F
> +GLIBC_2.38 _ZGVsMxv_logf F
>  GLIBC_2.38 _ZGVsMxv_sin F
>  GLIBC_2.38 _ZGVsMxv_sinf F

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH 1/4] aarch64: Add vector implementations of cos routines
  2023-06-13 17:29 ` [PATCH 1/4] aarch64: Add vector implementations of cos routines Adhemerval Zanella Netto
@ 2023-06-15 14:43   ` Joe Ramsay
  0 siblings, 0 replies; 11+ messages in thread
From: Joe Ramsay @ 2023-06-15 14:43 UTC (permalink / raw)
  To: Adhemerval Zanella Netto, libc-alpha

Hi Adhemerval, thanks for the comments.

On 13/06/2023 18:29, Adhemerval Zanella Netto wrote:

> It should not really matter for glibc: we use -std=gnu11 and -fgnu89-inline,
> glibc does not really support building without optimization, and I think it
> is unlikely that these functions will use anything that would prevent them
> from being inlined (such as alloca, as indicated by the gcc documentation
> [1]); but for static inline functions used as macros we tend to use
> __always_inline.
>
> And you seem to have removed __always_inline from sve_utils.h.
> 
> [1] https://gcc.gnu.org/onlinedocs/gcc/Inline.html
>
static inline seems to be enough to ensure that these small functions 
are always inlined. We have done it this way to be consistent with 
existing scalar helper functions from both glibc and AOR, for example in 
various versions of math_config.h.
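
For reference, the helpers in question are one-line wrappers around a single
intrinsic, along these lines (sketch only, not the exact contents of v_math.h
or sv_math.h):

  /* AdvSIMD: broadcast a scalar to every lane.  */
  static inline float64x2_t
  v_f64 (double x)
  {
    return vdupq_n_f64 (x);
  }

  /* SVE equivalent.  */
  static inline svfloat32_t
  sv_f32 (float x)
  {
    return svdup_n_f32 (x);
  }

Nothing in them should stop GCC from inlining at the optimization levels glibc
supports, which is why plain static inline has been enough so far.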

Cheers,
Joe

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH 1/4] aarch64: Add vector implementations of cos routines
  2023-06-14 16:27 Wilco Dijkstra
@ 2023-06-14 18:03 ` Adhemerval Zanella Netto
  0 siblings, 0 replies; 11+ messages in thread
From: Adhemerval Zanella Netto @ 2023-06-14 18:03 UTC (permalink / raw)
  To: Wilco Dijkstra; +Cc: 'GNU C Library', Joe Ramsay



On 14/06/23 13:27, Wilco Dijkstra wrote:
> Hi Adhemerval,
> 
>>> +static float64x2_t VPCS_ATTR NOINLINE
>>
>> Why does it need NOINLINE here?  Are you trying to optimize for code size?
>> With stack protector I do see a small code size increase which does not 
>> happen without stack protector.
>>
>> Otherwise, I don't think you will get much regarding code reorganization.
> 
> This (and the const volatile on the data) is required to generate good quality
> code.  There is a callback here from the vector calling standard to the normal
> calling standard which requires a large number of registers to be saved and
> restored.  Since this is only needed for exceptional cases, it has to be done
> in a separate function so that the common case doesn't pay these overheads.
>
> There is still another GCC bug I need to work around - it decides to
> save/restore registers once for each scalar math function callback, which
> makes the exceptional case significantly slower (and much larger in code size).

Sigh.  For the 'volatile' I see now that gcc generates multiple labels, one
for each constant, and a pair of adrp/ldr instead of a single ldr.

I really think we should document this properly, either in the code or in the
commit message, since it is really not clear from just reading the code.
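
Something along these lines next to the data definition would already help
(the wording is only a suggestion):

  /* The NOINLINE on special_case and the volatile on this data are
     deliberate.  The former keeps the large register save/restore required
     by the vector-PCS -> AAPCS callback out of the fast path; the latter is
     needed for GCC to generate good code for the loads of these constants.  */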

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH 1/4] aarch64: Add vector implementations of cos routines
@ 2023-06-14 16:27 Wilco Dijkstra
  2023-06-14 18:03 ` Adhemerval Zanella Netto
  0 siblings, 1 reply; 11+ messages in thread
From: Wilco Dijkstra @ 2023-06-14 16:27 UTC (permalink / raw)
  To: Adhemerval Zanella; +Cc: 'GNU C Library', Joe Ramsay

Hi Adhemerval,

>> +static float64x2_t VPCS_ATTR NOINLINE
>
> Why does it need NOINLINE here?  Are you trying to optimize for code size?
> With stack protector I do see a small code size increase which does not 
> happen without stack protector.
> 
> Otherwise, I don't think you will get much regarding code reorganization.

This (and the const volatile on the data) is required to generate good quality
code.  There is a callback here from the vector calling standard to the normal
calling standard which requires a large number of registers to be saved and
restored.  Since this is only needed for exceptional cases, it has to be done
in a separate function so that the common case doesn't pay these overheads.

There is still another GCC bug I need to work around - it decides to
save/restore registers once for each scalar math function callback, which
makes the exceptional case significantly slower (and much larger in code size).
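
To make that concrete, the shape we want in each routine is roughly this (toy
sketch only -- the toy_* names are made up and this is not code from the patch):

  #include "v_math.h"

  /* Cold path.  The call back into the scalar routine is a vector-PCS ->
     AAPCS transition, so all of the extra register saves/restores land
     here rather than in the caller.  */
  static float64x2_t VPCS_ATTR NOINLINE
  toy_special_case (float64x2_t x, float64x2_t y, uint64x2_t cmp)
  {
    return v_call_f64 (cos, x, y, cmp);
  }

  /* Hot path: in the common case no callee-saved registers are touched.  */
  float64x2_t VPCS_ATTR
  toy_cos (float64x2_t x)
  {
    /* Stand-ins for the real range check and polynomial.  */
    uint64x2_t cmp = vcageq_f64 (x, v_f64 (0x1p23));
    float64x2_t y = x;
    if (unlikely (v_any_u64 (cmp)))
      return toy_special_case (x, y, cmp);
    return y;
  }

With NOINLINE the spills stay inside toy_special_case; without it GCC may
inline the fallback and the whole function then pays for them.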

Cheers,
Wilco

^ permalink raw reply	[flat|nested] 11+ messages in thread

end of thread, other threads:[~2023-06-15 14:43 UTC | newest]

Thread overview: 11+ messages
2023-06-08 13:39 [PATCH 1/4] aarch64: Add vector implementations of cos routines Joe Ramsay
2023-06-08 13:39 ` [PATCH 2/4] aarch64: Add vector implementations of sin routines Joe Ramsay
2023-06-13 18:16   ` Adhemerval Zanella Netto
2023-06-08 13:39 ` [PATCH 3/4] aarch64: Add vector implementations of log routines Joe Ramsay
2023-06-14 13:27   ` Adhemerval Zanella Netto
2023-06-08 13:39 ` [PATCH 4/4] aarch64: Add vector implementations of exp routines Joe Ramsay
2023-06-13 17:29 ` [PATCH 1/4] aarch64: Add vector implementations of cos routines Adhemerval Zanella Netto
2023-06-15 14:43   ` Joe Ramsay
2023-06-13 19:56 ` Adhemerval Zanella Netto
2023-06-14 16:27 Wilco Dijkstra
2023-06-14 18:03 ` Adhemerval Zanella Netto
