public inbox for gcc-patches@gcc.gnu.org
* [PATCH] vect: Don't allow vect_emulated_vector_p type in vectorizable_call [PR106322]
@ 2022-08-12  9:40 Kewen.Lin
  2022-08-12 11:14 ` Richard Biener
  0 siblings, 1 reply; 5+ messages in thread
From: Kewen.Lin @ 2022-08-12  9:40 UTC (permalink / raw)
  To: GCC Patches; +Cc: Richard Biener, Richard Sandiford, Andrew Pinski, Uros Bizjak

Hi,

As PR106322 shows, for a vector type whose TYPE_MODE is a
scalar integral mode rather than a vector mode, querying target
support with that scalar integral mode can yield wrong
information.  For example, for the test case in PR106322, on
ppc64 (32-bit) the vectorizer picks the vector type "vector(2)
short unsigned int" for the scalar type "short unsigned int";
its mode is SImode rather than V2HImode.  The target support
query checks the umul_highpart optab with SImode and considers
it supported, so the vectorizer goes on to generate a .MULH IFN
call for that vector type.  Unfortunately, SImode support says
nothing about multiply highpart on that vector type.
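
To make that concrete, here is a hand-written sketch of the kind
of loop involved (not the PR's reduced testcase, which is in the
patch below): a 16-bit multiply-highpart that the vectorizer can
turn into a .MULH call on "vector(2) short unsigned int":

  /* Sketch only: each lane takes the high 16 bits of a
     16-bit x 16-bit widening multiply.  */
  void
  mulhi_loop (unsigned short *a, const unsigned short *b, int n)
  {
    for (int i = 0; i < n; i++)
      a[i] = ((unsigned int) a[i] * (unsigned int) b[i]) >> 16;
  }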

As Richi suggested, this patch teaches the vectorizable_call
analysis to fail if either vectype_in or vectype_out is a
vect_emulated_vector_p type.
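
For reference, a simplified paraphrase of the predicate the new
check relies on (illustration only, not GCC's exact
implementation): a vector type counts as emulated when its
chosen mode is not a vector mode, so its operations would be
open-coded on scalar registers.

  /* Simplified paraphrase of vect_emulated_vector_p; the real
     predicate in GCC has additional refinements.  */
  static bool
  emulated_vector_sketch (tree vectype)
  {
    return VECTOR_TYPE_P (vectype)
	   && !VECTOR_MODE_P (TYPE_MODE (vectype));
  }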

Bootstrapped and regtested on x86_64-redhat-linux,
aarch64-linux-gnu and powerpc64{,le}-linux-gnu.

Is it ok for trunk?  If so, I guess we would also want it
backported?

BR,
Kewen
-----
	PR tree-optimization/106322

gcc/ChangeLog:

	* tree-vect-stmts.cc (vectorizable_call): Don't allow a
	vect_emulated_vector_p type for either vectype_in or vectype_out.

gcc/testsuite/ChangeLog:

	* g++.target/i386/pr106322.C: New test.
	* g++.target/powerpc/pr106322.C: New test.
---
 gcc/testsuite/g++.target/i386/pr106322.C    | 196 ++++++++++++++++++++
 gcc/testsuite/g++.target/powerpc/pr106322.C | 195 +++++++++++++++++++
 gcc/tree-vect-stmts.cc                      |   8 +
 3 files changed, 399 insertions(+)
 create mode 100644 gcc/testsuite/g++.target/i386/pr106322.C
 create mode 100644 gcc/testsuite/g++.target/powerpc/pr106322.C

diff --git a/gcc/testsuite/g++.target/i386/pr106322.C b/gcc/testsuite/g++.target/i386/pr106322.C
new file mode 100644
index 00000000000..3cd8d6bf225
--- /dev/null
+++ b/gcc/testsuite/g++.target/i386/pr106322.C
@@ -0,0 +1,196 @@
+/* { dg-do run } */
+/* { dg-require-effective-target ia32 } */
+/* { dg-require-effective-target c++11 } */
+/* { dg-options "-O2 -mtune=generic -march=i686" } */
+
+/* As in PR106322, verify this executes to completion (does not abort).  */
+
+#include <atomic>
+#include <cassert>
+#include <cstdlib>
+#include <cstring>
+#include <limits>
+#include <memory>
+
+__attribute__((noipa))
+bool BytesEqual(const void *bytes1, const void *bytes2, const size_t size) {
+  return memcmp(bytes1, bytes2, size) == 0;
+}
+
+#define HWY_ALIGNMENT 64
+constexpr size_t kAlignment = HWY_ALIGNMENT;
+constexpr size_t kAlias = kAlignment * 4;
+
+namespace hwy {
+namespace N_EMU128 {
+template <typename T, size_t N = 16 / sizeof(T)> struct Vec128 {
+  T raw[16 / sizeof(T)] = {};
+};
+} // namespace N_EMU128
+} // namespace hwy
+
+template <typename T, size_t N>
+static void Store(const hwy::N_EMU128::Vec128<T, N> v,
+                  T *__restrict__ aligned) {
+  __builtin_memcpy(aligned, v.raw, sizeof(T) * N);
+}
+
+template <typename T, size_t N>
+static hwy::N_EMU128::Vec128<T, N> Load(const T *__restrict__ aligned) {
+  hwy::N_EMU128::Vec128<T, N> v;
+  __builtin_memcpy(v.raw, aligned, sizeof(T) * N);
+  return v;
+}
+
+template <size_t N>
+static hwy::N_EMU128::Vec128<uint16_t, N>
+MulHigh(hwy::N_EMU128::Vec128<uint16_t, N> a,
+        const hwy::N_EMU128::Vec128<uint16_t, N> b) {
+  for (size_t i = 0; i < N; ++i) {
+    // Cast to uint32_t first to prevent overflow. Otherwise the result of
+    // uint16_t * uint16_t is in "int" which may overflow. In practice the
+    // result is the same but this way it is also defined.
+    a.raw[i] = static_cast<uint16_t>(
+        (static_cast<uint32_t>(a.raw[i]) * static_cast<uint32_t>(b.raw[i])) >>
+        16);
+  }
+  return a;
+}
+
+#define HWY_ASSERT(condition) assert((condition))
+#define HWY_ASSUME_ALIGNED(ptr, align) __builtin_assume_aligned((ptr), (align))
+
+#pragma pack(push, 1)
+struct AllocationHeader {
+  void *allocated;
+  size_t payload_size;
+};
+#pragma pack(pop)
+
+static void FreeAlignedBytes(const void *aligned_pointer) {
+  HWY_ASSERT(aligned_pointer != nullptr);
+  if (aligned_pointer == nullptr)
+    return;
+
+  const uintptr_t payload = reinterpret_cast<uintptr_t>(aligned_pointer);
+  HWY_ASSERT(payload % kAlignment == 0);
+  const AllocationHeader *header =
+      reinterpret_cast<const AllocationHeader *>(payload) - 1;
+
+  free(header->allocated);
+}
+
+class AlignedFreer {
+public:
+  template <typename T> void operator()(T *aligned_pointer) const {
+    FreeAlignedBytes(aligned_pointer);
+  }
+};
+
+template <typename T>
+using AlignedFreeUniquePtr = std::unique_ptr<T, AlignedFreer>;
+
+static inline constexpr size_t ShiftCount(size_t n) {
+  return (n <= 1) ? 0 : 1 + ShiftCount(n / 2);
+}
+
+namespace {
+static size_t NextAlignedOffset() {
+  static std::atomic<uint32_t> next{0};
+  constexpr uint32_t kGroups = kAlias / kAlignment;
+  const uint32_t group = next.fetch_add(1, std::memory_order_relaxed) % kGroups;
+  const size_t offset = kAlignment * group;
+  HWY_ASSERT((offset % kAlignment == 0) && offset <= kAlias);
+  return offset;
+}
+} // namespace
+
+static void *AllocateAlignedBytes(const size_t payload_size) {
+  HWY_ASSERT(payload_size != 0); // likely a bug in caller
+  if (payload_size >= std::numeric_limits<size_t>::max() / 2) {
+    HWY_ASSERT(false && "payload_size too large");
+    return nullptr;
+  }
+
+  size_t offset = NextAlignedOffset();
+
+  // What: | misalign | unused | AllocationHeader |payload
+  // Size: |<= kAlias | offset                    |payload_size
+  //       ^allocated.^aligned.^header............^payload
+  // The header must immediately precede payload, which must remain aligned.
+  // To avoid wasting space, the header resides at the end of `unused`,
+  // which therefore cannot be empty (offset == 0).
+  if (offset == 0) {
+    offset = kAlignment; // = RoundUpTo(sizeof(AllocationHeader), kAlignment)
+    static_assert(sizeof(AllocationHeader) <= kAlignment, "Else: round up");
+  }
+
+  const size_t allocated_size = kAlias + offset + payload_size;
+  void *allocated = malloc(allocated_size);
+  HWY_ASSERT(allocated != nullptr);
+  if (allocated == nullptr)
+    return nullptr;
+  // Always round up even if already aligned - we already asked for kAlias
+  // extra bytes and there's no way to give them back.
+  uintptr_t aligned = reinterpret_cast<uintptr_t>(allocated) + kAlias;
+  static_assert((kAlias & (kAlias - 1)) == 0, "kAlias must be a power of 2");
+  static_assert(kAlias >= kAlignment, "Cannot align to more than kAlias");
+  aligned &= ~(kAlias - 1);
+
+  const uintptr_t payload = aligned + offset; // still aligned
+
+  // Stash `allocated` and payload_size inside header for FreeAlignedBytes().
+  // The allocated_size can be reconstructed from the payload_size.
+  AllocationHeader *header = reinterpret_cast<AllocationHeader *>(payload) - 1;
+  header->allocated = allocated;
+  header->payload_size = payload_size;
+
+  return HWY_ASSUME_ALIGNED(reinterpret_cast<void *>(payload), kAlignment);
+}
+
+template <typename T> static T *AllocateAlignedItems(size_t items) {
+  constexpr size_t size = sizeof(T);
+
+  constexpr bool is_pow2 = (size & (size - 1)) == 0;
+  constexpr size_t bits = ShiftCount(size);
+  static_assert(!is_pow2 || (1ull << bits) == size, "ShiftCount is incorrect");
+
+  const size_t bytes = is_pow2 ? items << bits : items * size;
+  const size_t check = is_pow2 ? bytes >> bits : bytes / size;
+  if (check != items) {
+    return nullptr; // overflowed
+  }
+  return static_cast<T *>(AllocateAlignedBytes(bytes));
+}
+
+template <typename T>
+static AlignedFreeUniquePtr<T[]> AllocateAligned(const size_t items) {
+  return AlignedFreeUniquePtr<T[]>(AllocateAlignedItems<T>(items),
+                                   AlignedFreer());
+}
+
+int main() {
+  AlignedFreeUniquePtr<uint16_t[]> in_lanes = AllocateAligned<uint16_t>(2);
+  uint16_t expected_lanes[2];
+  in_lanes[0] = 65535;
+  in_lanes[1] = 32767;
+  expected_lanes[0] = 65534;
+  expected_lanes[1] = 16383;
+  hwy::N_EMU128::Vec128<uint16_t, 2> v = Load<uint16_t, 2>(in_lanes.get());
+  hwy::N_EMU128::Vec128<uint16_t, 2> actual = MulHigh(v, v);
+  {
+    auto actual_lanes = AllocateAligned<uint16_t>(2);
+    Store(actual, actual_lanes.get());
+    const uint8_t *expected_array =
+        reinterpret_cast<const uint8_t *>(expected_lanes);
+    const uint8_t *actual_array =
+        reinterpret_cast<const uint8_t *>(actual_lanes.get());
+    for (size_t i = 0; i < 2; ++i) {
+      const uint8_t *expected_ptr = expected_array + i * 2;
+      const uint8_t *actual_ptr = actual_array + i * 2;
+      if (!BytesEqual(expected_ptr, actual_ptr, 2)) {
+        abort();
+      }
+    }
+  }
+}
diff --git a/gcc/testsuite/g++.target/powerpc/pr106322.C b/gcc/testsuite/g++.target/powerpc/pr106322.C
new file mode 100644
index 00000000000..1de6e5e37e5
--- /dev/null
+++ b/gcc/testsuite/g++.target/powerpc/pr106322.C
@@ -0,0 +1,195 @@
+/* { dg-do run } */
+/* { dg-require-effective-target c++11 } */
+/* { dg-options "-O2 -mdejagnu-cpu=power4" } */
+
+/* As in PR106322, verify this executes to completion (does not abort).  */
+
+#include <atomic>
+#include <cassert>
+#include <cstdlib>
+#include <cstring>
+#include <limits>
+#include <memory>
+
+__attribute__((noipa))
+bool BytesEqual(const void *bytes1, const void *bytes2, const size_t size) {
+  return memcmp(bytes1, bytes2, size) == 0;
+}
+
+#define HWY_ALIGNMENT 64
+constexpr size_t kAlignment = HWY_ALIGNMENT;
+constexpr size_t kAlias = kAlignment * 4;
+
+namespace hwy {
+namespace N_EMU128 {
+template <typename T, size_t N = 16 / sizeof(T)> struct Vec128 {
+  T raw[16 / sizeof(T)] = {};
+};
+} // namespace N_EMU128
+} // namespace hwy
+
+template <typename T, size_t N>
+static void Store(const hwy::N_EMU128::Vec128<T, N> v,
+                  T *__restrict__ aligned) {
+  __builtin_memcpy(aligned, v.raw, sizeof(T) * N);
+}
+
+template <typename T, size_t N>
+static hwy::N_EMU128::Vec128<T, N> Load(const T *__restrict__ aligned) {
+  hwy::N_EMU128::Vec128<T, N> v;
+  __builtin_memcpy(v.raw, aligned, sizeof(T) * N);
+  return v;
+}
+
+template <size_t N>
+static hwy::N_EMU128::Vec128<uint16_t, N>
+MulHigh(hwy::N_EMU128::Vec128<uint16_t, N> a,
+        const hwy::N_EMU128::Vec128<uint16_t, N> b) {
+  for (size_t i = 0; i < N; ++i) {
+    // Cast to uint32_t first to prevent overflow. Otherwise the result of
+    // uint16_t * uint16_t is in "int" which may overflow. In practice the
+    // result is the same but this way it is also defined.
+    a.raw[i] = static_cast<uint16_t>(
+        (static_cast<uint32_t>(a.raw[i]) * static_cast<uint32_t>(b.raw[i])) >>
+        16);
+  }
+  return a;
+}
+
+#define HWY_ASSERT(condition) assert((condition))
+#define HWY_ASSUME_ALIGNED(ptr, align) __builtin_assume_aligned((ptr), (align))
+
+#pragma pack(push, 1)
+struct AllocationHeader {
+  void *allocated;
+  size_t payload_size;
+};
+#pragma pack(pop)
+
+static void FreeAlignedBytes(const void *aligned_pointer) {
+  HWY_ASSERT(aligned_pointer != nullptr);
+  if (aligned_pointer == nullptr)
+    return;
+
+  const uintptr_t payload = reinterpret_cast<uintptr_t>(aligned_pointer);
+  HWY_ASSERT(payload % kAlignment == 0);
+  const AllocationHeader *header =
+      reinterpret_cast<const AllocationHeader *>(payload) - 1;
+
+  free(header->allocated);
+}
+
+class AlignedFreer {
+public:
+  template <typename T> void operator()(T *aligned_pointer) const {
+    FreeAlignedBytes(aligned_pointer);
+  }
+};
+
+template <typename T>
+using AlignedFreeUniquePtr = std::unique_ptr<T, AlignedFreer>;
+
+static inline constexpr size_t ShiftCount(size_t n) {
+  return (n <= 1) ? 0 : 1 + ShiftCount(n / 2);
+}
+
+namespace {
+static size_t NextAlignedOffset() {
+  static std::atomic<uint32_t> next{0};
+  constexpr uint32_t kGroups = kAlias / kAlignment;
+  const uint32_t group = next.fetch_add(1, std::memory_order_relaxed) % kGroups;
+  const size_t offset = kAlignment * group;
+  HWY_ASSERT((offset % kAlignment == 0) && offset <= kAlias);
+  return offset;
+}
+} // namespace
+
+static void *AllocateAlignedBytes(const size_t payload_size) {
+  HWY_ASSERT(payload_size != 0); // likely a bug in caller
+  if (payload_size >= std::numeric_limits<size_t>::max() / 2) {
+    HWY_ASSERT(false && "payload_size too large");
+    return nullptr;
+  }
+
+  size_t offset = NextAlignedOffset();
+
+  // What: | misalign | unused | AllocationHeader |payload
+  // Size: |<= kAlias | offset                    |payload_size
+  //       ^allocated.^aligned.^header............^payload
+  // The header must immediately precede payload, which must remain aligned.
+  // To avoid wasting space, the header resides at the end of `unused`,
+  // which therefore cannot be empty (offset == 0).
+  if (offset == 0) {
+    offset = kAlignment; // = RoundUpTo(sizeof(AllocationHeader), kAlignment)
+    static_assert(sizeof(AllocationHeader) <= kAlignment, "Else: round up");
+  }
+
+  const size_t allocated_size = kAlias + offset + payload_size;
+  void *allocated = malloc(allocated_size);
+  HWY_ASSERT(allocated != nullptr);
+  if (allocated == nullptr)
+    return nullptr;
+  // Always round up even if already aligned - we already asked for kAlias
+  // extra bytes and there's no way to give them back.
+  uintptr_t aligned = reinterpret_cast<uintptr_t>(allocated) + kAlias;
+  static_assert((kAlias & (kAlias - 1)) == 0, "kAlias must be a power of 2");
+  static_assert(kAlias >= kAlignment, "Cannot align to more than kAlias");
+  aligned &= ~(kAlias - 1);
+
+  const uintptr_t payload = aligned + offset; // still aligned
+
+  // Stash `allocated` and payload_size inside header for FreeAlignedBytes().
+  // The allocated_size can be reconstructed from the payload_size.
+  AllocationHeader *header = reinterpret_cast<AllocationHeader *>(payload) - 1;
+  header->allocated = allocated;
+  header->payload_size = payload_size;
+
+  return HWY_ASSUME_ALIGNED(reinterpret_cast<void *>(payload), kAlignment);
+}
+
+template <typename T> static T *AllocateAlignedItems(size_t items) {
+  constexpr size_t size = sizeof(T);
+
+  constexpr bool is_pow2 = (size & (size - 1)) == 0;
+  constexpr size_t bits = ShiftCount(size);
+  static_assert(!is_pow2 || (1ull << bits) == size, "ShiftCount is incorrect");
+
+  const size_t bytes = is_pow2 ? items << bits : items * size;
+  const size_t check = is_pow2 ? bytes >> bits : bytes / size;
+  if (check != items) {
+    return nullptr; // overflowed
+  }
+  return static_cast<T *>(AllocateAlignedBytes(bytes));
+}
+
+template <typename T>
+static AlignedFreeUniquePtr<T[]> AllocateAligned(const size_t items) {
+  return AlignedFreeUniquePtr<T[]>(AllocateAlignedItems<T>(items),
+                                   AlignedFreer());
+}
+
+int main() {
+  AlignedFreeUniquePtr<uint16_t[]> in_lanes = AllocateAligned<uint16_t>(2);
+  uint16_t expected_lanes[2];
+  in_lanes[0] = 65535;
+  in_lanes[1] = 32767;
+  expected_lanes[0] = 65534;
+  expected_lanes[1] = 16383;
+  hwy::N_EMU128::Vec128<uint16_t, 2> v = Load<uint16_t, 2>(in_lanes.get());
+  hwy::N_EMU128::Vec128<uint16_t, 2> actual = MulHigh(v, v);
+  {
+    auto actual_lanes = AllocateAligned<uint16_t>(2);
+    Store(actual, actual_lanes.get());
+    const uint8_t *expected_array =
+        reinterpret_cast<const uint8_t *>(expected_lanes);
+    const uint8_t *actual_array =
+        reinterpret_cast<const uint8_t *>(actual_lanes.get());
+    for (size_t i = 0; i < 2; ++i) {
+      const uint8_t *expected_ptr = expected_array + i * 2;
+      const uint8_t *actual_ptr = actual_array + i * 2;
+      if (!BytesEqual(expected_ptr, actual_ptr, 2)) {
+        abort();
+      }
+    }
+  }
+}
diff --git a/gcc/tree-vect-stmts.cc b/gcc/tree-vect-stmts.cc
index f582d238984..c9dab217f05 100644
--- a/gcc/tree-vect-stmts.cc
+++ b/gcc/tree-vect-stmts.cc
@@ -3423,6 +3423,14 @@ vectorizable_call (vec_info *vinfo,
       return false;
     }

+  if (vect_emulated_vector_p (vectype_in) || vect_emulated_vector_p (vectype_out))
+    {
+      if (dump_enabled_p ())
+	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+			 "use emulated vector type for call\n");
+      return false;
+    }
+
   /* FORNOW */
   nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
   nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
--
2.27.0



Thread overview: 5+ messages
2022-08-12  9:40 [PATCH] vect: Don't allow vect_emulated_vector_p type in vectorizable_call [PR106322] Kewen.Lin
2022-08-12 11:14 ` Richard Biener
2022-08-12 11:27   ` Kewen.Lin
2022-08-15  7:59     ` Kewen.Lin
2022-08-15  8:09       ` Richard Biener
