public inbox for gcc-cvs@sourceware.org
* [gcc r14-6459] tree-optimization/112736 - avoid overread with non-grouped SLP load
@ 2023-12-12 14:26 Richard Biener
From: Richard Biener @ 2023-12-12 14:26 UTC (permalink / raw)
  To: gcc-cvs

https://gcc.gnu.org/g:6d0b0806eb638447c3184c59d996c2f178553d45

commit r14-6459-g6d0b0806eb638447c3184c59d996c2f178553d45
Author: Richard Biener <rguenther@suse.de>
Date:   Mon Dec 11 14:39:48 2023 +0100

    tree-optimization/112736 - avoid overread with non-grouped SLP load
    
    The following avoids an over/under-read of storage when vectorizing
    a non-grouped load with SLP.  Instead of forcing peeling for gaps,
    use a smaller load for the last vector, which might otherwise access
    excess elements.  This builds upon the existing optimization that
    avoids peeling for gaps, generalizing it to all gap widths that leave
    a power-of-two remaining number of elements (but it doesn't replace
    or improve that particular case at this point).
    
    I wonder if the poly relational compares I set up are good enough
    to guarantee /* remain should now be > 0 and < nunits.  */.
    
    There is existing test coverage that always runs into
    /* DR will be unused.  */ when the gap is wider than nunits.
    Compared to the existing gap == nunits/2 case, this only adjusts
    the load that would cause the overrun at the end, not every load.
    Apart from the poly relational compares it should reliably cover
    these cases, but I'll leave it for stage1 to remove.
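
[Editorial illustration, not part of the commit message.]  A minimal,
runnable C sketch of the arithmetic the new code performs, with made-up
constant values standing in for the poly_uint64 quantities and plain
integer checks standing in for known_gt/known_ge/constant_multiple_p:

#include <assert.h>
#include <stdio.h>

int
main (void)
{
  /* All values below are hypothetical and chosen only to exercise the
     "smaller last load" path.  */
  unsigned nunits = 4;       /* elements per vector, say V4SI */
  unsigned group_size = 1;   /* non-grouped load */
  unsigned vf = 4;           /* vectorization factor */
  unsigned gap = 3;          /* elements the scalar loop does not access */
  unsigned k = 0;            /* vec_num * j + i of the load being emitted */

  unsigned accessed = group_size * vf - gap;   /* 1 element actually read */

  if ((k + 1) * nunits > accessed)             /* this load would overread */
    {
      if ((k + 1) * nunits - accessed >= nunits)
	printf ("DR will be unused\n");        /* the whole vector is excess */
      else
	{
	  unsigned remain = accessed - k * nunits;  /* elements still needed */
	  assert (remain > 0 && remain < nunits);
	  if (nunits % remain == 0)            /* constant_multiple_p analogue */
	    printf ("load %u element(s), compose %u pieces into the vector\n",
		    remain, nunits / remain);
	}
    }
  return 0;
}

With these numbers the sketch prints "load 1 element(s), compose 4 pieces
into the vector", i.e. the last vector is assembled from a narrower load
instead of reading past the accessed storage.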
    
            PR tree-optimization/112736
            * tree-vect-stmts.cc (vectorizable_load): Extend optimization
            to avoid peeling for gaps to handle single-element non-groups
            we now allow with SLP.
    
            * gcc.dg/torture/pr112736.c: New testcase.

Diff:
---
 gcc/testsuite/gcc.dg/torture/pr112736.c | 27 ++++++++++
 gcc/tree-vect-stmts.cc                  | 92 ++++++++++++++++++++++++++-------
 2 files changed, 100 insertions(+), 19 deletions(-)

diff --git a/gcc/testsuite/gcc.dg/torture/pr112736.c b/gcc/testsuite/gcc.dg/torture/pr112736.c
new file mode 100644
index 00000000000..6abb56edba3
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/pr112736.c
@@ -0,0 +1,27 @@
+/* { dg-do run { target *-*-linux* *-*-gnu* *-*-uclinux* } } */
+
+#include <sys/mman.h>
+#include <unistd.h>
+
+int a, c[3][5];
+
+void __attribute__((noipa))
+fn1 (int * __restrict b)
+{
+  int e;
+  for (a = 2; a >= 0; a--)
+    for (e = 0; e < 4; e++)
+      c[a][e] = b[a];
+}
+
+int main()
+{
+  long pgsz = sysconf (_SC_PAGESIZE);
+  void *p = mmap (NULL, pgsz * 2, PROT_READ|PROT_WRITE,
+                  MAP_ANONYMOUS|MAP_PRIVATE, 0, 0);
+  if (p == MAP_FAILED)
+    return 0;
+  mprotect (p, pgsz, PROT_NONE);
+  fn1 (p + pgsz);
+  return 0;
+}
diff --git a/gcc/tree-vect-stmts.cc b/gcc/tree-vect-stmts.cc
index 390c8472fd6..fc6923cf68a 100644
--- a/gcc/tree-vect-stmts.cc
+++ b/gcc/tree-vect-stmts.cc
@@ -11465,26 +11465,70 @@ vectorizable_load (vec_info *vinfo,
 			if (new_vtype != NULL_TREE)
 			  ltype = half_vtype;
 		      }
+		    /* Try to use a single smaller load when we are about
+		       to load excess elements compared to the unrolled
+		       scalar loop.
+		       ???  This should cover the above case as well.  */
+		    else if (known_gt ((vec_num * j + i + 1) * nunits,
+				       (group_size * vf - gap)))
+		      {
+			if (known_ge ((vec_num * j + i + 1) * nunits
+				      - (group_size * vf - gap), nunits))
+			  /* DR will be unused.  */
+			  ltype = NULL_TREE;
+			else if (alignment_support_scheme == dr_aligned)
+			  /* Aligned access to excess elements is OK if
+			     at least one element is accessed in the
+			     scalar loop.  */
+			  ;
+			else
+			  {
+			    auto remain
+			      = ((group_size * vf - gap)
+				 - (vec_num * j + i) * nunits);
+			    /* remain should now be > 0 and < nunits.  */
+			    unsigned num;
+			    if (constant_multiple_p (nunits, remain, &num))
+			      {
+				tree ptype;
+				new_vtype
+				  = vector_vector_composition_type (vectype,
+								    num,
+								    &ptype);
+				if (new_vtype)
+				  ltype = ptype;
+			      }
+			    /* Else use multiple loads or a masked load?  */
+			  }
+		      }
 		    tree offset
 		      = (dataref_offset ? dataref_offset
 					: build_int_cst (ref_type, 0));
-		    if (ltype != vectype
-			&& memory_access_type == VMAT_CONTIGUOUS_REVERSE)
+		    if (!ltype)
+		      ;
+		    else if (ltype != vectype
+			     && memory_access_type == VMAT_CONTIGUOUS_REVERSE)
 		      {
-			unsigned HOST_WIDE_INT gap_offset
-			  = gap * tree_to_uhwi (TYPE_SIZE_UNIT (elem_type));
-			tree gapcst = build_int_cst (ref_type, gap_offset);
+			poly_uint64 gap_offset
+			  = (tree_to_poly_uint64 (TYPE_SIZE_UNIT (vectype))
+			     - tree_to_poly_uint64 (TYPE_SIZE_UNIT (ltype)));
+			tree gapcst = build_int_cstu (ref_type, gap_offset);
 			offset = size_binop (PLUS_EXPR, offset, gapcst);
 		      }
-		    data_ref
-		      = fold_build2 (MEM_REF, ltype, dataref_ptr, offset);
-		    if (alignment_support_scheme == dr_aligned)
-		      ;
-		    else
-		      TREE_TYPE (data_ref)
-			= build_aligned_type (TREE_TYPE (data_ref),
-					      align * BITS_PER_UNIT);
-		    if (ltype != vectype)
+		    if (ltype)
+		      {
+			data_ref
+			  = fold_build2 (MEM_REF, ltype, dataref_ptr, offset);
+			if (alignment_support_scheme == dr_aligned)
+			  ;
+			else
+			  TREE_TYPE (data_ref)
+			    = build_aligned_type (TREE_TYPE (data_ref),
+						  align * BITS_PER_UNIT);
+		      }
+		    if (!ltype)
+		      data_ref = build_constructor (vectype, NULL);
+		    else if (ltype != vectype)
 		      {
 			vect_copy_ref_info (data_ref,
 					    DR_REF (first_dr_info->dr));
@@ -11494,18 +11538,28 @@ vectorizable_load (vec_info *vinfo,
 						     gsi);
 			data_ref = NULL;
 			vec<constructor_elt, va_gc> *v;
-			vec_alloc (v, 2);
+			/* We've computed 'num' above statically as two
+			   or via constant_multiple_p.  */
+			unsigned num
+			  = (exact_div (tree_to_poly_uint64
+					  (TYPE_SIZE_UNIT (vectype)),
+					tree_to_poly_uint64
+					  (TYPE_SIZE_UNIT (ltype)))
+			     .to_constant ());
+			vec_alloc (v, num);
 			if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
 			  {
-			    CONSTRUCTOR_APPEND_ELT (v, NULL_TREE,
-						    build_zero_cst (ltype));
+			    while (--num)
+			      CONSTRUCTOR_APPEND_ELT (v, NULL_TREE,
+						      build_zero_cst (ltype));
 			    CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, tem);
 			  }
 			else
 			  {
 			    CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, tem);
-			    CONSTRUCTOR_APPEND_ELT (v, NULL_TREE,
-						    build_zero_cst (ltype));
+			    while (--num)
+			      CONSTRUCTOR_APPEND_ELT (v, NULL_TREE,
+						      build_zero_cst (ltype));
 			  }
 			gcc_assert (new_vtype != NULL_TREE);
 			if (new_vtype == vectype)
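
[Editorial illustration, not part of the patch.]  A small C sketch of how
the CONSTRUCTOR in the hunk above is populated once 'num' pieces of ltype
are needed: the actually-loaded piece ('tem' in the patch) occupies the
last slot for VMAT_CONTIGUOUS_REVERSE and the first slot otherwise, with
all remaining num - 1 slots zero.  The concrete values here are
hypothetical:

#include <stdio.h>

int
main (void)
{
  unsigned num = 4;        /* pieces per vector, as computed via exact_div */
  int reverse = 1;         /* stands in for VMAT_CONTIGUOUS_REVERSE        */
  const char *slot[16];
  unsigned pos = 0, n = num;

  if (reverse)
    {
      while (--n)          /* zero-fill all but the last slot */
	slot[pos++] = "zero";
      slot[pos++] = "tem"; /* the piece actually loaded       */
    }
  else
    {
      slot[pos++] = "tem";
      while (--n)          /* zero-fill the remaining slots   */
	slot[pos++] = "zero";
    }

  for (unsigned i = 0; i < num; i++)
    printf ("slot %u: %s\n", i, slot[i]);
  return 0;
}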
