public inbox for gcc-patches@gcc.gnu.org
From: Richard Sandiford <richard.sandiford@linaro.org>
To: gcc-patches@gcc.gnu.org
Cc: patches@linaro.org
Subject: [2/9] Reindent parts of vectorizable_load and vectorizable_store
Date: Tue, 12 Apr 2011 13:28:00 -0000
Message-ID: <g4hba3fvkn.fsf@linaro.org>
In-Reply-To: <g4pqorfvwp.fsf@linaro.org> (Richard Sandiford's message of "Tue, 12 Apr 2011 14:20:54 +0100")

This patch just reindents parts of vectorizable_load and vectorizable_store
so that the main diff is easier to read.  It also CSEs the element type
into a local variable (elem_type), which seemed better than breaking the
long lines that the extra indentation would otherwise force.
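
For reference, the element-type CSE just caches TREE_TYPE (vectype) in the
new local, roughly as follows (a condensed sketch of the vectorizable_store
hunks below, not a standalone snippet):

  /* Before: the element type is recomputed at each use.  */
  if (!useless_type_conversion_p (TREE_TYPE (vectype), TREE_TYPE (op)))
    ...
  pi->align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype));

  /* After: compute it once and reuse the local.  */
  elem_type = TREE_TYPE (vectype);
  if (!useless_type_conversion_p (elem_type, TREE_TYPE (op)))
    ...
  pi->align = TYPE_ALIGN_UNIT (elem_type);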

I've included both the real diff and a -b version (ignoring whitespace-only
changes); the -b version is the second diff below.

Tested on x86_64-linux-gnu and arm-linux-gnueabi.  OK to install?

Richard


gcc/
	* tree-vect-stmts.c (vectorizable_store): Store the element type
	in a local variable.  Indent generation of per-vector memory accesses.
	(vectorizable_load): Likewise.

Index: gcc/tree-vect-stmts.c
===================================================================
--- gcc/tree-vect-stmts.c	2011-04-12 11:55:08.000000000 +0100
+++ gcc/tree-vect-stmts.c	2011-04-12 11:55:08.000000000 +0100
@@ -3308,6 +3308,7 @@ vectorizable_store (gimple stmt, gimple_
   stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
   struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
   tree vectype = STMT_VINFO_VECTYPE (stmt_info);
+  tree elem_type;
   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
   struct loop *loop = NULL;
   enum machine_mode vec_mode;
@@ -3383,7 +3384,8 @@ vectorizable_store (gimple stmt, gimple_
 
   /* The scalar rhs type needs to be trivially convertible to the vector
      component type.  This should always be the case.  */
-  if (!useless_type_conversion_p (TREE_TYPE (vectype), TREE_TYPE (op)))
+  elem_type = TREE_TYPE (vectype);
+  if (!useless_type_conversion_p (elem_type, TREE_TYPE (op)))
     {
       if (vect_print_dump_info (REPORT_DETAILS))
         fprintf (vect_dump, "???  operands of different types");
@@ -3608,72 +3610,75 @@ vectorizable_store (gimple stmt, gimple_
 		bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
 	}
 
-      if (strided_store)
+      if (1)
 	{
-	  result_chain = VEC_alloc (tree, heap, group_size);
-	  /* Permute.  */
-	  if (!vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
-					 &result_chain))
-	    return false;
-	}
-
-      next_stmt = first_stmt;
-      for (i = 0; i < vec_num; i++)
-	{
-	  struct ptr_info_def *pi;
-
-	  if (i > 0)
-	    /* Bump the vector pointer.  */
-	    dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
-					   NULL_TREE);
-
-	  if (slp)
-	    vec_oprnd = VEC_index (tree, vec_oprnds, i);
-	  else if (strided_store)
-	    /* For strided stores vectorized defs are interleaved in
-	       vect_permute_store_chain().  */
-	    vec_oprnd = VEC_index (tree, result_chain, i);
-
-	  data_ref = build2 (MEM_REF, TREE_TYPE (vec_oprnd), dataref_ptr,
-			     build_int_cst (reference_alias_ptr_type
-					    (DR_REF (first_dr)), 0));
-	  pi = get_ptr_info (dataref_ptr);
-	  pi->align = TYPE_ALIGN_UNIT (vectype);
-          if (aligned_access_p (first_dr))
-	    pi->misalign = 0;
-          else if (DR_MISALIGNMENT (first_dr) == -1)
+	  if (strided_store)
 	    {
-	      TREE_TYPE (data_ref)
-		= build_aligned_type (TREE_TYPE (data_ref),
-				      TYPE_ALIGN (TREE_TYPE (vectype)));
-	      pi->align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype));
-	      pi->misalign = 0;
+	      result_chain = VEC_alloc (tree, heap, group_size);
+	      /* Permute.  */
+	      if (!vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
+					     &result_chain))
+		return false;
 	    }
-	  else
+
+	  next_stmt = first_stmt;
+	  for (i = 0; i < vec_num; i++)
 	    {
-	      TREE_TYPE (data_ref)
-		= build_aligned_type (TREE_TYPE (data_ref),
-				      TYPE_ALIGN (TREE_TYPE (vectype)));
-	      pi->misalign = DR_MISALIGNMENT (first_dr);
-	    }
+	      struct ptr_info_def *pi;
 
-	  /* Arguments are ready.  Create the new vector stmt.  */
-	  new_stmt = gimple_build_assign (data_ref, vec_oprnd);
-	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
-	  mark_symbols_for_renaming (new_stmt);
+	      if (i > 0)
+		/* Bump the vector pointer.  */
+		dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
+					       stmt, NULL_TREE);
+
+	      if (slp)
+		vec_oprnd = VEC_index (tree, vec_oprnds, i);
+	      else if (strided_store)
+		/* For strided stores vectorized defs are interleaved in
+		   vect_permute_store_chain().  */
+		vec_oprnd = VEC_index (tree, result_chain, i);
+
+	      data_ref = build2 (MEM_REF, TREE_TYPE (vec_oprnd), dataref_ptr,
+				 build_int_cst (reference_alias_ptr_type
+						(DR_REF (first_dr)), 0));
+	      pi = get_ptr_info (dataref_ptr);
+	      pi->align = TYPE_ALIGN_UNIT (vectype);
+	      if (aligned_access_p (first_dr))
+		pi->misalign = 0;
+	      else if (DR_MISALIGNMENT (first_dr) == -1)
+		{
+		  TREE_TYPE (data_ref)
+		    = build_aligned_type (TREE_TYPE (data_ref),
+					  TYPE_ALIGN (elem_type));
+		  pi->align = TYPE_ALIGN_UNIT (elem_type);
+		  pi->misalign = 0;
+		}
+	      else
+		{
+		  TREE_TYPE (data_ref)
+		    = build_aligned_type (TREE_TYPE (data_ref),
+					  TYPE_ALIGN (elem_type));
+		  pi->misalign = DR_MISALIGNMENT (first_dr);
+		}
 
-          if (slp)
-            continue;
+	      /* Arguments are ready.  Create the new vector stmt.  */
+	      new_stmt = gimple_build_assign (data_ref, vec_oprnd);
+	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
+	      mark_symbols_for_renaming (new_stmt);
 
-          if (j == 0)
-            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt =  new_stmt;
-	  else
-	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
+	      if (slp)
+		continue;
+
+	      if (j == 0)
+		STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt =  new_stmt;
+	      else
+		STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
 
-	  prev_stmt_info = vinfo_for_stmt (new_stmt);
-	  next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
-	  if (!next_stmt)
-	    break;
+	      prev_stmt_info = vinfo_for_stmt (new_stmt);
+	      next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
+	      if (!next_stmt)
+		break;
+	    }
 	}
     }
 
@@ -3784,6 +3789,7 @@ vectorizable_load (gimple stmt, gimple_s
   bool nested_in_vect_loop = false;
   struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
   tree vectype = STMT_VINFO_VECTYPE (stmt_info);
+  tree elem_type;
   tree new_temp;
   enum machine_mode mode;
   gimple new_stmt = NULL;
@@ -3888,7 +3894,8 @@ vectorizable_load (gimple stmt, gimple_s
 
   /* The vector component type needs to be trivially convertible to the
      scalar lhs.  This should always be the case.  */
-  if (!useless_type_conversion_p (TREE_TYPE (scalar_dest), TREE_TYPE (vectype)))
+  elem_type = TREE_TYPE (vectype);
+  if (!useless_type_conversion_p (TREE_TYPE (scalar_dest), elem_type))
     {
       if (vect_print_dump_info (REPORT_DETAILS))
         fprintf (vect_dump, "???  operands of different types");
@@ -4117,193 +4124,205 @@ vectorizable_load (gimple stmt, gimple_s
       if (strided_load || slp_perm)
 	dr_chain = VEC_alloc (tree, heap, vec_num);
 
-      for (i = 0; i < vec_num; i++)
+      if (1)
 	{
-	  if (i > 0)
-	    dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
-					   NULL_TREE);
-
-	  /* 2. Create the vector-load in the loop.  */
-	  switch (alignment_support_scheme)
+	  for (i = 0; i < vec_num; i++)
 	    {
-	    case dr_aligned:
-	    case dr_unaligned_supported:
-	      {
-		struct ptr_info_def *pi;
-		data_ref
-		  = build2 (MEM_REF, vectype, dataref_ptr,
-			    build_int_cst (reference_alias_ptr_type
-					   (DR_REF (first_dr)), 0));
-		pi = get_ptr_info (dataref_ptr);
-		pi->align = TYPE_ALIGN_UNIT (vectype);
-		if (alignment_support_scheme == dr_aligned)
-		  {
-		    gcc_assert (aligned_access_p (first_dr));
-		    pi->misalign = 0;
-		  }
-		else if (DR_MISALIGNMENT (first_dr) == -1)
+	      if (i > 0)
+		dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
+					       stmt, NULL_TREE);
+
+	      /* 2. Create the vector-load in the loop.  */
+	      switch (alignment_support_scheme)
+		{
+		case dr_aligned:
+		case dr_unaligned_supported:
 		  {
-		    TREE_TYPE (data_ref)
-		      = build_aligned_type (TREE_TYPE (data_ref),
-					    TYPE_ALIGN (TREE_TYPE (vectype)));
-		    pi->align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype));
-		    pi->misalign = 0;
+		    struct ptr_info_def *pi;
+		    data_ref
+		      = build2 (MEM_REF, vectype, dataref_ptr,
+				build_int_cst (reference_alias_ptr_type
+					       (DR_REF (first_dr)), 0));
+		    pi = get_ptr_info (dataref_ptr);
+		    pi->align = TYPE_ALIGN_UNIT (vectype);
+		    if (alignment_support_scheme == dr_aligned)
+		      {
+			gcc_assert (aligned_access_p (first_dr));
+			pi->misalign = 0;
+		      }
+		    else if (DR_MISALIGNMENT (first_dr) == -1)
+		      {
+			TREE_TYPE (data_ref)
+			  = build_aligned_type (TREE_TYPE (data_ref),
+						TYPE_ALIGN (elem_type));
+			pi->align = TYPE_ALIGN_UNIT (elem_type);
+			pi->misalign = 0;
+		      }
+		    else
+		      {
+			TREE_TYPE (data_ref)
+			  = build_aligned_type (TREE_TYPE (data_ref),
+						TYPE_ALIGN (elem_type));
+			pi->misalign = DR_MISALIGNMENT (first_dr);
+		      }
+		    break;
 		  }
-		else
+		case dr_explicit_realign:
 		  {
-		    TREE_TYPE (data_ref)
-		      = build_aligned_type (TREE_TYPE (data_ref),
-					    TYPE_ALIGN (TREE_TYPE (vectype)));
-		    pi->misalign = DR_MISALIGNMENT (first_dr);
+		    tree ptr, bump;
+		    tree vs_minus_1;
+
+		    vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
+
+		    if (compute_in_loop)
+		      msq = vect_setup_realignment (first_stmt, gsi,
+						    &realignment_token,
+						    dr_explicit_realign,
+						    dataref_ptr, NULL);
+
+		    new_stmt = gimple_build_assign_with_ops
+				 (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
+				  build_int_cst
+				  (TREE_TYPE (dataref_ptr),
+				   -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
+		    ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
+		    gimple_assign_set_lhs (new_stmt, ptr);
+		    vect_finish_stmt_generation (stmt, new_stmt, gsi);
+		    data_ref
+		      = build2 (MEM_REF, vectype, ptr,
+				build_int_cst (reference_alias_ptr_type
+						 (DR_REF (first_dr)), 0));
+		    vec_dest = vect_create_destination_var (scalar_dest,
+							    vectype);
+		    new_stmt = gimple_build_assign (vec_dest, data_ref);
+		    new_temp = make_ssa_name (vec_dest, new_stmt);
+		    gimple_assign_set_lhs (new_stmt, new_temp);
+		    gimple_set_vdef (new_stmt, gimple_vdef (stmt));
+		    gimple_set_vuse (new_stmt, gimple_vuse (stmt));
+		    vect_finish_stmt_generation (stmt, new_stmt, gsi);
+		    msq = new_temp;
+
+		    bump = size_binop (MULT_EXPR, vs_minus_1,
+				       TYPE_SIZE_UNIT (scalar_type));
+		    ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
+		    new_stmt = gimple_build_assign_with_ops
+				 (BIT_AND_EXPR, NULL_TREE, ptr,
+				  build_int_cst
+				  (TREE_TYPE (ptr),
+				   -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
+		    ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
+		    gimple_assign_set_lhs (new_stmt, ptr);
+		    vect_finish_stmt_generation (stmt, new_stmt, gsi);
+		    data_ref
+		      = build2 (MEM_REF, vectype, ptr,
+				build_int_cst (reference_alias_ptr_type
+						 (DR_REF (first_dr)), 0));
+		    break;
 		  }
-		break;
-	      }
-	    case dr_explicit_realign:
-	      {
-		tree ptr, bump;
-		tree vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
-
-		if (compute_in_loop)
-		  msq = vect_setup_realignment (first_stmt, gsi,
-						&realignment_token,
-						dr_explicit_realign,
-						dataref_ptr, NULL);
-
-		new_stmt = gimple_build_assign_with_ops
-			     (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
-			      build_int_cst
-			        (TREE_TYPE (dataref_ptr),
-				 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
-		ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
-		gimple_assign_set_lhs (new_stmt, ptr);
-		vect_finish_stmt_generation (stmt, new_stmt, gsi);
-		data_ref
-		  = build2 (MEM_REF, vectype, ptr,
-			    build_int_cst (reference_alias_ptr_type
-					     (DR_REF (first_dr)), 0));
-		vec_dest = vect_create_destination_var (scalar_dest, vectype);
-		new_stmt = gimple_build_assign (vec_dest, data_ref);
-		new_temp = make_ssa_name (vec_dest, new_stmt);
-		gimple_assign_set_lhs (new_stmt, new_temp);
-		gimple_set_vdef (new_stmt, gimple_vdef (stmt));
-		gimple_set_vuse (new_stmt, gimple_vuse (stmt));
-		vect_finish_stmt_generation (stmt, new_stmt, gsi);
-		msq = new_temp;
-
-		bump = size_binop (MULT_EXPR, vs_minus_1,
-				   TYPE_SIZE_UNIT (scalar_type));
-		ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
-		new_stmt = gimple_build_assign_with_ops
-			     (BIT_AND_EXPR, NULL_TREE, ptr,
-			      build_int_cst
-			        (TREE_TYPE (ptr),
-				 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
-		ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
-		gimple_assign_set_lhs (new_stmt, ptr);
-		vect_finish_stmt_generation (stmt, new_stmt, gsi);
-		data_ref
-		  = build2 (MEM_REF, vectype, ptr,
-			    build_int_cst (reference_alias_ptr_type
-					     (DR_REF (first_dr)), 0));
-	        break;
-	      }
-	    case dr_explicit_realign_optimized:
-	      new_stmt = gimple_build_assign_with_ops
-			   (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
-			    build_int_cst
-			      (TREE_TYPE (dataref_ptr),
-			       -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
-	      new_temp = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
-	      gimple_assign_set_lhs (new_stmt, new_temp);
-	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
-	      data_ref
-		= build2 (MEM_REF, vectype, new_temp,
-			  build_int_cst (reference_alias_ptr_type
-					   (DR_REF (first_dr)), 0));
-	      break;
-	    default:
-	      gcc_unreachable ();
-	    }
-	  vec_dest = vect_create_destination_var (scalar_dest, vectype);
-	  new_stmt = gimple_build_assign (vec_dest, data_ref);
-	  new_temp = make_ssa_name (vec_dest, new_stmt);
-	  gimple_assign_set_lhs (new_stmt, new_temp);
-	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
-	  mark_symbols_for_renaming (new_stmt);
-
-	  /* 3. Handle explicit realignment if necessary/supported.  Create in
-		loop: vec_dest = realign_load (msq, lsq, realignment_token)  */
-	  if (alignment_support_scheme == dr_explicit_realign_optimized
-	      || alignment_support_scheme == dr_explicit_realign)
-	    {
-	      lsq = gimple_assign_lhs (new_stmt);
-	      if (!realignment_token)
-		realignment_token = dataref_ptr;
+		case dr_explicit_realign_optimized:
+		  new_stmt = gimple_build_assign_with_ops
+			       (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
+				build_int_cst
+				  (TREE_TYPE (dataref_ptr),
+				   -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
+		  new_temp = make_ssa_name (SSA_NAME_VAR (dataref_ptr),
+					    new_stmt);
+		  gimple_assign_set_lhs (new_stmt, new_temp);
+		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
+		  data_ref
+		    = build2 (MEM_REF, vectype, new_temp,
+			      build_int_cst (reference_alias_ptr_type
+					       (DR_REF (first_dr)), 0));
+		  break;
+		default:
+		  gcc_unreachable ();
+		}
 	      vec_dest = vect_create_destination_var (scalar_dest, vectype);
-	      new_stmt
-		= gimple_build_assign_with_ops3 (REALIGN_LOAD_EXPR, vec_dest,
-						 msq, lsq, realignment_token);
+	      new_stmt = gimple_build_assign (vec_dest, data_ref);
 	      new_temp = make_ssa_name (vec_dest, new_stmt);
 	      gimple_assign_set_lhs (new_stmt, new_temp);
 	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
+	      mark_symbols_for_renaming (new_stmt);
 
-	      if (alignment_support_scheme == dr_explicit_realign_optimized)
+	      /* 3. Handle explicit realignment if necessary/supported.
+		 Create in loop:
+		   vec_dest = realign_load (msq, lsq, realignment_token)  */
+	      if (alignment_support_scheme == dr_explicit_realign_optimized
+		  || alignment_support_scheme == dr_explicit_realign)
 		{
-		  gcc_assert (phi);
-		  if (i == vec_num - 1 && j == ncopies - 1)
-		    add_phi_arg (phi, lsq, loop_latch_edge (containing_loop),
-				 UNKNOWN_LOCATION);
-		  msq = lsq;
+		  lsq = gimple_assign_lhs (new_stmt);
+		  if (!realignment_token)
+		    realignment_token = dataref_ptr;
+		  vec_dest = vect_create_destination_var (scalar_dest, vectype);
+		  new_stmt
+		    = gimple_build_assign_with_ops3 (REALIGN_LOAD_EXPR,
+						     vec_dest, msq, lsq,
+						     realignment_token);
+		  new_temp = make_ssa_name (vec_dest, new_stmt);
+		  gimple_assign_set_lhs (new_stmt, new_temp);
+		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
+
+		  if (alignment_support_scheme == dr_explicit_realign_optimized)
+		    {
+		      gcc_assert (phi);
+		      if (i == vec_num - 1 && j == ncopies - 1)
+			add_phi_arg (phi, lsq,
+				     loop_latch_edge (containing_loop),
+				     UNKNOWN_LOCATION);
+		      msq = lsq;
+		    }
 		}
-	    }
 
-	  /* 4. Handle invariant-load.  */
-	  if (inv_p && !bb_vinfo)
-	    {
-	      gcc_assert (!strided_load);
-	      gcc_assert (nested_in_vect_loop_p (loop, stmt));
-	      if (j == 0)
+	      /* 4. Handle invariant-load.  */
+	      if (inv_p && !bb_vinfo)
 		{
-		  int k;
-		  tree t = NULL_TREE;
-		  tree vec_inv, bitpos, bitsize = TYPE_SIZE (scalar_type);
-
-		  /* CHECKME: bitpos depends on endianess?  */
-		  bitpos = bitsize_zero_node;
-		  vec_inv = build3 (BIT_FIELD_REF, scalar_type, new_temp,
-				    bitsize, bitpos);
-		  vec_dest =
-			vect_create_destination_var (scalar_dest, NULL_TREE);
-		  new_stmt = gimple_build_assign (vec_dest, vec_inv);
-                  new_temp = make_ssa_name (vec_dest, new_stmt);
-		  gimple_assign_set_lhs (new_stmt, new_temp);
-		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
+		  gcc_assert (!strided_load);
+		  gcc_assert (nested_in_vect_loop_p (loop, stmt));
+		  if (j == 0)
+		    {
+		      int k;
+		      tree t = NULL_TREE;
+		      tree vec_inv, bitpos, bitsize = TYPE_SIZE (scalar_type);
+
+		      /* CHECKME: bitpos depends on endianess?  */
+		      bitpos = bitsize_zero_node;
+		      vec_inv = build3 (BIT_FIELD_REF, scalar_type, new_temp,
+					bitsize, bitpos);
+		      vec_dest = vect_create_destination_var (scalar_dest,
+							      NULL_TREE);
+		      new_stmt = gimple_build_assign (vec_dest, vec_inv);
+		      new_temp = make_ssa_name (vec_dest, new_stmt);
+		      gimple_assign_set_lhs (new_stmt, new_temp);
+		      vect_finish_stmt_generation (stmt, new_stmt, gsi);
+
+		      for (k = nunits - 1; k >= 0; --k)
+			t = tree_cons (NULL_TREE, new_temp, t);
+		      /* FIXME: use build_constructor directly.  */
+		      vec_inv = build_constructor_from_list (vectype, t);
+		      new_temp = vect_init_vector (stmt, vec_inv,
+						   vectype, gsi);
+		      new_stmt = SSA_NAME_DEF_STMT (new_temp);
+		    }
+		  else
+		    gcc_unreachable (); /* FORNOW. */
+		}
 
-		  for (k = nunits - 1; k >= 0; --k)
-		    t = tree_cons (NULL_TREE, new_temp, t);
-		  /* FIXME: use build_constructor directly.  */
-		  vec_inv = build_constructor_from_list (vectype, t);
-		  new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
+	      if (negative)
+		{
+		  new_temp = reverse_vec_elements (new_temp, stmt, gsi);
 		  new_stmt = SSA_NAME_DEF_STMT (new_temp);
 		}
-	      else
-		gcc_unreachable (); /* FORNOW. */
-	    }
 
-	  if (negative)
-	    {
-	      new_temp = reverse_vec_elements (new_temp, stmt, gsi);
-	      new_stmt = SSA_NAME_DEF_STMT (new_temp);
+	      /* Collect vector loads and later create their permutation in
+		 vect_transform_strided_load ().  */
+	      if (strided_load || slp_perm)
+		VEC_quick_push (tree, dr_chain, new_temp);
+
+	      /* Store vector loads in the corresponding SLP_NODE.  */
+	      if (slp && !slp_perm)
+		VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node),
+				new_stmt);
 	    }
-
-	  /* Collect vector loads and later create their permutation in
-	     vect_transform_strided_load ().  */
-          if (strided_load || slp_perm)
-            VEC_quick_push (tree, dr_chain, new_temp);
-
-         /* Store vector loads in the corresponding SLP_NODE.  */
-	  if (slp && !slp_perm)
-	    VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
 	}
 
       if (slp && !slp_perm)
@@ -4322,7 +4341,8 @@ vectorizable_load (gimple stmt, gimple_s
         {
           if (strided_load)
   	    {
-	      if (!vect_transform_strided_load (stmt, dr_chain, group_size, gsi))
+	      if (!vect_transform_strided_load (stmt, dr_chain,
+						group_size, gsi))
 	        return false;
 
 	      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);

Index: gcc/tree-vect-stmts.c
===================================================================
--- gcc/tree-vect-stmts.c	2011-04-12 14:27:00.000000000 +0100
+++ gcc/tree-vect-stmts.c	2011-04-12 14:27:02.000000000 +0100
@@ -3308,6 +3308,7 @@ vectorizable_store (gimple stmt, gimple_
   stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
   struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
   tree vectype = STMT_VINFO_VECTYPE (stmt_info);
+  tree elem_type;
   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
   struct loop *loop = NULL;
   enum machine_mode vec_mode;
@@ -3383,7 +3384,8 @@ vectorizable_store (gimple stmt, gimple_
 
   /* The scalar rhs type needs to be trivially convertible to the vector
      component type.  This should always be the case.  */
-  if (!useless_type_conversion_p (TREE_TYPE (vectype), TREE_TYPE (op)))
+  elem_type = TREE_TYPE (vectype);
+  if (!useless_type_conversion_p (elem_type, TREE_TYPE (op)))
     {
       if (vect_print_dump_info (REPORT_DETAILS))
         fprintf (vect_dump, "???  operands of different types");
@@ -3608,6 +3610,8 @@ vectorizable_store (gimple stmt, gimple_
 		bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
 	}
 
+      if (1)
+	{
       if (strided_store)
 	{
 	  result_chain = VEC_alloc (tree, heap, group_size);
@@ -3624,8 +3628,8 @@ vectorizable_store (gimple stmt, gimple_
 
 	  if (i > 0)
 	    /* Bump the vector pointer.  */
-	    dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
-					   NULL_TREE);
+		dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
+					       stmt, NULL_TREE);
 
 	  if (slp)
 	    vec_oprnd = VEC_index (tree, vec_oprnds, i);
@@ -3645,15 +3649,15 @@ vectorizable_store (gimple stmt, gimple_
 	    {
 	      TREE_TYPE (data_ref)
 		= build_aligned_type (TREE_TYPE (data_ref),
-				      TYPE_ALIGN (TREE_TYPE (vectype)));
-	      pi->align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype));
+					  TYPE_ALIGN (elem_type));
+		  pi->align = TYPE_ALIGN_UNIT (elem_type);
 	      pi->misalign = 0;
 	    }
 	  else
 	    {
 	      TREE_TYPE (data_ref)
 		= build_aligned_type (TREE_TYPE (data_ref),
-				      TYPE_ALIGN (TREE_TYPE (vectype)));
+					  TYPE_ALIGN (elem_type));
 	      pi->misalign = DR_MISALIGNMENT (first_dr);
 	    }
 
@@ -3676,6 +3680,7 @@ vectorizable_store (gimple stmt, gimple_
 	    break;
 	}
     }
+    }
 
   VEC_free (tree, heap, dr_chain);
   VEC_free (tree, heap, oprnds);
@@ -3784,6 +3789,7 @@ vectorizable_load (gimple stmt, gimple_s
   bool nested_in_vect_loop = false;
   struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
   tree vectype = STMT_VINFO_VECTYPE (stmt_info);
+  tree elem_type;
   tree new_temp;
   enum machine_mode mode;
   gimple new_stmt = NULL;
@@ -3888,7 +3894,8 @@ vectorizable_load (gimple stmt, gimple_s
 
   /* The vector component type needs to be trivially convertible to the
      scalar lhs.  This should always be the case.  */
-  if (!useless_type_conversion_p (TREE_TYPE (scalar_dest), TREE_TYPE (vectype)))
+  elem_type = TREE_TYPE (vectype);
+  if (!useless_type_conversion_p (TREE_TYPE (scalar_dest), elem_type))
     {
       if (vect_print_dump_info (REPORT_DETAILS))
         fprintf (vect_dump, "???  operands of different types");
@@ -4117,11 +4124,13 @@ vectorizable_load (gimple stmt, gimple_s
       if (strided_load || slp_perm)
 	dr_chain = VEC_alloc (tree, heap, vec_num);
 
+      if (1)
+	{
       for (i = 0; i < vec_num; i++)
 	{
 	  if (i > 0)
-	    dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
-					   NULL_TREE);
+		dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
+					       stmt, NULL_TREE);
 
 	  /* 2. Create the vector-load in the loop.  */
 	  switch (alignment_support_scheme)
@@ -4145,15 +4154,15 @@ vectorizable_load (gimple stmt, gimple_s
 		  {
 		    TREE_TYPE (data_ref)
 		      = build_aligned_type (TREE_TYPE (data_ref),
-					    TYPE_ALIGN (TREE_TYPE (vectype)));
-		    pi->align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype));
+						TYPE_ALIGN (elem_type));
+			pi->align = TYPE_ALIGN_UNIT (elem_type);
 		    pi->misalign = 0;
 		  }
 		else
 		  {
 		    TREE_TYPE (data_ref)
 		      = build_aligned_type (TREE_TYPE (data_ref),
-					    TYPE_ALIGN (TREE_TYPE (vectype)));
+						TYPE_ALIGN (elem_type));
 		    pi->misalign = DR_MISALIGNMENT (first_dr);
 		  }
 		break;
@@ -4161,7 +4170,9 @@ vectorizable_load (gimple stmt, gimple_s
 	    case dr_explicit_realign:
 	      {
 		tree ptr, bump;
-		tree vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
+		    tree vs_minus_1;
+
+		    vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
 
 		if (compute_in_loop)
 		  msq = vect_setup_realignment (first_stmt, gsi,
@@ -4181,7 +4192,8 @@ vectorizable_load (gimple stmt, gimple_s
 		  = build2 (MEM_REF, vectype, ptr,
 			    build_int_cst (reference_alias_ptr_type
 					     (DR_REF (first_dr)), 0));
-		vec_dest = vect_create_destination_var (scalar_dest, vectype);
+		    vec_dest = vect_create_destination_var (scalar_dest,
+							    vectype);
 		new_stmt = gimple_build_assign (vec_dest, data_ref);
 		new_temp = make_ssa_name (vec_dest, new_stmt);
 		gimple_assign_set_lhs (new_stmt, new_temp);
@@ -4213,7 +4225,8 @@ vectorizable_load (gimple stmt, gimple_s
 			    build_int_cst
 			      (TREE_TYPE (dataref_ptr),
 			       -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
-	      new_temp = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
+		  new_temp = make_ssa_name (SSA_NAME_VAR (dataref_ptr),
+					    new_stmt);
 	      gimple_assign_set_lhs (new_stmt, new_temp);
 	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
 	      data_ref
@@ -4231,8 +4244,9 @@ vectorizable_load (gimple stmt, gimple_s
 	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
 	  mark_symbols_for_renaming (new_stmt);
 
-	  /* 3. Handle explicit realignment if necessary/supported.  Create in
-		loop: vec_dest = realign_load (msq, lsq, realignment_token)  */
+	      /* 3. Handle explicit realignment if necessary/supported.
+		 Create in loop:
+		   vec_dest = realign_load (msq, lsq, realignment_token)  */
 	  if (alignment_support_scheme == dr_explicit_realign_optimized
 	      || alignment_support_scheme == dr_explicit_realign)
 	    {
@@ -4241,8 +4255,9 @@ vectorizable_load (gimple stmt, gimple_s
 		realignment_token = dataref_ptr;
 	      vec_dest = vect_create_destination_var (scalar_dest, vectype);
 	      new_stmt
-		= gimple_build_assign_with_ops3 (REALIGN_LOAD_EXPR, vec_dest,
-						 msq, lsq, realignment_token);
+		    = gimple_build_assign_with_ops3 (REALIGN_LOAD_EXPR,
+						     vec_dest, msq, lsq,
+						     realignment_token);
 	      new_temp = make_ssa_name (vec_dest, new_stmt);
 	      gimple_assign_set_lhs (new_stmt, new_temp);
 	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
@@ -4251,7 +4266,8 @@ vectorizable_load (gimple stmt, gimple_s
 		{
 		  gcc_assert (phi);
 		  if (i == vec_num - 1 && j == ncopies - 1)
-		    add_phi_arg (phi, lsq, loop_latch_edge (containing_loop),
+			add_phi_arg (phi, lsq,
+				     loop_latch_edge (containing_loop),
 				 UNKNOWN_LOCATION);
 		  msq = lsq;
 		}
@@ -4272,8 +4288,8 @@ vectorizable_load (gimple stmt, gimple_s
 		  bitpos = bitsize_zero_node;
 		  vec_inv = build3 (BIT_FIELD_REF, scalar_type, new_temp,
 				    bitsize, bitpos);
-		  vec_dest =
-			vect_create_destination_var (scalar_dest, NULL_TREE);
+		      vec_dest = vect_create_destination_var (scalar_dest,
+							      NULL_TREE);
 		  new_stmt = gimple_build_assign (vec_dest, vec_inv);
                   new_temp = make_ssa_name (vec_dest, new_stmt);
 		  gimple_assign_set_lhs (new_stmt, new_temp);
@@ -4283,7 +4299,8 @@ vectorizable_load (gimple stmt, gimple_s
 		    t = tree_cons (NULL_TREE, new_temp, t);
 		  /* FIXME: use build_constructor directly.  */
 		  vec_inv = build_constructor_from_list (vectype, t);
-		  new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
+		      new_temp = vect_init_vector (stmt, vec_inv,
+						   vectype, gsi);
 		  new_stmt = SSA_NAME_DEF_STMT (new_temp);
 		}
 	      else
@@ -4303,7 +4320,9 @@ vectorizable_load (gimple stmt, gimple_s
 
          /* Store vector loads in the corresponding SLP_NODE.  */
 	  if (slp && !slp_perm)
-	    VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
+		VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node),
+				new_stmt);
+	    }
 	}
 
       if (slp && !slp_perm)
@@ -4322,7 +4341,8 @@ vectorizable_load (gimple stmt, gimple_s
         {
           if (strided_load)
   	    {
-	      if (!vect_transform_strided_load (stmt, dr_chain, group_size, gsi))
+	      if (!vect_transform_strided_load (stmt, dr_chain,
+						group_size, gsi))
 	        return false;
 
 	      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);

Thread overview: 27+ messages
2011-04-12 13:21 [0/9] Direct support for loads and stores of interleaved vectors Richard Sandiford
2011-04-12 13:25 ` [1/9] Generalise vect_create_data_ref_ptr Richard Sandiford
2011-04-12 13:30   ` Richard Guenther
2011-04-12 13:28 ` Richard Sandiford [this message]
2011-04-12 13:33   ` [2/9] Reindent parts of vectorizable_load and vectorizable_store Richard Guenther
2011-04-12 14:39     ` Richard Sandiford
2011-04-12 13:40 ` [3/9] STMT_VINFO_RELATED_STMT handling in vectorizable_store Richard Sandiford
2011-04-17 10:25   ` Ira Rosen
2011-04-12 13:44 ` [4/9] Move power-of-two checks for interleaving Richard Sandiford
2011-04-12 13:57   ` Richard Guenther
2011-04-12 13:59 ` [5/9] Main target-independent support for direct interleaving Richard Sandiford
2011-04-17 14:26   ` Ira Rosen
2011-04-18 11:54   ` Richard Guenther
2011-04-18 11:57     ` Richard Sandiford
2011-04-18 12:54       ` Richard Guenther
2011-04-18 12:58         ` Richard Sandiford
2011-04-18 13:22           ` Richard Guenther
2011-04-12 14:01 ` [6/9] NEON vec_load_lanes and vec_store_lanes patterns Richard Sandiford
2011-04-15 13:20   ` Richard Earnshaw
2011-04-12 14:14 ` [7/9] Testsuite: remove vect_{extract_even_odd,strided}_wide Richard Sandiford
2011-04-15 12:43   ` Richard Guenther
2011-04-12 14:19 ` [8/9] Testsuite: split tests for strided accesses Richard Sandiford
2011-04-15 12:44   ` Richard Guenther
2011-04-12 14:29 ` [9/9] Testsuite: Replace vect_strided with vect_stridedN Richard Sandiford
2011-04-15 12:44   ` Richard Guenther
2011-04-12 14:34 ` [10/9] Add tests for stride-3 accesses Richard Sandiford
2011-04-15 12:45   ` Richard Guenther
