public inbox for gcc-patches@gcc.gnu.org
 help / color / mirror / Atom feed
* [gomp4] Generate sequential loop for OpenACC loop directive inside kernels
@ 2015-06-16  9:01 Chung-Lin Tang
  2015-06-16  9:05 ` Tom de Vries
  0 siblings, 1 reply; 3+ messages in thread
From: Chung-Lin Tang @ 2015-06-16  9:01 UTC (permalink / raw)
  To: gcc-patches; +Cc: Tom de Vries, Thomas Schwinge

[-- Attachment #1: Type: text/plain, Size: 928 bytes --]

This patch adjusts omp-low.c:expand_omp_for_generic() to expand to a "sequential"
loop form (without the OMP runtime calls), used for loop directives inside
OpenACC kernels constructs. Tom mentions that this allows the kernels parallelization
to work in cases where '#pragma acc loop' makes the front ends create an OMP_FOR
construct, which the loop analysis phases would otherwise not understand.

Tested and committed to gomp-4_0-branch.

Chung-Lin

2015-06-16  Chung-Lin Tang  <cltang@codesourcery.com>

        * omp-low.c (struct omp_region): Add inside_kernels_p field.
        (expand_omp_for_generic): Adjust to generate a 'sequential' loop
        when GOMP builtin arguments are BUILT_IN_NONE.
        (expand_omp_for): Use expand_omp_for_generic() to generate a
        non-parallelized loop for OMP_FORs inside OpenACC kernels regions.
        (expand_omp): Mark inside_kernels_p field true for regions
        nested inside OpenACC kernels constructs.

[-- Attachment #2: kernels-acc-loop-seq.patch --]
[-- Type: text/x-patch, Size: 6700 bytes --]

Index: omp-low.c
===================================================================
--- omp-low.c	(revision 224475)
+++ omp-low.c	(working copy)
@@ -161,6 +161,9 @@ struct omp_region
   /* True if this is a combined parallel+workshare region.  */
   bool is_combined_parallel;
 
+  /* True if this is nested inside an OpenACC kernels construct.  */
+  bool inside_kernels_p;
+
   /* For an OpenACC loop, the level of parallelism requested.  */
   int gwv_this;
 
@@ -6734,6 +6737,7 @@ expand_omp_for_generic (struct omp_region *region,
   gassign *assign_stmt;
   bool in_combined_parallel = is_combined_parallel (region);
   bool broken_loop = region->cont == NULL;
+  bool seq_loop = (!start_fn || !next_fn);
   edge e, ne;
   tree *counts = NULL;
   int i;
@@ -6821,8 +6825,21 @@ expand_omp_for_generic (struct omp_region *region,
 							    zero_iter_bb));
 	}
     }
-  if (in_combined_parallel)
+  if (seq_loop)
     {
+      tree n1 = fold_convert (fd->iter_type, fd->loop.n1);
+      tree n2 = fold_convert (fd->iter_type, fd->loop.n2);
+
+      assign_stmt = gimple_build_assign (istart0, n1);
+      gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
+
+      assign_stmt = gimple_build_assign (iend0, n2);
+      gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
+
+      t = fold_build2 (NE_EXPR, boolean_type_node, istart0, iend0);
+    }
+  else if (in_combined_parallel)
+    {
       /* In a combined parallel loop, emit a call to
 	 GOMP_loop_foo_next.  */
       t = build_call_expr (builtin_decl_explicit (next_fn), 2,
@@ -7007,32 +7024,38 @@ expand_omp_for_generic (struct omp_region *region,
 	collapse_bb = extract_omp_for_update_vars (fd, cont_bb, l1_bb);
 
       /* Emit code to get the next parallel iteration in L2_BB.  */
-      gsi = gsi_start_bb (l2_bb);
+      if (!seq_loop)
+	{
+	  gsi = gsi_start_bb (l2_bb);
 
-      t = build_call_expr (builtin_decl_explicit (next_fn), 2,
-			   build_fold_addr_expr (istart0),
-			   build_fold_addr_expr (iend0));
-      t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
-				    false, GSI_CONTINUE_LINKING);
-      if (TREE_TYPE (t) != boolean_type_node)
-	t = fold_build2 (NE_EXPR, boolean_type_node,
-			 t, build_int_cst (TREE_TYPE (t), 0));
-      gcond *cond_stmt = gimple_build_cond_empty (t);
-      gsi_insert_after (&gsi, cond_stmt, GSI_CONTINUE_LINKING);
+	  t = build_call_expr (builtin_decl_explicit (next_fn), 2,
+			       build_fold_addr_expr (istart0),
+			       build_fold_addr_expr (iend0));
+	  t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
+					false, GSI_CONTINUE_LINKING);
+	  if (TREE_TYPE (t) != boolean_type_node)
+	    t = fold_build2 (NE_EXPR, boolean_type_node,
+			     t, build_int_cst (TREE_TYPE (t), 0));
+	  gcond *cond_stmt = gimple_build_cond_empty (t);
+	  gsi_insert_after (&gsi, cond_stmt, GSI_CONTINUE_LINKING);
+	}
     }
 
   /* Add the loop cleanup function.  */
   gsi = gsi_last_bb (exit_bb);
-  if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
-    t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
-  else if (gimple_omp_return_lhs (gsi_stmt (gsi)))
-    t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_CANCEL);
-  else
-    t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
-  gcall *call_stmt = gimple_build_call (t, 0);
-  if (gimple_omp_return_lhs (gsi_stmt (gsi)))
-    gimple_call_set_lhs (call_stmt, gimple_omp_return_lhs (gsi_stmt (gsi)));
-  gsi_insert_after (&gsi, call_stmt, GSI_SAME_STMT);
+  if (!seq_loop)
+    {
+      if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
+	t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
+      else if (gimple_omp_return_lhs (gsi_stmt (gsi)))
+	t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_CANCEL);
+      else
+	t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
+      gcall *call_stmt = gimple_build_call (t, 0);
+      if (gimple_omp_return_lhs (gsi_stmt (gsi)))
+	gimple_call_set_lhs (call_stmt, gimple_omp_return_lhs (gsi_stmt (gsi)));
+      gsi_insert_after (&gsi, call_stmt, GSI_SAME_STMT);
+    }
   gsi_remove (&gsi, true);
 
   /* Connect the new blocks.  */
@@ -7044,7 +7067,7 @@ expand_omp_for_generic (struct omp_region *region,
       gimple_seq phis;
 
       e = find_edge (cont_bb, l3_bb);
-      ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
+      ne = make_edge (l2_bb, l3_bb, seq_loop ? EDGE_FALLTHRU : EDGE_FALSE_VALUE);
 
       phis = phi_nodes (l3_bb);
       for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
@@ -7080,7 +7103,8 @@ expand_omp_for_generic (struct omp_region *region,
 	  e = find_edge (cont_bb, l2_bb);
 	  e->flags = EDGE_FALLTHRU;
 	}
-      make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
+      if (!seq_loop)
+	make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
 
       set_immediate_dominator (CDI_DOMINATORS, l2_bb,
 			       recompute_dominator (CDI_DOMINATORS, l2_bb));
@@ -7091,10 +7115,16 @@ expand_omp_for_generic (struct omp_region *region,
       set_immediate_dominator (CDI_DOMINATORS, l1_bb,
 			       recompute_dominator (CDI_DOMINATORS, l1_bb));
 
-      struct loop *outer_loop = alloc_loop ();
-      outer_loop->header = l0_bb;
-      outer_loop->latch = l2_bb;
-      add_loop (outer_loop, l0_bb->loop_father);
+      struct loop *outer_loop;
+      if (seq_loop)
+	outer_loop = l0_bb->loop_father;
+      else
+	{
+	  outer_loop = alloc_loop ();
+	  outer_loop->header = l0_bb;
+	  outer_loop->latch = l2_bb;
+	  add_loop (outer_loop, l0_bb->loop_father);
+	}
 
       if (!gimple_omp_for_combined_p (fd->for_stmt))
 	{
@@ -8552,7 +8582,10 @@ expand_omp_for (struct omp_region *region, gimple
        original loops from being detected.  Fix that up.  */
     loops_state_set (LOOPS_NEED_FIXUP);
 
-  if (gimple_omp_for_kind (fd.for_stmt) & GF_OMP_FOR_SIMD)
+  if (region->inside_kernels_p)
+    expand_omp_for_generic (region, &fd, BUILT_IN_NONE, BUILT_IN_NONE,
+			    inner_stmt);
+  else if (gimple_omp_for_kind (fd.for_stmt) & GF_OMP_FOR_SIMD)
     expand_omp_simd (region, &fd);
   else if (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_CILKFOR)
     expand_cilk_for (region, &fd);
@@ -10144,6 +10177,14 @@ expand_omp (struct omp_region *region)
       if (region->type == GIMPLE_OMP_PARALLEL)
 	determine_parallel_type (region);
 
+      if (region->type == GIMPLE_OMP_TARGET && region->inner)
+	{
+	  gomp_target *entry = as_a <gomp_target *> (last_stmt (region->entry));
+	  if (region->inside_kernels_p
+	      || gimple_omp_target_kind (entry) == GF_OMP_TARGET_KIND_OACC_KERNELS)
+	    region->inner->inside_kernels_p = true;
+	}
+
       if (region->type == GIMPLE_OMP_FOR
 	  && gimple_omp_for_combined_p (last_stmt (region->entry)))
 	inner_stmt = last_stmt (region->inner->entry);

^ permalink raw reply	[flat|nested] 3+ messages in thread

* Re: [gomp4] Generate sequential loop for OpenACC loop directive inside kernels
  2015-06-16  9:01 [gomp4] Generate sequential loop for OpenACC loop directive inside kernels Chung-Lin Tang
@ 2015-06-16  9:05 ` Tom de Vries
  2015-06-23 12:56   ` Chung-Lin Tang
  0 siblings, 1 reply; 3+ messages in thread
From: Tom de Vries @ 2015-06-16  9:05 UTC (permalink / raw)
  To: Chung-Lin Tang, gcc-patches; +Cc: Tom de Vries, Thomas Schwinge

On 16/06/15 10:59, Chung-Lin Tang wrote:
> This patch adjusts omp-low.c:expand_omp_for_generic() to expand to a "sequential"
> loop form (without the OMP runtime calls), used for loop directives inside
> OpenACC kernels constructs. Tom mentions that this allows the kernels parallelization
> to work when '#pragma acc loop' makes the front-ends create OMP_FOR, which the
> loop analysis phases don't understand.
>
> Tested and committed to gomp-4_0-branch.
>

Hi Chung-Lin,

can you commit a test-case to exercise the code?

Thanks,
- Tom

> Chung-Lin
>
> 2015-06-16  Chung-Lin Tang  <cltang@codesourcery.com>
>
>          * omp-low.c (struct omp_region): Add inside_kernels_p field.
>          (expand_omp_for_generic): Adjust to generate a 'sequential' loop
>          when GOMP builtin arguments are BUILT_IN_NONE.
>          (expand_omp_for): Use expand_omp_for_generic() to generate a
>          non-parallelized loop for OMP_FORs inside OpenACC kernels regions.
>          (expand_omp): Mark inside_kernels_p field true for regions
>          nested inside OpenACC kernels constructs.
>

^ permalink raw reply	[flat|nested] 3+ messages in thread

* Re: [gomp4] Generate sequential loop for OpenACC loop directive inside kernels
  2015-06-16  9:05 ` Tom de Vries
@ 2015-06-23 12:56   ` Chung-Lin Tang
  0 siblings, 0 replies; 3+ messages in thread
From: Chung-Lin Tang @ 2015-06-23 12:56 UTC (permalink / raw)
  To: Tom de Vries, gcc-patches; +Cc: Tom de Vries, Thomas Schwinge

[-- Attachment #1: Type: text/plain, Size: 1340 bytes --]

On 2015/6/16 05:05 PM, Tom de Vries wrote:
> On 16/06/15 10:59, Chung-Lin Tang wrote:
>> This patch adjusts omp-low.c:expand_omp_for_generic() to expand to a "sequential"
>> loop form (without the OMP runtime calls), used for loop directives inside
>> OpenACC kernels constructs. Tom mentions that this allows the kernels parallelization
>> to work when '#pragma acc loop' makes the front-ends create OMP_FOR, which the
>> loop analysis phases don't understand.
>>
>> Tested and committed to gomp-4_0-branch.
>>
> 
> Hi Chung-Lin,
> 
> can you commit a test-case to exercise the code?
> 
> Thanks,
> - Tom

Just committed the attached testcase patch to gomp-4_0-branch.

Chung-Lin

2015-06-23  Chung-Lin Tang  <cltang@codesourcery.com>

	gcc/testsuite/
        * c-c++-common/goacc/kernels-loop.c (ACC_LOOP): Add #ifndef/#define.
        (main): Tag loops inside kernels construct with '#pragma ACC_LOOP'.
        * c-c++-common/goacc/kernels-loop-2.c: Likewise.
        * c-c++-common/goacc/kernels-loop-3.c: Likewise.
        * c-c++-common/goacc/kernels-loop-n.c: Likewise.
        * c-c++-common/goacc/kernels-loop-acc-loop.c: New test.
        * c-c++-common/goacc/kernels-loop-2-acc-loop.c: New test.
        * c-c++-common/goacc/kernels-loop-3-acc-loop.c: New test.
        * c-c++-common/goacc/kernels-loop-n-acc-loop.c: New test.


[-- Attachment #2: kernels-acc-loop.patch --]
[-- Type: text/x-patch, Size: 8286 bytes --]

Index: gcc/testsuite/c-c++-common/goacc/kernels-loop-3-acc-loop.c
===================================================================
--- gcc/testsuite/c-c++-common/goacc/kernels-loop-3-acc-loop.c	(revision 0)
+++ gcc/testsuite/c-c++-common/goacc/kernels-loop-3-acc-loop.c	(revision 0)
@@ -0,0 +1,20 @@
+/* { dg-additional-options "-O2" } */
+/* { dg-additional-options "-ftree-parallelize-loops=32" } */
+/* { dg-additional-options "-fdump-tree-parloops_oacc_kernels-all" } */
+/* { dg-additional-options "-fdump-tree-optimized" } */
+
+/* Check that loops with '#pragma acc loop' tagged gets properly parallelized.  */
+#define ACC_LOOP acc loop
+#include "kernels-loop-3.c"
+
+/* Check that only one loop is analyzed, and that it can be parallelized.  */
+/* { dg-final { scan-tree-dump-times "SUCCESS: may be parallelized" 1 "parloops_oacc_kernels" } } */
+/* { dg-final { scan-tree-dump-not "FAILED:" "parloops_oacc_kernels" } } */
+
+/* Check that the loop has been split off into a function.  */
+/* { dg-final { scan-tree-dump-times "(?n);; Function .*main._omp_fn.0" 1 "optimized" } } */
+
+/* { dg-final { scan-tree-dump-times "(?n)pragma omp target oacc_parallel.*num_gangs\\(32\\)" 1 "parloops_oacc_kernels" } } */
+
+/* { dg-final { cleanup-tree-dump "parloops_oacc_kernels" } } */
+/* { dg-final { cleanup-tree-dump "optimized" } } */
Index: gcc/testsuite/c-c++-common/goacc/kernels-loop-2.c
===================================================================
--- gcc/testsuite/c-c++-common/goacc/kernels-loop-2.c	(revision 224836)
+++ gcc/testsuite/c-c++-common/goacc/kernels-loop-2.c	(working copy)
@@ -8,6 +8,10 @@
 #define N (1024 * 512)
 #define COUNTERTYPE unsigned int
 
+#ifndef ACC_LOOP
+#define ACC_LOOP
+#endif
+
 int
 main (void)
 {
@@ -21,18 +25,21 @@ main (void)
 
 #pragma acc kernels copyout (a[0:N])
   {
+    #pragma ACC_LOOP
     for (COUNTERTYPE i = 0; i < N; i++)
       a[i] = i * 2;
   }
 
 #pragma acc kernels copyout (b[0:N])
   {
+    #pragma ACC_LOOP
     for (COUNTERTYPE i = 0; i < N; i++)
       b[i] = i * 4;
   }
 
 #pragma acc kernels copyin (a[0:N], b[0:N]) copyout (c[0:N])
   {
+    #pragma ACC_LOOP
     for (COUNTERTYPE ii = 0; ii < N; ii++)
       c[ii] = a[ii] + b[ii];
   }
Index: gcc/testsuite/c-c++-common/goacc/kernels-loop.c
===================================================================
--- gcc/testsuite/c-c++-common/goacc/kernels-loop.c	(revision 224836)
+++ gcc/testsuite/c-c++-common/goacc/kernels-loop.c	(working copy)
@@ -8,6 +8,10 @@
 #define N (1024 * 512)
 #define COUNTERTYPE unsigned int
 
+#ifndef ACC_LOOP
+#define ACC_LOOP
+#endif
+
 int
 main (void)
 {
@@ -27,6 +31,7 @@ main (void)
 
 #pragma acc kernels copyin (a[0:N], b[0:N]) copyout (c[0:N])
   {
+    #pragma ACC_LOOP
     for (COUNTERTYPE ii = 0; ii < N; ii++)
       c[ii] = a[ii] + b[ii];
   }
Index: gcc/testsuite/c-c++-common/goacc/kernels-loop-2-acc-loop.c
===================================================================
--- gcc/testsuite/c-c++-common/goacc/kernels-loop-2-acc-loop.c	(revision 0)
+++ gcc/testsuite/c-c++-common/goacc/kernels-loop-2-acc-loop.c	(revision 0)
@@ -0,0 +1,23 @@
+/* { dg-additional-options "-O2" } */
+/* { dg-additional-options "-ftree-parallelize-loops=32" } */
+/* { dg-additional-options "-fdump-tree-parloops_oacc_kernels-all" } */
+/* { dg-additional-options "-fdump-tree-optimized" } */
+
+/* Check that loops with '#pragma acc loop' tagged gets properly parallelized.  */
+#define ACC_LOOP acc loop
+#include "kernels-loop-2.c"
+
+/* Check that only three loops are analyzed, and that all can be
+   parallelized.  */
+/* { dg-final { scan-tree-dump-times "SUCCESS: may be parallelized" 3 "parloops_oacc_kernels" } } */
+/* { dg-final { scan-tree-dump-not "FAILED:" "parloops_oacc_kernels" } } */
+
+/* Check that the loop has been split off into a function.  */
+/* { dg-final { scan-tree-dump-times "(?n);; Function .*main._omp_fn.0" 1 "optimized" } } */
+/* { dg-final { scan-tree-dump-times "(?n);; Function .*main._omp_fn.1" 1 "optimized" } } */
+/* { dg-final { scan-tree-dump-times "(?n);; Function .*main._omp_fn.2" 1 "optimized" } } */
+
+/* { dg-final { scan-tree-dump-times "(?n)pragma omp target oacc_parallel.*num_gangs\\(32\\)" 3 "parloops_oacc_kernels" } } */
+
+/* { dg-final { cleanup-tree-dump "parloops_oacc_kernels" } } */
+/* { dg-final { cleanup-tree-dump "optimized" } } */
Index: gcc/testsuite/c-c++-common/goacc/kernels-loop-n.c
===================================================================
--- gcc/testsuite/c-c++-common/goacc/kernels-loop-n.c	(revision 224836)
+++ gcc/testsuite/c-c++-common/goacc/kernels-loop-n.c	(working copy)
@@ -8,6 +8,10 @@
 #define N ((1024 * 512) + 1)
 #define COUNTERTYPE unsigned int
 
+#ifndef ACC_LOOP
+#define ACC_LOOP
+#endif
+
 int
 foo (COUNTERTYPE n)
 {
@@ -27,6 +31,7 @@ foo (COUNTERTYPE n)
 
 #pragma acc kernels copyin (a[0:n], b[0:n]) copyout (c[0:n])
   {
+    #pragma ACC_LOOP
     for (COUNTERTYPE ii = 0; ii < n; ii++)
       c[ii] = a[ii] + b[ii];
   }
Index: gcc/testsuite/c-c++-common/goacc/kernels-loop-acc-loop.c
===================================================================
--- gcc/testsuite/c-c++-common/goacc/kernels-loop-acc-loop.c	(revision 0)
+++ gcc/testsuite/c-c++-common/goacc/kernels-loop-acc-loop.c	(revision 0)
@@ -0,0 +1,20 @@
+/* { dg-additional-options "-O2" } */
+/* { dg-additional-options "-ftree-parallelize-loops=32" } */
+/* { dg-additional-options "-fdump-tree-parloops_oacc_kernels-all" } */
+/* { dg-additional-options "-fdump-tree-optimized" } */
+
+/* Check that loops with '#pragma acc loop' tagged gets properly parallelized.  */
+#define ACC_LOOP acc loop
+#include "kernels-loop.c"
+
+/* Check that only one loop is analyzed, and that it can be parallelized.  */
+/* { dg-final { scan-tree-dump-times "SUCCESS: may be parallelized" 1 "parloops_oacc_kernels" } } */
+/* { dg-final { scan-tree-dump-not "FAILED:" "parloops_oacc_kernels" } } */
+
+/* Check that the loop has been split off into a function.  */
+/* { dg-final { scan-tree-dump-times "(?n);; Function .*main._omp_fn.0" 1 "optimized" } } */
+
+/* { dg-final { scan-tree-dump-times "(?n)pragma omp target oacc_parallel.*num_gangs\\(32\\)" 1 "parloops_oacc_kernels" } } */
+
+/* { dg-final { cleanup-tree-dump "parloops_oacc_kernels" } } */
+/* { dg-final { cleanup-tree-dump "optimized" } } */
Index: gcc/testsuite/c-c++-common/goacc/kernels-loop-3.c
===================================================================
--- gcc/testsuite/c-c++-common/goacc/kernels-loop-3.c	(revision 224836)
+++ gcc/testsuite/c-c++-common/goacc/kernels-loop-3.c	(working copy)
@@ -8,6 +8,10 @@
 #define N (1024 * 512)
 #define COUNTERTYPE unsigned int
 
+#ifndef ACC_LOOP
+#define ACC_LOOP
+#endif
+
 int
 main (void)
 {
@@ -22,6 +26,7 @@ main (void)
 
 #pragma acc kernels copy (c[0:N])
   {
+    #pragma ACC_LOOP
     for (COUNTERTYPE ii = 0; ii < N; ii++)
       c[ii] = c[ii] + ii + 1;
   }
Index: gcc/testsuite/c-c++-common/goacc/kernels-loop-n-acc-loop.c
===================================================================
--- gcc/testsuite/c-c++-common/goacc/kernels-loop-n-acc-loop.c	(revision 0)
+++ gcc/testsuite/c-c++-common/goacc/kernels-loop-n-acc-loop.c	(revision 0)
@@ -0,0 +1,20 @@
+/* { dg-additional-options "-O2" } */
+/* { dg-additional-options "-ftree-parallelize-loops=32" } */
+/* { dg-additional-options "-fdump-tree-parloops_oacc_kernels-all" } */
+/* { dg-additional-options "-fdump-tree-optimized" } */
+
+/* Check that loops with '#pragma acc loop' tagged gets properly parallelized.  */
+#define ACC_LOOP acc loop
+#include "kernels-loop-n.c"
+
+/* Check that only one loop is analyzed, and that it can be parallelized.  */
+/* { dg-final { scan-tree-dump-times "SUCCESS: may be parallelized" 1 "parloops_oacc_kernels" } } */
+/* { dg-final { scan-tree-dump-not "FAILED:" "parloops_oacc_kernels" } } */
+
+/* Check that the loop has been split off into a function.  */
+/* { dg-final { scan-tree-dump-times "(?n);; Function .*foo.*._omp_fn.0" 1 "optimized" } } */
+
+/* { dg-final { scan-tree-dump-times "(?n)pragma omp target oacc_parallel.*num_gangs\\(32\\)" 1 "parloops_oacc_kernels" } } */
+
+/* { dg-final { cleanup-tree-dump "parloops_oacc_kernels" } } */
+/* { dg-final { cleanup-tree-dump "optimized" } } */

^ permalink raw reply	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2015-06-23 12:55 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2015-06-16  9:01 [gomp4] Generate sequential loop for OpenACC loop directive inside kernels Chung-Lin Tang
2015-06-16  9:05 ` Tom de Vries
2015-06-23 12:56   ` Chung-Lin Tang

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).