public inbox for gcc-cvs@sourceware.org
* [gcc/devel/omp/gcc-11] openmp: Add omp_aligned_{,c}alloc and omp_{c,re}alloc
@ 2021-09-30  7:51 Tobias Burnus
From: Tobias Burnus @ 2021-09-30  7:51 UTC
  To: gcc-cvs

https://gcc.gnu.org/g:9974875e87ef3f5726edcc262d160c39ca13d776

commit 9974875e87ef3f5726edcc262d160c39ca13d776
Author: Jakub Jelinek <jakub@redhat.com>
Date:   Thu Sep 30 09:50:07 2021 +0200

    openmp: Add omp_aligned_{,c}alloc and omp_{c,re}alloc
    
    This patch adds the new OpenMP 5.1 allocator entrypoints and, in addition,
    fixes an omp_alloc bug that is hard to test for: if the first allocator
    fails but has a larger alignment trait and a fallback allocator (either
    the default behavior or a user fallback), then the extra alignment would
    be used even in the fallback allocation, rather than restarting with
    whatever alignment was originally requested (in GOMP_alloc, or the
    minimum one in omp_alloc).
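    
    A minimal C sketch of the scenario (the trait values below are arbitrary,
    hypothetical examples, not taken from the patch): a user allocator with a
    large alignment trait and a small pool falls back to default memory, and
    with this fix the fallback allocation only honours the alignment that was
    originally requested, not the failed allocator's larger alignment trait.
    
        #include <omp.h>
    
        int
        main ()
        {
          omp_alloctrait_t traits[]
            = { { omp_atk_alignment, 128 },
                { omp_atk_pool_size, 64 },
                { omp_atk_fallback, omp_atv_default_mem_fb } };
          omp_allocator_handle_t a
            = omp_init_allocator (omp_default_mem_space, 3, traits);
          /* The 64-byte pool cannot satisfy this request, so the allocation
             falls back to omp_default_mem_alloc; only the minimum alignment
             is required for the fallback, not the 128-byte trait above.  */
          void *p = omp_alloc (1024, a);
          omp_free (p, a);
          omp_destroy_allocator (a);
          return 0;
        }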
    
    Jonathan's comment on IRC this morning made me realize that I should add
    alloc_align attributes to two of the prototypes; I still need to add
    testsuite coverage for omp_realloc and will do that in a follow-up.
    
    2021-09-30  Jakub Jelinek  <jakub@redhat.com>
    
            * omp.h.in (omp_aligned_alloc, omp_calloc, omp_aligned_calloc,
            omp_realloc): New prototypes.
            (omp_alloc): Move after omp_free prototype, add __malloc__ (omp_free)
            attribute.
            * allocator.c: Include string.h.
            (omp_aligned_alloc): No longer static, add ialias.  Add new_alignment
            variable and use it instead of alignment so that when retrying the old
            alignment is used again.  Don't retry if new alignment is the same
            as old alignment, unless allocator had pool size.
            (omp_alloc, GOMP_alloc, GOMP_free): Use ialias_call.
            (omp_aligned_calloc, omp_calloc, omp_realloc): New functions.
            * libgomp.map (OMP_5.0.2): Export omp_aligned_alloc, omp_calloc,
            omp_aligned_calloc and omp_realloc.
            * testsuite/libgomp.c-c++-common/alloc-4.c (main): Add
            omp_aligned_alloc, omp_calloc and omp_aligned_calloc tests.
            * testsuite/libgomp.c-c++-common/alloc-5.c: New test.
            * testsuite/libgomp.c-c++-common/alloc-6.c: New test.
            * testsuite/libgomp.c-c++-common/alloc-7.c: New test.
            * testsuite/libgomp.c-c++-common/alloc-8.c: New test.
    
    (cherry picked from commit b38a4bd10249b5070ea1f4708a0fd228df268c26)
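    
    For reference, a minimal sketch of how the newly exported entrypoints are
    meant to be called (the sizes, alignments and allocators below are
    arbitrary illustrative values, not taken from the testsuite):
    
        #include <omp.h>
    
        int
        main ()
        {
          /* 64-byte aligned allocation of 256 bytes.  */
          int *p = (int *) omp_aligned_alloc (64, 256, omp_default_mem_alloc);
          /* Zero-initialized array of 16 ints with default alignment.  */
          int *q = (int *) omp_calloc (16, sizeof (int), omp_null_allocator);
          /* 128-byte aligned, zero-initialized array of 32 ints.  */
          int *r = (int *) omp_aligned_calloc (128, 32, sizeof (int),
                                               omp_null_allocator);
          /* Grow q to 64 ints; the last argument names the allocator that
             was used for the original allocation.  */
          q = (int *) omp_realloc (q, 64 * sizeof (int), omp_null_allocator,
                                   omp_null_allocator);
          omp_free (p, omp_default_mem_alloc);
          omp_free (q, omp_null_allocator);
          omp_free (r, omp_null_allocator);
          return 0;
        }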

Diff:
---
 libgomp/ChangeLog.omp                            |  25 ++
 libgomp/allocator.c                              | 398 ++++++++++++++++++++++-
 libgomp/libgomp.map                              |   4 +
 libgomp/omp.h.in                                 |  25 +-
 libgomp/testsuite/libgomp.c-c++-common/alloc-4.c |  20 +-
 libgomp/testsuite/libgomp.c-c++-common/alloc-5.c | 159 +++++++++
 libgomp/testsuite/libgomp.c-c++-common/alloc-6.c |  58 ++++
 libgomp/testsuite/libgomp.c-c++-common/alloc-7.c | 182 +++++++++++
 libgomp/testsuite/libgomp.c-c++-common/alloc-8.c | 184 +++++++++++
 9 files changed, 1035 insertions(+), 20 deletions(-)

diff --git a/libgomp/ChangeLog.omp b/libgomp/ChangeLog.omp
index 84fe3eb676c..692bef056db 100644
--- a/libgomp/ChangeLog.omp
+++ b/libgomp/ChangeLog.omp
@@ -1,3 +1,28 @@
+2021-09-30  Tobias Burnus  <tobias@codesourcery.com>
+
+	Backported from master:
+	2021-09-30  Jakub Jelinek  <jakub@redhat.com>
+
+	* omp.h.in (omp_aligned_alloc, omp_calloc, omp_aligned_calloc,
+	omp_realloc): New prototypes.
+	(omp_alloc): Move after omp_free prototype, add __malloc__ (omp_free)
+	attribute.
+	* allocator.c: Include string.h.
+	(omp_aligned_alloc): No longer static, add ialias.  Add new_alignment
+	variable and use it instead of alignment so that when retrying the old
+	alignment is used again.  Don't retry if new alignment is the same
+	as old alignment, unless allocator had pool size.
+	(omp_alloc, GOMP_alloc, GOMP_free): Use ialias_call.
+	(omp_aligned_calloc, omp_calloc, omp_realloc): New functions.
+	* libgomp.map (OMP_5.0.2): Export omp_aligned_alloc, omp_calloc,
+	omp_aligned_calloc and omp_realloc.
+	* testsuite/libgomp.c-c++-common/alloc-4.c (main): Add
+	omp_aligned_alloc, omp_calloc and omp_aligned_calloc tests.
+	* testsuite/libgomp.c-c++-common/alloc-5.c: New test.
+	* testsuite/libgomp.c-c++-common/alloc-6.c: New test.
+	* testsuite/libgomp.c-c++-common/alloc-7.c: New test.
+	* testsuite/libgomp.c-c++-common/alloc-8.c: New test.
+
 2021-09-27  Tobias Burnus  <tobias@codesourcery.com>
 
 	Backported from master:
diff --git a/libgomp/allocator.c b/libgomp/allocator.c
index 9fcfc4ea5c6..dce600f5bd7 100644
--- a/libgomp/allocator.c
+++ b/libgomp/allocator.c
@@ -30,6 +30,7 @@
 #define _GNU_SOURCE
 #include "libgomp.h"
 #include <stdlib.h>
+#include <string.h>
 
 #define omp_max_predefined_alloc omp_thread_mem_alloc
 
@@ -205,18 +206,19 @@ omp_destroy_allocator (omp_allocator_handle_t allocator)
 ialias (omp_init_allocator)
 ialias (omp_destroy_allocator)
 
-static void *
+void *
 omp_aligned_alloc (size_t alignment, size_t size,
 		   omp_allocator_handle_t allocator)
 {
   struct omp_allocator_data *allocator_data;
-  size_t new_size;
+  size_t new_size, new_alignment;
   void *ptr, *ret;
 
   if (__builtin_expect (size == 0, 0))
     return NULL;
 
 retry:
+  new_alignment = alignment;
   if (allocator == omp_null_allocator)
     {
       struct gomp_thread *thr = gomp_thread ();
@@ -228,19 +230,19 @@ retry:
   if (allocator > omp_max_predefined_alloc)
     {
       allocator_data = (struct omp_allocator_data *) allocator;
-      if (alignment < allocator_data->alignment)
-	alignment = allocator_data->alignment;
+      if (new_alignment < allocator_data->alignment)
+	new_alignment = allocator_data->alignment;
     }
   else
     {
       allocator_data = NULL;
-      if (alignment < sizeof (void *))
-	alignment = sizeof (void *);
+      if (new_alignment < sizeof (void *))
+	new_alignment = sizeof (void *);
     }
 
   new_size = sizeof (struct omp_mem_header);
-  if (alignment > sizeof (void *))
-    new_size += alignment - sizeof (void *);
+  if (new_alignment > sizeof (void *))
+    new_size += new_alignment - sizeof (void *);
   if (__builtin_add_overflow (size, new_size, &new_size))
     goto fail;
 
@@ -300,10 +302,11 @@ retry:
 	goto fail;
     }
 
-  if (alignment > sizeof (void *))
+  if (new_alignment > sizeof (void *))
     ret = (void *) (((uintptr_t) ptr
 		     + sizeof (struct omp_mem_header)
-		     + alignment - sizeof (void *)) & ~(alignment - 1));
+		     + new_alignment - sizeof (void *))
+		    & ~(new_alignment - 1));
   else
     ret = (char *) ptr + sizeof (struct omp_mem_header);
   ((struct omp_mem_header *) ret)[-1].ptr = ptr;
@@ -317,7 +320,7 @@ fail:
       switch (allocator_data->fallback)
 	{
 	case omp_atv_default_mem_fb:
-	  if (alignment > sizeof (void *)
+	  if ((new_alignment > sizeof (void *) && new_alignment > alignment)
 	      || (allocator_data
 		  && allocator_data->pool_size < ~(uintptr_t) 0))
 	    {
@@ -326,7 +329,7 @@ fail:
 	    }
 	  /* Otherwise, we've already performed default mem allocation
 	     and if that failed, it won't succeed again (unless it was
-	     intermitent.  Return NULL then, as that is the fallback.  */
+	     intermittent.  Return NULL then, as that is the fallback.  */
 	  break;
 	case omp_atv_null_fb:
 	  break;
@@ -342,10 +345,12 @@ fail:
   return NULL;
 }
 
+ialias (omp_aligned_alloc)
+
 void *
 omp_alloc (size_t size, omp_allocator_handle_t allocator)
 {
-  return omp_aligned_alloc (1, size, allocator);
+  return ialias_call (omp_aligned_alloc) (1, size, allocator);
 }
 
 /* Like omp_aligned_alloc, but apply on top of that:
@@ -355,8 +360,9 @@ omp_alloc (size_t size, omp_allocator_handle_t allocator)
 void *
 GOMP_alloc (size_t alignment, size_t size, uintptr_t allocator)
 {
-  void *ret = omp_aligned_alloc (alignment, size,
-				 (omp_allocator_handle_t) allocator);
+  void *ret
+    = ialias_call (omp_aligned_alloc) (alignment, size,
+				       (omp_allocator_handle_t) allocator);
   if (__builtin_expect (ret == NULL, 0) && size)
     gomp_fatal ("Out of memory allocating %lu bytes",
 		(unsigned long) size);
@@ -396,5 +402,365 @@ ialias (omp_free)
 void
 GOMP_free (void *ptr, uintptr_t allocator)
 {
-  return omp_free (ptr, (omp_allocator_handle_t) allocator);
+  return ialias_call (omp_free) (ptr, (omp_allocator_handle_t) allocator);
+}
+
+void *
+omp_aligned_calloc (size_t alignment, size_t nmemb, size_t size,
+		    omp_allocator_handle_t allocator)
+{
+  struct omp_allocator_data *allocator_data;
+  size_t new_size, size_temp, new_alignment;
+  void *ptr, *ret;
+
+  if (__builtin_expect (size == 0 || nmemb == 0, 0))
+    return NULL;
+
+retry:
+  new_alignment = alignment;
+  if (allocator == omp_null_allocator)
+    {
+      struct gomp_thread *thr = gomp_thread ();
+      if (thr->ts.def_allocator == omp_null_allocator)
+	thr->ts.def_allocator = gomp_def_allocator;
+      allocator = (omp_allocator_handle_t) thr->ts.def_allocator;
+    }
+
+  if (allocator > omp_max_predefined_alloc)
+    {
+      allocator_data = (struct omp_allocator_data *) allocator;
+      if (new_alignment < allocator_data->alignment)
+	new_alignment = allocator_data->alignment;
+    }
+  else
+    {
+      allocator_data = NULL;
+      if (new_alignment < sizeof (void *))
+	new_alignment = sizeof (void *);
+    }
+
+  new_size = sizeof (struct omp_mem_header);
+  if (new_alignment > sizeof (void *))
+    new_size += new_alignment - sizeof (void *);
+  if (__builtin_mul_overflow (size, nmemb, &size_temp))
+    goto fail;
+  if (__builtin_add_overflow (size_temp, new_size, &new_size))
+    goto fail;
+
+  if (__builtin_expect (allocator_data
+			&& allocator_data->pool_size < ~(uintptr_t) 0, 0))
+    {
+      uintptr_t used_pool_size;
+      if (new_size > allocator_data->pool_size)
+	goto fail;
+#ifdef HAVE_SYNC_BUILTINS
+      used_pool_size = __atomic_load_n (&allocator_data->used_pool_size,
+					MEMMODEL_RELAXED);
+      do
+	{
+	  uintptr_t new_pool_size;
+	  if (__builtin_add_overflow (used_pool_size, new_size,
+				      &new_pool_size)
+	      || new_pool_size > allocator_data->pool_size)
+	    goto fail;
+	  if (__atomic_compare_exchange_n (&allocator_data->used_pool_size,
+					   &used_pool_size, new_pool_size,
+					   true, MEMMODEL_RELAXED,
+					   MEMMODEL_RELAXED))
+	    break;
+	}
+      while (1);
+#else
+      gomp_mutex_lock (&allocator_data->lock);
+      if (__builtin_add_overflow (allocator_data->used_pool_size, new_size,
+				  &used_pool_size)
+	  || used_pool_size > allocator_data->pool_size)
+	{
+	  gomp_mutex_unlock (&allocator_data->lock);
+	  goto fail;
+	}
+      allocator_data->used_pool_size = used_pool_size;
+      gomp_mutex_unlock (&allocator_data->lock);
+#endif
+      ptr = calloc (1, new_size);
+      if (ptr == NULL)
+	{
+#ifdef HAVE_SYNC_BUILTINS
+	  __atomic_add_fetch (&allocator_data->used_pool_size, -new_size,
+			      MEMMODEL_RELAXED);
+#else
+	  gomp_mutex_lock (&allocator_data->lock);
+	  allocator_data->used_pool_size -= new_size;
+	  gomp_mutex_unlock (&allocator_data->lock);
+#endif
+	  goto fail;
+	}
+    }
+  else
+    {
+      ptr = calloc (1, new_size);
+      if (ptr == NULL)
+	goto fail;
+    }
+
+  if (new_alignment > sizeof (void *))
+    ret = (void *) (((uintptr_t) ptr
+		     + sizeof (struct omp_mem_header)
+		     + new_alignment - sizeof (void *))
+		    & ~(new_alignment - 1));
+  else
+    ret = (char *) ptr + sizeof (struct omp_mem_header);
+  ((struct omp_mem_header *) ret)[-1].ptr = ptr;
+  ((struct omp_mem_header *) ret)[-1].size = new_size;
+  ((struct omp_mem_header *) ret)[-1].allocator = allocator;
+  return ret;
+
+fail:
+  if (allocator_data)
+    {
+      switch (allocator_data->fallback)
+	{
+	case omp_atv_default_mem_fb:
+	  if ((new_alignment > sizeof (void *) && new_alignment > alignment)
+	      || (allocator_data
+		  && allocator_data->pool_size < ~(uintptr_t) 0))
+	    {
+	      allocator = omp_default_mem_alloc;
+	      goto retry;
+	    }
+	  /* Otherwise, we've already performed default mem allocation
+	     and if that failed, it won't succeed again (unless it was
+	     intermittent.  Return NULL then, as that is the fallback.  */
+	  break;
+	case omp_atv_null_fb:
+	  break;
+	default:
+	case omp_atv_abort_fb:
+	  gomp_fatal ("Out of memory allocating %lu bytes",
+		      (unsigned long) (size * nmemb));
+	case omp_atv_allocator_fb:
+	  allocator = allocator_data->fb_data;
+	  goto retry;
+	}
+    }
+  return NULL;
+}
+
+ialias (omp_aligned_calloc)
+
+void *
+omp_calloc (size_t nmemb, size_t size, omp_allocator_handle_t allocator)
+{
+  return ialias_call (omp_aligned_calloc) (1, nmemb, size, allocator);
+}
+
+void *
+omp_realloc (void *ptr, size_t size, omp_allocator_handle_t allocator,
+	     omp_allocator_handle_t free_allocator)
+{
+  struct omp_allocator_data *allocator_data, *free_allocator_data;
+  size_t new_size, old_size, new_alignment, old_alignment;
+  void *new_ptr, *ret;
+  struct omp_mem_header *data;
+
+  if (__builtin_expect (ptr == NULL, 0))
+    return ialias_call (omp_aligned_alloc) (1, size, allocator);
+
+  if (__builtin_expect (size == 0, 0))
+    {
+      ialias_call (omp_free) (ptr, free_allocator);
+      return NULL;
+    }
+
+  data = &((struct omp_mem_header *) ptr)[-1];
+  free_allocator = data->allocator;
+
+retry:
+  new_alignment = sizeof (void *);
+  if (allocator == omp_null_allocator)
+    allocator = free_allocator;
+
+  if (allocator > omp_max_predefined_alloc)
+    {
+      allocator_data = (struct omp_allocator_data *) allocator;
+      if (new_alignment < allocator_data->alignment)
+	new_alignment = allocator_data->alignment;
+    }
+  else
+    allocator_data = NULL;
+  if (free_allocator > omp_max_predefined_alloc)
+    free_allocator_data = (struct omp_allocator_data *) free_allocator;
+  else
+    free_allocator_data = NULL;
+  old_alignment = (uintptr_t) ptr - (uintptr_t) (data->ptr);
+
+  new_size = sizeof (struct omp_mem_header);
+  if (new_alignment > sizeof (void *))
+    new_size += new_alignment - sizeof (void *);
+  if (__builtin_add_overflow (size, new_size, &new_size))
+    goto fail;
+  old_size = data->size;
+
+  if (__builtin_expect (allocator_data
+			&& allocator_data->pool_size < ~(uintptr_t) 0, 0))
+    {
+      uintptr_t used_pool_size;
+      size_t prev_size = 0;
+      /* Check if we can use realloc.  Don't use it if extra alignment
+	 was used previously or newly, because realloc might return a pointer
+	 with different alignment and then we'd need to memmove the data
+	 again.  */
+      if (free_allocator_data
+	  && free_allocator_data == allocator_data
+	  && new_alignment == sizeof (void *)
+	  && old_alignment == sizeof (struct omp_mem_header))
+	prev_size = old_size;
+      if (new_size > prev_size
+	  && new_size - prev_size > allocator_data->pool_size)
+	goto fail;
+#ifdef HAVE_SYNC_BUILTINS
+      used_pool_size = __atomic_load_n (&allocator_data->used_pool_size,
+					MEMMODEL_RELAXED);
+      do
+	{
+	  uintptr_t new_pool_size;
+	  if (new_size > prev_size)
+	    {
+	      if (__builtin_add_overflow (used_pool_size, new_size - prev_size,
+					  &new_pool_size)
+		  || new_pool_size > allocator_data->pool_size)
+		goto fail;
+	    }
+	  else
+	    new_pool_size = used_pool_size + new_size - prev_size;
+	  if (__atomic_compare_exchange_n (&allocator_data->used_pool_size,
+					   &used_pool_size, new_pool_size,
+					   true, MEMMODEL_RELAXED,
+					   MEMMODEL_RELAXED))
+	    break;
+	}
+      while (1);
+#else
+      gomp_mutex_lock (&allocator_data->lock);
+      if (new_size > prev_size)
+	{
+	  if (__builtin_add_overflow (allocator_data->used_pool_size,
+				      new_size - prev_size,
+				      &used_pool_size)
+	      || used_pool_size > allocator_data->pool_size)
+	    {
+	      gomp_mutex_unlock (&allocator_data->lock);
+	      goto fail;
+	    }
+	}
+      else
+	used_pool_size = (allocator_data->used_pool_size
+			  + new_size - prev_size);
+      allocator_data->used_pool_size = used_pool_size;
+      gomp_mutex_unlock (&allocator_data->lock);
+#endif
+      if (prev_size)
+	new_ptr = realloc (data->ptr, new_size);
+      else
+	new_ptr = malloc (new_size);
+      if (new_ptr == NULL)
+	{
+#ifdef HAVE_SYNC_BUILTINS
+	  __atomic_add_fetch (&allocator_data->used_pool_size,
+			      prev_size - new_size,
+			      MEMMODEL_RELAXED);
+#else
+	  gomp_mutex_lock (&allocator_data->lock);
+	  allocator_data->used_pool_size -= new_size - prev_size;
+	  gomp_mutex_unlock (&allocator_data->lock);
+#endif
+	  goto fail;
+	}
+      else if (prev_size)
+	{
+	  ret = (char *) new_ptr + sizeof (struct omp_mem_header);
+	  ((struct omp_mem_header *) ret)[-1].ptr = new_ptr;
+	  ((struct omp_mem_header *) ret)[-1].size = new_size;
+	  ((struct omp_mem_header *) ret)[-1].allocator = allocator;
+	  return ret;
+	}
+    }
+  else if (new_alignment == sizeof (void *)
+	   && old_alignment == sizeof (struct omp_mem_header)
+	   && (free_allocator_data == NULL
+	       || free_allocator_data->pool_size == ~(uintptr_t) 0))
+    {
+      new_ptr = realloc (data->ptr, new_size);
+      if (new_ptr == NULL)
+	goto fail;
+      ret = (char *) new_ptr + sizeof (struct omp_mem_header);
+      ((struct omp_mem_header *) ret)[-1].ptr = new_ptr;
+      ((struct omp_mem_header *) ret)[-1].size = new_size;
+      ((struct omp_mem_header *) ret)[-1].allocator = allocator;
+      return ret;
+    }
+  else
+    {
+      new_ptr = malloc (new_size);
+      if (new_ptr == NULL)
+	goto fail;
+    }
+
+  if (new_alignment > sizeof (void *))
+    ret = (void *) (((uintptr_t) new_ptr
+		     + sizeof (struct omp_mem_header)
+		     + new_alignment - sizeof (void *))
+		    & ~(new_alignment - 1));
+  else
+    ret = (char *) new_ptr + sizeof (struct omp_mem_header);
+  ((struct omp_mem_header *) ret)[-1].ptr = new_ptr;
+  ((struct omp_mem_header *) ret)[-1].size = new_size;
+  ((struct omp_mem_header *) ret)[-1].allocator = allocator;
+  if (old_size - old_alignment < size)
+    size = old_size - old_alignment;
+  memcpy (ret, ptr, size);
+  if (__builtin_expect (free_allocator_data
+			&& free_allocator_data->pool_size < ~(uintptr_t) 0, 0))
+    {
+#ifdef HAVE_SYNC_BUILTINS
+      __atomic_add_fetch (&free_allocator_data->used_pool_size, -data->size,
+			  MEMMODEL_RELAXED);
+#else
+      gomp_mutex_lock (&free_allocator_data->lock);
+      free_allocator_data->used_pool_size -= data->size;
+      gomp_mutex_unlock (&free_allocator_data->lock);
+#endif
+    }
+  free (data->ptr);
+  return ret;
+
+fail:
+  if (allocator_data)
+    {
+      switch (allocator_data->fallback)
+	{
+	case omp_atv_default_mem_fb:
+	  if (new_alignment > sizeof (void *)
+	      || (allocator_data
+		  && allocator_data->pool_size < ~(uintptr_t) 0))
+	    {
+	      allocator = omp_default_mem_alloc;
+	      goto retry;
+	    }
+	  /* Otherwise, we've already performed default mem allocation
+	     and if that failed, it won't succeed again (unless it was
+	     intermittent.  Return NULL then, as that is the fallback.  */
+	  break;
+	case omp_atv_null_fb:
+	  break;
+	default:
+	case omp_atv_abort_fb:
+	  gomp_fatal ("Out of memory allocating %lu bytes",
+		      (unsigned long) size);
+	case omp_atv_allocator_fb:
+	  allocator = allocator_data->fb_data;
+	  goto retry;
+	}
+    }
+  return NULL;
 }
diff --git a/libgomp/libgomp.map b/libgomp/libgomp.map
index e5ac670029c..058d904e24a 100644
--- a/libgomp/libgomp.map
+++ b/libgomp/libgomp.map
@@ -203,6 +203,10 @@ OMP_5.0.2 {
   global:
 	omp_get_device_num;
 	omp_get_device_num_;
+	omp_aligned_alloc;
+	omp_calloc;
+	omp_aligned_calloc;
+	omp_realloc;
 } OMP_5.0.1;
 
 OMP_5.1 {
diff --git a/libgomp/omp.h.in b/libgomp/omp.h.in
index 314f964f841..e39988e7cbd 100644
--- a/libgomp/omp.h.in
+++ b/libgomp/omp.h.in
@@ -295,12 +295,31 @@ extern omp_allocator_handle_t omp_init_allocator (omp_memspace_handle_t,
 extern void omp_destroy_allocator (omp_allocator_handle_t) __GOMP_NOTHROW;
 extern void omp_set_default_allocator (omp_allocator_handle_t) __GOMP_NOTHROW;
 extern omp_allocator_handle_t omp_get_default_allocator (void) __GOMP_NOTHROW;
-extern void *omp_alloc (__SIZE_TYPE__,
-			omp_allocator_handle_t __GOMP_DEFAULT_NULL_ALLOCATOR)
-  __GOMP_NOTHROW __attribute__((__malloc__, __alloc_size__ (1)));
 extern void omp_free (void *,
 		      omp_allocator_handle_t __GOMP_DEFAULT_NULL_ALLOCATOR)
   __GOMP_NOTHROW;
+extern void *omp_alloc (__SIZE_TYPE__,
+			omp_allocator_handle_t __GOMP_DEFAULT_NULL_ALLOCATOR)
+  __GOMP_NOTHROW __attribute__((__malloc__, __malloc__ (omp_free),
+				__alloc_size__ (1)));
+extern void *omp_aligned_alloc (__SIZE_TYPE__, __SIZE_TYPE__,
+				omp_allocator_handle_t
+				__GOMP_DEFAULT_NULL_ALLOCATOR)
+  __GOMP_NOTHROW __attribute__((__malloc__, __malloc__ (omp_free),
+				__alloc_size__ (2)));
+extern void *omp_calloc (__SIZE_TYPE__, __SIZE_TYPE__,
+			 omp_allocator_handle_t __GOMP_DEFAULT_NULL_ALLOCATOR)
+  __GOMP_NOTHROW __attribute__((__malloc__, __malloc__ (omp_free),
+				__alloc_size__ (1, 2)));
+extern void *omp_aligned_calloc (__SIZE_TYPE__, __SIZE_TYPE__, __SIZE_TYPE__,
+				 omp_allocator_handle_t
+				 __GOMP_DEFAULT_NULL_ALLOCATOR)
+  __GOMP_NOTHROW __attribute__((__malloc__, __malloc__ (omp_free),
+				__alloc_size__ (2, 3)));
+extern void *omp_realloc (void *, __SIZE_TYPE__,
+			  omp_allocator_handle_t __GOMP_DEFAULT_NULL_ALLOCATOR,
+			  omp_allocator_handle_t __GOMP_DEFAULT_NULL_ALLOCATOR)
+  __GOMP_NOTHROW __attribute__((__malloc__ (omp_free), __alloc_size__ (2)));
 
 extern void omp_display_env (int) __GOMP_NOTHROW;
 
diff --git a/libgomp/testsuite/libgomp.c-c++-common/alloc-4.c b/libgomp/testsuite/libgomp.c-c++-common/alloc-4.c
index 841e1bcc201..67ba1cda083 100644
--- a/libgomp/testsuite/libgomp.c-c++-common/alloc-4.c
+++ b/libgomp/testsuite/libgomp.c-c++-common/alloc-4.c
@@ -12,12 +12,30 @@ main ()
 
   if (omp_alloc (0, omp_null_allocator) != NULL)
     abort ();
+  if (omp_aligned_alloc (64, 0, omp_null_allocator) != NULL)
+    abort ();
+  if (omp_calloc (0, 0, omp_null_allocator) != NULL
+      || omp_calloc (32, 0, omp_null_allocator) != NULL
+      || omp_calloc (0, 64, omp_null_allocator) != NULL)
+    abort ();
+  if (omp_aligned_calloc (32, 0, 0, omp_null_allocator) != NULL
+      || omp_aligned_calloc (64, 32, 0, omp_null_allocator) != NULL
+      || omp_aligned_calloc (16, 0, 64, omp_null_allocator) != NULL)
+    abort ();
   a = omp_init_allocator (omp_default_mem_space, 2, traits);
   if (a != omp_null_allocator)
     {
       if (omp_alloc (0, a) != NULL
 	  || omp_alloc (0, a) != NULL
-	  || omp_alloc (0, a) != NULL)
+	  || omp_alloc (0, a) != NULL
+	  || omp_aligned_alloc (16, 0, a) != NULL
+	  || omp_aligned_alloc (128, 0, a) != NULL
+	  || omp_calloc (0, 0, a) != NULL
+	  || omp_calloc (32, 0, a) != NULL
+	  || omp_calloc (0, 64, a) != NULL
+	  || omp_aligned_calloc (32, 0, 0, a) != NULL
+	  || omp_aligned_calloc (64, 32, 0, a) != NULL
+	  || omp_aligned_calloc (16, 0, 64, a) != NULL)
 	abort ();
       omp_destroy_allocator (a);
     }
diff --git a/libgomp/testsuite/libgomp.c-c++-common/alloc-5.c b/libgomp/testsuite/libgomp.c-c++-common/alloc-5.c
new file mode 100644
index 00000000000..8b4a489a92b
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c-c++-common/alloc-5.c
@@ -0,0 +1,159 @@
+#include <omp.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+const omp_alloctrait_t traits2[]
+= { { omp_atk_alignment, 16 },
+    { omp_atk_sync_hint, omp_atv_default },
+    { omp_atk_access, omp_atv_default },
+    { omp_atk_pool_size, 1024 },
+    { omp_atk_fallback, omp_atv_default_mem_fb },
+    { omp_atk_partition, omp_atv_environment } };
+omp_alloctrait_t traits3[]
+= { { omp_atk_sync_hint, omp_atv_uncontended },
+    { omp_atk_alignment, 32 },
+    { omp_atk_access, omp_atv_all },
+    { omp_atk_pool_size, 512 },
+    { omp_atk_fallback, omp_atv_allocator_fb },
+    { omp_atk_fb_data, 0 },
+    { omp_atk_partition, omp_atv_default } };
+const omp_alloctrait_t traits4[]
+= { { omp_atk_alignment, 128 },
+    { omp_atk_pool_size, 1024 },
+    { omp_atk_fallback, omp_atv_null_fb } };
+
+int
+main ()
+{
+  int *volatile p = (int *) omp_aligned_alloc (sizeof (int), 3 * sizeof (int), omp_default_mem_alloc);
+  int *volatile q;
+  int *volatile r;
+  omp_alloctrait_t traits[3]
+    = { { omp_atk_alignment, 64 },
+	{ omp_atk_fallback, omp_atv_null_fb },
+	{ omp_atk_pool_size, 4096 } };
+  omp_allocator_handle_t a, a2;
+
+  if ((((uintptr_t) p) % __alignof (int)) != 0)
+    abort ();
+  p[0] = 1;
+  p[1] = 2;
+  p[2] = 3;
+  omp_free (p, omp_default_mem_alloc);
+  p = (int *) omp_aligned_alloc (2 * sizeof (int), 2 * sizeof (int), omp_default_mem_alloc);
+  if ((((uintptr_t) p) % (2 * sizeof (int))) != 0)
+    abort ();
+  p[0] = 1;
+  p[1] = 2;
+  omp_free (p, omp_null_allocator);
+  omp_set_default_allocator (omp_default_mem_alloc);
+  p = (int *) omp_aligned_alloc (1, sizeof (int), omp_null_allocator);
+  if ((((uintptr_t) p) % __alignof (int)) != 0)
+    abort ();
+  p[0] = 3;
+  omp_free (p, omp_get_default_allocator ());
+
+  a = omp_init_allocator (omp_default_mem_space, 3, traits);
+  if (a == omp_null_allocator)
+    abort ();
+  p = (int *) omp_aligned_alloc (32, 3072, a);
+  if ((((uintptr_t) p) % 64) != 0)
+    abort ();
+  p[0] = 1;
+  p[3071 / sizeof (int)] = 2;
+  if (omp_aligned_alloc (8, 3072, a) != NULL)
+    abort ();
+  omp_free (p, a);
+  p = (int *) omp_aligned_alloc (128, 3072, a);
+  if ((((uintptr_t) p) % 128) != 0)
+    abort ();
+  p[0] = 3;
+  p[3071 / sizeof (int)] = 4;
+  omp_free (p, omp_null_allocator);
+  omp_set_default_allocator (a);
+  if (omp_get_default_allocator () != a)
+    abort ();
+  p = (int *) omp_aligned_alloc (64, 3072, omp_null_allocator);
+  if (omp_aligned_alloc (8, 3072, omp_null_allocator) != NULL)
+    abort ();
+  omp_free (p, a);
+  omp_destroy_allocator (a);
+
+  a = omp_init_allocator (omp_default_mem_space,
+			  sizeof (traits2) / sizeof (traits2[0]),
+			  traits2);
+  if (a == omp_null_allocator)
+    abort ();
+  if (traits3[5].key != omp_atk_fb_data)
+    abort ();
+  traits3[5].value = (uintptr_t) a;
+  a2 = omp_init_allocator (omp_default_mem_space,
+			   sizeof (traits3) / sizeof (traits3[0]),
+			   traits3);
+  if (a2 == omp_null_allocator)
+    abort ();
+  p = (int *) omp_aligned_alloc (4, 420, a2);
+  if ((((uintptr_t) p) % 32) != 0)
+    abort ();
+  p[0] = 5;
+  p[419 / sizeof (int)] = 6;
+  q = (int *) omp_aligned_alloc (8, 768, a2);
+  if ((((uintptr_t) q) % 16) != 0)
+    abort ();
+  q[0] = 7;
+  q[767 / sizeof (int)] = 8;
+  r = (int *) omp_aligned_alloc (8, 512, a2);
+  if ((((uintptr_t) r) % 8) != 0)
+    abort ();
+  r[0] = 9;
+  r[511 / sizeof (int)] = 10;
+  omp_free (p, omp_null_allocator);
+  omp_free (q, a2);
+  omp_free (r, omp_null_allocator);
+  omp_destroy_allocator (a2);
+  omp_destroy_allocator (a);
+
+  a = omp_init_allocator (omp_default_mem_space,
+			  sizeof (traits4) / sizeof (traits4[0]),
+			  traits4);
+  if (a == omp_null_allocator)
+    abort ();
+  if (traits3[5].key != omp_atk_fb_data)
+    abort ();
+  traits3[5].value = (uintptr_t) a;
+  a2 = omp_init_allocator (omp_default_mem_space,
+			   sizeof (traits3) / sizeof (traits3[0]),
+			   traits3);
+  if (a2 == omp_null_allocator)
+    abort ();
+  omp_set_default_allocator (a2);
+#ifdef __cplusplus
+  p = static_cast <int *> (omp_aligned_alloc (4, 420));
+#else
+  p = (int *) omp_aligned_alloc (4, 420, omp_null_allocator);
+#endif
+  if ((((uintptr_t) p) % 32) != 0)
+    abort ();
+  p[0] = 5;
+  p[419 / sizeof (int)] = 6;
+  q = (int *) omp_aligned_alloc (64, 768, omp_null_allocator);
+  if ((((uintptr_t) q) % 128) != 0)
+    abort ();
+  q[0] = 7;
+  q[767 / sizeof (int)] = 8;
+  if (omp_aligned_alloc (8, 768, omp_null_allocator) != NULL)
+    abort ();
+#ifdef __cplusplus
+  omp_free (p);
+  omp_free (q);
+  omp_free (NULL);
+#else
+  omp_free (p, omp_null_allocator);
+  omp_free (q, omp_null_allocator);
+  omp_free (NULL, omp_null_allocator);
+#endif
+  omp_free (NULL, omp_null_allocator);
+  omp_destroy_allocator (a2);
+  omp_destroy_allocator (a);
+  return 0;
+}
diff --git a/libgomp/testsuite/libgomp.c-c++-common/alloc-6.c b/libgomp/testsuite/libgomp.c-c++-common/alloc-6.c
new file mode 100644
index 00000000000..be571b7ecea
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c-c++-common/alloc-6.c
@@ -0,0 +1,58 @@
+#include <omp.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+const omp_alloctrait_t traits[]
+= { { omp_atk_alignment, 16 },
+    { omp_atk_sync_hint, omp_atv_default },
+    { omp_atk_access, omp_atv_default },
+    { omp_atk_fallback, omp_atv_default_mem_fb },
+    { omp_atk_partition, omp_atv_environment } };
+
+int
+main ()
+{
+  omp_allocator_handle_t a;
+  void *p, *q;
+  volatile size_t large_sz;
+
+  a = omp_init_allocator (omp_default_mem_space,
+			  sizeof (traits) / sizeof (traits[0]),
+			  traits);
+  if (a == omp_null_allocator)
+    abort ();
+  p = omp_alloc (2048, a);
+  if ((((uintptr_t) p) % 16) != 0)
+    abort ();
+  large_sz = ~(size_t) 1023;
+  q = omp_alloc (large_sz, a);
+  if (q != NULL)
+    abort ();
+  q = omp_aligned_alloc (32, large_sz, a);
+  if (q != NULL)
+    abort ();
+  q = omp_calloc (large_sz / 4, 4, a);
+  if (q != NULL)
+    abort ();
+  q = omp_aligned_calloc (1, 2, large_sz / 2, a);
+  if (q != NULL)
+    abort ();
+  omp_free (p, a);
+  large_sz = ~(size_t) 0;
+  large_sz >>= 1;
+  large_sz += 1;
+  if (omp_calloc (2, large_sz, a) != NULL)
+    abort ();
+  if (omp_calloc (large_sz, 1024, a) != NULL)
+    abort ();
+  if (omp_calloc (large_sz, large_sz, a) != NULL)
+    abort ();
+  if (omp_aligned_calloc (16, 2, large_sz, a) != NULL)
+    abort ();
+  if (omp_aligned_calloc (32, large_sz, 1024, a) != NULL)
+    abort ();
+  if (omp_aligned_calloc (64, large_sz, large_sz, a) != NULL)
+    abort ();
+  omp_destroy_allocator (a);
+  return 0;
+}
diff --git a/libgomp/testsuite/libgomp.c-c++-common/alloc-7.c b/libgomp/testsuite/libgomp.c-c++-common/alloc-7.c
new file mode 100644
index 00000000000..d66e25ac7d6
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c-c++-common/alloc-7.c
@@ -0,0 +1,182 @@
+#include <omp.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+const omp_alloctrait_t traits2[]
+= { { omp_atk_alignment, 16 },
+    { omp_atk_sync_hint, omp_atv_default },
+    { omp_atk_access, omp_atv_default },
+    { omp_atk_pool_size, 1024 },
+    { omp_atk_fallback, omp_atv_default_mem_fb },
+    { omp_atk_partition, omp_atv_environment } };
+omp_alloctrait_t traits3[]
+= { { omp_atk_sync_hint, omp_atv_uncontended },
+    { omp_atk_alignment, 32 },
+    { omp_atk_access, omp_atv_all },
+    { omp_atk_pool_size, 512 },
+    { omp_atk_fallback, omp_atv_allocator_fb },
+    { omp_atk_fb_data, 0 },
+    { omp_atk_partition, omp_atv_default } };
+const omp_alloctrait_t traits4[]
+= { { omp_atk_alignment, 128 },
+    { omp_atk_pool_size, 1024 },
+    { omp_atk_fallback, omp_atv_null_fb } };
+
+int
+main ()
+{
+  int *volatile p = (int *) omp_calloc (3, sizeof (int), omp_default_mem_alloc);
+  int *volatile q;
+  int *volatile r;
+  int i;
+  omp_alloctrait_t traits[3]
+    = { { omp_atk_alignment, 64 },
+	{ omp_atk_fallback, omp_atv_null_fb },
+	{ omp_atk_pool_size, 4096 } };
+  omp_allocator_handle_t a, a2;
+
+  if ((((uintptr_t) p) % __alignof (int)) != 0 || p[0] || p[1] || p[2])
+    abort ();
+  p[0] = 1;
+  p[1] = 2;
+  p[2] = 3;
+  omp_free (p, omp_default_mem_alloc);
+  p = (int *) omp_calloc (2, sizeof (int), omp_default_mem_alloc);
+  if ((((uintptr_t) p) % __alignof (int)) != 0 || p[0] || p[1])
+    abort ();
+  p[0] = 1;
+  p[1] = 2;
+  omp_free (p, omp_null_allocator);
+  omp_set_default_allocator (omp_default_mem_alloc);
+  p = (int *) omp_calloc (1, sizeof (int), omp_null_allocator);
+  if ((((uintptr_t) p) % __alignof (int)) != 0 || p[0])
+    abort ();
+  p[0] = 3;
+  omp_free (p, omp_get_default_allocator ());
+
+  a = omp_init_allocator (omp_default_mem_space, 3, traits);
+  if (a == omp_null_allocator)
+    abort ();
+  p = (int *) omp_calloc (3, 1024, a);
+  if ((((uintptr_t) p) % 64) != 0)
+    abort ();
+  for (i = 0; i < 3072 / sizeof (int); i++)
+    if (p[i])
+      abort ();
+  p[0] = 1;
+  p[3071 / sizeof (int)] = 2;
+  if (omp_calloc (1024, 3, a) != NULL)
+    abort ();
+  omp_free (p, a);
+  p = (int *) omp_calloc (512, 6, a);
+  for (i = 0; i < 3072 / sizeof (int); i++)
+    if (p[i])
+      abort ();
+  p[0] = 3;
+  p[3071 / sizeof (int)] = 4;
+  omp_free (p, omp_null_allocator);
+  omp_set_default_allocator (a);
+  if (omp_get_default_allocator () != a)
+    abort ();
+  p = (int *) omp_calloc (12, 256, omp_null_allocator);
+  for (i = 0; i < 3072 / sizeof (int); i++)
+    if (p[i])
+      abort ();
+  if (omp_calloc (128, 24, omp_null_allocator) != NULL)
+    abort ();
+  omp_free (p, a);
+  omp_destroy_allocator (a);
+
+  a = omp_init_allocator (omp_default_mem_space,
+			  sizeof (traits2) / sizeof (traits2[0]),
+			  traits2);
+  if (a == omp_null_allocator)
+    abort ();
+  if (traits3[5].key != omp_atk_fb_data)
+    abort ();
+  traits3[5].value = (uintptr_t) a;
+  a2 = omp_init_allocator (omp_default_mem_space,
+			   sizeof (traits3) / sizeof (traits3[0]),
+			   traits3);
+  if (a2 == omp_null_allocator)
+    abort ();
+  p = (int *) omp_calloc (10, 42, a2);
+  for (i = 0; i < 420 / sizeof (int); i++)
+    if (p[i])
+      abort ();
+  if ((((uintptr_t) p) % 32) != 0)
+    abort ();
+  p[0] = 5;
+  p[419 / sizeof (int)] = 6;
+  q = (int *) omp_calloc (24, 32, a2);
+  if ((((uintptr_t) q) % 16) != 0)
+    abort ();
+  for (i = 0; i < 768 / sizeof (int); i++)
+    if (q[i])
+      abort ();
+  q[0] = 7;
+  q[767 / sizeof (int)] = 8;
+  r = (int *) omp_calloc (128, 4, a2);
+  if ((((uintptr_t) r) % __alignof (int)) != 0)
+    abort ();
+  for (i = 0; i < 512 / sizeof (int); i++)
+    if (r[i])
+      abort ();
+  r[0] = 9;
+  r[511 / sizeof (int)] = 10;
+  omp_free (p, omp_null_allocator);
+  omp_free (q, a2);
+  omp_free (r, omp_null_allocator);
+  omp_destroy_allocator (a2);
+  omp_destroy_allocator (a);
+
+  a = omp_init_allocator (omp_default_mem_space,
+			  sizeof (traits4) / sizeof (traits4[0]),
+			  traits4);
+  if (a == omp_null_allocator)
+    abort ();
+  if (traits3[5].key != omp_atk_fb_data)
+    abort ();
+  traits3[5].value = (uintptr_t) a;
+  a2 = omp_init_allocator (omp_default_mem_space,
+			   sizeof (traits3) / sizeof (traits3[0]),
+			   traits3);
+  if (a2 == omp_null_allocator)
+    abort ();
+  omp_set_default_allocator (a2);
+#ifdef __cplusplus
+  p = static_cast <int *> (omp_calloc (42, 10));
+#else
+  p = (int *) omp_calloc (42, 10, omp_null_allocator);
+#endif
+  if ((((uintptr_t) p) % 32) != 0)
+    abort ();
+  for (i = 0; i < 420 / sizeof (int); i++)
+    if (p[i])
+      abort ();
+  p[0] = 5;
+  p[419 / sizeof (int)] = 6;
+  q = (int *) omp_calloc (32, 24, omp_null_allocator);
+  if ((((uintptr_t) q) % 128) != 0)
+    abort ();
+  for (i = 0; i < 768 / sizeof (int); i++)
+    if (q[i])
+      abort ();
+  q[0] = 7;
+  q[767 / sizeof (int)] = 8;
+  if (omp_calloc (24, 32, omp_null_allocator) != NULL)
+    abort ();
+#ifdef __cplusplus
+  omp_free (p);
+  omp_free (q);
+  omp_free (NULL);
+#else
+  omp_free (p, omp_null_allocator);
+  omp_free (q, omp_null_allocator);
+  omp_free (NULL, omp_null_allocator);
+#endif
+  omp_free (NULL, omp_null_allocator);
+  omp_destroy_allocator (a2);
+  omp_destroy_allocator (a);
+  return 0;
+}
diff --git a/libgomp/testsuite/libgomp.c-c++-common/alloc-8.c b/libgomp/testsuite/libgomp.c-c++-common/alloc-8.c
new file mode 100644
index 00000000000..bd4e08a6f91
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c-c++-common/alloc-8.c
@@ -0,0 +1,184 @@
+#include <omp.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+const omp_alloctrait_t traits2[]
+= { { omp_atk_alignment, 16 },
+    { omp_atk_sync_hint, omp_atv_default },
+    { omp_atk_access, omp_atv_default },
+    { omp_atk_pool_size, 1024 },
+    { omp_atk_fallback, omp_atv_default_mem_fb },
+    { omp_atk_partition, omp_atv_environment } };
+omp_alloctrait_t traits3[]
+= { { omp_atk_sync_hint, omp_atv_uncontended },
+    { omp_atk_alignment, 32 },
+    { omp_atk_access, omp_atv_all },
+    { omp_atk_pool_size, 512 },
+    { omp_atk_fallback, omp_atv_allocator_fb },
+    { omp_atk_fb_data, 0 },
+    { omp_atk_partition, omp_atv_default } };
+const omp_alloctrait_t traits4[]
+= { { omp_atk_alignment, 128 },
+    { omp_atk_pool_size, 1024 },
+    { omp_atk_fallback, omp_atv_null_fb } };
+
+int
+main ()
+{
+  int *volatile p = (int *) omp_aligned_calloc (sizeof (int), 3, sizeof (int), omp_default_mem_alloc);
+  int *volatile q;
+  int *volatile r;
+  int i;
+  omp_alloctrait_t traits[3]
+    = { { omp_atk_alignment, 64 },
+	{ omp_atk_fallback, omp_atv_null_fb },
+	{ omp_atk_pool_size, 4096 } };
+  omp_allocator_handle_t a, a2;
+
+  if ((((uintptr_t) p) % __alignof (int)) != 0 || p[0] || p[1] || p[2])
+    abort ();
+  p[0] = 1;
+  p[1] = 2;
+  p[2] = 3;
+  omp_free (p, omp_default_mem_alloc);
+  p = (int *) omp_aligned_calloc (2 * sizeof (int), 1, 2 * sizeof (int), omp_default_mem_alloc);
+  if ((((uintptr_t) p) % (2 * sizeof (int))) != 0 || p[0] || p[1])
+    abort ();
+  p[0] = 1;
+  p[1] = 2;
+  omp_free (p, omp_null_allocator);
+  omp_set_default_allocator (omp_default_mem_alloc);
+  p = (int *) omp_aligned_calloc (1, 1, sizeof (int), omp_null_allocator);
+  if ((((uintptr_t) p) % __alignof (int)) != 0 || p[0])
+    abort ();
+  p[0] = 3;
+  omp_free (p, omp_get_default_allocator ());
+
+  a = omp_init_allocator (omp_default_mem_space, 3, traits);
+  if (a == omp_null_allocator)
+    abort ();
+  p = (int *) omp_aligned_calloc (32, 3, 1024, a);
+  if ((((uintptr_t) p) % 64) != 0)
+    abort ();
+  for (i = 0; i < 3072 / sizeof (int); i++)
+    if (p[i])
+      abort ();
+  p[0] = 1;
+  p[3071 / sizeof (int)] = 2;
+  if (omp_aligned_calloc (8, 192, 16, a) != NULL)
+    abort ();
+  omp_free (p, a);
+  p = (int *) omp_aligned_calloc (128, 6, 512, a);
+  if ((((uintptr_t) p) % 128) != 0)
+    abort ();
+  for (i = 0; i < 3072 / sizeof (int); i++)
+    if (p[i])
+      abort ();
+  p[0] = 3;
+  p[3071 / sizeof (int)] = 4;
+  omp_free (p, omp_null_allocator);
+  omp_set_default_allocator (a);
+  if (omp_get_default_allocator () != a)
+    abort ();
+  p = (int *) omp_aligned_calloc (64, 12, 256, omp_null_allocator);
+  for (i = 0; i < 3072 / sizeof (int); i++)
+    if (p[i])
+      abort ();
+  if (omp_aligned_calloc (8, 128, 24, omp_null_allocator) != NULL)
+    abort ();
+  omp_free (p, a);
+  omp_destroy_allocator (a);
+
+  a = omp_init_allocator (omp_default_mem_space,
+			  sizeof (traits2) / sizeof (traits2[0]),
+			  traits2);
+  if (a == omp_null_allocator)
+    abort ();
+  if (traits3[5].key != omp_atk_fb_data)
+    abort ();
+  traits3[5].value = (uintptr_t) a;
+  a2 = omp_init_allocator (omp_default_mem_space,
+			   sizeof (traits3) / sizeof (traits3[0]),
+			   traits3);
+  if (a2 == omp_null_allocator)
+    abort ();
+  p = (int *) omp_aligned_calloc (4, 5, 84, a2);
+  for (i = 0; i < 420 / sizeof (int); i++)
+    if (p[i])
+      abort ();
+  if ((((uintptr_t) p) % 32) != 0)
+    abort ();
+  p[0] = 5;
+  p[419 / sizeof (int)] = 6;
+  q = (int *) omp_aligned_calloc (8, 24, 32, a2);
+  if ((((uintptr_t) q) % 16) != 0)
+    abort ();
+  for (i = 0; i < 768 / sizeof (int); i++)
+    if (q[i])
+      abort ();
+  q[0] = 7;
+  q[767 / sizeof (int)] = 8;
+  r = (int *) omp_aligned_calloc (8, 64, 8, a2);
+  if ((((uintptr_t) r) % 8) != 0)
+    abort ();
+  for (i = 0; i < 512 / sizeof (int); i++)
+    if (r[i])
+      abort ();
+  r[0] = 9;
+  r[511 / sizeof (int)] = 10;
+  omp_free (p, omp_null_allocator);
+  omp_free (q, a2);
+  omp_free (r, omp_null_allocator);
+  omp_destroy_allocator (a2);
+  omp_destroy_allocator (a);
+
+  a = omp_init_allocator (omp_default_mem_space,
+			  sizeof (traits4) / sizeof (traits4[0]),
+			  traits4);
+  if (a == omp_null_allocator)
+    abort ();
+  if (traits3[5].key != omp_atk_fb_data)
+    abort ();
+  traits3[5].value = (uintptr_t) a;
+  a2 = omp_init_allocator (omp_default_mem_space,
+			   sizeof (traits3) / sizeof (traits3[0]),
+			   traits3);
+  if (a2 == omp_null_allocator)
+    abort ();
+  omp_set_default_allocator (a2);
+#ifdef __cplusplus
+  p = static_cast <int *> (omp_aligned_calloc (4, 21, 20));
+#else
+  p = (int *) omp_aligned_calloc (4, 21, 20, omp_null_allocator);
+#endif
+  if ((((uintptr_t) p) % 32) != 0)
+    abort ();
+  for (i = 0; i < 420 / sizeof (int); i++)
+    if (p[i])
+      abort ();
+  p[0] = 5;
+  p[419 / sizeof (int)] = 6;
+  q = (int *) omp_aligned_calloc (64, 12, 64, omp_null_allocator);
+  if ((((uintptr_t) q) % 128) != 0)
+    abort ();
+  for (i = 0; i < 768 / sizeof (int); i++)
+    if (q[i])
+      abort ();
+  q[0] = 7;
+  q[767 / sizeof (int)] = 8;
+  if (omp_aligned_calloc (8, 24, 32, omp_null_allocator) != NULL)
+    abort ();
+#ifdef __cplusplus
+  omp_free (p);
+  omp_free (q);
+  omp_free (NULL);
+#else
+  omp_free (p, omp_null_allocator);
+  omp_free (q, omp_null_allocator);
+  omp_free (NULL, omp_null_allocator);
+#endif
+  omp_free (NULL, omp_null_allocator);
+  omp_destroy_allocator (a2);
+  omp_destroy_allocator (a);
+  return 0;
+}

