diff --git a/libgomp/config/nvptx/bar.c b/libgomp/config/nvptx/bar.c
index eee2107..0b958ed 100644
--- a/libgomp/config/nvptx/bar.c
+++ b/libgomp/config/nvptx/bar.c
@@ -30,137 +30,143 @@
 #include <limits.h>
 #include "libgomp.h"
 
-/* For cpu_relax.  */
-#include "doacross.h"
-
-/* Assuming ADDR is &bar->generation, return bar.  Copied from
-   rtems/bar.c.  */
+void
+gomp_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state)
+{
+  if (__builtin_expect (state & BAR_WAS_LAST, 0))
+    {
+      /* Next time we'll be awaiting TOTAL threads again.  */
+      bar->awaited = bar->total;
+      __atomic_store_n (&bar->generation, bar->generation + BAR_INCR,
+                        MEMMODEL_RELEASE);
+    }
+  if (bar->total > 1)
+    asm ("bar.sync 1, %0;" : : "r" (32 * bar->total));
+}
 
-static gomp_barrier_t *
-generation_to_barrier (int *addr)
+void
+gomp_barrier_wait (gomp_barrier_t *bar)
 {
-  char *bar
-    = (char *) addr - __builtin_offsetof (gomp_barrier_t, generation);
-  return (gomp_barrier_t *)bar;
+  gomp_barrier_wait_end (bar, gomp_barrier_wait_start (bar));
 }
 
-/* Implement futex_wait-like behaviour to plug into the linux/bar.c
-   implementation.  Assumes ADDR is &bar->generation.  */
+/* Like gomp_barrier_wait, except that if the encountering thread
+   is not the last one to hit the barrier, it returns immediately.
+   The intended usage is that a thread which intends to gomp_barrier_destroy
+   this barrier calls gomp_barrier_wait, while all other threads
+   call gomp_barrier_wait_last.  When gomp_barrier_wait returns,
+   the barrier can be safely destroyed.  */
 
-static inline void
-futex_wait (int *addr, int val)
+void
+gomp_barrier_wait_last (gomp_barrier_t *bar)
 {
-  gomp_barrier_t *bar = generation_to_barrier (addr);
+  /* The behavior described above matches 'bar.arrive' perfectly.  */
+  if (bar->total > 1)
+    asm ("bar.arrive 1, %0;" : : "r" (32 * bar->total));
+}
 
-  if (bar->total < 2)
-    /* A barrier with less than two threads, nop.  */
-    return;
+void
+gomp_team_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state)
+{
+  struct gomp_thread *thr = gomp_thread ();
+  struct gomp_team *team = thr->ts.team;
 
-  gomp_mutex_lock (&bar->lock);
+  bool run_tasks = (team->task_count != 0);
+  if (bar->total > 1)
+    run_tasks = __builtin_nvptx_bar_red_or (1, 32 * bar->total, true,
+                                            (team->task_count != 0));
 
-  /* Futex semantics: only go to sleep if *addr == val.  */
-  if (__builtin_expect (__atomic_load_n (addr, MEMMODEL_ACQUIRE) != val, 0))
+  if (__builtin_expect (state & BAR_WAS_LAST, 0))
     {
-      gomp_mutex_unlock (&bar->lock);
-      return;
+      /* Next time we'll be awaiting TOTAL threads again.  */
+      bar->awaited = bar->total;
+      team->work_share_cancelled = 0;
     }
 
-  /* Register as waiter.  */
-  unsigned int waiters
-    = __atomic_add_fetch (&bar->waiters, 1, MEMMODEL_ACQ_REL);
-  if (waiters == 0)
-    __builtin_abort ();
-  unsigned int waiter_id = waiters;
-
-  if (waiters > 1)
+  if (__builtin_expect (run_tasks == true, 0))
     {
-      /* Wake other threads in bar.sync.  */
-      asm volatile ("bar.sync 1, %0;" : : "r" (32 * waiters));
+      while (__atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE)
+             & BAR_TASK_PENDING)
+        gomp_barrier_handle_tasks (state);
 
-      /* Ensure that they have updated waiters.  */
-      asm volatile ("bar.sync 1, %0;" : : "r" (32 * waiters));
+      if (bar->total > 1)
+        asm volatile ("bar.sync 1, %0;" : : "r" (32 * bar->total));
     }
+}
 
-  gomp_mutex_unlock (&bar->lock);
-
-  while (1)
-    {
-      /* Wait for next thread in barrier.  */
-      asm volatile ("bar.sync 1, %0;" : : "r" (32 * (waiters + 1)));
-
-      /* Get updated waiters.  */
-      unsigned int updated_waiters
-        = __atomic_load_n (&bar->waiters, MEMMODEL_ACQUIRE);
-
-      /* Notify that we have updated waiters.  */
-      asm volatile ("bar.sync 1, %0;" : : "r" (32 * (waiters + 1)));
-
-      waiters = updated_waiters;
-
-      if (waiter_id > waiters)
-        /* A wake happened, and we're in the group of woken threads.  */
-        break;
-
-      /* Continue waiting.  */
-    }
+void
+gomp_team_barrier_wait (gomp_barrier_t *bar)
+{
+  gomp_team_barrier_wait_end (bar, gomp_barrier_wait_start (bar));
 }
 
-/* Implement futex_wake-like behaviour to plug into the linux/bar.c
-   implementation.  Assumes ADDR is &bar->generation.  */
+void
+gomp_team_barrier_wait_final (gomp_barrier_t *bar)
+{
+  gomp_barrier_state_t state = gomp_barrier_wait_final_start (bar);
+  if (__builtin_expect (state & BAR_WAS_LAST, 0))
+    bar->awaited_final = bar->total;
+  gomp_team_barrier_wait_end (bar, state);
+}
 
-static inline void
-futex_wake (int *addr, int count)
+bool
+gomp_team_barrier_wait_cancel_end (gomp_barrier_t *bar,
+                                   gomp_barrier_state_t state)
 {
-  gomp_barrier_t *bar = generation_to_barrier (addr);
+  struct gomp_thread *thr = gomp_thread ();
+  struct gomp_team *team = thr->ts.team;
 
-  if (bar->total < 2)
-    /* A barrier with less than two threads, nop.  */
-    return;
+  bool run_tasks = (team->task_count != 0);
+  if (bar->total > 1)
+    run_tasks = __builtin_nvptx_bar_red_or (1, 32 * bar->total, true,
+                                            (team->task_count != 0));
+  if (state & BAR_CANCELLED)
+    return true;
 
-  gomp_mutex_lock (&bar->lock);
-  unsigned int waiters = __atomic_load_n (&bar->waiters, MEMMODEL_ACQUIRE);
-  if (waiters == 0)
+  if (__builtin_expect (state & BAR_WAS_LAST, 0))
     {
-      /* No threads to wake.  */
-      gomp_mutex_unlock (&bar->lock);
-      return;
+      /* Note: BAR_CANCELLED should never be set in state here, because
+         cancellation means that at least one of the threads has been
+         cancelled, thus on a cancellable barrier we should never see
+         all threads arrive.  */
+
+      /* Next time we'll be awaiting TOTAL threads again.  */
+      bar->awaited = bar->total;
+      team->work_share_cancelled = 0;
    }
 
-  if (count == INT_MAX)
-    /* Release all threads.  */
-    __atomic_store_n (&bar->waiters, 0, MEMMODEL_RELEASE);
-  else if (count < bar->total)
-    /* Release count threads.  */
-    __atomic_add_fetch (&bar->waiters, -count, MEMMODEL_ACQ_REL);
-  else
-    /* Count has an illegal value.  */
-    __builtin_abort ();
-
-  /* Wake other threads in bar.sync.  */
-  asm volatile ("bar.sync 1, %0;" : : "r" (32 * (waiters + 1)));
+  if (__builtin_expect (run_tasks == true, 0))
+    {
+      while (__atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE)
+             & BAR_TASK_PENDING)
+        gomp_barrier_handle_tasks (state);
 
-  /* Let them get the updated waiters.  */
-  asm volatile ("bar.sync 1, %0;" : : "r" (32 * (waiters + 1)));
+      if (bar->total > 1)
+        asm volatile ("bar.sync 1, %0;" : : "r" (32 * bar->total));
+    }
 
-  gomp_mutex_unlock (&bar->lock);
+  return false;
 }
 
-/* Copied from linux/wait.h.  */
-
-static inline int do_spin (int *addr, int val)
+bool
+gomp_team_barrier_wait_cancel (gomp_barrier_t *bar)
 {
-  /* The current implementation doesn't spin.  */
-  return 1;
+  return gomp_team_barrier_wait_cancel_end (bar, gomp_barrier_wait_start (bar));
 }
 
-/* Copied from linux/wait.h.  */
-
-static inline void do_wait (int *addr, int val)
+void
+gomp_team_barrier_cancel (struct gomp_team *team)
 {
-  if (do_spin (addr, val))
-    futex_wait (addr, val);
-}
+  gomp_mutex_lock (&team->task_lock);
+  if (team->barrier.generation & BAR_CANCELLED)
+    {
+      gomp_mutex_unlock (&team->task_lock);
+      return;
+    }
+  team->barrier.generation |= BAR_CANCELLED;
+  gomp_mutex_unlock (&team->task_lock);
 
-/* Reuse the linux implementation.  */
-#define GOMP_WAIT_H 1
-#include "../linux/bar.c"
+  /* The 'exit' instruction cancels this thread and also fulfills any other
+     CTA threads waiting on barriers.  */
+  asm volatile ("exit;");
+}
diff --git a/libgomp/config/nvptx/bar.h b/libgomp/config/nvptx/bar.h
index 28bf7f4..ddda33e 100644
--- a/libgomp/config/nvptx/bar.h
+++ b/libgomp/config/nvptx/bar.h
@@ -83,10 +83,16 @@ extern void gomp_team_barrier_wait_end (gomp_barrier_t *,
 extern bool gomp_team_barrier_wait_cancel (gomp_barrier_t *);
 extern bool gomp_team_barrier_wait_cancel_end (gomp_barrier_t *,
                                                gomp_barrier_state_t);
-extern void gomp_team_barrier_wake (gomp_barrier_t *, int);
 struct gomp_team;
 extern void gomp_team_barrier_cancel (struct gomp_team *);
 
+static inline void
+gomp_team_barrier_wake (gomp_barrier_t *bar, int count)
+{
+  /* We never "wake up" threads on nvptx.  Threads wait at barrier
+     instructions until the barrier is fulfilled.  Do nothing here.  */
+}
+
 static inline gomp_barrier_state_t
 gomp_barrier_wait_start (gomp_barrier_t *bar)
 {
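Note on the barrier operands (an illustrative sketch, not part of the
patch): PTX named barriers take a barrier id and a thread count, and the
count must be a multiple of the warp size (32).  libgomp on nvptx runs
each OpenMP thread on its own warp, which is why every barrier operation
above passes "32 * bar->total".  The helper names below are hypothetical,
for illustration only:

  /* Block until 32 * NTHREADS hardware threads (i.e. NTHREADS warps)
     have reached named barrier 1.  */
  static inline void
  example_bar_sync (unsigned int nthreads)
  {
    asm volatile ("bar.sync 1, %0;" : : "r" (32 * nthreads));
  }

  /* Signal arrival at named barrier 1 without blocking; threads waiting
     in example_bar_sync are released once the arrival count is met.
     This is the behavior gomp_barrier_wait_last relies on.  */
  static inline void
  example_bar_arrive (unsigned int nthreads)
  {
    asm volatile ("bar.arrive 1, %0;" : : "r" (32 * nthreads));
  }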
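The __builtin_nvptx_bar_red_or calls correspond to PTX 'bar.red.or.pred',
which folds a reduction into the barrier itself: each arriving thread
contributes a predicate, and every thread receives the OR of all
contributions once the barrier completes.  A sketch mirroring the call in
gomp_team_barrier_wait_end (nthreads and task_count are hypothetical
stand-ins; the operand order simply follows the patch's usage):

  /* All warps synchronize on named barrier 1; run_tasks becomes true in
     every thread if any thread observed pending tasks.  */
  bool run_tasks = __builtin_nvptx_bar_red_or (1, 32 * nthreads, true,
                                               (task_count != 0));

This is what lets a single thread's pending tasks divert the whole team
into gomp_barrier_handle_tasks before the final bar.sync, without any
futex-style wake-ups.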