Use conservative non-iterative strategy for caches

2015-07-09  Tom de Vries

	PR libgomp/66714
	* hash-table.h (gt_cleare_cache): Don't recurse into cache entries,
	just mark them.
	* trans-mem.c (struct tm_wrapper_hasher::ggc_mx (tree_map *&m)): New
	function.
	* tree.c (struct tree_vec_map_cache_hasher::ggc_mx (tree_vec_map *&m)):
	Same.
	* tree.h (struct tree_decl_map_cache_hasher::ggc_mx (tree_decl_map
	*&m)): Same.
	* ubsan.c (struct tree_type_map_cache_hasher::ggc_mx (tree_type_map
	*&m)): Same.
	* varasm.c (struct tm_clone_hasher::ggc_mx (tree_map *&m)): Same.

	* testsuite/libgomp.c/pr66714.c: New test.
---
 gcc/hash-table.h                      | 28 +++++++++++++++++++++++++++-
 gcc/trans-mem.c                       |  4 ++++
 gcc/tree.c                            |  7 +++++++
 gcc/tree.h                            |  6 ++++++
 gcc/ubsan.c                           |  4 ++++
 gcc/varasm.c                          |  4 ++++
 libgomp/testsuite/libgomp.c/pr66714.c | 17 +++++++++++++++++
 7 files changed, 69 insertions(+), 1 deletion(-)
 create mode 100644 libgomp/testsuite/libgomp.c/pr66714.c

diff --git a/gcc/hash-table.h b/gcc/hash-table.h
index 12e0c96..ce899bd 100644
--- a/gcc/hash-table.h
+++ b/gcc/hash-table.h
@@ -1046,6 +1046,32 @@ gt_cleare_cache (hash_table<H> *h)
   if (!h)
     return;
 
+  /* Say we have a cache entry E with key 'from' and non-key 'to'.
+
+     The marking of non-key 'to' should be done in ggc_mx () during the marking
+     phase.  We mark the non-key 'to' conservatively, that is, regardless of
+     whether the key 'from' is live or not.
+
+     The marking of key 'from' has already happened (or not) during the marking
+     phase, and determines whether we keep entry E live during the clear-cache
+     phase.
+     If key 'from' is alive, we mark entry E as such.
+     If key 'from' is not alive:
+     - we remove the entry E from the cache, and
+     - entry E will be ggc-freed, and
+     - key 'from' will be ggc-freed.
+     - non-key 'to' will not be freed, since we conservatively marked it.  But
+       in the next ggc invocation, entry E will be gone and will no longer cause
+       'to' to be marked.  So 'to' may be ggc-freed in the following invocation.
+
+     Background: A more precise marking strategy would be to mark the non-key
+     'to' only if key 'from' is live.  But in order to get to the transitive
+     closure of that marking, we'd need an iterate-till-fixed-point loop around
+     the traversal of all cache tables and their entries.
+     So instead we mark conservatively.  The drawback of this strategy
+     is that cache cycles are not freed.  Also, it can take several ggc
+     invocations for the effects to fully propagate.  */
+
   for (typename table::iterator iter = h->begin (); iter != h->end (); ++iter)
     if (!table::is_empty (*iter) && !table::is_deleted (*iter))
       {
@@ -1053,7 +1079,7 @@ gt_cleare_cache (hash_table<H> *h)
         if (res == 0)
           h->clear_slot (&*iter);
         else if (res != -1)
-          gt_ggc_mx (*iter);
+          ggc_set_mark (*iter);
       }
 }
 
diff --git a/gcc/trans-mem.c b/gcc/trans-mem.c
index c809a2e..748a2b6 100644
--- a/gcc/trans-mem.c
+++ b/gcc/trans-mem.c
@@ -478,6 +478,10 @@ struct tm_wrapper_hasher : ggc_cache_ptr_hash<tree_map>
     return a->base.from == b->base.from;
   }
 
+  static void ggc_mx (tree_map *&m) {
+    gt_ggc_mx (m->to);
+  }
+
   static int
   keep_cache_entry (tree_map *&m)
   {
diff --git a/gcc/tree.c b/gcc/tree.c
index 6628a38..16afae5 100644
--- a/gcc/tree.c
+++ b/gcc/tree.c
@@ -261,6 +261,13 @@ struct tree_vec_map_cache_hasher : ggc_cache_ptr_hash<tree_vec_map>
     return a->base.from == b->base.from;
  }
 
+  static void ggc_mx (tree_vec_map *&m) {
+    /* gt_ggc_mx (vec<T, va_gc> *v) from vec.h does not do
+       ggc_test_and_set_mark, so do it here.  */
+    if (ggc_test_and_set_mark (m->to))
+      gt_ggc_mx (m->to);
+  }
+
   static int
   keep_cache_entry (tree_vec_map *&m)
   {
diff --git a/gcc/tree.h b/gcc/tree.h
index 250f99d..ae48a80 100644
--- a/gcc/tree.h
+++ b/gcc/tree.h
@@ -4626,6 +4626,8 @@ extern unsigned int tree_map_hash (const void *);
 extern unsigned int tree_decl_map_hash (const void *);
 #define tree_decl_map_marked_p tree_map_base_marked_p
 
+extern void gt_ggc_mx (tree&);
+
 struct tree_decl_map_cache_hasher : ggc_cache_ptr_hash<tree_decl_map>
 {
   static hashval_t hash (tree_decl_map *m) { return tree_decl_map_hash (m); }
@@ -4635,6 +4637,10 @@ struct tree_decl_map_cache_hasher : ggc_cache_ptr_hash<tree_decl_map>
     return tree_decl_map_eq (a, b);
   }
 
+  static void ggc_mx (tree_decl_map *&m) {
+    gt_ggc_mx (m->to);
+  }
+
   static int
   keep_cache_entry (tree_decl_map *&m)
   {
diff --git a/gcc/ubsan.c b/gcc/ubsan.c
index 19eafab..6ea5be8 100644
--- a/gcc/ubsan.c
+++ b/gcc/ubsan.c
@@ -95,6 +95,10 @@ struct tree_type_map_cache_hasher : ggc_cache_ptr_hash<tree_type_map>
     return a->type.from == b->type.from;
   }
 
+  static void ggc_mx (tree_type_map *&m) {
+    gt_ggc_mx (m->decl);
+  }
+
   static int
   keep_cache_entry (tree_type_map *&m)
   {
diff --git a/gcc/varasm.c b/gcc/varasm.c
index 3e76032..ff20941 100644
--- a/gcc/varasm.c
+++ b/gcc/varasm.c
@@ -5793,6 +5793,10 @@ struct tm_clone_hasher : ggc_cache_ptr_hash<tree_map>
   static hashval_t hash (tree_map *m) { return tree_map_hash (m); }
   static bool equal (tree_map *a, tree_map *b) { return tree_map_eq (a, b); }
 
+  static void ggc_mx (tree_map *&m) {
+    gt_ggc_mx (m->to);
+  }
+
   static int
   keep_cache_entry (tree_map *&e)
   {
diff --git a/libgomp/testsuite/libgomp.c/pr66714.c b/libgomp/testsuite/libgomp.c/pr66714.c
new file mode 100644
index 0000000..b60c74d
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/pr66714.c
@@ -0,0 +1,17 @@
+/* { dg-do "compile" } */
+/* { dg-additional-options "--param ggc-min-expand=0" } */
+/* { dg-additional-options "--param ggc-min-heapsize=0" } */
+/* { dg-additional-options "-g" } */
+
+/* Minimized from target-2.c.  */
+
+void
+fn3 (int x)
+{
+  double b[3 * x];
+  int i;
+  #pragma omp target
+  #pragma omp parallel for
+  for (i = 0; i < x; i++)
+    b[i] += 1;
+}
-- 
1.9.1
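
For reference, below is a standalone sketch of the strategy described in the
hash-table.h comment.  It is not GCC code: the types obj and cache_entry and
the function ggc_round_sim are hypothetical, and only the marking order
mirrors the patch.  The marking phase marks each entry's non-key 'to'
unconditionally; the clear-cache phase drops entries whose key 'from' is dead
and marks surviving entries without recursing into them.  Built as C++11, it
prints that 'from' is freed in the first collection and the conservatively
marked 'to' only in the second one, which is the "several ggc invocations"
effect mentioned in the comment.

/* Standalone illustration, not GCC internals.  obj, cache_entry and
   ggc_round_sim are made-up names.  */

#include <cstdio>
#include <utility>
#include <vector>

struct obj
{
  const char *name;
  bool marked;
};

struct cache_entry
{
  obj *from;   /* key */
  obj *to;     /* non-key */
  bool marked;
};

/* One simulated collection over a single cache.  ROOTS are the objects
   reachable from outside the cache.  Returns the surviving entries.  */

static std::vector<cache_entry *>
ggc_round_sim (std::vector<obj *> &heap, std::vector<cache_entry *> cache,
               const std::vector<obj *> &roots)
{
  for (obj *o : heap)
    o->marked = false;
  for (cache_entry *e : cache)
    e->marked = false;

  /* Marking phase: mark the roots, then conservatively mark every entry's
     non-key 'to', whether or not its key 'from' is live.  */
  for (obj *r : roots)
    r->marked = true;
  for (cache_entry *e : cache)
    e->to->marked = true;

  /* Clear-cache phase (the gt_cleare_cache analogue): drop entries whose
     key is dead; mark surviving entries themselves, without recursing
     into their members.  */
  std::vector<cache_entry *> kept;
  for (cache_entry *e : cache)
    if (e->from->marked)
      {
        e->marked = true;
        kept.push_back (e);
      }
    else
      delete e;   /* entry E is freed right away.  */

  /* Sweep: free unmarked objects.  */
  std::vector<obj *> live;
  for (obj *o : heap)
    if (o->marked)
      live.push_back (o);
    else
      {
        std::printf ("freed %s\n", o->name);
        delete o;
      }
  heap = std::move (live);
  return kept;
}

int
main ()
{
  std::vector<obj *> heap = { new obj{"from", false}, new obj{"to", false} };
  std::vector<cache_entry *> cache
    = { new cache_entry{heap[0], heap[1], false} };

  /* 'from' is not a root, so it is dead.  The first round frees the cache
     entry and 'from', but 'to' survives: it was marked conservatively.  */
  cache = ggc_round_sim (heap, cache, {});

  /* The entry is gone now, so nothing marks 'to' any more and the second
     round frees it.  */
  cache = ggc_round_sim (heap, cache, {});
  return 0;
}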