public inbox for gcc-patches@gcc.gnu.org
 help / color / mirror / Atom feed
* [PATCH 06/35] Change use to type-based pool allocator in ira-color.c.
  2015-05-27 14:09 [PATCH 01/35] Introduce new type-based pool allocator mliska
  2015-05-27 14:00 ` [PATCH 12/35] Change use to type-based pool allocator in cselib.c mliska
  2015-05-27 14:00 ` [PATCH 10/35] Change use to type-based pool allocator in cfg.c mliska
@ 2015-05-27 14:00 ` mliska
  2015-05-27 14:00 ` [PATCH 09/35] Change use to type-based pool allocator in c-format.c mliska
                   ` (31 subsequent siblings)
  34 siblings, 0 replies; 108+ messages in thread
From: mliska @ 2015-05-27 14:00 UTC (permalink / raw)
  To: gcc-patches

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* ira-color.c (init_update_cost_records): Use new type-based pool allocator.
	(get_update_cost_record): Likewise.
	(free_update_cost_record_list): Likewise.
	(finish_update_cost_records): Likewise.
	(initiate_cost_update): Likewise.
---
 gcc/ira-color.c | 35 +++++++++++++++--------------------
 1 file changed, 15 insertions(+), 20 deletions(-)

diff --git a/gcc/ira-color.c b/gcc/ira-color.c
index b719e7a..4750714 100644
--- a/gcc/ira-color.c
+++ b/gcc/ira-color.c
@@ -123,21 +123,6 @@ struct update_cost_record
   int divisor;
   /* Next record for given allocno.  */
   struct update_cost_record *next;
-
-  /* Pool allocation new operator.  */
-  inline void *operator new (size_t)
-  {
-    return pool.allocate ();
-  }
-
-  /* Delete operator utilizing pool allocation.  */
-  inline void operator delete (void *ptr)
-  {
-    pool.remove((update_cost_record *) ptr);
-  }
-
-  /* Memory allocation pool.  */
-  static pool_allocator<update_cost_record> pool;
 };
 
 /* To decrease footprint of ira_allocno structure we store all data
@@ -1181,16 +1166,25 @@ setup_profitable_hard_regs (void)
    allocnos.  */
 
 /* Pool for update cost records.  */
-pool_allocator<update_cost_record> update_cost_record::pool
-  ("update cost records", 100);
+static alloc_pool update_cost_record_pool;
+
+/* Initiate update cost records.  */
+static void
+init_update_cost_records (void)
+{
+  update_cost_record_pool
+    = create_alloc_pool ("update cost records",
+			 sizeof (struct update_cost_record), 100);
+}
 
 /* Return new update cost record with given params.  */
 static struct update_cost_record *
 get_update_cost_record (int hard_regno, int divisor,
 			struct update_cost_record *next)
 {
-  update_cost_record *record = new update_cost_record;
+  struct update_cost_record *record;
 
+  record = (struct update_cost_record *) pool_alloc (update_cost_record_pool);
   record->hard_regno = hard_regno;
   record->divisor = divisor;
   record->next = next;
@@ -1206,7 +1200,7 @@ free_update_cost_record_list (struct update_cost_record *list)
   while (list != NULL)
     {
       next = list->next;
-      delete list;
+      pool_free (update_cost_record_pool, list);
       list = next;
     }
 }
@@ -1215,7 +1209,7 @@ free_update_cost_record_list (struct update_cost_record *list)
 static void
 finish_update_cost_records (void)
 {
-  update_cost_record::pool.release ();
+  free_alloc_pool (update_cost_record_pool);
 }
 
 /* Array whose element value is TRUE if the corresponding hard
@@ -1270,6 +1264,7 @@ initiate_cost_update (void)
     = (struct update_cost_queue_elem *) ira_allocate (size);
   memset (update_cost_queue_elems, 0, size);
   update_cost_check = 0;
+  init_update_cost_records ();
 }
 
 /* Deallocate data used by function update_costs_from_copies.  */
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* [PATCH 10/35] Change use to type-based pool allocator in cfg.c.
  2015-05-27 14:09 [PATCH 01/35] Introduce new type-based pool allocator mliska
  2015-05-27 14:00 ` [PATCH 12/35] Change use to type-based pool allocator in cselib.c mliska
@ 2015-05-27 14:00 ` mliska
  2015-05-27 18:01   ` Jeff Law
  2015-05-27 14:00 ` [PATCH 06/35] Change use to type-based pool allocator in ira-color.c mliska
                   ` (32 subsequent siblings)
  34 siblings, 1 reply; 108+ messages in thread
From: mliska @ 2015-05-27 14:00 UTC (permalink / raw)
  To: gcc-patches

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* cfg.c (initialize_original_copy_tables): Use new type-based pool allocator.
	(free_original_copy_tables): Likewise.
	(copy_original_table_clear): Likewise.
	(copy_original_table_set): Likewise.
---
 gcc/cfg.c | 17 +++++++----------
 1 file changed, 7 insertions(+), 10 deletions(-)

diff --git a/gcc/cfg.c b/gcc/cfg.c
index cdcc01c..ddfecdc 100644
--- a/gcc/cfg.c
+++ b/gcc/cfg.c
@@ -1066,18 +1066,16 @@ static hash_table<bb_copy_hasher> *bb_copy;
 
 /* And between loops and copies.  */
 static hash_table<bb_copy_hasher> *loop_copy;
-static alloc_pool original_copy_bb_pool;
-
+static pool_allocator<htab_bb_copy_original_entry> *original_copy_bb_pool;
 
 /* Initialize the data structures to maintain mapping between blocks
    and its copies.  */
 void
 initialize_original_copy_tables (void)
 {
-  gcc_assert (!original_copy_bb_pool);
-  original_copy_bb_pool
-    = create_alloc_pool ("original_copy",
-			 sizeof (struct htab_bb_copy_original_entry), 10);
+
+  original_copy_bb_pool = new pool_allocator<htab_bb_copy_original_entry>
+    ("original_copy", 10);
   bb_original = new hash_table<bb_copy_hasher> (10);
   bb_copy = new hash_table<bb_copy_hasher> (10);
   loop_copy = new hash_table<bb_copy_hasher> (10);
@@ -1095,7 +1093,7 @@ free_original_copy_tables (void)
   bb_copy = NULL;
   delete loop_copy;
   loop_copy = NULL;
-  free_alloc_pool (original_copy_bb_pool);
+  delete original_copy_bb_pool;
   original_copy_bb_pool = NULL;
 }
 
@@ -1117,7 +1115,7 @@ copy_original_table_clear (hash_table<bb_copy_hasher> *tab, unsigned obj)
 
   elt = *slot;
   tab->clear_slot (slot);
-  pool_free (original_copy_bb_pool, elt);
+  original_copy_bb_pool->remove (elt);
 }
 
 /* Sets the value associated with OBJ in table TAB to VAL.
@@ -1137,8 +1135,7 @@ copy_original_table_set (hash_table<bb_copy_hasher> *tab,
   slot = tab->find_slot (&key, INSERT);
   if (!*slot)
     {
-      *slot = (struct htab_bb_copy_original_entry *)
-		pool_alloc (original_copy_bb_pool);
+      *slot = original_copy_bb_pool->allocate ();
       (*slot)->index1 = obj;
     }
   (*slot)->index2 = val;
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* [PATCH 04/35] Change use to type-based pool allocator in lra.c.
  2015-05-27 14:09 [PATCH 01/35] Introduce new type-based pool allocator mliska
                   ` (4 preceding siblings ...)
  2015-05-27 14:00 ` [PATCH 03/35] Change use to type-based pool allocator in lra-lives.c mliska
@ 2015-05-27 14:00 ` mliska
  2015-05-27 17:55   ` Jeff Law
  2015-05-27 14:07 ` [PATCH 19/35] Change use to type-based pool allocator in sel-sched-ir.c mliska
                   ` (28 subsequent siblings)
  34 siblings, 1 reply; 108+ messages in thread
From: mliska @ 2015-05-27 14:00 UTC (permalink / raw)
  To: gcc-patches

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* lra.c (init_insn_regs): Use new type-based pool allocator.
	(new_insn_reg): Likewise.
	(free_insn_reg): Likewise.
	(free_insn_regs): Likewise.
	(finish_insn_regs): Likewise.
	(init_insn_recog_data): Likewise.
	(init_reg_info): Likewise.
	(finish_reg_info): Likewise.
	(lra_free_copies): Likewise.
	(lra_create_copy): Likewise.
	(invalidate_insn_data_regno_info): Likewise.
---
 gcc/lra-int.h | 31 +++++++++++++++++++++++++++++++
 gcc/lra.c     | 40 ++++++++++------------------------------
 2 files changed, 41 insertions(+), 30 deletions(-)

diff --git a/gcc/lra-int.h b/gcc/lra-int.h
index 4bdd2c6..ef137e0 100644
--- a/gcc/lra-int.h
+++ b/gcc/lra-int.h
@@ -84,6 +84,22 @@ struct lra_copy
   int regno1, regno2;
   /* Next copy with correspondingly REGNO1 and REGNO2.	*/
   lra_copy_t regno1_next, regno2_next;
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove((lra_copy *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<lra_copy> pool;
+
 };
 
 /* Common info about a register (pseudo or hard register).  */
@@ -191,6 +207,21 @@ struct lra_insn_reg
   int regno;
   /* Next reg info of the same insn.  */
   struct lra_insn_reg *next;
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove((lra_insn_reg *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<lra_insn_reg> pool;
 };
 
 /* Static part (common info for insns with the same ICODE) of LRA
diff --git a/gcc/lra.c b/gcc/lra.c
index 7440668..456f618 100644
--- a/gcc/lra.c
+++ b/gcc/lra.c
@@ -550,15 +550,7 @@ lra_update_dups (lra_insn_recog_data_t id, signed char *nops)
    insns.  */
 
 /* Pools for insn reg info.  */
-static alloc_pool insn_reg_pool;
-
-/* Initiate pool for insn reg info.  */
-static void
-init_insn_regs (void)
-{
-  insn_reg_pool
-    = create_alloc_pool ("insn regs", sizeof (struct lra_insn_reg), 100);
-}
+pool_allocator<lra_insn_reg> lra_insn_reg::pool ("insn regs", 100);
 
 /* Create LRA insn related info about a reference to REGNO in INSN with
    TYPE (in/out/inout), biggest reference mode MODE, flag that it is
@@ -570,9 +562,7 @@ new_insn_reg (rtx_insn *insn, int regno, enum op_type type,
 	      machine_mode mode,
 	      bool subreg_p, bool early_clobber, struct lra_insn_reg *next)
 {
-  struct lra_insn_reg *ir;
-
-  ir = (struct lra_insn_reg *) pool_alloc (insn_reg_pool);
+  lra_insn_reg *ir = new lra_insn_reg ();
   ir->type = type;
   ir->biggest_mode = mode;
   if (GET_MODE_SIZE (mode) > GET_MODE_SIZE (lra_reg_info[regno].biggest_mode)
@@ -585,13 +575,6 @@ new_insn_reg (rtx_insn *insn, int regno, enum op_type type,
   return ir;
 }
 
-/* Free insn reg info IR.  */
-static void
-free_insn_reg (struct lra_insn_reg *ir)
-{
-  pool_free (insn_reg_pool, ir);
-}
-
 /* Free insn reg info list IR.	*/
 static void
 free_insn_regs (struct lra_insn_reg *ir)
@@ -601,7 +584,7 @@ free_insn_regs (struct lra_insn_reg *ir)
   for (; ir != NULL; ir = next_ir)
     {
       next_ir = ir->next;
-      free_insn_reg (ir);
+      delete ir;
     }
 }
 
@@ -609,7 +592,7 @@ free_insn_regs (struct lra_insn_reg *ir)
 static void
 finish_insn_regs (void)
 {
-  free_alloc_pool (insn_reg_pool);
+  lra_insn_reg::pool.release ();
 }
 
 \f
@@ -737,7 +720,6 @@ init_insn_recog_data (void)
 {
   lra_insn_recog_data_len = 0;
   lra_insn_recog_data = NULL;
-  init_insn_regs ();
 }
 
 /* Expand, if necessary, LRA data about insns.	*/
@@ -791,6 +773,8 @@ finish_insn_recog_data (void)
     if ((data = lra_insn_recog_data[i]) != NULL)
       free_insn_recog_data (data);
   finish_insn_regs ();
+  lra_copy::pool.release ();
+  lra_insn_reg::pool.release ();
   free (lra_insn_recog_data);
 }
 
@@ -1310,7 +1294,7 @@ get_new_reg_value (void)
 }
 
 /* Pools for copies.  */
-static alloc_pool copy_pool;
+pool_allocator<lra_copy> lra_copy::pool ("lra copies", 100);
 
 /* Vec referring to pseudo copies.  */
 static vec<lra_copy_t> copy_vec;
@@ -1350,8 +1334,6 @@ init_reg_info (void)
   lra_reg_info = XNEWVEC (struct lra_reg, reg_info_size);
   for (i = 0; i < reg_info_size; i++)
     initialize_lra_reg_info_element (i);
-  copy_pool
-    = create_alloc_pool ("lra copies", sizeof (struct lra_copy), 100);
   copy_vec.create (100);
 }
 
@@ -1366,8 +1348,6 @@ finish_reg_info (void)
     bitmap_clear (&lra_reg_info[i].insn_bitmap);
   free (lra_reg_info);
   reg_info_size = 0;
-  free_alloc_pool (copy_pool);
-  copy_vec.release ();
 }
 
 /* Expand common reg info if it is necessary.  */
@@ -1394,7 +1374,7 @@ lra_free_copies (void)
     {
       cp = copy_vec.pop ();
       lra_reg_info[cp->regno1].copies = lra_reg_info[cp->regno2].copies = NULL;
-      pool_free (copy_pool, cp);
+      delete cp;
     }
 }
 
@@ -1416,7 +1396,7 @@ lra_create_copy (int regno1, int regno2, int freq)
       regno2 = regno1;
       regno1 = temp;
     }
-  cp = (lra_copy_t) pool_alloc (copy_pool);
+  cp = new lra_copy ();
   copy_vec.safe_push (cp);
   cp->regno1_dest_p = regno1_dest_p;
   cp->freq = freq;
@@ -1585,7 +1565,7 @@ invalidate_insn_data_regno_info (lra_insn_recog_data_t data, rtx_insn *insn,
     {
       i = ir->regno;
       next_ir = ir->next;
-      free_insn_reg (ir);
+      delete ir;
       bitmap_clear_bit (&lra_reg_info[i].insn_bitmap, uid);
       if (i >= FIRST_PSEUDO_REGISTER && ! debug_p)
 	{
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* [PATCH 03/35] Change use to type-based pool allocator in lra-lives.c.
  2015-05-27 14:09 [PATCH 01/35] Introduce new type-based pool allocator mliska
                   ` (3 preceding siblings ...)
  2015-05-27 14:00 ` [PATCH 09/35] Change use to type-based pool allocator in c-format.c mliska
@ 2015-05-27 14:00 ` mliska
  2015-05-27 17:53   ` Jeff Law
  2015-05-28  0:48   ` Trevor Saunders
  2015-05-27 14:00 ` [PATCH 04/35] Change use to type-based pool allocator in lra.c mliska
                   ` (29 subsequent siblings)
  34 siblings, 2 replies; 108+ messages in thread
From: mliska @ 2015-05-27 14:00 UTC (permalink / raw)
  To: gcc-patches

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* lra-lives.c (free_live_range): Use new type-based pool allocator.
	(free_live_range_list): Likewise.
	(create_live_range): Likewise.
	(copy_live_range): Likewise.
	(lra_merge_live_ranges): Likewise.
	(remove_some_program_points_and_update_live_ranges): Likewise.
	(lra_live_ranges_init): Likewise.
	(lra_live_ranges_finish): Likewise.
---
 gcc/lra-coalesce.c |  1 +
 gcc/lra-int.h      | 15 +++++++++++++++
 gcc/lra-lives.c    | 27 +++++++--------------------
 gcc/lra-spills.c   |  1 +
 gcc/lra.c          |  1 +
 5 files changed, 25 insertions(+), 20 deletions(-)

diff --git a/gcc/lra-coalesce.c b/gcc/lra-coalesce.c
index 045691d..b385603 100644
--- a/gcc/lra-coalesce.c
+++ b/gcc/lra-coalesce.c
@@ -84,6 +84,7 @@ along with GCC; see the file COPYING3.	If not see
 #include "except.h"
 #include "timevar.h"
 #include "ira.h"
+#include "alloc-pool.h"
 #include "lra-int.h"
 #include "df.h"
 
diff --git a/gcc/lra-int.h b/gcc/lra-int.h
index 12923ee..4bdd2c6 100644
--- a/gcc/lra-int.h
+++ b/gcc/lra-int.h
@@ -54,6 +54,21 @@ struct lra_live_range
   lra_live_range_t next;
   /* Pointer to structures with the same start.	 */
   lra_live_range_t start_next;
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove((lra_live_range *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<lra_live_range> pool;
 };
 
 typedef struct lra_copy *lra_copy_t;
diff --git a/gcc/lra-lives.c b/gcc/lra-lives.c
index 085411e..9b5f74e 100644
--- a/gcc/lra-lives.c
+++ b/gcc/lra-lives.c
@@ -121,14 +121,7 @@ static sparseset unused_set, dead_set;
 static bitmap_head temp_bitmap;
 
 /* Pool for pseudo live ranges.	 */
-static alloc_pool live_range_pool;
-
-/* Free live range LR.	*/
-static void
-free_live_range (lra_live_range_t lr)
-{
-  pool_free (live_range_pool, lr);
-}
+pool_allocator <lra_live_range> lra_live_range::pool ("live ranges", 100);
 
 /* Free live range list LR.  */
 static void
@@ -139,7 +132,7 @@ free_live_range_list (lra_live_range_t lr)
   while (lr != NULL)
     {
       next = lr->next;
-      free_live_range (lr);
+      delete lr;
       lr = next;
     }
 }
@@ -148,9 +141,7 @@ free_live_range_list (lra_live_range_t lr)
 static lra_live_range_t
 create_live_range (int regno, int start, int finish, lra_live_range_t next)
 {
-  lra_live_range_t p;
-
-  p = (lra_live_range_t) pool_alloc (live_range_pool);
+  lra_live_range_t p = new lra_live_range; 
   p->regno = regno;
   p->start = start;
   p->finish = finish;
@@ -162,9 +153,7 @@ create_live_range (int regno, int start, int finish, lra_live_range_t next)
 static lra_live_range_t
 copy_live_range (lra_live_range_t r)
 {
-  lra_live_range_t p;
-
-  p = (lra_live_range_t) pool_alloc (live_range_pool);
+  lra_live_range_t p = new lra_live_range;
   *p = *r;
   return p;
 }
@@ -209,7 +198,7 @@ lra_merge_live_ranges (lra_live_range_t r1, lra_live_range_t r2)
 	  r1->start = r2->start;
 	  lra_live_range_t temp = r2;
 	  r2 = r2->next;
-	  pool_free (live_range_pool, temp);
+	  delete temp; 
 	}
       else
 	{
@@ -1109,7 +1098,7 @@ remove_some_program_points_and_update_live_ranges (void)
 		}
 	      prev_r->start = r->start;
 	      prev_r->next = next_r;
-	      free_live_range (r);
+	      delete r;
 	    }
 	}
     }
@@ -1380,8 +1369,6 @@ lra_clear_live_ranges (void)
 void
 lra_live_ranges_init (void)
 {
-  live_range_pool = create_alloc_pool ("live ranges",
-				       sizeof (struct lra_live_range), 100);
   bitmap_initialize (&temp_bitmap, &reg_obstack);
   initiate_live_solver ();
 }
@@ -1392,5 +1379,5 @@ lra_live_ranges_finish (void)
 {
   finish_live_solver ();
   bitmap_clear (&temp_bitmap);
-  free_alloc_pool (live_range_pool);
+  lra_live_range::pool.release ();
 }
diff --git a/gcc/lra-spills.c b/gcc/lra-spills.c
index 19ece20..caece9a 100644
--- a/gcc/lra-spills.c
+++ b/gcc/lra-spills.c
@@ -98,6 +98,7 @@ along with GCC; see the file COPYING3.	If not see
 #include "except.h"
 #include "timevar.h"
 #include "target.h"
+#include "alloc-pool.h"
 #include "lra-int.h"
 #include "ira.h"
 #include "df.h"
diff --git a/gcc/lra.c b/gcc/lra.c
index 7c33636..7440668 100644
--- a/gcc/lra.c
+++ b/gcc/lra.c
@@ -149,6 +149,7 @@ along with GCC; see the file COPYING3.	If not see
 #include "timevar.h"
 #include "target.h"
 #include "ira.h"
+#include "alloc-pool.h"
 #include "lra-int.h"
 #include "df.h"
 
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* [PATCH 09/35] Change use to type-based pool allocator in c-format.c.
  2015-05-27 14:09 [PATCH 01/35] Introduce new type-based pool allocator mliska
                   ` (2 preceding siblings ...)
  2015-05-27 14:00 ` [PATCH 06/35] Change use to type-based pool allocator in ira-color.c mliska
@ 2015-05-27 14:00 ` mliska
  2015-05-27 14:16   ` Jakub Jelinek
  2015-05-27 18:01   ` Jeff Law
  2015-05-27 14:00 ` [PATCH 03/35] Change use to type-based pool allocator in lra-lives.c mliska
                   ` (30 subsequent siblings)
  34 siblings, 2 replies; 108+ messages in thread
From: mliska @ 2015-05-27 14:00 UTC (permalink / raw)
  To: gcc-patches

gcc/c-family/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* c-format.c (check_format_arg): Use new type-based pool allocator.
	(check_format_info_main): Likewise.
---
 gcc/c-family/c-format.c | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)

diff --git a/gcc/c-family/c-format.c b/gcc/c-family/c-format.c
index 145bbfd..7b9bf38 100644
--- a/gcc/c-family/c-format.c
+++ b/gcc/c-family/c-format.c
@@ -1031,7 +1031,8 @@ static void check_format_arg (void *, tree, unsigned HOST_WIDE_INT);
 static void check_format_info_main (format_check_results *,
 				    function_format_info *,
 				    const char *, int, tree,
-                                    unsigned HOST_WIDE_INT, alloc_pool);
+                                    unsigned HOST_WIDE_INT,
+				    pool_allocator<format_wanted_type> &);
 
 static void init_dollar_format_checking (int, tree);
 static int maybe_read_dollar_number (const char **, int,
@@ -1518,7 +1519,6 @@ check_format_arg (void *ctx, tree format_tree,
   const char *format_chars;
   tree array_size = 0;
   tree array_init;
-  alloc_pool fwt_pool;
 
   if (TREE_CODE (format_tree) == VAR_DECL)
     {
@@ -1694,11 +1694,9 @@ check_format_arg (void *ctx, tree format_tree,
      will decrement it if it finds there are extra arguments, but this way
      need not adjust it for every return.  */
   res->number_other++;
-  fwt_pool = create_alloc_pool ("format_wanted_type pool",
-                                sizeof (format_wanted_type), 10);
+  pool_allocator <format_wanted_type> fwt_pool ("format_wanted_type pool", 10);
   check_format_info_main (res, info, format_chars, format_length,
                           params, arg_num, fwt_pool);
-  free_alloc_pool (fwt_pool);
 }
 
 
@@ -1713,7 +1711,8 @@ static void
 check_format_info_main (format_check_results *res,
 			function_format_info *info, const char *format_chars,
 			int format_length, tree params,
-                        unsigned HOST_WIDE_INT arg_num, alloc_pool fwt_pool)
+                        unsigned HOST_WIDE_INT arg_num,
+			pool_allocator<format_wanted_type> &fwt_pool)
 {
   const char *orig_format_chars = format_chars;
   tree first_fillin_param = params;
@@ -2424,8 +2423,7 @@ check_format_info_main (format_check_results *res,
 	      fci = fci->chain;
 	      if (fci)
 		{
-                  wanted_type_ptr = (format_wanted_type *)
-                      pool_alloc (fwt_pool);
+                  wanted_type_ptr = fwt_pool.allocate ();
 		  arg_num++;
 		  wanted_type = *fci->types[length_chars_val].type;
 		  wanted_type_name = fci->types[length_chars_val].name;
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* [PATCH 12/35] Change use to type-based pool allocator in cselib.c.
  2015-05-27 14:09 [PATCH 01/35] Introduce new type-based pool allocator mliska
@ 2015-05-27 14:00 ` mliska
  2015-05-29 13:38   ` Martin Liška
  2015-05-27 14:00 ` [PATCH 10/35] Change use to type-based pool allocator in cfg.c mliska
                   ` (33 subsequent siblings)
  34 siblings, 1 reply; 108+ messages in thread
From: mliska @ 2015-05-27 14:00 UTC (permalink / raw)
  To: gcc-patches

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* cselib.c (new_elt_list): Use new type-based pool allocator.
	(new_elt_loc_list): Likewise.
	(unchain_one_elt_list): Likewise.
	(unchain_one_elt_loc_list): Likewise.
	(unchain_one_value): Likewise.
	(new_cselib_val): Likewise.
	(cselib_init): Likewise.
	(cselib_finish): Likewise.
---
 gcc/alias.c          |  1 +
 gcc/cfgcleanup.c     |  1 +
 gcc/cprop.c          |  1 +
 gcc/cselib.c         | 63 ++++++++++++++++++++++++++++++++--------------------
 gcc/cselib.h         | 33 ++++++++++++++++++++++++++-
 gcc/gcse.c           |  1 +
 gcc/postreload.c     |  1 +
 gcc/print-rtl.c      |  1 +
 gcc/sel-sched-dump.c |  1 +
 9 files changed, 78 insertions(+), 25 deletions(-)

diff --git a/gcc/alias.c b/gcc/alias.c
index aa7dc21..bc8e2b4 100644
--- a/gcc/alias.c
+++ b/gcc/alias.c
@@ -53,6 +53,7 @@ along with GCC; see the file COPYING3.  If not see
 #include "tm_p.h"
 #include "regs.h"
 #include "diagnostic-core.h"
+#include "alloc-pool.h"
 #include "cselib.h"
 #include "hash-map.h"
 #include "langhooks.h"
diff --git a/gcc/cfgcleanup.c b/gcc/cfgcleanup.c
index aff64ef..fc2ed31 100644
--- a/gcc/cfgcleanup.c
+++ b/gcc/cfgcleanup.c
@@ -50,6 +50,7 @@ along with GCC; see the file COPYING3.  If not see
 #include "flags.h"
 #include "recog.h"
 #include "diagnostic-core.h"
+#include "alloc-pool.h"
 #include "cselib.h"
 #include "params.h"
 #include "tm_p.h"
diff --git a/gcc/cprop.c b/gcc/cprop.c
index 57c44ef..41ca201 100644
--- a/gcc/cprop.c
+++ b/gcc/cprop.c
@@ -63,6 +63,7 @@ along with GCC; see the file COPYING3.  If not see
 #include "expr.h"
 #include "except.h"
 #include "params.h"
+#include "alloc-pool.h"
 #include "cselib.h"
 #include "intl.h"
 #include "obstack.h"
diff --git a/gcc/cselib.c b/gcc/cselib.c
index 7a50f50..8de85bc 100644
--- a/gcc/cselib.c
+++ b/gcc/cselib.c
@@ -46,6 +46,7 @@ along with GCC; see the file COPYING3.  If not see
 #include "ggc.h"
 #include "hash-table.h"
 #include "dumpfile.h"
+#include "alloc-pool.h"
 #include "cselib.h"
 #include "predict.h"
 #include "basic-block.h"
@@ -56,9 +57,25 @@ along with GCC; see the file COPYING3.  If not see
 #include "bitmap.h"
 
 /* A list of cselib_val structures.  */
-struct elt_list {
-    struct elt_list *next;
-    cselib_val *elt;
+struct elt_list
+{
+  struct elt_list *next;
+  cselib_val *elt;
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove((elt_list *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<elt_list> pool;
 };
 
 static bool cselib_record_memory;
@@ -260,7 +277,13 @@ static unsigned int cfa_base_preserved_regno = INVALID_REGNUM;
    May or may not contain the useless values - the list is compacted
    each time memory is invalidated.  */
 static cselib_val *first_containing_mem = &dummy_val;
-static alloc_pool elt_loc_list_pool, elt_list_pool, cselib_val_pool, value_pool;
+
+pool_allocator<elt_list> elt_list::pool ("elt_list", 10);
+pool_allocator<elt_loc_list> elt_loc_list::pool ("elt_loc_list", 10);
+pool_allocator<cselib_val> cselib_val::pool ("cselib_val_list", 10);
+
+static pool_allocator<rtx_def> value_pool ("value", 100, RTX_CODE_SIZE (VALUE),
+					   true);
 
 /* If nonnull, cselib will call this function before freeing useless
    VALUEs.  A VALUE is deemed useless if its "locs" field is null.  */
@@ -288,8 +311,7 @@ void (*cselib_record_sets_hook) (rtx_insn *insn, struct cselib_set *sets,
 static inline struct elt_list *
 new_elt_list (struct elt_list *next, cselib_val *elt)
 {
-  struct elt_list *el;
-  el = (struct elt_list *) pool_alloc (elt_list_pool);
+  elt_list *el = new elt_list ();
   el->next = next;
   el->elt = elt;
   return el;
@@ -373,14 +395,14 @@ new_elt_loc_list (cselib_val *val, rtx loc)
 	}
 
       /* Chain LOC back to VAL.  */
-      el = (struct elt_loc_list *) pool_alloc (elt_loc_list_pool);
+      el = new elt_loc_list;
       el->loc = val->val_rtx;
       el->setting_insn = cselib_current_insn;
       el->next = NULL;
       CSELIB_VAL_PTR (loc)->locs = el;
     }
 
-  el = (struct elt_loc_list *) pool_alloc (elt_loc_list_pool);
+  el = new elt_loc_list;
   el->loc = loc;
   el->setting_insn = cselib_current_insn;
   el->next = next;
@@ -420,7 +442,7 @@ unchain_one_elt_list (struct elt_list **pl)
   struct elt_list *l = *pl;
 
   *pl = l->next;
-  pool_free (elt_list_pool, l);
+  delete l;
 }
 
 /* Likewise for elt_loc_lists.  */
@@ -431,7 +453,7 @@ unchain_one_elt_loc_list (struct elt_loc_list **pl)
   struct elt_loc_list *l = *pl;
 
   *pl = l->next;
-  pool_free (elt_loc_list_pool, l);
+  delete l;
 }
 
 /* Likewise for cselib_vals.  This also frees the addr_list associated with
@@ -443,7 +465,7 @@ unchain_one_value (cselib_val *v)
   while (v->addr_list)
     unchain_one_elt_list (&v->addr_list);
 
-  pool_free (cselib_val_pool, v);
+  delete v;
 }
 
 /* Remove all entries from the hash table.  Also used during
@@ -1306,7 +1328,7 @@ cselib_hash_rtx (rtx x, int create, machine_mode memmode)
 static inline cselib_val *
 new_cselib_val (unsigned int hash, machine_mode mode, rtx x)
 {
-  cselib_val *e = (cselib_val *) pool_alloc (cselib_val_pool);
+  cselib_val *e = new cselib_val;
 
   gcc_assert (hash);
   gcc_assert (next_uid);
@@ -1318,7 +1340,7 @@ new_cselib_val (unsigned int hash, machine_mode mode, rtx x)
      precisely when we can have VALUE RTXen (when cselib is active)
      so we don't need to put them in garbage collected memory.
      ??? Why should a VALUE be an RTX in the first place?  */
-  e->val_rtx = (rtx) pool_alloc (value_pool);
+  e->val_rtx = value_pool.allocate ();
   memset (e->val_rtx, 0, RTX_HDR_SIZE);
   PUT_CODE (e->val_rtx, VALUE);
   PUT_MODE (e->val_rtx, mode);
@@ -2729,13 +2751,6 @@ cselib_process_insn (rtx_insn *insn)
 void
 cselib_init (int record_what)
 {
-  elt_list_pool = create_alloc_pool ("elt_list",
-				     sizeof (struct elt_list), 10);
-  elt_loc_list_pool = create_alloc_pool ("elt_loc_list",
-				         sizeof (struct elt_loc_list), 10);
-  cselib_val_pool = create_alloc_pool ("cselib_val_list",
-				       sizeof (cselib_val), 10);
-  value_pool = create_alloc_pool ("value", RTX_CODE_SIZE (VALUE), 100);
   cselib_record_memory = record_what & CSELIB_RECORD_MEMORY;
   cselib_preserve_constants = record_what & CSELIB_PRESERVE_CONSTANTS;
   cselib_any_perm_equivs = false;
@@ -2777,10 +2792,10 @@ cselib_finish (void)
   cselib_any_perm_equivs = false;
   cfa_base_preserved_val = NULL;
   cfa_base_preserved_regno = INVALID_REGNUM;
-  free_alloc_pool (elt_list_pool);
-  free_alloc_pool (elt_loc_list_pool);
-  free_alloc_pool (cselib_val_pool);
-  free_alloc_pool (value_pool);
+  elt_list::pool.release ();
+  elt_loc_list::pool.release ();
+  cselib_val::pool.release ();
+  value_pool.release ();
   cselib_clear_table ();
   delete cselib_hash_table;
   cselib_hash_table = NULL;
diff --git a/gcc/cselib.h b/gcc/cselib.h
index 082bf54..5fe9076 100644
--- a/gcc/cselib.h
+++ b/gcc/cselib.h
@@ -21,7 +21,8 @@ along with GCC; see the file COPYING3.  If not see
 #define GCC_CSELIB_H
 
 /* Describe a value.  */
-struct cselib_val {
+struct cselib_val
+{
   /* The hash value.  */
   unsigned int hash;
 
@@ -40,6 +41,21 @@ struct cselib_val {
   struct elt_list *addr_list;
 
   struct cselib_val *next_containing_mem;
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove((cselib_val *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<cselib_val> pool;
 };
 
 /* A list of rtl expressions that hold the same value.  */
@@ -50,6 +66,21 @@ struct elt_loc_list {
   rtx loc;
   /* The insn that made the equivalence.  */
   rtx_insn *setting_insn;
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove((elt_loc_list *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<elt_loc_list> pool;
 };
 
 /* Describe a single set that is part of an insn.  */
diff --git a/gcc/gcse.c b/gcc/gcse.c
index efbe4f4..28476fb 100644
--- a/gcc/gcse.c
+++ b/gcc/gcse.c
@@ -180,6 +180,7 @@ along with GCC; see the file COPYING3.  If not see
 #include "except.h"
 #include "ggc.h"
 #include "params.h"
+#include "alloc-pool.h"
 #include "cselib.h"
 #include "intl.h"
 #include "obstack.h"
diff --git a/gcc/postreload.c b/gcc/postreload.c
index 4d3c26f..06c4973 100644
--- a/gcc/postreload.c
+++ b/gcc/postreload.c
@@ -63,6 +63,7 @@ along with GCC; see the file COPYING3.  If not see
 #include "basic-block.h"
 #include "reload.h"
 #include "recog.h"
+#include "alloc-pool.h"
 #include "cselib.h"
 #include "diagnostic-core.h"
 #include "except.h"
diff --git a/gcc/print-rtl.c b/gcc/print-rtl.c
index 882f808..5e8838a 100644
--- a/gcc/print-rtl.c
+++ b/gcc/print-rtl.c
@@ -52,6 +52,7 @@ along with GCC; see the file COPYING3.  If not see
 #include "basic-block.h"
 #include "diagnostic.h"
 #include "tree-pretty-print.h"
+#include "alloc-pool.h"
 #include "cselib.h"
 #include "dumpfile.h"	/* for dump_flags */
 #include "dwarf2out.h"
diff --git a/gcc/sel-sched-dump.c b/gcc/sel-sched-dump.c
index 6f174a5..943fdd0 100644
--- a/gcc/sel-sched-dump.c
+++ b/gcc/sel-sched-dump.c
@@ -40,6 +40,7 @@ along with GCC; see the file COPYING3.  If not see
 #include "insn-config.h"
 #include "insn-attr.h"
 #include "params.h"
+#include "alloc-pool.h"
 #include "cselib.h"
 #include "target.h"
 
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* [PATCH 19/35] Change use to type-based pool allocator in sel-sched-ir.c.
  2015-05-27 14:09 [PATCH 01/35] Introduce new type-based pool allocator mliska
                   ` (5 preceding siblings ...)
  2015-05-27 14:00 ` [PATCH 04/35] Change use to type-based pool allocator in lra.c mliska
@ 2015-05-27 14:07 ` mliska
  2015-05-27 18:12   ` Jeff Law
  2015-05-27 14:09 ` [PATCH 02/35] Change use to type-based pool allocator in et-forest.c mliska
                   ` (27 subsequent siblings)
  34 siblings, 1 reply; 108+ messages in thread
From: mliska @ 2015-05-27 14:07 UTC (permalink / raw)
  To: gcc-patches

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* sel-sched-ir.c (alloc_sched_pools): Use new type-based pool allocator.
	(free_sched_pools): Likewise.
	* sel-sched-ir.h (_list_alloc): Likewise.
	(_list_remove): Likewise.
---
 gcc/sel-sched-ir.c | 7 ++-----
 gcc/sel-sched-ir.h | 6 +++---
 2 files changed, 5 insertions(+), 8 deletions(-)

diff --git a/gcc/sel-sched-ir.c b/gcc/sel-sched-ir.c
index 94f6c43..ffaba56 100644
--- a/gcc/sel-sched-ir.c
+++ b/gcc/sel-sched-ir.c
@@ -70,7 +70,7 @@ vec<sel_region_bb_info_def>
     sel_region_bb_info = vNULL;
 
 /* A pool for allocating all lists.  */
-alloc_pool sched_lists_pool;
+pool_allocator<_list_node> sched_lists_pool ("sel-sched-lists", 500);
 
 /* This contains information about successors for compute_av_set.  */
 struct succs_info current_succs;
@@ -5030,9 +5030,6 @@ alloc_sched_pools (void)
   succs_info_pool.size = succs_size;
   succs_info_pool.top = -1;
   succs_info_pool.max_top = -1;
-
-  sched_lists_pool = create_alloc_pool ("sel-sched-lists",
-                                        sizeof (struct _list_node), 500);
 }
 
 /* Free the pools.  */
@@ -5041,7 +5038,7 @@ free_sched_pools (void)
 {
   int i;
 
-  free_alloc_pool (sched_lists_pool);
+  sched_lists_pool.release ();
   gcc_assert (succs_info_pool.top == -1);
   for (i = 0; i <= succs_info_pool.max_top; i++)
     {
diff --git a/gcc/sel-sched-ir.h b/gcc/sel-sched-ir.h
index 91ce92f..3707a87 100644
--- a/gcc/sel-sched-ir.h
+++ b/gcc/sel-sched-ir.h
@@ -364,12 +364,12 @@ struct _list_node
 /* _list_t functions.
    All of _*list_* functions are used through accessor macros, thus
    we can't move them in sel-sched-ir.c.  */
-extern alloc_pool sched_lists_pool;
+extern pool_allocator<_list_node> sched_lists_pool;
 
 static inline _list_t
 _list_alloc (void)
 {
-  return (_list_t) pool_alloc (sched_lists_pool);
+  return sched_lists_pool.allocate ();
 }
 
 static inline void
@@ -395,7 +395,7 @@ _list_remove (_list_t *lp)
   _list_t n = *lp;
 
   *lp = _LIST_NEXT (n);
-  pool_free (sched_lists_pool, n);
+  sched_lists_pool.remove (n);
 }
 
 static inline void
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* [PATCH 01/35] Introduce new type-based pool allocator.
@ 2015-05-27 14:09 mliska
  2015-05-27 14:00 ` [PATCH 12/35] Change use to type-based pool allocator in cselib.c mliska
                   ` (34 more replies)
  0 siblings, 35 replies; 108+ messages in thread
From: mliska @ 2015-05-27 14:09 UTC (permalink / raw)
  To: gcc-patches

Hello.

The following patch set attempts to replace the old-style pool allocator
with a type-based one. Moreover, classes and structs that are used
just by a pool allocator now have their own ctors and dtors.
Thus, using the allocator is much easier and we no longer need to cast types
back and forth. Another benefit can be achieved in the future, as we will
be able to call a class constructor to correctly register the location
where memory is allocated (-fgather-detailed-mem-stats).

The patch can bootstrap on x86_64-linux-gnu and ppc64-linux-gnu and
survives regression tests on x86_64-linux-gnu.

Ready for trunk?
Thanks,
Martin

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* alloc-pool.c (struct alloc_pool_descriptor): Move definition
	to header file.
	* alloc-pool.h (pool_allocator::pool_allocator): New function.
	(pool_allocator::release): Likewise.
	(inline pool_allocator::release_if_empty): Likewise.
	(inline pool_allocator::~pool_allocator): Likewise.
	(pool_allocator::allocate): Likewise.
	(pool_allocator::remove): Likewise.
---
 gcc/alloc-pool.c |  33 +-----
 gcc/alloc-pool.h | 350 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 355 insertions(+), 28 deletions(-)

diff --git a/gcc/alloc-pool.c b/gcc/alloc-pool.c
index 81909d8..0bea7a6 100644
--- a/gcc/alloc-pool.c
+++ b/gcc/alloc-pool.c
@@ -25,6 +25,8 @@ along with GCC; see the file COPYING3.  If not see
 #include "hash-table.h"
 #include "hash-map.h"
 
+ALLOC_POOL_ID_TYPE last_id;
+
 #define align_eight(x) (((x+7) >> 3) << 3)
 
 /* The internal allocation object.  */
@@ -58,36 +60,10 @@ typedef struct allocation_object_def
 #define USER_PTR_FROM_ALLOCATION_OBJECT_PTR(X)				\
    ((void *) (((allocation_object *) (X))->u.data))
 
-#ifdef ENABLE_CHECKING
-/* Last used ID.  */
-static ALLOC_POOL_ID_TYPE last_id;
-#endif
-
-/* Store information about each particular alloc_pool.  Note that this
-   will underestimate the amount the amount of storage used by a small amount:
-   1) The overhead in a pool is not accounted for.
-   2) The unallocated elements in a block are not accounted for.  Note
-   that this can at worst case be one element smaller that the block
-   size for that pool.  */
-struct alloc_pool_descriptor
-{
-  /* Number of pools allocated.  */
-  unsigned long created;
-  /* Gross allocated storage.  */
-  unsigned long allocated;
-  /* Amount of currently active storage. */
-  unsigned long current;
-  /* Peak amount of storage used.  */
-  unsigned long peak;
-  /* Size of element in the pool.  */
-  int elt_size;
-};
-
 /* Hashtable mapping alloc_pool names to descriptors.  */
-static hash_map<const char *, alloc_pool_descriptor> *alloc_pool_hash;
+hash_map<const char *, alloc_pool_descriptor> *alloc_pool_hash;
 
-/* For given name, return descriptor, create new if needed.  */
-static struct alloc_pool_descriptor *
+struct alloc_pool_descriptor *
 allocate_pool_descriptor (const char *name)
 {
   if (!alloc_pool_hash)
@@ -96,6 +72,7 @@ allocate_pool_descriptor (const char *name)
   return &alloc_pool_hash->get_or_insert (name);
 }
 
+
 /* Create a pool of things of size SIZE, with NUM in each block we
    allocate.  */
 
diff --git a/gcc/alloc-pool.h b/gcc/alloc-pool.h
index 0c30711..8fd664f 100644
--- a/gcc/alloc-pool.h
+++ b/gcc/alloc-pool.h
@@ -20,6 +20,8 @@ along with GCC; see the file COPYING3.  If not see
 #ifndef ALLOC_POOL_H
 #define ALLOC_POOL_H
 
+#include "hash-map.h"
+
 typedef unsigned long ALLOC_POOL_ID_TYPE;
 
 typedef struct alloc_pool_list_def
@@ -63,4 +65,352 @@ extern void free_alloc_pool_if_empty (alloc_pool *);
 extern void *pool_alloc (alloc_pool) ATTRIBUTE_MALLOC;
 extern void pool_free (alloc_pool, void *);
 extern void dump_alloc_pool_statistics (void);
+
+typedef unsigned long ALLOC_POOL_ID_TYPE;
+
+/* Type based memory pool allocator.  */
+template <typename T>
+class pool_allocator
+{
+public:
+  /* Default constructor for pool allocator called NAME. Each block
+     has NUM elements. The allocator support EXTRA_SIZE and can
+     potentially IGNORE_TYPE_SIZE.  */
+  pool_allocator (const char *name, size_t num, size_t extra_size = 0,
+		  bool ignore_type_size = false);
+
+  /* Default destuctor.  */
+  ~pool_allocator ();
+
+  /* Release internal data structures.  */
+  void release ();
+
+  /* Release internal data structures if the pool has not allocated
+     an object.  */
+  void release_if_empty ();
+
+  /* Allocate a new object.  */
+  T *allocate () ATTRIBUTE_MALLOC;
+
+  /* Release OBJECT that must come from the pool.  */
+  void remove (T *object);
+
+private:
+  struct allocation_pool_list
+  {
+    allocation_pool_list *next;
+  };
+
+  template <typename U>
+  struct allocation_object
+  {
+#ifdef ENABLE_CHECKING
+    /* The ID of alloc pool which the object was allocated from.  */
+    ALLOC_POOL_ID_TYPE id;
+#endif
+
+    union
+      {
+	/* The data of the object.  */
+	char data[1];
+
+	/* Because we want any type of data to be well aligned after the ID,
+	   the following elements are here.  They are never accessed so
+	   the allocated object may be even smaller than this structure.
+	   We do not care about alignment for floating-point types.  */
+	char *align_p;
+	int64_t align_i;
+      } u;
+
+    static inline allocation_object<U> *get_instance (void *data_ptr)
+    {
+      return (allocation_object<U> *)(((char *)(data_ptr)) - offsetof (allocation_object<U>, u.data));
+    }
+
+    static inline U *get_data (void *instance_ptr)
+    {
+      return (U*)(((allocation_object<U> *) instance_ptr)->u.data);
+    }
+  };
+
+  /* Align X to 8.  */
+  size_t align_eight (size_t x)
+  {
+    return (((x+7) >> 3) << 3);
+  }
+
+  const char *m_name;
+#ifdef ENABLE_CHECKING
+  ALLOC_POOL_ID_TYPE m_id;
+#endif
+  size_t m_elts_per_block;
+
+  /* These are the elements that have been allocated at least once and freed.  */
+  allocation_pool_list *m_returned_free_list;
+
+  /* These are the elements that have not yet been allocated out of
+     the last block obtained from XNEWVEC.  */
+  char* m_virgin_free_list;
+
+  /* The number of elements in the virgin_free_list that can be
+     allocated before needing another block.  */
+  size_t m_virgin_elts_remaining;
+  size_t m_elts_allocated;
+  size_t m_elts_free;
+  size_t m_blocks_allocated;
+  allocation_pool_list *m_block_list;
+  size_t m_block_size;
+  size_t m_elt_size;
+};
+
+#ifdef ENABLE_CHECKING
+/* Last used ID.  */
+extern ALLOC_POOL_ID_TYPE last_id;
+#endif
+
+/* Store information about each particular alloc_pool.  Note that this
+   will underestimate the amount the amount of storage used by a small amount:
+   1) The overhead in a pool is not accounted for.
+   2) The unallocated elements in a block are not accounted for.  Note
+   that this can at worst case be one element smaller that the block
+   size for that pool.  */
+struct alloc_pool_descriptor
+{
+  /* Number of pools allocated.  */
+  unsigned long created;
+  /* Gross allocated storage.  */
+  unsigned long allocated;
+  /* Amount of currently active storage. */
+  unsigned long current;
+  /* Peak amount of storage used.  */
+  unsigned long peak;
+  /* Size of element in the pool.  */
+  int elt_size;
+};
+
+
+/* Hashtable mapping alloc_pool names to descriptors.  */
+extern hash_map<const char *, alloc_pool_descriptor> *alloc_pool_hash;
+
+/* For given name, return descriptor, create new if needed.  */
+alloc_pool_descriptor *
+allocate_pool_descriptor (const char *name);
+
+template <typename T>
+inline
+pool_allocator<T>::pool_allocator (const char *name, size_t num,
+				   size_t extra_size, bool ignore_type_size):
+  m_name (name), m_elts_per_block (num), m_returned_free_list (NULL),
+  m_virgin_free_list (NULL), m_virgin_elts_remaining (0), m_elts_allocated (0),
+  m_elts_free (0), m_blocks_allocated (0), m_block_list (NULL)
+{
+  size_t header_size;
+  size_t size = (ignore_type_size ? 0 : sizeof (T)) + extra_size;
+
+  gcc_checking_assert (m_name);
+
+  /* Make size large enough to store the list header.  */
+  if (size < sizeof (allocation_pool_list*))
+    size = sizeof (allocation_pool_list*);
+
+  /* Now align the size to a multiple of 4.  */
+  size = align_eight (size);
+
+#ifdef ENABLE_CHECKING
+  /* Add the aligned size of ID.  */
+  size += offsetof (allocation_object<T>, u.data);
+#endif
+
+  /* Um, we can't really allocate 0 elements per block.  */
+  gcc_checking_assert (m_elts_per_block);
+
+  m_elt_size = size;
+
+  if (GATHER_STATISTICS)
+    {
+      alloc_pool_descriptor *desc = allocate_pool_descriptor (m_name);
+      desc->elt_size = size;
+      desc->created++;
+    }
+
+  /* List header size should be a multiple of 8.  */
+  header_size = align_eight (sizeof (allocation_pool_list));
+
+  m_block_size = (size * m_elts_per_block) + header_size;
+
+#ifdef ENABLE_CHECKING
+  /* Increase the last used ID and use it for this pool.
+     ID == 0 is used for free elements of pool so skip it.  */
+  last_id++;
+  if (last_id == 0)
+    last_id++;
+
+  m_id = last_id;
+#endif
+}
+
+/* Free all memory allocated for the given memory pool.  */
+template <typename T>
+inline void
+pool_allocator<T>::release ()
+{
+  allocation_pool_list *block, *next_block;
+
+  /* Free each block allocated to the pool.  */
+  for (block = m_block_list; block != NULL; block = next_block)
+    {
+      next_block = block->next;
+      free (block);
+    }
+
+  if (GATHER_STATISTICS && false)
+    {
+      alloc_pool_descriptor *desc = allocate_pool_descriptor (m_name);
+      desc->current -= (m_elts_allocated - m_elts_free) * m_elt_size;
+    }
+
+  m_returned_free_list = NULL;
+  m_virgin_free_list = NULL;
+  m_virgin_elts_remaining = 0;
+  m_elts_allocated = 0;
+  m_elts_free = 0;
+  m_blocks_allocated = 0;
+  m_block_list = NULL;
+}
+
+template <typename T>
+void
+inline pool_allocator<T>::release_if_empty ()
+{
+  if (m_elts_free == m_elts_allocated)
+    release ();
+}
+
+template <typename T>
+inline pool_allocator<T>::~pool_allocator()
+{
+  release ();
+}
+
+/* Allocates one element from the pool specified.  */
+template <typename T>
+inline T *
+pool_allocator<T>::allocate ()
+{
+  allocation_pool_list *header;
+#ifdef ENABLE_VALGRIND_ANNOTATIONS
+  int size;
+#endif
+
+  if (GATHER_STATISTICS)
+    {
+      alloc_pool_descriptor *desc = allocate_pool_descriptor (m_name);
+
+      desc->allocated += m_elt_size;
+      desc->current += m_elt_size;
+      if (desc->peak < desc->current)
+	desc->peak = desc->current;
+    }
+
+#ifdef ENABLE_VALGRIND_ANNOTATIONS
+  size = m_elt_size - offsetof (allocation_object<T>, u.data);
+#endif
+
+  /* If there are no more free elements, make some more!.  */
+  if (!m_returned_free_list)
+    {
+      char *block;
+      if (!m_virgin_elts_remaining)
+	{
+	  allocation_pool_list *block_header;
+
+	  /* Make the block.  */
+	  block = XNEWVEC (char, m_block_size);
+	  block_header = (allocation_pool_list*) block;
+	  block += align_eight (sizeof (allocation_pool_list));
+
+	  /* Throw it on the block list.  */
+	  block_header->next = m_block_list;
+	  m_block_list = block_header;
+
+	  /* Make the block available for allocation.  */
+	  m_virgin_free_list = block;
+	  m_virgin_elts_remaining = m_elts_per_block;
+
+	  /* Also update the number of elements we have free/allocated, and
+	     increment the allocated block count.  */
+	  m_elts_allocated += m_elts_per_block;
+	  m_elts_free += m_elts_per_block;
+	  m_blocks_allocated += 1;
+	}
+
+      /* We now know that we can take the first elt off the virgin list and
+	 put it on the returned list. */
+      block = m_virgin_free_list;
+      header = (allocation_pool_list*) allocation_object<T>::get_data (block);
+      header->next = NULL;
+#ifdef ENABLE_CHECKING
+      /* Mark the element to be free.  */
+      ((allocation_object<T> *) block)->id = 0;
+#endif
+      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (header,size));
+      m_returned_free_list = header;
+      m_virgin_free_list += m_elt_size;
+      m_virgin_elts_remaining--;
+
+    }
+
+  /* Pull the first free element from the free list, and return it.  */
+  header = m_returned_free_list;
+  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (header, sizeof (*header)));
+  m_returned_free_list = header->next;
+  m_elts_free--;
+
+#ifdef ENABLE_CHECKING
+  /* Set the ID for element.  */
+  allocation_object<T>::get_instance (header)->id = m_id;
+#endif
+  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (header, size));
+
+  /* Call default constructor.  */
+  return (T *)(header);
+}
+
+/* Puts PTR back on POOL's free list.  */
+template <typename T>
+void
+pool_allocator<T>::remove (T *object)
+{
+  allocation_pool_list *header;
+#if defined(ENABLE_VALGRIND_ANNOTATIONS) || defined(ENABLE_CHECKING)
+  int size;
+  size = m_elt_size - offsetof (allocation_object<T>, u.data);
+#endif
+
+#ifdef ENABLE_CHECKING
+  gcc_assert (object
+	      /* Check if we free more than we allocated, which is Bad (TM).  */
+	      && m_elts_free < m_elts_allocated
+	      /* Check whether the PTR was allocated from POOL.  */
+	      && m_id == allocation_object<T>::get_instance (object)->id);
+
+  memset (object, 0xaf, size);
+
+  /* Mark the element to be free.  */
+  allocation_object<T>::get_instance (object)->id = 0;
+#endif
+
+  header = (allocation_pool_list*) object;
+  header->next = m_returned_free_list;
+  m_returned_free_list = header;
+  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
+  m_elts_free++;
+
+  if (GATHER_STATISTICS)
+    {
+      alloc_pool_descriptor *desc = allocate_pool_descriptor (m_name);
+      desc->current -= m_elt_size;
+    }
+}
+
 #endif
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* [PATCH 02/35] Change use to type-based pool allocator in et-forest.c.
  2015-05-27 14:09 [PATCH 01/35] Introduce new type-based pool allocator mliska
                   ` (6 preceding siblings ...)
  2015-05-27 14:07 ` [PATCH 19/35] Change use to type-based pool allocator in sel-sched-ir.c mliska
@ 2015-05-27 14:09 ` mliska
  2015-05-27 17:50   ` Jeff Law
  2015-05-27 14:15 ` [PATCH 05/35] Change use to type-based pool allocator in ira-color.c mliska
                   ` (26 subsequent siblings)
  34 siblings, 1 reply; 108+ messages in thread
From: mliska @ 2015-05-27 14:09 UTC (permalink / raw)
  To: gcc-patches

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* et-forest.c (et_new_occ): Use new type-based pool allocator.
	(et_new_tree): Likewise.
	(et_free_tree): Likewise.
	(et_free_tree_force): Likewise.
	(et_free_pools): Likewise.
	(et_split): Likewise.
---
 gcc/dominance.c |  1 +
 gcc/et-forest.c | 48 +++++++++++++++++++++++++++++-------------------
 gcc/et-forest.h | 15 +++++++++++++++
 3 files changed, 45 insertions(+), 19 deletions(-)

diff --git a/gcc/dominance.c b/gcc/dominance.c
index 09c8c90..f3c99ba 100644
--- a/gcc/dominance.c
+++ b/gcc/dominance.c
@@ -51,6 +51,7 @@
 #include "cfganal.h"
 #include "basic-block.h"
 #include "diagnostic-core.h"
+#include "alloc-pool.h"
 #include "et-forest.h"
 #include "timevar.h"
 #include "hash-map.h"
diff --git a/gcc/et-forest.c b/gcc/et-forest.c
index da6b7d7..fd451b8 100644
--- a/gcc/et-forest.c
+++ b/gcc/et-forest.c
@@ -25,8 +25,8 @@ License along with libiberty; see the file COPYING3.  If not see
 #include "config.h"
 #include "system.h"
 #include "coretypes.h"
-#include "et-forest.h"
 #include "alloc-pool.h"
+#include "et-forest.h"
 
 /* We do not enable this with ENABLE_CHECKING, since it is awfully slow.  */
 #undef DEBUG_ET
@@ -59,10 +59,26 @@ struct et_occ
 				   on the path to the root.  */
   struct et_occ *min_occ;	/* The occurrence in the subtree with the minimal
 				   depth.  */
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove((et_occ *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<et_occ> pool;
+
 };
 
-static alloc_pool et_nodes;
-static alloc_pool et_occurrences;
+pool_allocator<et_node> et_node::pool ("et_nodes pool", 300);
+pool_allocator<et_occ> et_occ::pool ("et_occ pool", 300);
 
 /* Changes depth of OCC to D.  */
 
@@ -449,11 +465,7 @@ et_splay (struct et_occ *occ)
 static struct et_occ *
 et_new_occ (struct et_node *node)
 {
-  struct et_occ *nw;
-
-  if (!et_occurrences)
-    et_occurrences = create_alloc_pool ("et_occ pool", sizeof (struct et_occ), 300);
-  nw = (struct et_occ *) pool_alloc (et_occurrences);
+  et_occ *nw = new et_occ; 
 
   nw->of = node;
   nw->parent = NULL;
@@ -474,9 +486,7 @@ et_new_tree (void *data)
 {
   struct et_node *nw;
 
-  if (!et_nodes)
-    et_nodes = create_alloc_pool ("et_node pool", sizeof (struct et_node), 300);
-  nw = (struct et_node *) pool_alloc (et_nodes);
+  nw = new et_node;
 
   nw->data = data;
   nw->father = NULL;
@@ -501,8 +511,8 @@ et_free_tree (struct et_node *t)
   if (t->father)
     et_split (t);
 
-  pool_free (et_occurrences, t->rightmost_occ);
-  pool_free (et_nodes, t);
+  delete t->rightmost_occ;
+  delete t;
 }
 
 /* Releases et tree T without maintaining other nodes.  */
@@ -510,10 +520,10 @@ et_free_tree (struct et_node *t)
 void
 et_free_tree_force (struct et_node *t)
 {
-  pool_free (et_occurrences, t->rightmost_occ);
+  delete t->rightmost_occ;
   if (t->parent_occ)
-    pool_free (et_occurrences, t->parent_occ);
-  pool_free (et_nodes, t);
+    delete t->parent_occ;
+  delete t;
 }
 
 /* Release the alloc pools, if they are empty.  */
@@ -521,8 +531,8 @@ et_free_tree_force (struct et_node *t)
 void
 et_free_pools (void)
 {
-  free_alloc_pool_if_empty (&et_occurrences);
-  free_alloc_pool_if_empty (&et_nodes);
+  et_occ::pool.release_if_empty ();
+  et_node::pool.release_if_empty ();
 }
 
 /* Sets father of et tree T to FATHER.  */
@@ -614,7 +624,7 @@ et_split (struct et_node *t)
   rmost->depth = 0;
   rmost->min = 0;
 
-  pool_free (et_occurrences, p_occ);
+  delete p_occ; 
 
   /* Update the tree.  */
   if (father->son == t)
diff --git a/gcc/et-forest.h b/gcc/et-forest.h
index b507c64..1b3a16c 100644
--- a/gcc/et-forest.h
+++ b/gcc/et-forest.h
@@ -66,6 +66,21 @@ struct et_node
 
   struct et_occ *rightmost_occ;	/* The rightmost occurrence.  */
   struct et_occ *parent_occ;	/* The occurrence of the parent node.  */
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove((et_node *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<et_node> pool;
 };
 
 struct et_node *et_new_tree (void *data);
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* [PATCH 05/35] Change use to type-based pool allocator in ira-color.c.
  2015-05-27 14:09 [PATCH 01/35] Introduce new type-based pool allocator mliska
                   ` (7 preceding siblings ...)
  2015-05-27 14:09 ` [PATCH 02/35] Change use to type-based pool allocator in et-forest.c mliska
@ 2015-05-27 14:15 ` mliska
  2015-05-27 17:59   ` Jeff Law
  2015-05-27 14:17 ` [PATCH 35/35] Remove old pool allocator mliska
                   ` (25 subsequent siblings)
  34 siblings, 1 reply; 108+ messages in thread
From: mliska @ 2015-05-27 14:15 UTC (permalink / raw)
  To: gcc-patches

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* ira-color.c (init_update_cost_records):Use new type-based pool allocator.
	(get_update_cost_record) Likewise.
	(free_update_cost_record_list) Likewise.
	(finish_update_cost_records) Likewise.
	(initiate_cost_update) Likewise.
---
 gcc/ira-color.c | 35 ++++++++++++++++++++---------------
 1 file changed, 20 insertions(+), 15 deletions(-)

diff --git a/gcc/ira-color.c b/gcc/ira-color.c
index 4750714..b719e7a 100644
--- a/gcc/ira-color.c
+++ b/gcc/ira-color.c
@@ -123,6 +123,21 @@ struct update_cost_record
   int divisor;
   /* Next record for given allocno.  */
   struct update_cost_record *next;
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove((update_cost_record *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<update_cost_record> pool;
 };
 
 /* To decrease footprint of ira_allocno structure we store all data
@@ -1166,25 +1181,16 @@ setup_profitable_hard_regs (void)
    allocnos.  */
 
 /* Pool for update cost records.  */
-static alloc_pool update_cost_record_pool;
-
-/* Initiate update cost records.  */
-static void
-init_update_cost_records (void)
-{
-  update_cost_record_pool
-    = create_alloc_pool ("update cost records",
-			 sizeof (struct update_cost_record), 100);
-}
+pool_allocator<update_cost_record> update_cost_record::pool
+  ("update cost records", 100);
 
 /* Return new update cost record with given params.  */
 static struct update_cost_record *
 get_update_cost_record (int hard_regno, int divisor,
 			struct update_cost_record *next)
 {
-  struct update_cost_record *record;
+  update_cost_record *record = new update_cost_record;
 
-  record = (struct update_cost_record *) pool_alloc (update_cost_record_pool);
   record->hard_regno = hard_regno;
   record->divisor = divisor;
   record->next = next;
@@ -1200,7 +1206,7 @@ free_update_cost_record_list (struct update_cost_record *list)
   while (list != NULL)
     {
       next = list->next;
-      pool_free (update_cost_record_pool, list);
+      delete list;
       list = next;
     }
 }
@@ -1209,7 +1215,7 @@ free_update_cost_record_list (struct update_cost_record *list)
 static void
 finish_update_cost_records (void)
 {
-  free_alloc_pool (update_cost_record_pool);
+  update_cost_record::pool.release ();
 }
 
 /* Array whose element value is TRUE if the corresponding hard
@@ -1264,7 +1270,6 @@ initiate_cost_update (void)
     = (struct update_cost_queue_elem *) ira_allocate (size);
   memset (update_cost_queue_elems, 0, size);
   update_cost_check = 0;
-  init_update_cost_records ();
 }
 
 /* Deallocate data used by function update_costs_from_copies.  */
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 09/35] Change use to type-based pool allocator in c-format.c.
  2015-05-27 14:00 ` [PATCH 09/35] Change use to type-based pool allocator in c-format.c mliska
@ 2015-05-27 14:16   ` Jakub Jelinek
  2015-05-27 18:01   ` Jeff Law
  1 sibling, 0 replies; 108+ messages in thread
From: Jakub Jelinek @ 2015-05-27 14:16 UTC (permalink / raw)
  To: mliska; +Cc: gcc-patches

On Wed, May 27, 2015 at 03:56:47PM +0200, mliska wrote:
> gcc/c-family/ChangeLog:
> 
> 2015-04-30  Martin Liska  <mliska@suse.cz>
> 
> 	* c-format.c (check_format_arg):Use new type-based pool allocator.
> 	(check_format_info_main) Likewise.

Please watch your ChangeLog entries.  Missing space after :
in many cases, missing : after ) in many cases.
Also, please grep your patches for '^+[ 	]*        ' (8 consecutive spaces)
+ lines in patches really should use tabs.

	Jakub

^ permalink raw reply	[flat|nested] 108+ messages in thread

* [PATCH 32/35] Change use to type-based pool allocator in ira-build.c.
  2015-05-27 14:09 [PATCH 01/35] Introduce new type-based pool allocator mliska
                   ` (9 preceding siblings ...)
  2015-05-27 14:17 ` [PATCH 35/35] Remove old pool allocator mliska
@ 2015-05-27 14:17 ` mliska
  2015-05-27 19:34   ` Jeff Law
  2015-05-27 14:17 ` [PATCH 34/35] " mliska
                   ` (23 subsequent siblings)
  34 siblings, 1 reply; 108+ messages in thread
From: mliska @ 2015-05-27 14:17 UTC (permalink / raw)
  To: gcc-patches

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* ira-build.c (finish_allocnos): Use new type-based pool allocator.
	(finish_prefs): Likewise.
	(finish_copies): Likewise.
---
 gcc/ira-build.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/gcc/ira-build.c b/gcc/ira-build.c
index 2de7d34..ec718e1 100644
--- a/gcc/ira-build.c
+++ b/gcc/ira-build.c
@@ -1175,9 +1175,9 @@ finish_allocnos (void)
   ira_free (ira_regno_allocno_map);
   ira_object_id_map_vec.release ();
   allocno_vec.release ();
-  free_alloc_pool (allocno_pool);
-  free_alloc_pool (object_pool);
-  free_alloc_pool (live_range_pool);
+  delete allocno_pool;
+  delete object_pool;
+  delete live_range_pool;
 }
 
 \f
@@ -1366,7 +1366,7 @@ finish_prefs (void)
   FOR_EACH_PREF (pref, pi)
     finish_pref (pref);
   pref_vec.release ();
-  free_alloc_pool (pref_pool);
+  delete pref_pool;
 }
 
 \f
@@ -1627,7 +1627,7 @@ finish_copies (void)
   FOR_EACH_COPY (cp, ci)
     finish_copy (cp);
   copy_vec.release ();
-  free_alloc_pool (copy_pool);
+  delete copy_pool;
 }
 
 \f
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* [PATCH 34/35] Change use to type-based pool allocator in ira-build.c.
  2015-05-27 14:09 [PATCH 01/35] Introduce new type-based pool allocator mliska
                   ` (10 preceding siblings ...)
  2015-05-27 14:17 ` [PATCH 32/35] Change use to type-based pool allocator in ira-build.c mliska
@ 2015-05-27 14:17 ` mliska
  2015-05-27 14:17 ` [PATCH 21/35] Change use to type-based pool allocator in regcprop.c mliska
                   ` (22 subsequent siblings)
  34 siblings, 0 replies; 108+ messages in thread
From: mliska @ 2015-05-27 14:17 UTC (permalink / raw)
  To: gcc-patches

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* ira-build.c (initiate_allocnos): Use new type-based pool allocator.
	(ira_create_object): Likewise.
	(ira_create_allocno): Likewise.
	(ira_create_live_range): Likewise.
	(copy_live_range): Likewise.
	(ira_finish_live_range): Likewise.
	(ira_free_allocno_costs): Likewise.
	(finish_allocno): Likewise.
	(finish_allocnos): Likewise.
	(initiate_prefs): Likewise.
	(ira_create_pref): Likewise.
	(finish_pref): Likewise.
	(finish_prefs): Likewise.
	(initiate_copies): Likewise.
	(ira_create_copy): Likewise.

	(finish_copy): Likewise.
	(finish_copies): Likewise.
---
 gcc/ira-build.c | 51 +++++++++++++++++++++------------------------------
 1 file changed, 21 insertions(+), 30 deletions(-)

diff --git a/gcc/ira-build.c b/gcc/ira-build.c
index ec718e1..534d0bc 100644
--- a/gcc/ira-build.c
+++ b/gcc/ira-build.c
@@ -428,7 +428,9 @@ rebuild_regno_allocno_maps (void)
 \f
 
 /* Pools for allocnos, allocno live ranges and objects.  */
-static alloc_pool allocno_pool, live_range_pool, object_pool;
+static pool_allocator<live_range> live_range_pool ("live ranges", 100);
+static pool_allocator<ira_allocno> allocno_pool ("allocnos", 100);
+static pool_allocator<ira_object> object_pool ("objects", 100);
 
 /* Vec containing references to all created allocnos.  It is a
    container of array allocnos.  */
@@ -442,13 +444,6 @@ static vec<ira_object_t> ira_object_id_map_vec;
 static void
 initiate_allocnos (void)
 {
-  live_range_pool
-    = create_alloc_pool ("live ranges",
-			 sizeof (struct live_range), 100);
-  allocno_pool
-    = create_alloc_pool ("allocnos", sizeof (struct ira_allocno), 100);
-  object_pool
-    = create_alloc_pool ("objects", sizeof (struct ira_object), 100);
   allocno_vec.create (max_reg_num () * 2);
   ira_allocnos = NULL;
   ira_allocnos_num = 0;
@@ -466,7 +461,7 @@ static ira_object_t
 ira_create_object (ira_allocno_t a, int subword)
 {
   enum reg_class aclass = ALLOCNO_CLASS (a);
-  ira_object_t obj = (ira_object_t) pool_alloc (object_pool);
+  ira_object_t obj = object_pool.allocate ();
 
   OBJECT_ALLOCNO (obj) = a;
   OBJECT_SUBWORD (obj) = subword;
@@ -501,7 +496,7 @@ ira_create_allocno (int regno, bool cap_p,
 {
   ira_allocno_t a;
 
-  a = (ira_allocno_t) pool_alloc (allocno_pool);
+  a = allocno_pool.allocate ();
   ALLOCNO_REGNO (a) = regno;
   ALLOCNO_LOOP_TREE_NODE (a) = loop_tree_node;
   if (! cap_p)
@@ -943,7 +938,7 @@ ira_create_live_range (ira_object_t obj, int start, int finish,
 {
   live_range_t p;
 
-  p = (live_range_t) pool_alloc (live_range_pool);
+  p = live_range_pool.allocate ();
   p->object = obj;
   p->start = start;
   p->finish = finish;
@@ -968,7 +963,7 @@ copy_live_range (live_range_t r)
 {
   live_range_t p;
 
-  p = (live_range_t) pool_alloc (live_range_pool);
+  p = live_range_pool.allocate ();
   *p = *r;
   return p;
 }
@@ -1089,7 +1084,7 @@ ira_live_ranges_intersect_p (live_range_t r1, live_range_t r2)
 void
 ira_finish_live_range (live_range_t r)
 {
-  pool_free (live_range_pool, r);
+  live_range_pool.remove (r);
 }
 
 /* Free list of allocno live ranges starting with R.  */
@@ -1136,7 +1131,7 @@ ira_free_allocno_costs (ira_allocno_t a)
       ira_object_id_map[OBJECT_CONFLICT_ID (obj)] = NULL;
       if (OBJECT_CONFLICT_ARRAY (obj) != NULL)
 	ira_free (OBJECT_CONFLICT_ARRAY (obj));
-      pool_free (object_pool, obj);
+      object_pool.remove (obj);
     }
 
   ira_allocnos[ALLOCNO_NUM (a)] = NULL;
@@ -1160,7 +1155,7 @@ static void
 finish_allocno (ira_allocno_t a)
 {
   ira_free_allocno_costs (a);
-  pool_free (allocno_pool, a);
+  allocno_pool.remove (a);
 }
 
 /* Free the memory allocated for all allocnos.  */
@@ -1175,15 +1170,15 @@ finish_allocnos (void)
   ira_free (ira_regno_allocno_map);
   ira_object_id_map_vec.release ();
   allocno_vec.release ();
-  delete allocno_pool;
-  delete object_pool;
-  delete live_range_pool;
+  allocno_pool.release ();
+  object_pool.release ();
+  live_range_pool.release ();
 }
 
 \f
 
 /* Pools for allocno preferences.  */
-static alloc_pool pref_pool;
+static pool_allocator <ira_allocno_pref> pref_pool ("prefs", 100);
 
 /* Vec containing references to all created preferences.  It is a
    container of array ira_prefs.  */
@@ -1193,8 +1188,6 @@ static vec<ira_pref_t> pref_vec;
 static void
 initiate_prefs (void)
 {
-  pref_pool
-    = create_alloc_pool ("prefs", sizeof (struct ira_allocno_pref), 100);
   pref_vec.create (get_max_uid ());
   ira_prefs = NULL;
   ira_prefs_num = 0;
@@ -1218,7 +1211,7 @@ ira_create_pref (ira_allocno_t a, int hard_regno, int freq)
 {
   ira_pref_t pref;
 
-  pref = (ira_pref_t) pool_alloc (pref_pool);
+  pref = pref_pool.allocate ();
   pref->num = ira_prefs_num;
   pref->allocno = a;
   pref->hard_regno = hard_regno;
@@ -1316,7 +1309,7 @@ static void
 finish_pref (ira_pref_t pref)
 {
   ira_prefs[pref->num] = NULL;
-  pool_free (pref_pool, pref);
+  pref_pool.remove (pref);
 }
 
 /* Remove PREF from the list of allocno prefs and free memory for
@@ -1366,13 +1359,13 @@ finish_prefs (void)
   FOR_EACH_PREF (pref, pi)
     finish_pref (pref);
   pref_vec.release ();
-  delete pref_pool;
+  pref_pool.release ();
 }
 
 \f
 
 /* Pools for copies.  */
-static alloc_pool copy_pool;
+static pool_allocator<ira_allocno_copy> copy_pool ("copies", 100);
 
 /* Vec containing references to all created copies.  It is a
    container of array ira_copies.  */
@@ -1382,8 +1375,6 @@ static vec<ira_copy_t> copy_vec;
 static void
 initiate_copies (void)
 {
-  copy_pool
-    = create_alloc_pool ("copies", sizeof (struct ira_allocno_copy), 100);
   copy_vec.create (get_max_uid ());
   ira_copies = NULL;
   ira_copies_num = 0;
@@ -1428,7 +1419,7 @@ ira_create_copy (ira_allocno_t first, ira_allocno_t second, int freq,
 {
   ira_copy_t cp;
 
-  cp = (ira_copy_t) pool_alloc (copy_pool);
+  cp = copy_pool.allocate ();
   cp->num = ira_copies_num;
   cp->first = first;
   cp->second = second;
@@ -1613,7 +1604,7 @@ ira_debug_allocno_copies (ira_allocno_t a)
 static void
 finish_copy (ira_copy_t cp)
 {
-  pool_free (copy_pool, cp);
+  copy_pool.remove (cp);
 }
 
 
@@ -1627,7 +1618,7 @@ finish_copies (void)
   FOR_EACH_COPY (cp, ci)
     finish_copy (cp);
   copy_vec.release ();
-  delete copy_pool;
+  copy_pool.release ();
 }
 
 \f
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* [PATCH 23/35] Change use to type-based pool allocator in tree-ssa-pre.c.
  2015-05-27 14:09 [PATCH 01/35] Introduce new type-based pool allocator mliska
                   ` (12 preceding siblings ...)
  2015-05-27 14:17 ` [PATCH 21/35] Change use to type-based pool allocator in regcprop.c mliska
@ 2015-05-27 14:17 ` mliska
  2015-05-27 18:59   ` Jeff Law
  2015-05-27 14:17 ` [PATCH 28/35] Change use to type-based pool allocator in ipa-profile.c mliska
                   ` (20 subsequent siblings)
  34 siblings, 1 reply; 108+ messages in thread
From: mliska @ 2015-05-27 14:17 UTC (permalink / raw)
  To: gcc-patches

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* tree-ssa-pre.c (get_or_alloc_expr_for_name): Use new type-based pool allocator.
	(bitmap_set_new): Likewise.
	(get_or_alloc_expr_for_constant): Likewise.
	(get_or_alloc_expr_for): Likewise.
	(phi_translate_1): Likewise.
	(compute_avail): Likewise.
	(init_pre): Likewise.
	(fini_pre): Likewise.
---
 gcc/tree-ssa-pre.c | 32 ++++++++++++++------------------
 1 file changed, 14 insertions(+), 18 deletions(-)

diff --git a/gcc/tree-ssa-pre.c b/gcc/tree-ssa-pre.c
index d857d84..082dbaf 100644
--- a/gcc/tree-ssa-pre.c
+++ b/gcc/tree-ssa-pre.c
@@ -380,7 +380,7 @@ clear_expression_ids (void)
   expressions.release ();
 }
 
-static alloc_pool pre_expr_pool;
+static pool_allocator<pre_expr_d> pre_expr_pool ("pre_expr nodes", 30);
 
 /* Given an SSA_NAME NAME, get or create a pre_expr to represent it.  */
 
@@ -398,7 +398,7 @@ get_or_alloc_expr_for_name (tree name)
   if (result_id != 0)
     return expression_for_id (result_id);
 
-  result = (pre_expr) pool_alloc (pre_expr_pool);
+  result = pre_expr_pool.allocate ();
   result->kind = NAME;
   PRE_EXPR_NAME (result) = name;
   alloc_expression_id (result);
@@ -519,7 +519,7 @@ static unsigned int get_expr_value_id (pre_expr);
 /* We can add and remove elements and entries to and from sets
    and hash tables, so we use alloc pools for them.  */
 
-static alloc_pool bitmap_set_pool;
+static pool_allocator<bitmap_set> bitmap_set_pool ("Bitmap sets", 30);
 static bitmap_obstack grand_bitmap_obstack;
 
 /* Set of blocks with statements that have had their EH properties changed.  */
@@ -635,7 +635,7 @@ add_to_value (unsigned int v, pre_expr e)
 static bitmap_set_t
 bitmap_set_new (void)
 {
-  bitmap_set_t ret = (bitmap_set_t) pool_alloc (bitmap_set_pool);
+  bitmap_set_t ret = bitmap_set_pool.allocate ();
   bitmap_initialize (&ret->expressions, &grand_bitmap_obstack);
   bitmap_initialize (&ret->values, &grand_bitmap_obstack);
   return ret;
@@ -1125,7 +1125,7 @@ get_or_alloc_expr_for_constant (tree constant)
   if (result_id != 0)
     return expression_for_id (result_id);
 
-  newexpr = (pre_expr) pool_alloc (pre_expr_pool);
+  newexpr = pre_expr_pool.allocate ();
   newexpr->kind = CONSTANT;
   PRE_EXPR_CONSTANT (newexpr) = constant;
   alloc_expression_id (newexpr);
@@ -1176,13 +1176,13 @@ get_or_alloc_expr_for (tree t)
       vn_nary_op_lookup (t, &result);
       if (result != NULL)
 	{
-	  pre_expr e = (pre_expr) pool_alloc (pre_expr_pool);
+	  pre_expr e = pre_expr_pool.allocate ();
 	  e->kind = NARY;
 	  PRE_EXPR_NARY (e) = result;
 	  result_id = lookup_expression_id (e);
 	  if (result_id != 0)
 	    {
-	      pool_free (pre_expr_pool, e);
+	      pre_expr_pool.remove (e);
 	      e = expression_for_id (result_id);
 	      return e;
 	    }
@@ -1526,7 +1526,7 @@ phi_translate_1 (pre_expr expr, bitmap_set_t set1, bitmap_set_t set2,
 	    if (result && is_gimple_min_invariant (result))
 	      return get_or_alloc_expr_for_constant (result);
 
-	    expr = (pre_expr) pool_alloc (pre_expr_pool);
+	    expr = pre_expr_pool.allocate ();
 	    expr->kind = NARY;
 	    expr->id = 0;
 	    if (nary)
@@ -1688,7 +1688,7 @@ phi_translate_1 (pre_expr expr, bitmap_set_t set1, bitmap_set_t set2,
 		return NULL;
 	      }
 
-	    expr = (pre_expr) pool_alloc (pre_expr_pool);
+	    expr = pre_expr_pool.allocate ();
 	    expr->kind = REFERENCE;
 	    expr->id = 0;
 
@@ -3795,7 +3795,7 @@ compute_avail (void)
 		    || gimple_bb (SSA_NAME_DEF_STMT
 				    (gimple_vuse (stmt))) != block)
 		  {
-		    result = (pre_expr) pool_alloc (pre_expr_pool);
+		    result = pre_expr_pool.allocate ();
 		    result->kind = REFERENCE;
 		    result->id = 0;
 		    PRE_EXPR_REFERENCE (result) = ref;
@@ -3835,7 +3835,7 @@ compute_avail (void)
 			  && vn_nary_may_trap (nary))
 			continue;
 
-		      result = (pre_expr) pool_alloc (pre_expr_pool);
+		      result = pre_expr_pool.allocate ();
 		      result->kind = NARY;
 		      result->id = 0;
 		      PRE_EXPR_NARY (result) = nary;
@@ -3876,7 +3876,7 @@ compute_avail (void)
 			    continue;
 			}
 
-		      result = (pre_expr) pool_alloc (pre_expr_pool);
+		      result = pre_expr_pool.allocate ();
 		      result->kind = REFERENCE;
 		      result->id = 0;
 		      PRE_EXPR_REFERENCE (result) = ref;
@@ -4779,10 +4779,6 @@ init_pre (void)
   bitmap_obstack_initialize (&grand_bitmap_obstack);
   phi_translate_table = new hash_table<expr_pred_trans_d> (5110);
   expression_to_id = new hash_table<pre_expr_d> (num_ssa_names * 3);
-  bitmap_set_pool = create_alloc_pool ("Bitmap sets",
-				       sizeof (struct bitmap_set), 30);
-  pre_expr_pool = create_alloc_pool ("pre_expr nodes",
-				     sizeof (struct pre_expr_d), 30);
   FOR_ALL_BB_FN (bb, cfun)
     {
       EXP_GEN (bb) = bitmap_set_new ();
@@ -4802,8 +4798,8 @@ fini_pre ()
   value_expressions.release ();
   BITMAP_FREE (inserted_exprs);
   bitmap_obstack_release (&grand_bitmap_obstack);
-  free_alloc_pool (bitmap_set_pool);
-  free_alloc_pool (pre_expr_pool);
+  bitmap_set_pool.release ();
+  pre_expr_pool.release ();
   delete phi_translate_table;
   phi_translate_table = NULL;
   delete expression_to_id;
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* [PATCH 28/35] Change use to type-based pool allocator in ipa-profile.c.
  2015-05-27 14:09 [PATCH 01/35] Introduce new type-based pool allocator mliska
                   ` (13 preceding siblings ...)
  2015-05-27 14:17 ` [PATCH 23/35] Change use to type-based pool allocator in tree-ssa-pre.c mliska
@ 2015-05-27 14:17 ` mliska
  2015-05-27 18:18   ` Jeff Law
  2015-05-27 14:18 ` [PATCH 27/35] Change use to type-based pool allocator in tree-ssa-structalias.c mliska
                   ` (19 subsequent siblings)
  34 siblings, 1 reply; 108+ messages in thread
From: mliska @ 2015-05-27 14:17 UTC (permalink / raw)
  To: gcc-patches

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* ipa-profile.c (account_time_size): Use new type-based pool allocator.
	(ipa_profile_generate_summary): Likewise.
	(ipa_profile_read_summary): Likewise.
	(ipa_profile): Likewise.
---
 gcc/ipa-profile.c | 11 ++++-------
 1 file changed, 4 insertions(+), 7 deletions(-)

diff --git a/gcc/ipa-profile.c b/gcc/ipa-profile.c
index e0d4266..7c967f9 100644
--- a/gcc/ipa-profile.c
+++ b/gcc/ipa-profile.c
@@ -107,7 +107,8 @@ struct histogram_entry
    duplicate entries.  */
 
 vec<histogram_entry *> histogram;
-static alloc_pool histogram_pool;
+static pool_allocator<histogram_entry> histogram_pool
+  ("IPA histogram", 10);
 
 /* Hashtable support for storing SSA names hashed by their SSA_NAME_VAR.  */
 
@@ -144,7 +145,7 @@ account_time_size (hash_table<histogram_hash> *hashtable,
 
   if (!*val)
     {
-      *val = (histogram_entry *) pool_alloc (histogram_pool);
+      *val = histogram_pool.allocate ();
       **val = key;
       histogram.safe_push (*val);
     }
@@ -205,8 +206,6 @@ ipa_profile_generate_summary (void)
   basic_block bb;
 
   hash_table<histogram_hash> hashtable (10);
-  histogram_pool = create_alloc_pool ("IPA histogram", sizeof (struct histogram_entry),
-				      10);
   
   FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
     FOR_EACH_BB_FN (bb, DECL_STRUCT_FUNCTION (node->decl))
@@ -287,8 +286,6 @@ ipa_profile_read_summary (void)
   int j = 0;
 
   hash_table<histogram_hash> hashtable (10);
-  histogram_pool = create_alloc_pool ("IPA histogram", sizeof (struct histogram_entry),
-				      10);
 
   while ((file_data = file_data_vec[j++]))
     {
@@ -593,7 +590,7 @@ ipa_profile (void)
 	}
     }
   histogram.release ();
-  free_alloc_pool (histogram_pool);
+  histogram_pool.release ();
 
  /* Produce speculative calls: we saved common target from profiling into
     e->common_target_id.  Now, at link time, we can look up corresponding
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* [PATCH 21/35] Change use to type-based pool allocator in regcprop.c.
  2015-05-27 14:09 [PATCH 01/35] Introduce new type-based pool allocator mliska
                   ` (11 preceding siblings ...)
  2015-05-27 14:17 ` [PATCH 34/35] " mliska
@ 2015-05-27 14:17 ` mliska
  2015-05-27 18:14   ` Jeff Law
  2015-05-27 14:17 ` [PATCH 23/35] Change use to type-based pool allocator in tree-ssa-pre.c mliska
                   ` (21 subsequent siblings)
  34 siblings, 1 reply; 108+ messages in thread
From: mliska @ 2015-05-27 14:17 UTC (permalink / raw)
  To: gcc-patches

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* regcprop.c (free_debug_insn_changes): Use new type-based pool allocator.
	(replace_oldest_value_reg): Likewise.
	(pass_cprop_hardreg::execute): Likewise.
---
 gcc/regcprop.c | 31 +++++++++++++++++++++----------
 1 file changed, 21 insertions(+), 10 deletions(-)

diff --git a/gcc/regcprop.c b/gcc/regcprop.c
index 7d7a9a09..0755d83 100644
--- a/gcc/regcprop.c
+++ b/gcc/regcprop.c
@@ -62,6 +62,21 @@ struct queued_debug_insn_change
   rtx_insn *insn;
   rtx *loc;
   rtx new_rtx;
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove((queued_debug_insn_change *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<queued_debug_insn_change> pool;
 };
 
 /* For each register, we have a list of registers that contain the same
@@ -85,7 +100,9 @@ struct value_data
   unsigned int n_debug_insn_changes;
 };
 
-static alloc_pool debug_insn_changes_pool;
+pool_allocator<queued_debug_insn_change> queued_debug_insn_change::pool 
+  ("debug insn changes pool", 256);
+
 static bool skip_debug_insn_p;
 
 static void kill_value_one_regno (unsigned, struct value_data *);
@@ -124,7 +141,7 @@ free_debug_insn_changes (struct value_data *vd, unsigned int regno)
     {
       next = cur->next;
       --vd->n_debug_insn_changes;
-      pool_free (debug_insn_changes_pool, cur);
+      delete cur;
     }
   vd->e[regno].debug_insn_changes = NULL;
 }
@@ -495,8 +512,7 @@ replace_oldest_value_reg (rtx *loc, enum reg_class cl, rtx_insn *insn,
 	    fprintf (dump_file, "debug_insn %u: queued replacing reg %u with %u\n",
 		     INSN_UID (insn), REGNO (*loc), REGNO (new_rtx));
 
-	  change = (struct queued_debug_insn_change *)
-		   pool_alloc (debug_insn_changes_pool);
+	  change = new queued_debug_insn_change; 
 	  change->next = vd->e[REGNO (new_rtx)].debug_insn_changes;
 	  change->insn = insn;
 	  change->loc = loc;
@@ -1244,11 +1260,6 @@ pass_cprop_hardreg::execute (function *fun)
   visited = sbitmap_alloc (last_basic_block_for_fn (fun));
   bitmap_clear (visited);
 
-  if (MAY_HAVE_DEBUG_INSNS)
-    debug_insn_changes_pool
-      = create_alloc_pool ("debug insn changes pool",
-			   sizeof (struct queued_debug_insn_change), 256);
-
   FOR_EACH_BB_FN (bb, fun)
     {
       bitmap_set_bit (visited, bb->index);
@@ -1308,7 +1319,7 @@ pass_cprop_hardreg::execute (function *fun)
 		}
 	  }
 
-      free_alloc_pool (debug_insn_changes_pool);
+      queued_debug_insn_change::pool.release ();
     }
 
   sbitmap_free (visited);
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* [PATCH 35/35] Remove old pool allocator.
  2015-05-27 14:09 [PATCH 01/35] Introduce new type-based pool allocator mliska
                   ` (8 preceding siblings ...)
  2015-05-27 14:15 ` [PATCH 05/35] Change use to type-based pool allocator in ira-color.c mliska
@ 2015-05-27 14:17 ` mliska
  2015-05-27 19:40   ` Jeff Law
  2015-05-27 14:17 ` [PATCH 32/35] Change use to type-based pool allocator in ira-build.c mliska
                   ` (24 subsequent siblings)
  34 siblings, 1 reply; 108+ messages in thread
From: mliska @ 2015-05-27 14:17 UTC (permalink / raw)
  To: gcc-patches

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* alloc-pool.c (create_alloc_pool): Remove.
	(empty_alloc_pool): Likewise.
	(free_alloc_pool): Likewise.
	(free_alloc_pool_if_empty): Likewise.
	(pool_alloc): Likewise.
	(pool_free): Likewise.
	* alloc-pool.h: Remove old declarations.
---
 gcc/alloc-pool.c | 274 -------------------------------------------------------
 gcc/alloc-pool.h |  56 ------------
 2 files changed, 330 deletions(-)

diff --git a/gcc/alloc-pool.c b/gcc/alloc-pool.c
index 0bea7a6..78bc305 100644
--- a/gcc/alloc-pool.c
+++ b/gcc/alloc-pool.c
@@ -27,39 +27,6 @@ along with GCC; see the file COPYING3.  If not see
 
 ALLOC_POOL_ID_TYPE last_id;
 
-#define align_eight(x) (((x+7) >> 3) << 3)
-
-/* The internal allocation object.  */
-typedef struct allocation_object_def
-{
-#ifdef ENABLE_CHECKING
-  /* The ID of alloc pool which the object was allocated from.  */
-  ALLOC_POOL_ID_TYPE id;
-#endif
-
-  union
-    {
-      /* The data of the object.  */
-      char data[1];
-
-      /* Because we want any type of data to be well aligned after the ID,
-	 the following elements are here.  They are never accessed so
-	 the allocated object may be even smaller than this structure.
-	 We do not care about alignment for floating-point types.  */
-      char *align_p;
-      int64_t align_i;
-    } u;
-} allocation_object;
-
-/* Convert a pointer to allocation_object from a pointer to user data.  */
-#define ALLOCATION_OBJECT_PTR_FROM_USER_PTR(X)				\
-   ((allocation_object *) (((char *) (X))				\
-			   - offsetof (allocation_object, u.data)))
-
-/* Convert a pointer to user data from a pointer to allocation_object.  */
-#define USER_PTR_FROM_ALLOCATION_OBJECT_PTR(X)				\
-   ((void *) (((allocation_object *) (X))->u.data))
-
 /* Hashtable mapping alloc_pool names to descriptors.  */
 hash_map<const char *, alloc_pool_descriptor> *alloc_pool_hash;
 
@@ -72,247 +39,6 @@ allocate_pool_descriptor (const char *name)
   return &alloc_pool_hash->get_or_insert (name);
 }
 
-
-/* Create a pool of things of size SIZE, with NUM in each block we
-   allocate.  */
-
-alloc_pool
-create_alloc_pool (const char *name, size_t size, size_t num)
-{
-  alloc_pool pool;
-  size_t header_size;
-
-  gcc_checking_assert (name);
-
-  /* Make size large enough to store the list header.  */
-  if (size < sizeof (alloc_pool_list))
-    size = sizeof (alloc_pool_list);
-
-  /* Now align the size to a multiple of 4.  */
-  size = align_eight (size);
-
-#ifdef ENABLE_CHECKING
-  /* Add the aligned size of ID.  */
-  size += offsetof (allocation_object, u.data);
-#endif
-
-  /* Um, we can't really allocate 0 elements per block.  */
-  gcc_checking_assert (num);
-
-  /* Allocate memory for the pool structure.  */
-  pool = XNEW (struct alloc_pool_def);
-
-  /* Now init the various pieces of our pool structure.  */
-  pool->name = /*xstrdup (name)*/name;
-  pool->elt_size = size;
-  pool->elts_per_block = num;
-
-  if (GATHER_STATISTICS)
-    {
-      struct alloc_pool_descriptor *desc = allocate_pool_descriptor (name);
-      desc->elt_size = size;
-      desc->created++;
-    }
-
-  /* List header size should be a multiple of 8.  */
-  header_size = align_eight (sizeof (struct alloc_pool_list_def));
-
-  pool->block_size = (size * num) + header_size;
-  pool->returned_free_list = NULL;
-  pool->virgin_free_list = NULL;
-  pool->virgin_elts_remaining = 0;
-  pool->elts_allocated = 0;
-  pool->elts_free = 0;
-  pool->blocks_allocated = 0;
-  pool->block_list = NULL;
-
-#ifdef ENABLE_CHECKING
-  /* Increase the last used ID and use it for this pool.
-     ID == 0 is used for free elements of pool so skip it.  */
-  last_id++;
-  if (last_id == 0)
-    last_id++;
-
-  pool->id = last_id;
-#endif
-
-  return (pool);
-}
-
-/* Free all memory allocated for the given memory pool.  */
-void
-empty_alloc_pool (alloc_pool pool)
-{
-  alloc_pool_list block, next_block;
-
-  gcc_checking_assert (pool);
-
-  /* Free each block allocated to the pool.  */
-  for (block = pool->block_list; block != NULL; block = next_block)
-    {
-      next_block = block->next;
-      free (block);
-    }
-
-  if (GATHER_STATISTICS)
-    {
-      struct alloc_pool_descriptor *desc = allocate_pool_descriptor (pool->name);
-      desc->current -= (pool->elts_allocated - pool->elts_free) * pool->elt_size;
-    }
-
-  pool->returned_free_list = NULL;
-  pool->virgin_free_list = NULL;
-  pool->virgin_elts_remaining = 0;
-  pool->elts_allocated = 0;
-  pool->elts_free = 0;
-  pool->blocks_allocated = 0;
-  pool->block_list = NULL;
-}
-
-/* Free all memory allocated for the given memory pool and the pool itself.  */
-void
-free_alloc_pool (alloc_pool pool)
-{
-  /* First empty the pool.  */
-  empty_alloc_pool (pool);
-#ifdef ENABLE_CHECKING
-  memset (pool, 0xaf, sizeof (*pool));
-#endif
-  /* Lastly, free the pool.  */
-  free (pool);
-}
-
-/* Frees the alloc_pool, if it is empty and zero *POOL in this case.  */
-void
-free_alloc_pool_if_empty (alloc_pool *pool)
-{
-  if ((*pool)->elts_free == (*pool)->elts_allocated)
-    {
-      free_alloc_pool (*pool);
-      *pool = NULL;
-    }
-}
-
-/* Allocates one element from the pool specified.  */
-void *
-pool_alloc (alloc_pool pool)
-{
-  alloc_pool_list header;
-#ifdef ENABLE_VALGRIND_ANNOTATIONS
-  int size;
-#endif
-
-  if (GATHER_STATISTICS)
-    {
-      struct alloc_pool_descriptor *desc = allocate_pool_descriptor (pool->name);
-
-      desc->allocated += pool->elt_size;
-      desc->current += pool->elt_size;
-      if (desc->peak < desc->current)
-	desc->peak = desc->current;
-    }
-
-  gcc_checking_assert (pool);
-#ifdef ENABLE_VALGRIND_ANNOTATIONS
-  size = pool->elt_size - offsetof (allocation_object, u.data);
-#endif
-
-  /* If there are no more free elements, make some more!.  */
-  if (!pool->returned_free_list)
-    {
-      char *block;
-      if (!pool->virgin_elts_remaining)
-	{
-	  alloc_pool_list block_header;
-
-	  /* Make the block.  */
-	  block = XNEWVEC (char, pool->block_size);
-	  block_header = (alloc_pool_list) block;
-	  block += align_eight (sizeof (struct alloc_pool_list_def));
-
-	  /* Throw it on the block list.  */
-	  block_header->next = pool->block_list;
-	  pool->block_list = block_header;
-
-	  /* Make the block available for allocation.  */
-	  pool->virgin_free_list = block;
-	  pool->virgin_elts_remaining = pool->elts_per_block;
-
-	  /* Also update the number of elements we have free/allocated, and
-	     increment the allocated block count.  */
-	  pool->elts_allocated += pool->elts_per_block;
-	  pool->elts_free += pool->elts_per_block;
-	  pool->blocks_allocated += 1;
-	}
-
-
-      /* We now know that we can take the first elt off the virgin list and
-	 put it on the returned list. */
-      block = pool->virgin_free_list;
-      header = (alloc_pool_list) USER_PTR_FROM_ALLOCATION_OBJECT_PTR (block);
-      header->next = NULL;
-#ifdef ENABLE_CHECKING
-      /* Mark the element to be free.  */
-      ((allocation_object *) block)->id = 0;
-#endif
-      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (header,size));
-      pool->returned_free_list = header;
-      pool->virgin_free_list += pool->elt_size;
-      pool->virgin_elts_remaining--;
-
-    }
-
-  /* Pull the first free element from the free list, and return it.  */
-  header = pool->returned_free_list;
-  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (header, sizeof (*header)));
-  pool->returned_free_list = header->next;
-  pool->elts_free--;
-
-#ifdef ENABLE_CHECKING
-  /* Set the ID for element.  */
-  ALLOCATION_OBJECT_PTR_FROM_USER_PTR (header)->id = pool->id;
-#endif
-  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (header, size));
-
-  return ((void *) header);
-}
-
-/* Puts PTR back on POOL's free list.  */
-void
-pool_free (alloc_pool pool, void *ptr)
-{
-  alloc_pool_list header;
-#if defined(ENABLE_VALGRIND_ANNOTATIONS) || defined(ENABLE_CHECKING)
-  int size;
-  size = pool->elt_size - offsetof (allocation_object, u.data);
-#endif
-
-#ifdef ENABLE_CHECKING
-  gcc_assert (ptr
-	      /* Check if we free more than we allocated, which is Bad (TM).  */
-	      && pool->elts_free < pool->elts_allocated
-	      /* Check whether the PTR was allocated from POOL.  */
-	      && pool->id == ALLOCATION_OBJECT_PTR_FROM_USER_PTR (ptr)->id);
-
-  memset (ptr, 0xaf, size);
-
-  /* Mark the element to be free.  */
-  ALLOCATION_OBJECT_PTR_FROM_USER_PTR (ptr)->id = 0;
-#endif
-
-  header = (alloc_pool_list) ptr;
-  header->next = pool->returned_free_list;
-  pool->returned_free_list = header;
-  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (ptr, size));
-  pool->elts_free++;
-
-  if (GATHER_STATISTICS)
-    {
-      struct alloc_pool_descriptor *desc = allocate_pool_descriptor (pool->name);
-      desc->current -= pool->elt_size;
-    }
-}
-
 /* Output per-alloc_pool statistics.  */
 
 /* Used to accumulate statistics about alloc_pool sizes.  */
diff --git a/gcc/alloc-pool.h b/gcc/alloc-pool.h
index 8fd664f..a934303 100644
--- a/gcc/alloc-pool.h
+++ b/gcc/alloc-pool.h
@@ -22,48 +22,6 @@ along with GCC; see the file COPYING3.  If not see
 
 #include "hash-map.h"
 
-typedef unsigned long ALLOC_POOL_ID_TYPE;
-
-typedef struct alloc_pool_list_def
-{
-  struct alloc_pool_list_def *next;
-}
- *alloc_pool_list;
-
-typedef struct alloc_pool_def
-{
-  const char *name;
-#ifdef ENABLE_CHECKING
-  ALLOC_POOL_ID_TYPE id;
-#endif
-  size_t elts_per_block;
-
-  /* These are the elements that have been allocated at least once and freed.  */
-  alloc_pool_list returned_free_list;
-
-  /* These are the elements that have not yet been allocated out of
-     the last block obtained from XNEWVEC.  */
-  char* virgin_free_list;
-
-  /* The number of elements in the virgin_free_list that can be
-     allocated before needing another block.  */
-  size_t virgin_elts_remaining;
-
-  size_t elts_allocated;
-  size_t elts_free;
-  size_t blocks_allocated;
-  alloc_pool_list block_list;
-  size_t block_size;
-  size_t elt_size;
-}
- *alloc_pool;
-
-extern alloc_pool create_alloc_pool (const char *, size_t, size_t);
-extern void free_alloc_pool (alloc_pool);
-extern void empty_alloc_pool (alloc_pool);
-extern void free_alloc_pool_if_empty (alloc_pool *);
-extern void *pool_alloc (alloc_pool) ATTRIBUTE_MALLOC;
-extern void pool_free (alloc_pool, void *);
 extern void dump_alloc_pool_statistics (void);
 
 typedef unsigned long ALLOC_POOL_ID_TYPE;
@@ -73,26 +31,12 @@ template <typename T>
 class pool_allocator
 {
 public:
-  /* Default constructor for pool allocator called NAME. Each block
-     has NUM elements. The allocator support EXTRA_SIZE and can
-     potentially IGNORE_TYPE_SIZE.  */
   pool_allocator (const char *name, size_t num, size_t extra_size = 0,
 		  bool ignore_type_size = false);
-
-  /* Default destuctor.  */
   ~pool_allocator ();
-
-  /* Release internal data structures.  */
   void release ();
-
-  /* Release internal data structures if the pool has not allocated
-     an object.  */
   void release_if_empty ();
-
-  /* Allocate a new object.  */
   T *allocate () ATTRIBUTE_MALLOC;
-
-  /* Release OBJECT that must come from the pool.  */
   void remove (T *object);
 
 private:
-- 
2.1.4

^ permalink raw reply	[flat|nested] 108+ messages in thread

* [PATCH 27/35] Change use to type-based pool allocator in tree-ssa-structalias.c.
  2015-05-27 14:09 [PATCH 01/35] Introduce new type-based pool allocator mliska
                   ` (14 preceding siblings ...)
  2015-05-27 14:17 ` [PATCH 28/35] Change use to type-based pool allocator in ipa-profile.c mliska
@ 2015-05-27 14:18 ` mliska
  2015-05-27 18:20   ` Jeff Law
  2015-05-27 14:19 ` [PATCH 08/35] Change use to type-based pool allocator in asan.c mliska
                   ` (18 subsequent siblings)
  34 siblings, 1 reply; 108+ messages in thread
From: mliska @ 2015-05-27 14:18 UTC (permalink / raw)
  To: gcc-patches

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* tree-ssa-structalias.c (new_var_info): Use new type-based pool allocator.
	(new_constraint): Likewise.
	(init_alias_vars): Likewise.
	(delete_points_to_sets): Likewise.
---
 gcc/tree-ssa-structalias.c | 17 +++++++----------
 1 file changed, 7 insertions(+), 10 deletions(-)

diff --git a/gcc/tree-ssa-structalias.c b/gcc/tree-ssa-structalias.c
index d6a9f67..e802d78 100644
--- a/gcc/tree-ssa-structalias.c
+++ b/gcc/tree-ssa-structalias.c
@@ -354,7 +354,8 @@ static varinfo_t lookup_vi_for_tree (tree);
 static inline bool type_can_have_subvars (const_tree);
 
 /* Pool of variable info structures.  */
-static alloc_pool variable_info_pool;
+static pool_allocator<variable_info> variable_info_pool
+  ("Variable info pool", 30);
 
 /* Map varinfo to final pt_solution.  */
 static hash_map<varinfo_t, pt_solution *> *final_solutions;
@@ -395,7 +396,7 @@ static varinfo_t
 new_var_info (tree t, const char *name)
 {
   unsigned index = varmap.length ();
-  varinfo_t ret = (varinfo_t) pool_alloc (variable_info_pool);
+  varinfo_t ret = variable_info_pool.allocate ();
 
   ret->id = index;
   ret->name = name;
@@ -554,7 +555,7 @@ struct constraint
 /* List of constraints that we use to build the constraint graph from.  */
 
 static vec<constraint_t> constraints;
-static alloc_pool constraint_pool;
+static pool_allocator<constraint> constraint_pool ("Constraint pool", 30);
 
 /* The constraint graph is represented as an array of bitmaps
    containing successor nodes.  */
@@ -676,7 +677,7 @@ static constraint_t
 new_constraint (const struct constraint_expr lhs,
 		const struct constraint_expr rhs)
 {
-  constraint_t ret = (constraint_t) pool_alloc (constraint_pool);
+  constraint_t ret = constraint_pool.allocate ();
   ret->lhs = lhs;
   ret->rhs = rhs;
   return ret;
@@ -6681,10 +6682,6 @@ init_alias_vars (void)
   bitmap_obstack_initialize (&oldpta_obstack);
   bitmap_obstack_initialize (&predbitmap_obstack);
 
-  constraint_pool = create_alloc_pool ("Constraint pool",
-				       sizeof (struct constraint), 30);
-  variable_info_pool = create_alloc_pool ("Variable info pool",
-					  sizeof (struct variable_info), 30);
   constraints.create (8);
   varmap.create (8);
   vi_for_tree = new hash_map<tree, varinfo_t>;
@@ -6964,8 +6961,8 @@ delete_points_to_sets (void)
   free (graph);
 
   varmap.release ();
-  free_alloc_pool (variable_info_pool);
-  free_alloc_pool (constraint_pool);
+  variable_info_pool.release ();
+  constraint_pool.release ();
 
   obstack_free (&fake_var_decl_obstack, NULL);
 
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* [PATCH 08/35] Change use to type-based pool allocator in asan.c.
  2015-05-27 14:09 [PATCH 01/35] Introduce new type-based pool allocator mliska
                   ` (15 preceding siblings ...)
  2015-05-27 14:18 ` [PATCH 27/35] Change use to type-based pool allocator in tree-ssa-structalias.c mliska
@ 2015-05-27 14:19 ` mliska
  2015-05-27 18:01   ` Jeff Law
  2015-05-27 14:19 ` [PATCH 25/35] Change use to type-based pool allocator in tree-ssa-sccvn.c mliska
                   ` (17 subsequent siblings)
  34 siblings, 1 reply; 108+ messages in thread
From: mliska @ 2015-05-27 14:19 UTC (permalink / raw)
  To: gcc-patches

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* asan.c (asan_mem_ref_get_alloc_pool): Use new type-based pool allocator.
	(asan_mem_ref_new): Likewise.
	(free_mem_ref_resources): Likewise.
---
 gcc/asan.c | 44 ++++++++++++++++++++------------------------
 1 file changed, 20 insertions(+), 24 deletions(-)

diff --git a/gcc/asan.c b/gcc/asan.c
index 479301a..19c8d95 100644
--- a/gcc/asan.c
+++ b/gcc/asan.c
@@ -176,7 +176,7 @@ along with GCC; see the file COPYING3.  If not see
 
 	where '(...){n}' means the content inside the parenthesis occurs 'n'
 	times, with 'n' being the number of variables on the stack.
-     
+
      3/ The following 8 bytes contain the PC of the current function which
      will be used by the run-time library to print an error message.
 
@@ -281,7 +281,7 @@ bool
 set_asan_shadow_offset (const char *val)
 {
   char *endp;
-  
+
   errno = 0;
 #ifdef HAVE_LONG_LONG
   asan_shadow_offset_value = strtoull (val, &endp, 0);
@@ -372,23 +372,24 @@ struct asan_mem_ref
 
   /* The size of the access.  */
   HOST_WIDE_INT access_size;
-};
 
-static alloc_pool asan_mem_ref_alloc_pool;
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
 
-/* This creates the alloc pool used to store the instances of
-   asan_mem_ref that are stored in the hash table asan_mem_ref_ht.  */
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove((asan_mem_ref *) ptr);
+  }
 
-static alloc_pool
-asan_mem_ref_get_alloc_pool ()
-{
-  if (asan_mem_ref_alloc_pool == NULL)
-    asan_mem_ref_alloc_pool = create_alloc_pool ("asan_mem_ref",
-						 sizeof (asan_mem_ref),
-						 10);
-  return asan_mem_ref_alloc_pool;
-    
-}
+  /* Memory allocation pool.  */
+  static pool_allocator<asan_mem_ref> pool;
+};
+
+pool_allocator<asan_mem_ref> asan_mem_ref::pool ("asan_mem_ref", 10);
 
 /* Initializes an instance of asan_mem_ref.  */
 
@@ -408,8 +409,7 @@ asan_mem_ref_init (asan_mem_ref *ref, tree start, HOST_WIDE_INT access_size)
 static asan_mem_ref*
 asan_mem_ref_new (tree start, HOST_WIDE_INT access_size)
 {
-  asan_mem_ref *ref =
-    (asan_mem_ref *) pool_alloc (asan_mem_ref_get_alloc_pool ());
+  asan_mem_ref *ref = new asan_mem_ref;
 
   asan_mem_ref_init (ref, start, access_size);
   return ref;
@@ -501,11 +501,7 @@ free_mem_ref_resources ()
   delete asan_mem_ref_ht;
   asan_mem_ref_ht = NULL;
 
-  if (asan_mem_ref_alloc_pool)
-    {
-      free_alloc_pool (asan_mem_ref_alloc_pool);
-      asan_mem_ref_alloc_pool = NULL;
-    }
+  asan_mem_ref::pool.release ();
 }
 
 /* Return true iff the memory reference REF has been instrumented.  */
@@ -2035,7 +2031,7 @@ maybe_instrument_assignment (gimple_stmt_iterator *iter)
 			 is_store);
       is_instrumented = true;
     }
- 
+
   if (gimple_assign_load_p (s))
     {
       ref_expr = gimple_assign_rhs1 (s);
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* [PATCH 25/35] Change use to type-based pool allocator in tree-ssa-sccvn.c.
  2015-05-27 14:09 [PATCH 01/35] Introduce new type-based pool allocator mliska
                   ` (16 preceding siblings ...)
  2015-05-27 14:19 ` [PATCH 08/35] Change use to type-based pool allocator in asan.c mliska
@ 2015-05-27 14:19 ` mliska
  2015-05-27 18:16   ` Jeff Law
  2015-05-27 14:19 ` [PATCH 11/35] Change use to type-based pool allocator in sh.c mliska
                   ` (16 subsequent siblings)
  34 siblings, 1 reply; 108+ messages in thread
From: mliska @ 2015-05-27 14:19 UTC (permalink / raw)
  To: gcc-patches

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* tree-ssa-sccvn.c (vn_reference_insert): Use new type-based pool allocator.
	(vn_reference_insert_pieces): Likewise.
	(vn_phi_insert): Likewise.
	(visit_reference_op_call): Likewise.
	(copy_phi): Likewise.
	(copy_reference): Likewise.
	(process_scc): Likewise.
	(allocate_vn_table): Likewise.
	(free_vn_table): Likewise.
---
 gcc/tree-ssa-sccvn.c | 33 +++++++++++++++------------------
 1 file changed, 15 insertions(+), 18 deletions(-)

diff --git a/gcc/tree-ssa-sccvn.c b/gcc/tree-ssa-sccvn.c
index 03be480..98b0cc5 100644
--- a/gcc/tree-ssa-sccvn.c
+++ b/gcc/tree-ssa-sccvn.c
@@ -289,8 +289,8 @@ typedef struct vn_tables_s
   vn_phi_table_type *phis;
   vn_reference_table_type *references;
   struct obstack nary_obstack;
-  alloc_pool phis_pool;
-  alloc_pool references_pool;
+  pool_allocator<vn_phi_s> *phis_pool;
+  pool_allocator<vn_reference_s> *references_pool;
 } *vn_tables_t;
 
 
@@ -2285,7 +2285,7 @@ vn_reference_insert (tree op, tree result, tree vuse, tree vdef)
   vn_reference_t vr1;
   bool tem;
 
-  vr1 = (vn_reference_t) pool_alloc (current_info->references_pool);
+  vr1 = current_info->references_pool->allocate ();
   if (TREE_CODE (result) == SSA_NAME)
     vr1->value_id = VN_INFO (result)->value_id;
   else
@@ -2330,7 +2330,7 @@ vn_reference_insert_pieces (tree vuse, alias_set_type set, tree type,
   vn_reference_s **slot;
   vn_reference_t vr1;
 
-  vr1 = (vn_reference_t) pool_alloc (current_info->references_pool);
+  vr1 = current_info->references_pool->allocate ();
   vr1->value_id = value_id;
   vr1->vuse = vuse ? SSA_VAL (vuse) : NULL_TREE;
   vr1->operands = valueize_refs (operands);
@@ -2756,7 +2756,7 @@ static vn_phi_t
 vn_phi_insert (gimple phi, tree result)
 {
   vn_phi_s **slot;
-  vn_phi_t vp1 = (vn_phi_t) pool_alloc (current_info->phis_pool);
+  vn_phi_t vp1 = current_info->phis_pool->allocate ();
   unsigned i;
   vec<tree> args = vNULL;
 
@@ -2999,7 +2999,7 @@ visit_reference_op_call (tree lhs, gcall *stmt)
 	changed |= set_ssa_val_to (vdef, vdef);
       if (lhs)
 	changed |= set_ssa_val_to (lhs, lhs);
-      vr2 = (vn_reference_t) pool_alloc (current_info->references_pool);
+      vr2 = current_info->references_pool->allocate ();
       vr2->vuse = vr1.vuse;
       /* As we are not walking the virtual operand chain we know the
 	 shared_lookup_references are still original so we can re-use
@@ -3873,7 +3873,7 @@ copy_nary (vn_nary_op_t onary, vn_tables_t info)
 static void
 copy_phi (vn_phi_t ophi, vn_tables_t info)
 {
-  vn_phi_t phi = (vn_phi_t) pool_alloc (info->phis_pool);
+  vn_phi_t phi = info->phis_pool->allocate ();
   vn_phi_s **slot;
   memcpy (phi, ophi, sizeof (*phi));
   ophi->phiargs.create (0);
@@ -3889,7 +3889,7 @@ copy_reference (vn_reference_t oref, vn_tables_t info)
 {
   vn_reference_t ref;
   vn_reference_s **slot;
-  ref = (vn_reference_t) pool_alloc (info->references_pool);
+  ref = info->references_pool->allocate ();
   memcpy (ref, oref, sizeof (*ref));
   oref->operands.create (0);
   slot = info->references->find_slot_with_hash (ref, ref->hashcode, INSERT);
@@ -3954,8 +3954,8 @@ process_scc (vec<tree> scc)
       optimistic_info->references->empty ();
       obstack_free (&optimistic_info->nary_obstack, NULL);
       gcc_obstack_init (&optimistic_info->nary_obstack);
-      empty_alloc_pool (optimistic_info->phis_pool);
-      empty_alloc_pool (optimistic_info->references_pool);
+      optimistic_info->phis_pool->release ();
+      optimistic_info->references_pool->release ();
       FOR_EACH_VEC_ELT (scc, i, var)
 	VN_INFO (var)->expr = NULL_TREE;
       FOR_EACH_VEC_ELT (scc, i, var)
@@ -4132,12 +4132,9 @@ allocate_vn_table (vn_tables_t table)
   table->references = new vn_reference_table_type (23);
 
   gcc_obstack_init (&table->nary_obstack);
-  table->phis_pool = create_alloc_pool ("VN phis",
-					sizeof (struct vn_phi_s),
-					30);
-  table->references_pool = create_alloc_pool ("VN references",
-					      sizeof (struct vn_reference_s),
-					      30);
+  table->phis_pool = new pool_allocator<vn_phi_s> ("VN phis", 30);
+  table->references_pool = new pool_allocator<vn_reference_s> ("VN references",
+							       30);
 }
 
 /* Free a value number table.  */
@@ -4152,8 +4149,8 @@ free_vn_table (vn_tables_t table)
   delete table->references;
   table->references = NULL;
   obstack_free (&table->nary_obstack, NULL);
-  free_alloc_pool (table->phis_pool);
-  free_alloc_pool (table->references_pool);
+  delete table->phis_pool;
+  delete table->references_pool;
 }
 
 static void
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* [PATCH 14/35] Change use to type-based pool allocator in df-scan.c.
  2015-05-27 14:09 [PATCH 01/35] Introduce new type-based pool allocator mliska
                   ` (18 preceding siblings ...)
  2015-05-27 14:19 ` [PATCH 11/35] Change use to type-based pool allocator in sh.c mliska
@ 2015-05-27 14:19 ` mliska
  2015-05-29 13:38   ` Martin Liška
  2015-05-27 14:20 ` [PATCH 31/35] Change use to type-based pool allocator in ipa-prop.c and ipa-cp.c mliska
                   ` (14 subsequent siblings)
  34 siblings, 1 reply; 108+ messages in thread
From: mliska @ 2015-05-27 14:19 UTC (permalink / raw)
  To: gcc-patches

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* df-scan.c (struct df_scan_problem_data): Use new type-based pool
	allocator.
	(df_scan_free_internal): Likewise.
	(df_scan_alloc): Likewise.
	(df_grow_reg_info): Likewise.
	(df_free_ref): Likewise.
	(df_insn_create_insn_record): Likewise.
	(df_mw_hardreg_chain_delete): Likewise.
	(df_insn_info_delete): Likewise.
	(df_free_collection_rec): Likewise.
	(df_mw_hardreg_chain_delete_eq_uses): Likewise.
	(df_sort_and_compress_mws): Likewise.
	(df_ref_create_structure): Likewise.
	(df_ref_record): Likewise.
---
 gcc/df-scan.c | 94 +++++++++++++++++++++++++++++------------------------------
 1 file changed, 46 insertions(+), 48 deletions(-)

diff --git a/gcc/df-scan.c b/gcc/df-scan.c
index e32eaf5..4646bcf 100644
--- a/gcc/df-scan.c
+++ b/gcc/df-scan.c
@@ -159,15 +159,18 @@ static const unsigned int copy_all = copy_defs | copy_uses | copy_eq_uses
    it gets run.  It also has no need for the iterative solver.
 ----------------------------------------------------------------------------*/
 
+#define SCAN_PROBLEM_DATA_BLOCK_SIZE 512
+
 /* Problem data for the scanning dataflow function.  */
 struct df_scan_problem_data
 {
-  alloc_pool ref_base_pool;
-  alloc_pool ref_artificial_pool;
-  alloc_pool ref_regular_pool;
-  alloc_pool insn_pool;
-  alloc_pool reg_pool;
-  alloc_pool mw_reg_pool;
+  pool_allocator<df_base_ref> *ref_base_pool;
+  pool_allocator<df_artificial_ref> *ref_artificial_pool;
+  pool_allocator<df_regular_ref> *ref_regular_pool;
+  pool_allocator<df_insn_info> *insn_pool;
+  pool_allocator<df_reg_info> *reg_pool;
+  pool_allocator<df_mw_hardreg> *mw_reg_pool;
+
   bitmap_obstack reg_bitmaps;
   bitmap_obstack insn_bitmaps;
 };
@@ -218,12 +221,12 @@ df_scan_free_internal (void)
   bitmap_clear (&df->insns_to_rescan);
   bitmap_clear (&df->insns_to_notes_rescan);
 
-  free_alloc_pool (problem_data->ref_base_pool);
-  free_alloc_pool (problem_data->ref_artificial_pool);
-  free_alloc_pool (problem_data->ref_regular_pool);
-  free_alloc_pool (problem_data->insn_pool);
-  free_alloc_pool (problem_data->reg_pool);
-  free_alloc_pool (problem_data->mw_reg_pool);
+  delete problem_data->ref_base_pool;
+  delete problem_data->ref_artificial_pool;
+  delete problem_data->ref_regular_pool;
+  delete problem_data->insn_pool;
+  delete problem_data->reg_pool;
+  delete problem_data->mw_reg_pool;
   bitmap_obstack_release (&problem_data->reg_bitmaps);
   bitmap_obstack_release (&problem_data->insn_bitmaps);
   free (df_scan->problem_data);
@@ -264,7 +267,6 @@ df_scan_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
 {
   struct df_scan_problem_data *problem_data;
   unsigned int insn_num = get_max_uid () + 1;
-  unsigned int block_size = 512;
   basic_block bb;
 
   /* Given the number of pools, this is really faster than tearing
@@ -276,24 +278,18 @@ df_scan_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
   df_scan->problem_data = problem_data;
   df_scan->computed = true;
 
-  problem_data->ref_base_pool
-    = create_alloc_pool ("df_scan ref base",
-			 sizeof (struct df_base_ref), block_size);
-  problem_data->ref_artificial_pool
-    = create_alloc_pool ("df_scan ref artificial",
-			 sizeof (struct df_artificial_ref), block_size);
-  problem_data->ref_regular_pool
-    = create_alloc_pool ("df_scan ref regular",
-			 sizeof (struct df_regular_ref), block_size);
-  problem_data->insn_pool
-    = create_alloc_pool ("df_scan insn",
-			 sizeof (struct df_insn_info), block_size);
-  problem_data->reg_pool
-    = create_alloc_pool ("df_scan reg",
-			 sizeof (struct df_reg_info), block_size);
-  problem_data->mw_reg_pool
-    = create_alloc_pool ("df_scan mw_reg",
-			 sizeof (struct df_mw_hardreg), block_size / 16);
+  problem_data->ref_base_pool = new pool_allocator<df_base_ref>
+    ("df_scan ref base", SCAN_PROBLEM_DATA_BLOCK_SIZE);
+  problem_data->ref_artificial_pool = new pool_allocator<df_artificial_ref>
+    ("df_scan ref artificial", SCAN_PROBLEM_DATA_BLOCK_SIZE);
+  problem_data->ref_regular_pool = new pool_allocator<df_regular_ref>
+    ("df_scan ref regular", SCAN_PROBLEM_DATA_BLOCK_SIZE);
+  problem_data->insn_pool = new pool_allocator<df_insn_info>
+    ("df_scan insn", SCAN_PROBLEM_DATA_BLOCK_SIZE);
+  problem_data->reg_pool = new pool_allocator<df_reg_info>
+    ("df_scan reg", SCAN_PROBLEM_DATA_BLOCK_SIZE);
+  problem_data->mw_reg_pool = new pool_allocator<df_mw_hardreg>
+    ("df_scan mw_reg", SCAN_PROBLEM_DATA_BLOCK_SIZE / 16);
 
   bitmap_obstack_initialize (&problem_data->reg_bitmaps);
   bitmap_obstack_initialize (&problem_data->insn_bitmaps);
@@ -519,13 +515,14 @@ df_grow_reg_info (void)
     {
       struct df_reg_info *reg_info;
 
-      reg_info = (struct df_reg_info *) pool_alloc (problem_data->reg_pool);
+      // TODO
+      reg_info = problem_data->reg_pool->allocate ();
       memset (reg_info, 0, sizeof (struct df_reg_info));
       df->def_regs[i] = reg_info;
-      reg_info = (struct df_reg_info *) pool_alloc (problem_data->reg_pool);
+      reg_info = problem_data->reg_pool->allocate ();
       memset (reg_info, 0, sizeof (struct df_reg_info));
       df->use_regs[i] = reg_info;
-      reg_info = (struct df_reg_info *) pool_alloc (problem_data->reg_pool);
+      reg_info = problem_data->reg_pool->allocate ();
       memset (reg_info, 0, sizeof (struct df_reg_info));
       df->eq_use_regs[i] = reg_info;
       df->def_info.begin[i] = 0;
@@ -740,15 +737,17 @@ df_free_ref (df_ref ref)
   switch (DF_REF_CLASS (ref))
     {
     case DF_REF_BASE:
-      pool_free (problem_data->ref_base_pool, ref);
+      problem_data->ref_base_pool->remove ((df_base_ref *) (ref));
       break;
 
     case DF_REF_ARTIFICIAL:
-      pool_free (problem_data->ref_artificial_pool, ref);
+      problem_data->ref_artificial_pool->remove
+	((df_artificial_ref *) (ref));
       break;
 
     case DF_REF_REGULAR:
-      pool_free (problem_data->ref_regular_pool, ref);
+      problem_data->ref_regular_pool->remove
+	((df_regular_ref *) (ref));
       break;
     }
 }
@@ -851,7 +850,7 @@ df_insn_create_insn_record (rtx_insn *insn)
   insn_rec = DF_INSN_INFO_GET (insn);
   if (!insn_rec)
     {
-      insn_rec = (struct df_insn_info *) pool_alloc (problem_data->insn_pool);
+      insn_rec = problem_data->insn_pool->allocate ();
       DF_INSN_INFO_SET (insn, insn_rec);
     }
   memset (insn_rec, 0, sizeof (struct df_insn_info));
@@ -899,7 +898,7 @@ df_mw_hardreg_chain_delete (struct df_mw_hardreg *hardregs)
   for (; hardregs; hardregs = next)
     {
       next = DF_MWS_NEXT (hardregs);
-      pool_free (problem_data->mw_reg_pool, hardregs);
+      problem_data->mw_reg_pool->remove (hardregs);
     }
 }
 
@@ -940,7 +939,7 @@ df_insn_info_delete (unsigned int uid)
       df_ref_chain_delete (insn_info->uses);
       df_ref_chain_delete (insn_info->eq_uses);
 
-      pool_free (problem_data->insn_pool, insn_info);
+      problem_data->insn_pool->remove (insn_info);
       DF_INSN_UID_SET (uid, NULL);
     }
 }
@@ -1024,7 +1023,7 @@ df_free_collection_rec (struct df_collection_rec *collection_rec)
   FOR_EACH_VEC_ELT (collection_rec->eq_use_vec, ix, ref)
     df_free_ref (ref);
   FOR_EACH_VEC_ELT (collection_rec->mw_vec, ix, mw)
-    pool_free (problem_data->mw_reg_pool, mw);
+    problem_data->mw_reg_pool->remove (mw);
 
   collection_rec->def_vec.release ();
   collection_rec->use_vec.release ();
@@ -1949,7 +1948,7 @@ df_mw_hardreg_chain_delete_eq_uses (struct df_insn_info *insn_info)
       if (mw->flags & DF_REF_IN_NOTE)
 	{
 	  *mw_ptr = DF_MWS_NEXT (mw);
-	  pool_free (problem_data->mw_reg_pool, mw);
+	  problem_data->mw_reg_pool->remove (mw);
 	}
       else
 	mw_ptr = &DF_MWS_NEXT (mw);
@@ -2296,8 +2295,7 @@ df_sort_and_compress_mws (vec<df_mw_hardreg_ptr, va_heap> *mw_vec)
       while (i + dist + 1 < count
 	     && df_mw_equal_p ((*mw_vec)[i], (*mw_vec)[i + dist + 1]))
 	{
-	  pool_free (problem_data->mw_reg_pool,
-		     (*mw_vec)[i + dist + 1]);
+	  problem_data->mw_reg_pool->remove ((*mw_vec)[i + dist + 1]);
 	  dist++;
 	}
       /* Copy it down to the next position.  */
@@ -2525,18 +2523,18 @@ df_ref_create_structure (enum df_ref_class cl,
   switch (cl)
     {
     case DF_REF_BASE:
-      this_ref = (df_ref) pool_alloc (problem_data->ref_base_pool);
+      this_ref = (df_ref) (problem_data->ref_base_pool->allocate ());
       gcc_checking_assert (loc == NULL);
       break;
 
     case DF_REF_ARTIFICIAL:
-      this_ref = (df_ref) pool_alloc (problem_data->ref_artificial_pool);
+      this_ref = (df_ref) (problem_data->ref_artificial_pool->allocate ());
       this_ref->artificial_ref.bb = bb;
       gcc_checking_assert (loc == NULL);
       break;
 
     case DF_REF_REGULAR:
-      this_ref = (df_ref) pool_alloc (problem_data->ref_regular_pool);
+      this_ref = (df_ref) (problem_data->ref_regular_pool->allocate ());
       this_ref->regular_ref.loc = loc;
       gcc_checking_assert (loc);
       break;
@@ -2638,7 +2636,7 @@ df_ref_record (enum df_ref_class cl,
 	    ref_flags |= DF_REF_PARTIAL;
 	  ref_flags |= DF_REF_MW_HARDREG;
 
-	  hardreg = (struct df_mw_hardreg *) pool_alloc (problem_data->mw_reg_pool);
+	  hardreg = problem_data->mw_reg_pool->allocate ();
 	  hardreg->type = ref_type;
 	  hardreg->flags = ref_flags;
 	  hardreg->mw_reg = reg;
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* [PATCH 11/35] Change use to type-based pool allocator in sh.c.
  2015-05-27 14:09 [PATCH 01/35] Introduce new type-based pool allocator mliska
                   ` (17 preceding siblings ...)
  2015-05-27 14:19 ` [PATCH 25/35] Change use to type-based pool allocator in tree-ssa-sccvn.c mliska
@ 2015-05-27 14:19 ` mliska
  2015-05-27 18:03   ` Jeff Law
  2015-05-27 14:19 ` [PATCH 14/35] Change use to type-based pool allocator in df-scan.c mliska
                   ` (15 subsequent siblings)
  34 siblings, 1 reply; 108+ messages in thread
From: mliska @ 2015-05-27 14:19 UTC (permalink / raw)
  To: gcc-patches

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* config/sh/sh.c (add_constant): Use new type-based pool allocator.
	(sh_reorg): Likewise.
---
 gcc/config/sh/sh.c | 30 ++++++++++++++++++++++--------
 1 file changed, 22 insertions(+), 8 deletions(-)

diff --git a/gcc/config/sh/sh.c b/gcc/config/sh/sh.c
index bc1ce24..cf44120 100644
--- a/gcc/config/sh/sh.c
+++ b/gcc/config/sh/sh.c
@@ -4648,14 +4648,31 @@ gen_datalabel_ref (rtx sym)
 }
 
 \f
-static alloc_pool label_ref_list_pool;
-
 typedef struct label_ref_list_d
 {
   rtx_code_label *label;
   struct label_ref_list_d *next;
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove((label_ref_list_d *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<label_ref_list_d> pool;
+
 } *label_ref_list_t;
 
+pool_allocator<label_ref_list_d> label_ref_list_d::pool
+  ("label references list", 30);
+
 /* The SH cannot load a large constant into a register, constants have to
    come from a pc relative load.  The reference of a pc relative load
    instruction must be less than 1k in front of the instruction.  This
@@ -4775,7 +4792,7 @@ add_constant (rtx x, machine_mode mode, rtx last_value)
 		}
 	      if (lab && pool_window_label)
 		{
-		  newref = (label_ref_list_t) pool_alloc (label_ref_list_pool);
+		  newref = new label_ref_list_d;
 		  newref->label = pool_window_label;
 		  ref = pool_vector[pool_window_last].wend;
 		  newref->next = ref;
@@ -4804,7 +4821,7 @@ add_constant (rtx x, machine_mode mode, rtx last_value)
   pool_vector[pool_size].part_of_sequence_p = (lab == 0);
   if (lab && pool_window_label)
     {
-      newref = (label_ref_list_t) pool_alloc (label_ref_list_pool);
+      newref = new label_ref_list_d;
       newref->label = pool_window_label;
       ref = pool_vector[pool_window_last].wend;
       newref->next = ref;
@@ -6359,9 +6376,6 @@ sh_reorg (void)
 
   /* Scan the function looking for move instructions which have to be
      changed to pc-relative loads and insert the literal tables.  */
-  label_ref_list_pool = create_alloc_pool ("label references list",
-					   sizeof (struct label_ref_list_d),
-					   30);
   mdep_reorg_phase = SH_FIXUP_PCLOAD;
   for (insn = first, num_mova = 0; insn; insn = NEXT_INSN (insn))
     {
@@ -6553,7 +6567,7 @@ sh_reorg (void)
 	  insn = barrier;
 	}
     }
-  free_alloc_pool (label_ref_list_pool);
+  label_ref_list_d::pool.release ();
   for (insn = first; insn; insn = NEXT_INSN (insn))
     PUT_MODE (insn, VOIDmode);
 
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* [PATCH 07/35] Change use to type-based pool allocator in var-tracking.c.
  2015-05-27 14:09 [PATCH 01/35] Introduce new type-based pool allocator mliska
                   ` (21 preceding siblings ...)
  2015-05-27 14:20 ` [PATCH 29/35] Change use to type-based pool allocator in ipa-prop.c mliska
@ 2015-05-27 14:20 ` mliska
  2015-05-29 13:34   ` Martin Liška
  2015-05-27 14:21 ` [PATCH 20/35] Change use to type-based pool allocator in ira-build.c mliska
                   ` (11 subsequent siblings)
  34 siblings, 1 reply; 108+ messages in thread
From: mliska @ 2015-05-27 14:20 UTC (permalink / raw)
  To: gcc-patches

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* var-tracking.c (variable_htab_free): Use new type-based pool
	allocator.
	(attrs_list_clear): Likewise.
	(attrs_list_insert): Likewise.
	(attrs_list_copy): Likewise.
	(shared_hash_unshare): Likewise.
	(shared_hash_destroy): Likewise.
	(unshare_variable): Likewise.
	(var_reg_delete_and_set): Likewise.
	(var_reg_delete): Likewise.
	(var_regno_delete): Likewise.
	(drop_overlapping_mem_locs): Likewise.
	(variable_union): Likewise.
	(insert_into_intersection): Likewise.
	(canonicalize_values_star): Likewise.
	(variable_merge_over_cur): Likewise.
	(dataflow_set_merge): Likewise.
	(remove_duplicate_values): Likewise.
	(variable_post_merge_new_vals): Likewise.
	(dataflow_set_preserve_mem_locs): Likewise.
	(dataflow_set_remove_mem_locs): Likewise.
	(variable_from_dropped): Likewise.
	(variable_was_changed): Likewise.
	(set_slot_part): Likewise.
	(clobber_slot_part): Likewise.
	(delete_slot_part): Likewise.
	(loc_exp_insert_dep): Likewise.
	(notify_dependents_of_changed_value): Likewise.
	(emit_notes_for_differences_1): Likewise.
	(vt_emit_notes): Likewise.
	(vt_initialize): Likewise.
	(vt_finalize): Likewise.
---
 gcc/var-tracking.c | 201 ++++++++++++++++++++++++++++++++---------------------
 1 file changed, 122 insertions(+), 79 deletions(-)

diff --git a/gcc/var-tracking.c b/gcc/var-tracking.c
index 0db4358..f7afed1 100644
--- a/gcc/var-tracking.c
+++ b/gcc/var-tracking.c
@@ -282,6 +282,21 @@ typedef struct attrs_def
 
   /* Offset from start of DECL.  */
   HOST_WIDE_INT offset;
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove((attrs_def *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<attrs_def> pool;
 } *attrs;
 
 /* Structure for chaining the locations.  */
@@ -298,6 +313,21 @@ typedef struct location_chain_def
 
   /* Initialized? */
   enum var_init_status init;
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove((location_chain_def *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<location_chain_def> pool;
 } *location_chain;
 
 /* A vector of loc_exp_dep holds the active dependencies of a one-part
@@ -315,6 +345,21 @@ typedef struct loc_exp_dep_s
   /* A pointer to the pointer to this entry (head or prev's next) in
      the doubly-linked list.  */
   struct loc_exp_dep_s **pprev;
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove((loc_exp_dep_s *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<loc_exp_dep_s> pool;
 } loc_exp_dep;
 
 
@@ -554,6 +599,21 @@ typedef struct shared_hash_def
 
   /* Actual hash table.  */
   variable_table_type *htab;
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove((shared_hash_def *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<shared_hash_def> pool;
 } *shared_hash;
 
 /* Structure holding the IN or OUT set for a basic block.  */
@@ -598,22 +658,28 @@ typedef struct variable_tracking_info_def
 } *variable_tracking_info;
 
 /* Alloc pool for struct attrs_def.  */
-static alloc_pool attrs_pool;
+pool_allocator<attrs_def> attrs_def::pool ("attrs_def pool", 1024);
 
 /* Alloc pool for struct variable_def with MAX_VAR_PARTS entries.  */
-static alloc_pool var_pool;
+
+static pool_allocator<variable_def> var_pool
+  ("variable_def pool", 64,
+   (MAX_VAR_PARTS - 1) * sizeof (((variable)NULL)->var_part[0]));
 
 /* Alloc pool for struct variable_def with a single var_part entry.  */
-static alloc_pool valvar_pool;
+static pool_allocator<variable_def> valvar_pool
+  ("small variable_def pool", 256);
 
 /* Alloc pool for struct location_chain_def.  */
-static alloc_pool loc_chain_pool;
+pool_allocator<location_chain_def> location_chain_def::pool
+  ("location_chain_def pool", 1024);
 
 /* Alloc pool for struct shared_hash_def.  */
-static alloc_pool shared_hash_pool;
+pool_allocator<shared_hash_def> shared_hash_def::pool
+  ("shared_hash_def pool", 256);
 
 /* Alloc pool for struct loc_exp_dep_s for NOT_ONEPART variables.  */
-static alloc_pool loc_exp_dep_pool;
+pool_allocator<loc_exp_dep> loc_exp_dep::pool ("loc_exp_dep pool", 64);
 
 /* Changed variables, notes will be emitted for them.  */
 static variable_table_type *changed_variables;
@@ -784,7 +850,7 @@ stack_adjust_offset_pre_post (rtx pattern, HOST_WIDE_INT *pre,
 	*post += INTVAL (XEXP (src, 1));
       else
 	*post -= INTVAL (XEXP (src, 1));
-      return;	
+      return;
     }
   HOST_WIDE_INT res[2] = { 0, 0 };
   for_each_inc_dec (pattern, stack_adjust_offset_pre_post_cb, res);
@@ -1374,7 +1440,7 @@ dv_onepart_p (decl_or_value dv)
 }
 
 /* Return the variable pool to be used for a dv of type ONEPART.  */
-static inline alloc_pool
+static inline pool_allocator <variable_def> &
 onepart_pool (onepart_enum_t onepart)
 {
   return onepart ? valvar_pool : var_pool;
@@ -1457,7 +1523,7 @@ variable_htab_free (void *elem)
       for (node = var->var_part[i].loc_chain; node; node = next)
 	{
 	  next = node->next;
-	  pool_free (loc_chain_pool, node);
+	  delete node;
 	}
       var->var_part[i].loc_chain = NULL;
     }
@@ -1472,7 +1538,7 @@ variable_htab_free (void *elem)
       if (var->onepart == ONEPART_DEXPR)
 	set_dv_changed (var->dv, true);
     }
-  pool_free (onepart_pool (var->onepart), var);
+  onepart_pool (var->onepart).remove (var);
 }
 
 /* Initialize the set (array) SET of attrs to empty lists.  */
@@ -1496,7 +1562,7 @@ attrs_list_clear (attrs *listp)
   for (list = *listp; list; list = next)
     {
       next = list->next;
-      pool_free (attrs_pool, list);
+      delete list;
     }
   *listp = NULL;
 }
@@ -1518,9 +1584,7 @@ static void
 attrs_list_insert (attrs *listp, decl_or_value dv,
 		   HOST_WIDE_INT offset, rtx loc)
 {
-  attrs list;
-
-  list = (attrs) pool_alloc (attrs_pool);
+  attrs list = new attrs_def;
   list->loc = loc;
   list->dv = dv;
   list->offset = offset;
@@ -1533,12 +1597,10 @@ attrs_list_insert (attrs *listp, decl_or_value dv,
 static void
 attrs_list_copy (attrs *dstp, attrs src)
 {
-  attrs n;
-
   attrs_list_clear (dstp);
   for (; src; src = src->next)
     {
-      n = (attrs) pool_alloc (attrs_pool);
+      attrs n = new attrs_def;
       n->loc = src->loc;
       n->dv = src->dv;
       n->offset = src->offset;
@@ -1612,7 +1674,7 @@ shared_var_p (variable var, shared_hash vars)
 static shared_hash
 shared_hash_unshare (shared_hash vars)
 {
-  shared_hash new_vars = (shared_hash) pool_alloc (shared_hash_pool);
+  shared_hash new_vars = new shared_hash_def;
   gcc_assert (vars->refcount > 1);
   new_vars->refcount = 1;
   new_vars->htab = new variable_table_type (vars->htab->elements () + 3);
@@ -1640,7 +1702,7 @@ shared_hash_destroy (shared_hash vars)
   if (--vars->refcount == 0)
     {
       delete vars->htab;
-      pool_free (shared_hash_pool, vars);
+      delete vars;
     }
 }
 
@@ -1738,7 +1800,7 @@ unshare_variable (dataflow_set *set, variable_def **slot, variable var,
   variable new_var;
   int i;
 
-  new_var = (variable) pool_alloc (onepart_pool (var->onepart));
+  new_var = onepart_pool (var->onepart).allocate ();
   new_var->dv = var->dv;
   new_var->refcount = 1;
   var->refcount--;
@@ -1771,7 +1833,7 @@ unshare_variable (dataflow_set *set, variable_def **slot, variable var,
 	{
 	  location_chain new_lc;
 
-	  new_lc = (location_chain) pool_alloc (loc_chain_pool);
+	  new_lc = new location_chain_def;
 	  new_lc->next = NULL;
 	  if (node->init > initialized)
 	    new_lc->init = node->init;
@@ -1936,7 +1998,7 @@ var_reg_delete_and_set (dataflow_set *set, rtx loc, bool modify,
       if (dv_as_opaque (node->dv) != decl || node->offset != offset)
 	{
 	  delete_variable_part (set, node->loc, node->dv, node->offset);
-	  pool_free (attrs_pool, node);
+	  delete node;
 	  *nextp = next;
 	}
       else
@@ -1977,7 +2039,7 @@ var_reg_delete (dataflow_set *set, rtx loc, bool clobber)
       if (clobber || !dv_onepart_p (node->dv))
 	{
 	  delete_variable_part (set, node->loc, node->dv, node->offset);
-	  pool_free (attrs_pool, node);
+	  delete node;
 	  *nextp = next;
 	}
       else
@@ -1997,7 +2059,7 @@ var_regno_delete (dataflow_set *set, int regno)
     {
       next = node->next;
       delete_variable_part (set, node->loc, node->dv, node->offset);
-      pool_free (attrs_pool, node);
+      delete node;
     }
   *reg = NULL;
 }
@@ -2047,7 +2109,7 @@ get_addr_from_global_cache (rtx const loc)
   rtx x;
 
   gcc_checking_assert (GET_CODE (loc) == VALUE);
-  
+
   bool existed;
   rtx *slot = &global_get_addr_cache->get_or_insert (loc, &existed);
   if (existed)
@@ -2085,14 +2147,14 @@ get_addr_from_local_cache (dataflow_set *set, rtx const loc)
   location_chain l;
 
   gcc_checking_assert (GET_CODE (loc) == VALUE);
-  
+
   bool existed;
   rtx *slot = &local_get_addr_cache->get_or_insert (loc, &existed);
   if (existed)
     return *slot;
 
   x = get_addr_from_global_cache (loc);
-  
+
   /* Tentative, avoiding infinite recursion.  */
   *slot = x;
 
@@ -2304,7 +2366,7 @@ drop_overlapping_mem_locs (variable_def **slot, overlapping_mems *coms)
 	      if (VAR_LOC_1PAUX (var))
 		VAR_LOC_FROM (var) = NULL;
 	    }
-	  pool_free (loc_chain_pool, loc);
+	  delete loc;
 	}
 
       if (!var->var_part[0].loc_chain)
@@ -2538,7 +2600,7 @@ val_reset (dataflow_set *set, decl_or_value dv)
   if (var->onepart == ONEPART_VALUE)
     {
       rtx x = dv_as_value (dv);
-      
+
       /* Relationships in the global cache don't change, so reset the
 	 local cache entry only.  */
       rtx *slot = local_get_addr_cache->get (x);
@@ -2807,7 +2869,7 @@ variable_union (variable src, dataflow_set *set)
 		  goto restart_onepart_unshared;
 		}
 
-	      *nodep = nnode = (location_chain) pool_alloc (loc_chain_pool);
+	      *nodep = nnode = new location_chain_def;
 	      nnode->loc = snode->loc;
 	      nnode->init = snode->init;
 	      if (!snode->set_src || MEM_P (snode->set_src))
@@ -2927,7 +2989,7 @@ variable_union (variable src, dataflow_set *set)
 		    location_chain new_node;
 
 		    /* Copy the location from SRC.  */
-		    new_node = (location_chain) pool_alloc (loc_chain_pool);
+		    new_node = new location_chain_def;
 		    new_node->loc = node->loc;
 		    new_node->init = node->init;
 		    if (!node->set_src || MEM_P (node->set_src))
@@ -2982,7 +3044,7 @@ variable_union (variable src, dataflow_set *set)
 		      location_chain new_node;
 
 		      /* Copy the location from SRC.  */
-		      new_node = (location_chain) pool_alloc (loc_chain_pool);
+		      new_node = new location_chain_def;
 		      new_node->loc = node->loc;
 		      new_node->init = node->init;
 		      if (!node->set_src || MEM_P (node->set_src))
@@ -3078,7 +3140,7 @@ variable_union (variable src, dataflow_set *set)
 	    {
 	      location_chain new_lc;
 
-	      new_lc = (location_chain) pool_alloc (loc_chain_pool);
+	      new_lc = new location_chain_def;
 	      new_lc->next = NULL;
 	      new_lc->init = node->init;
 	      if (!node->set_src || MEM_P (node->set_src))
@@ -3296,7 +3358,7 @@ insert_into_intersection (location_chain *nodep, rtx loc,
     else if (r > 0)
       break;
 
-  node = (location_chain) pool_alloc (loc_chain_pool);
+  node = new location_chain_def;
 
   node->loc = loc;
   node->set_src = NULL;
@@ -3817,7 +3879,7 @@ canonicalize_values_star (variable_def **slot, dataflow_set *set)
 		    if (dv_as_opaque (list->dv) == dv_as_opaque (cdv))
 		      {
 			*listp = list->next;
-			pool_free (attrs_pool, list);
+			delete list;
 			list = *listp;
 			break;
 		      }
@@ -3835,7 +3897,7 @@ canonicalize_values_star (variable_def **slot, dataflow_set *set)
 		    if (dv_as_opaque (list->dv) == dv_as_opaque (dv))
 		      {
 			*listp = list->next;
-			pool_free (attrs_pool, list);
+			delete list;
 			list = *listp;
 			break;
 		      }
@@ -4016,7 +4078,7 @@ variable_merge_over_cur (variable s1var, struct dfset_merge *dsm)
 	{
 	  if (node)
 	    {
-	      dvar = (variable) pool_alloc (onepart_pool (onepart));
+	      dvar = onepart_pool (onepart).allocate ();
 	      dvar->dv = dv;
 	      dvar->refcount = 1;
 	      dvar->n_var_parts = 1;
@@ -4152,8 +4214,7 @@ variable_merge_over_cur (variable s1var, struct dfset_merge *dsm)
 							  INSERT);
 		  if (!*slot)
 		    {
-		      variable var = (variable) pool_alloc (onepart_pool
-							    (ONEPART_VALUE));
+		      variable var = onepart_pool (ONEPART_VALUE).allocate ();
 		      var->dv = dv;
 		      var->refcount = 1;
 		      var->n_var_parts = 1;
@@ -4240,7 +4301,7 @@ dataflow_set_merge (dataflow_set *dst, dataflow_set *src2)
   dataflow_set_init (dst);
   dst->stack_adjust = cur.stack_adjust;
   shared_hash_destroy (dst->vars);
-  dst->vars = (shared_hash) pool_alloc (shared_hash_pool);
+  dst->vars = new shared_hash_def;
   dst->vars->refcount = 1;
   dst->vars->htab = new variable_table_type (MAX (src1_elems, src2_elems));
 
@@ -4366,7 +4427,7 @@ remove_duplicate_values (variable var)
 	    {
 	      /* Remove duplicate value node.  */
 	      *nodep = node->next;
-	      pool_free (loc_chain_pool, node);
+	      delete node;
 	      continue;
 	    }
 	  else
@@ -4519,7 +4580,7 @@ variable_post_merge_new_vals (variable_def **slot, dfset_post_merge *dfpm)
 		 to be added when we bring perm in.  */
 	      att = *curp;
 	      *curp = att->next;
-	      pool_free (attrs_pool, att);
+	      delete att;
 	    }
 	}
 
@@ -4779,7 +4840,7 @@ dataflow_set_preserve_mem_locs (variable_def **slot, dataflow_set *set)
 		}
 	    }
 	  *locp = loc->next;
-	  pool_free (loc_chain_pool, loc);
+	  delete loc;
 	}
 
       if (!var->var_part[0].loc_chain)
@@ -4851,7 +4912,7 @@ dataflow_set_remove_mem_locs (variable_def **slot, dataflow_set *set)
 	      if (VAR_LOC_1PAUX (var))
 		VAR_LOC_FROM (var) = NULL;
 	    }
-	  pool_free (loc_chain_pool, loc);
+	  delete loc;
 	}
 
       if (!var->var_part[0].loc_chain)
@@ -7302,7 +7363,7 @@ variable_from_dropped (decl_or_value dv, enum insert_option insert)
 
   gcc_checking_assert (onepart == ONEPART_VALUE || onepart == ONEPART_DEXPR);
 
-  empty_var = (variable) pool_alloc (onepart_pool (onepart));
+  empty_var = onepart_pool (onepart).allocate ();
   empty_var->dv = dv;
   empty_var->refcount = 1;
   empty_var->n_var_parts = 0;
@@ -7406,7 +7467,7 @@ variable_was_changed (variable var, dataflow_set *set)
 
 	  if (!empty_var)
 	    {
-	      empty_var = (variable) pool_alloc (onepart_pool (onepart));
+	      empty_var = onepart_pool (onepart).allocate ();
 	      empty_var->dv = var->dv;
 	      empty_var->refcount = 1;
 	      empty_var->n_var_parts = 0;
@@ -7530,7 +7591,7 @@ set_slot_part (dataflow_set *set, rtx loc, variable_def **slot,
   if (!var)
     {
       /* Create new variable information.  */
-      var = (variable) pool_alloc (onepart_pool (onepart));
+      var = onepart_pool (onepart).allocate ();
       var->dv = dv;
       var->refcount = 1;
       var->n_var_parts = 1;
@@ -7725,7 +7786,7 @@ set_slot_part (dataflow_set *set, rtx loc, variable_def **slot,
 		set_src = node->set_src;
 	      if (var->var_part[pos].cur_loc == node->loc)
 		var->var_part[pos].cur_loc = NULL;
-	      pool_free (loc_chain_pool, node);
+	      delete node;
 	      *nextp = next;
 	      break;
 	    }
@@ -7737,7 +7798,7 @@ set_slot_part (dataflow_set *set, rtx loc, variable_def **slot,
     }
 
   /* Add the location to the beginning.  */
-  node = (location_chain) pool_alloc (loc_chain_pool);
+  node = new location_chain_def;
   node->loc = loc;
   node->init = initialized;
   node->set_src = set_src;
@@ -7819,7 +7880,7 @@ clobber_slot_part (dataflow_set *set, rtx loc, variable_def **slot,
 		      if (dv_as_opaque (anode->dv) == dv_as_opaque (var->dv)
 			  && anode->offset == offset)
 			{
-			  pool_free (attrs_pool, anode);
+			  delete anode;
 			  *anextp = anext;
 			}
 		      else
@@ -7919,7 +7980,7 @@ delete_slot_part (dataflow_set *set, rtx loc, variable_def **slot,
 		  if (pos == 0 && var->onepart && VAR_LOC_1PAUX (var))
 		    VAR_LOC_FROM (var) = NULL;
 		}
-	      pool_free (loc_chain_pool, node);
+	      delete node;
 	      *nextp = next;
 	      break;
 	    }
@@ -8080,7 +8141,7 @@ loc_exp_insert_dep (variable var, rtx x, variable_table_type *vars)
     return;
 
   if (var->onepart == NOT_ONEPART)
-    led = (loc_exp_dep *) pool_alloc (loc_exp_dep_pool);
+    led = new loc_exp_dep;
   else
     {
       loc_exp_dep empty;
@@ -8888,7 +8949,7 @@ notify_dependents_of_changed_value (rtx val, variable_table_type *htab,
 	  break;
 
 	case NOT_ONEPART:
-	  pool_free (loc_exp_dep_pool, led);
+	  delete led;
 	  ivar = htab->find_with_hash (ldv, dv_htab_hash (ldv));
 	  if (ivar)
 	    {
@@ -9010,7 +9071,7 @@ emit_notes_for_differences_1 (variable_def **slot, variable_table_type *new_vars
 
       if (!empty_var)
 	{
-	  empty_var = (variable) pool_alloc (onepart_pool (old_var->onepart));
+	  empty_var = onepart_pool (old_var->onepart).allocate ();
 	  empty_var->dv = old_var->dv;
 	  empty_var->refcount = 0;
 	  empty_var->n_var_parts = 0;
@@ -9451,8 +9512,6 @@ vt_emit_notes (void)
   if (MAY_HAVE_DEBUG_INSNS)
     {
       dropped_values = new variable_table_type (cselib_get_next_uid () * 2);
-      loc_exp_dep_pool = create_alloc_pool ("loc_exp_dep pool",
-					    sizeof (loc_exp_dep), 64);
     }
 
   dataflow_set_init (&cur);
@@ -9871,18 +9930,7 @@ vt_initialize (void)
 
   alloc_aux_for_blocks (sizeof (struct variable_tracking_info_def));
 
-  attrs_pool = create_alloc_pool ("attrs_def pool",
-				  sizeof (struct attrs_def), 1024);
-  var_pool = create_alloc_pool ("variable_def pool",
-				sizeof (struct variable_def)
-				+ (MAX_VAR_PARTS - 1)
-				* sizeof (((variable)NULL)->var_part[0]), 64);
-  loc_chain_pool = create_alloc_pool ("location_chain_def pool",
-				      sizeof (struct location_chain_def),
-				      1024);
-  shared_hash_pool = create_alloc_pool ("shared_hash_def pool",
-					sizeof (struct shared_hash_def), 256);
-  empty_shared_hash = (shared_hash) pool_alloc (shared_hash_pool);
+  empty_shared_hash = new shared_hash_def;
   empty_shared_hash->refcount = 1;
   empty_shared_hash->htab = new variable_table_type (1);
   changed_variables = new variable_table_type (10);
@@ -9901,15 +9949,12 @@ vt_initialize (void)
     {
       cselib_init (CSELIB_RECORD_MEMORY | CSELIB_PRESERVE_CONSTANTS);
       scratch_regs = BITMAP_ALLOC (NULL);
-      valvar_pool = create_alloc_pool ("small variable_def pool",
-				       sizeof (struct variable_def), 256);
       preserved_values.create (256);
       global_get_addr_cache = new hash_map<rtx, rtx>;
     }
   else
     {
       scratch_regs = NULL;
-      valvar_pool = NULL;
       global_get_addr_cache = NULL;
     }
 
@@ -10243,20 +10288,18 @@ vt_finalize (void)
   empty_shared_hash->htab = NULL;
   delete changed_variables;
   changed_variables = NULL;
-  free_alloc_pool (attrs_pool);
-  free_alloc_pool (var_pool);
-  free_alloc_pool (loc_chain_pool);
-  free_alloc_pool (shared_hash_pool);
+  attrs_def::pool.release ();
+  var_pool.release ();
+  location_chain_def::pool.release ();
+  shared_hash_def::pool.release ();
 
   if (MAY_HAVE_DEBUG_INSNS)
     {
       if (global_get_addr_cache)
 	delete global_get_addr_cache;
       global_get_addr_cache = NULL;
-      if (loc_exp_dep_pool)
-	free_alloc_pool (loc_exp_dep_pool);
-      loc_exp_dep_pool = NULL;
-      free_alloc_pool (valvar_pool);
+      loc_exp_dep::pool.release ();
+      valvar_pool.release ();
       preserved_values.release ();
       cselib_finish ();
       BITMAP_FREE (scratch_regs);
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* [PATCH 29/35] Change use to type-based pool allocator in ipa-prop.c.
  2015-05-27 14:09 [PATCH 01/35] Introduce new type-based pool allocator mliska
                   ` (20 preceding siblings ...)
  2015-05-27 14:20 ` [PATCH 31/35] Change use to type-based pool allocator in ipa-prop.c and ipa-cp.c mliska
@ 2015-05-27 14:20 ` mliska
  2015-05-27 18:22   ` Jeff Law
  2015-05-27 14:20 ` [PATCH 07/35] Change use to type-based pool allocator in var-tracking.c mliska
                   ` (12 subsequent siblings)
  34 siblings, 1 reply; 108+ messages in thread
From: mliska @ 2015-05-27 14:20 UTC (permalink / raw)
  To: gcc-patches

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* ipa-prop.c (ipa_set_jf_constant): Use new type-based pool allocator.
	(ipa_edge_duplication_hook): Likewise.
	(ipa_free_all_structures_after_ipa_cp): Likewise.
	(ipa_free_all_structures_after_iinln): Likewise.
---
 gcc/ipa-prop.c | 23 +++++++----------------
 1 file changed, 7 insertions(+), 16 deletions(-)

diff --git a/gcc/ipa-prop.c b/gcc/ipa-prop.c
index 26be5f2..80ce6b8 100644
--- a/gcc/ipa-prop.c
+++ b/gcc/ipa-prop.c
@@ -176,7 +176,8 @@ struct ipa_cst_ref_desc
 
 /* Allocation pool for reference descriptions.  */
 
-static alloc_pool ipa_refdesc_pool;
+static pool_allocator<ipa_cst_ref_desc> ipa_refdesc_pool
+  ("IPA-PROP ref descriptions", 32);
 
 /* Return true if DECL_FUNCTION_SPECIFIC_OPTIMIZATION of the decl associated
    with NODE should prevent us from analyzing it for the purposes of IPA-CP.  */
@@ -508,11 +509,8 @@ ipa_set_jf_constant (struct ipa_jump_func *jfunc, tree constant,
       && TREE_CODE (TREE_OPERAND (constant, 0)) == FUNCTION_DECL)
     {
       struct ipa_cst_ref_desc *rdesc;
-      if (!ipa_refdesc_pool)
-	ipa_refdesc_pool = create_alloc_pool ("IPA-PROP ref descriptions",
-					sizeof (struct ipa_cst_ref_desc), 32);
 
-      rdesc = (struct ipa_cst_ref_desc *) pool_alloc (ipa_refdesc_pool);
+      rdesc = ipa_refdesc_pool.allocate ();
       rdesc->cs = cs;
       rdesc->next_duplicate = NULL;
       rdesc->refcount = 1;
@@ -3517,9 +3515,7 @@ ipa_edge_duplication_hook (struct cgraph_edge *src, struct cgraph_edge *dst,
 	      gcc_checking_assert (ref);
 	      dst->caller->clone_reference (ref, ref->stmt);
 
-	      gcc_checking_assert (ipa_refdesc_pool);
-	      struct ipa_cst_ref_desc *dst_rdesc
-		= (struct ipa_cst_ref_desc *) pool_alloc (ipa_refdesc_pool);
+	      struct ipa_cst_ref_desc *dst_rdesc = ipa_refdesc_pool.allocate ();
 	      dst_rdesc->cs = dst;
 	      dst_rdesc->refcount = src_rdesc->refcount;
 	      dst_rdesc->next_duplicate = NULL;
@@ -3527,10 +3523,7 @@ ipa_edge_duplication_hook (struct cgraph_edge *src, struct cgraph_edge *dst,
 	    }
 	  else if (src_rdesc->cs == src)
 	    {
-	      struct ipa_cst_ref_desc *dst_rdesc;
-	      gcc_checking_assert (ipa_refdesc_pool);
-	      dst_rdesc
-		= (struct ipa_cst_ref_desc *) pool_alloc (ipa_refdesc_pool);
+	      struct ipa_cst_ref_desc *dst_rdesc = ipa_refdesc_pool.allocate ();
 	      dst_rdesc->cs = dst;
 	      dst_rdesc->refcount = src_rdesc->refcount;
 	      dst_rdesc->next_duplicate = src_rdesc->next_duplicate;
@@ -3681,8 +3674,7 @@ ipa_free_all_structures_after_ipa_cp (void)
       free_alloc_pool (ipcp_poly_ctx_values_pool);
       free_alloc_pool (ipcp_agg_lattice_pool);
       ipa_unregister_cgraph_hooks ();
-      if (ipa_refdesc_pool)
-	free_alloc_pool (ipa_refdesc_pool);
+      ipa_refdesc_pool.release ();
     }
 }
 
@@ -3703,8 +3695,7 @@ ipa_free_all_structures_after_iinln (void)
     free_alloc_pool (ipcp_poly_ctx_values_pool);
   if (ipcp_agg_lattice_pool)
     free_alloc_pool (ipcp_agg_lattice_pool);
-  if (ipa_refdesc_pool)
-    free_alloc_pool (ipa_refdesc_pool);
+  ipa_refdesc_pool.release ();
 }
 
 /* Print ipa_tree_map data structures of all functions in the
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* [PATCH 31/35] Change use to type-based pool allocator in ipa-prop.c and ipa-cp.c.
  2015-05-27 14:09 [PATCH 01/35] Introduce new type-based pool allocator mliska
                   ` (19 preceding siblings ...)
  2015-05-27 14:19 ` [PATCH 14/35] Change use to type-based pool allocator in df-scan.c mliska
@ 2015-05-27 14:20 ` mliska
  2015-05-29 14:09   ` Martin Liška
  2015-05-27 14:20 ` [PATCH 29/35] Change use to type-based pool allocator in ipa-prop.c mliska
                   ` (13 subsequent siblings)
  34 siblings, 1 reply; 108+ messages in thread
From: mliska @ 2015-05-27 14:20 UTC (permalink / raw)
  To: gcc-patches

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* ipa-cp.c (ipcp_value::add_source): Use new type-based pool allocator.
	(allocate_and_init_ipcp_value): Likewise.
	(ipcp_lattice::add_value): Likewise.
	(merge_agg_lats_step): Likewise.
	(ipcp_driver): Likewise.
	* ipa-prop.c (ipa_free_all_structures_after_ipa_cp): Likewise.
	(ipa_free_all_structures_after_iinln): Likewise.
	* ipa-prop.h: Likewise.
---
 gcc/ipa-cp.c   | 37 +++++++++++++++++--------------------
 gcc/ipa-prop.c | 20 ++++++++------------
 gcc/ipa-prop.h | 19 +++++++++++++++----
 3 files changed, 40 insertions(+), 36 deletions(-)

diff --git a/gcc/ipa-cp.c b/gcc/ipa-cp.c
index 356f402..0c3f885 100644
--- a/gcc/ipa-cp.c
+++ b/gcc/ipa-cp.c
@@ -291,10 +291,17 @@ public:
 
 /* Allocation pools for values and their sources in ipa-cp.  */
 
-alloc_pool ipcp_cst_values_pool;
-alloc_pool ipcp_poly_ctx_values_pool;
-alloc_pool ipcp_sources_pool;
-alloc_pool ipcp_agg_lattice_pool;
+pool_allocator<ipcp_value<tree> > ipcp_cst_values_pool ("IPA-CP constant values",
+						       32);
+
+pool_allocator<ipcp_value<ipa_polymorphic_call_context> > ipcp_poly_ctx_values_pool
+  ("IPA-CP polymorphic contexts", 32);
+
+pool_allocator<ipcp_value_source<tree> > ipcp_sources_pool
+  ("IPA-CP value sources", 64);
+
+pool_allocator<ipcp_agg_lattice> ipcp_agg_lattice_pool
+  ("IPA_CP aggregate lattices", 32);
 
 /* Maximal count found in program.  */
 
@@ -1147,7 +1154,7 @@ ipcp_value<valtype>::add_source (cgraph_edge *cs, ipcp_value *src_val,
 {
   ipcp_value_source<valtype> *src;
 
-  src = new (pool_alloc (ipcp_sources_pool)) ipcp_value_source<valtype>;
+  src = new (ipcp_sources_pool.allocate ()) ipcp_value_source<valtype>;
   src->offset = offset;
   src->cs = cs;
   src->val = src_val;
@@ -1165,7 +1172,7 @@ allocate_and_init_ipcp_value (tree source)
 {
   ipcp_value<tree> *val;
 
-  val = new (pool_alloc (ipcp_cst_values_pool)) ipcp_value<tree>;
+  val = ipcp_cst_values_pool.allocate ();
   memset (val, 0, sizeof (*val));
   val->value = source;
   return val;
@@ -1179,8 +1186,8 @@ allocate_and_init_ipcp_value (ipa_polymorphic_call_context source)
 {
   ipcp_value<ipa_polymorphic_call_context> *val;
 
-  val = new (pool_alloc (ipcp_poly_ctx_values_pool))
-    ipcp_value<ipa_polymorphic_call_context>;
+  // TODO
+  val = ipcp_poly_ctx_values_pool.allocate ();
   memset (val, 0, sizeof (*val));
   val->value = source;
   return val;
@@ -1229,7 +1236,7 @@ ipcp_lattice<valtype>::add_value (valtype newval, cgraph_edge *cs,
 	    {
 	      ipcp_value_source<valtype> *src = val->sources;
 	      val->sources = src->next;
-	      pool_free (ipcp_sources_pool, src);
+	      ipcp_sources_pool.remove ((ipcp_value_source<tree>*)src);
 	    }
 	}
 
@@ -1599,7 +1606,7 @@ merge_agg_lats_step (struct ipcp_param_lattices *dest_plats,
       if (dest_plats->aggs_count == PARAM_VALUE (PARAM_IPA_MAX_AGG_ITEMS))
 	return false;
       dest_plats->aggs_count++;
-      new_al = (struct ipcp_agg_lattice *) pool_alloc (ipcp_agg_lattice_pool);
+      new_al = ipcp_agg_lattice_pool.allocate ();
       memset (new_al, 0, sizeof (*new_al));
 
       new_al->offset = offset;
@@ -4463,16 +4470,6 @@ ipcp_driver (void)
   edge_removal_hook_holder =
     symtab->add_edge_removal_hook (&ipcp_edge_removal_hook, NULL);
 
-  ipcp_cst_values_pool = create_alloc_pool ("IPA-CP constant values",
-					    sizeof (ipcp_value<tree>), 32);
-  ipcp_poly_ctx_values_pool = create_alloc_pool
-    ("IPA-CP polymorphic contexts",
-     sizeof (ipcp_value<ipa_polymorphic_call_context>), 32);
-  ipcp_sources_pool = create_alloc_pool ("IPA-CP value sources",
-					 sizeof (ipcp_value_source<tree>), 64);
-  ipcp_agg_lattice_pool = create_alloc_pool ("IPA_CP aggregate lattices",
-					     sizeof (struct ipcp_agg_lattice),
-					     32);
   if (dump_file)
     {
       fprintf (dump_file, "\nIPA structures before propagation:\n");
diff --git a/gcc/ipa-prop.c b/gcc/ipa-prop.c
index 80ce6b8..e90502b 100644
--- a/gcc/ipa-prop.c
+++ b/gcc/ipa-prop.c
@@ -3669,10 +3669,10 @@ ipa_free_all_structures_after_ipa_cp (void)
     {
       ipa_free_all_edge_args ();
       ipa_free_all_node_params ();
-      free_alloc_pool (ipcp_sources_pool);
-      free_alloc_pool (ipcp_cst_values_pool);
-      free_alloc_pool (ipcp_poly_ctx_values_pool);
-      free_alloc_pool (ipcp_agg_lattice_pool);
+      ipcp_sources_pool.release ();
+      ipcp_cst_values_pool.release ();
+      ipcp_poly_ctx_values_pool.release ();
+      ipcp_agg_lattice_pool.release ();
       ipa_unregister_cgraph_hooks ();
       ipa_refdesc_pool.release ();
     }
@@ -3687,14 +3687,10 @@ ipa_free_all_structures_after_iinln (void)
   ipa_free_all_edge_args ();
   ipa_free_all_node_params ();
   ipa_unregister_cgraph_hooks ();
-  if (ipcp_sources_pool)
-    free_alloc_pool (ipcp_sources_pool);
-  if (ipcp_cst_values_pool)
-    free_alloc_pool (ipcp_cst_values_pool);
-  if (ipcp_poly_ctx_values_pool)
-    free_alloc_pool (ipcp_poly_ctx_values_pool);
-  if (ipcp_agg_lattice_pool)
-    free_alloc_pool (ipcp_agg_lattice_pool);
+  ipcp_sources_pool.release ();
+  ipcp_cst_values_pool.release ();
+  ipcp_poly_ctx_values_pool.release ();
+  ipcp_agg_lattice_pool.release ();
   ipa_refdesc_pool.release ();
 }
 
diff --git a/gcc/ipa-prop.h b/gcc/ipa-prop.h
index 0488254..e6725aa 100644
--- a/gcc/ipa-prop.h
+++ b/gcc/ipa-prop.h
@@ -595,10 +595,21 @@ void ipa_print_node_jump_functions (FILE *f, struct cgraph_node *node);
 void ipa_print_all_jump_functions (FILE * f);
 void ipcp_verify_propagated_values (void);
 
-extern alloc_pool ipcp_cst_values_pool;
-extern alloc_pool ipcp_poly_ctx_values_pool;
-extern alloc_pool ipcp_sources_pool;
-extern alloc_pool ipcp_agg_lattice_pool;
+template <typename value>
+class ipcp_value;
+
+extern pool_allocator<ipcp_value<tree> > ipcp_cst_values_pool;
+extern pool_allocator<ipcp_value<ipa_polymorphic_call_context> >
+  ipcp_poly_ctx_values_pool;
+
+template <typename valtype>
+class ipcp_value_source;
+
+extern pool_allocator<ipcp_value_source<tree> > ipcp_sources_pool;
+
+class ipcp_agg_lattice;
+
+extern pool_allocator<ipcp_agg_lattice> ipcp_agg_lattice_pool;
 
 /* Operation to be performed for the parameter in ipa_parm_adjustment
    below.  */
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* [PATCH 18/35] Change use to type-based pool allocator in stmt.c.
  2015-05-27 14:09 [PATCH 01/35] Introduce new type-based pool allocator mliska
                   ` (28 preceding siblings ...)
  2015-05-27 14:21 ` [PATCH 26/35] Change use to type-based pool allocator in tree-ssa-strlen.c mliska
@ 2015-05-27 14:21 ` mliska
  2015-05-27 18:13   ` Jeff Law
  2015-05-27 14:42 ` [PATCH 22/35] Change use to type-based pool allocator in sched-deps.c mliska
                   ` (4 subsequent siblings)
  34 siblings, 1 reply; 108+ messages in thread
From: mliska @ 2015-05-27 14:21 UTC (permalink / raw)
  To: gcc-patches

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* stmt.c (add_case_node): Use new type-based pool allocator.
	(expand_case): Likewise.
	(expand_sjlj_dispatch_table): Likewise.
---
 gcc/stmt.c | 16 +++++-----------
 1 file changed, 5 insertions(+), 11 deletions(-)

diff --git a/gcc/stmt.c b/gcc/stmt.c
index 303df72..e93ed02 100644
--- a/gcc/stmt.c
+++ b/gcc/stmt.c
@@ -748,7 +748,7 @@ do_jump_if_equal (machine_mode mode, rtx op0, rtx op1, rtx_code_label *label,
 
 static struct case_node *
 add_case_node (struct case_node *head, tree low, tree high,
-               tree label, int prob, alloc_pool case_node_pool)
+               tree label, int prob, pool_allocator<case_node> &case_node_pool)
 {
   struct case_node *r;
 
@@ -756,7 +756,7 @@ add_case_node (struct case_node *head, tree low, tree high,
   gcc_checking_assert (high && (TREE_TYPE (low) == TREE_TYPE (high)));
 
   /* Add this label to the chain.  */
-  r = (struct case_node *) pool_alloc (case_node_pool);
+  r = case_node_pool.allocate ();
   r->low = low;
   r->high = high;
   r->code_label = label;
@@ -1160,7 +1160,7 @@ expand_case (gswitch *stmt)
   struct case_node *case_list = 0;
 
   /* A pool for case nodes.  */
-  alloc_pool case_node_pool;
+  pool_allocator<case_node> case_node_pool ("struct case_node pool", 100);
 
   /* An ERROR_MARK occurs for various reasons including invalid data type.
      ??? Can this still happen, with GIMPLE and all?  */
@@ -1171,9 +1171,6 @@ expand_case (gswitch *stmt)
      expressions being INTEGER_CST.  */
   gcc_assert (TREE_CODE (index_expr) != INTEGER_CST);
   
-  case_node_pool = create_alloc_pool ("struct case_node pool",
-				      sizeof (struct case_node),
-				      100);
 
   do_pending_stack_adjust ();
 
@@ -1273,7 +1270,6 @@ expand_case (gswitch *stmt)
   reorder_insns (NEXT_INSN (before_case), get_last_insn (), before_case);
 
   free_temp_slots ();
-  free_alloc_pool (case_node_pool);
 }
 
 /* Expand the dispatch to a short decrement chain if there are few cases
@@ -1340,9 +1336,8 @@ expand_sjlj_dispatch_table (rtx dispatch_index,
     {
       /* Similar to expand_case, but much simpler.  */
       struct case_node *case_list = 0;
-      alloc_pool case_node_pool = create_alloc_pool ("struct sjlj_case pool",
-						     sizeof (struct case_node),
-						     ncases);
+      pool_allocator<case_node> case_node_pool ("struct sjlj_case pool",
+						ncases);
       tree index_expr = make_tree (index_type, dispatch_index);
       tree minval = build_int_cst (index_type, 0);
       tree maxval = CASE_LOW (dispatch_table.last ());
@@ -1362,7 +1357,6 @@ expand_sjlj_dispatch_table (rtx dispatch_index,
 				minval, maxval, range,
                                 BLOCK_FOR_INSN (before_case));
       emit_label (default_label);
-      free_alloc_pool (case_node_pool);
     }
 
   /* Dispatching something not handled?  Trap!  */
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* [PATCH 26/35] Change use to type-based pool allocator in tree-ssa-strlen.c.
  2015-05-27 14:09 [PATCH 01/35] Introduce new type-based pool allocator mliska
                   ` (27 preceding siblings ...)
  2015-05-27 14:21 ` [PATCH 15/35] Change use to type-based pool allocator in dse.c mliska
@ 2015-05-27 14:21 ` mliska
  2015-05-27 18:17   ` Jeff Law
  2015-05-29 13:42   ` Martin Liška
  2015-05-27 14:21 ` [PATCH 18/35] Change use to type-based pool allocator in stmt.c mliska
                   ` (5 subsequent siblings)
  34 siblings, 2 replies; 108+ messages in thread
From: mliska @ 2015-05-27 14:21 UTC (permalink / raw)
  To: gcc-patches

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* tree-ssa-strlen.c (new_strinfo): Use new type-based pool allocator.
	(free_strinfo): Likewise.
	(pass_strlen::execute): Likewise.
---
 gcc/tree-ssa-strlen.c | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/gcc/tree-ssa-strlen.c b/gcc/tree-ssa-strlen.c
index 34776a3..2664189 100644
--- a/gcc/tree-ssa-strlen.c
+++ b/gcc/tree-ssa-strlen.c
@@ -142,7 +142,7 @@ typedef struct strinfo_struct
 } *strinfo;
 
 /* Pool for allocating strinfo_struct entries.  */
-static alloc_pool strinfo_pool;
+static pool_allocator<strinfo_struct> strinfo_pool ("strinfo_struct pool", 64);
 
 /* Vector mapping positive string indexes to strinfo, for the
    current basic block.  The first pointer in the vector is special,
@@ -431,7 +431,7 @@ new_addr_stridx (tree exp)
 static strinfo
 new_strinfo (tree ptr, int idx, tree length)
 {
-  strinfo si = (strinfo) pool_alloc (strinfo_pool);
+  strinfo si = strinfo_pool.allocate ();
   si->length = length;
   si->ptr = ptr;
   si->stmt = NULL;
@@ -452,7 +452,7 @@ static inline void
 free_strinfo (strinfo si)
 {
   if (si && --si->refcount == 0)
-    pool_free (strinfo_pool, si);
+    strinfo_pool.remove (si);
 }
 
 /* Set strinfo in the vector entry IDX to SI.  */
@@ -2400,8 +2400,6 @@ pass_strlen::execute (function *fun)
 {
   ssa_ver_to_stridx.safe_grow_cleared (num_ssa_names);
   max_stridx = 1;
-  strinfo_pool = create_alloc_pool ("strinfo_struct pool",
-				    sizeof (struct strinfo_struct), 64);
 
   calculate_dominance_info (CDI_DOMINATORS);
 
@@ -2410,7 +2408,7 @@ pass_strlen::execute (function *fun)
   strlen_dom_walker (CDI_DOMINATORS).walk (fun->cfg->x_entry_block_ptr);
 
   ssa_ver_to_stridx.release ();
-  free_alloc_pool (strinfo_pool);
+  strinfo_pool.release ();
   if (decl_to_stridxlist_htab)
     {
       obstack_free (&stridx_obstack, NULL);
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* [PATCH 24/35] Change use to type-based pool allocator in tree-ssa-reassoc.c.
  2015-05-27 14:09 [PATCH 01/35] Introduce new type-based pool allocator mliska
                   ` (24 preceding siblings ...)
  2015-05-27 14:21 ` [PATCH 30/35] Change use to type-based pool allocator in ipa-inline-analysis.c mliska
@ 2015-05-27 14:21 ` mliska
  2015-05-27 18:15   ` Jeff Law
  2015-05-27 14:21 ` [PATCH 16/35] Change use to type-based pool allocator in tree-sra.c mliska
                   ` (8 subsequent siblings)
  34 siblings, 1 reply; 108+ messages in thread
From: mliska @ 2015-05-27 14:21 UTC (permalink / raw)
  To: gcc-patches

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* tree-ssa-reassoc.c (add_to_ops_vec): Use new type-based pool allocator.
	(add_repeat_to_ops_vec): Likewise.
	(get_ops): Likewise.
	(maybe_optimize_range_tests): Likewise.
	(init_reassoc): Likewise.
	(fini_reassoc): Likewise.
---
 gcc/tree-ssa-reassoc.c | 19 ++++++++-----------
 1 file changed, 8 insertions(+), 11 deletions(-)

diff --git a/gcc/tree-ssa-reassoc.c b/gcc/tree-ssa-reassoc.c
index 0c67379..c1a7f4b9 100644
--- a/gcc/tree-ssa-reassoc.c
+++ b/gcc/tree-ssa-reassoc.c
@@ -235,7 +235,8 @@ typedef struct operand_entry
   unsigned int count;
 } *operand_entry_t;
 
-static alloc_pool operand_entry_pool;
+static pool_allocator<operand_entry> operand_entry_pool ("operand entry pool",
+							 30);
 
 /* This is used to assign a unique ID to each struct operand_entry
    so that qsort results are identical on different hosts.  */
@@ -619,7 +620,7 @@ sort_by_operand_rank (const void *pa, const void *pb)
 static void
 add_to_ops_vec (vec<operand_entry_t> *ops, tree op)
 {
-  operand_entry_t oe = (operand_entry_t) pool_alloc (operand_entry_pool);
+  operand_entry_t oe = operand_entry_pool.allocate ();
 
   oe->op = op;
   oe->rank = get_rank (op);
@@ -635,7 +636,7 @@ static void
 add_repeat_to_ops_vec (vec<operand_entry_t> *ops, tree op,
 		       HOST_WIDE_INT repeat)
 {
-  operand_entry_t oe = (operand_entry_t) pool_alloc (operand_entry_pool);
+  operand_entry_t oe = operand_entry_pool.allocate ();
 
   oe->op = op;
   oe->rank = get_rank (op);
@@ -2990,7 +2991,7 @@ get_ops (tree var, enum tree_code code, vec<operand_entry_t> *ops,
 	&& !get_ops (rhs[i], code, ops, loop)
 	&& has_single_use (rhs[i]))
       {
-	operand_entry_t oe = (operand_entry_t) pool_alloc (operand_entry_pool);
+	operand_entry_t oe = operand_entry_pool.allocate ();
 
 	oe->op = rhs[i];
 	oe->rank = code;
@@ -3223,8 +3224,7 @@ maybe_optimize_range_tests (gimple stmt)
 	      && has_single_use (rhs))
 	    {
 	      /* Otherwise, push the _234 range test itself.  */
-	      operand_entry_t oe
-		= (operand_entry_t) pool_alloc (operand_entry_pool);
+	      operand_entry_t oe = operand_entry_pool.allocate ();
 
 	      oe->op = rhs;
 	      oe->rank = code;
@@ -3256,8 +3256,7 @@ maybe_optimize_range_tests (gimple stmt)
 			   loop_containing_stmt (stmt))))
 	{
 	  /* Or push the GIMPLE_COND stmt itself.  */
-	  operand_entry_t oe
-	    = (operand_entry_t) pool_alloc (operand_entry_pool);
+	  operand_entry_t oe = operand_entry_pool.allocate ();
 
 	  oe->op = NULL;
 	  oe->rank = (e->flags & EDGE_TRUE_VALUE)
@@ -5035,8 +5034,6 @@ init_reassoc (void)
 
   memset (&reassociate_stats, 0, sizeof (reassociate_stats));
 
-  operand_entry_pool = create_alloc_pool ("operand entry pool",
-					  sizeof (struct operand_entry), 30);
   next_operand_entry_id = 0;
 
   /* Reverse RPO (Reverse Post Order) will give us something where
@@ -5085,7 +5082,7 @@ fini_reassoc (void)
 			    reassociate_stats.pows_created);
 
   delete operand_rank;
-  free_alloc_pool (operand_entry_pool);
+  operand_entry_pool.release ();
   free (bb_rank);
   plus_negates.release ();
   free_dominance_info (CDI_POST_DOMINATORS);
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* [PATCH 30/35] Change use to type-based pool allocator in ipa-inline-analysis.c.
  2015-05-27 14:09 [PATCH 01/35] Introduce new type-based pool allocator mliska
                   ` (23 preceding siblings ...)
  2015-05-27 14:21 ` [PATCH 20/35] Change use to type-based pool allocator in ira-build.c mliska
@ 2015-05-27 14:21 ` mliska
  2015-05-29 14:06   ` Martin Liška
  2015-05-27 14:21 ` [PATCH 24/35] Change use to type-based pool allocator in tree-ssa-reassoc.c mliska
                   ` (9 subsequent siblings)
  34 siblings, 1 reply; 108+ messages in thread
From: mliska @ 2015-05-27 14:21 UTC (permalink / raw)
  To: gcc-patches

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* ipa-inline-analysis.c (edge_set_predicate): Use new type-based pool allocator.
	(set_hint_predicate): Likewise.
	(inline_summary_alloc): Likewise.
	(reset_inline_edge_summary): Likewise.
	(reset_inline_summary): Likewise.
	(set_cond_stmt_execution_predicate): Likewise.
	(set_switch_stmt_execution_predicate): Likewise.
	(compute_bb_predicates): Likewise.
	(estimate_function_body_sizes): Likewise.
	(inline_free_summary): Likewise.
---
 gcc/ipa-inline-analysis.c | 39 +++++++++++++++++----------------------
 1 file changed, 17 insertions(+), 22 deletions(-)

diff --git a/gcc/ipa-inline-analysis.c b/gcc/ipa-inline-analysis.c
index 5d99887..7d8edee 100644
--- a/gcc/ipa-inline-analysis.c
+++ b/gcc/ipa-inline-analysis.c
@@ -170,7 +170,7 @@ vec<inline_edge_summary_t> inline_edge_summary_vec;
 vec<edge_growth_cache_entry> edge_growth_cache;
 
 /* Edge predicates goes here.  */
-static alloc_pool edge_predicate_pool;
+static pool_allocator<predicate> edge_predicate_pool ("edge predicates", 10);
 
 /* Return true predicate (tautology).
    We represent it by empty list of clauses.  */
@@ -804,13 +804,13 @@ edge_set_predicate (struct cgraph_edge *e, struct predicate *predicate)
   if (predicate && !true_predicate_p (predicate))
     {
       if (!es->predicate)
-	es->predicate = (struct predicate *) pool_alloc (edge_predicate_pool);
+	es->predicate = edge_predicate_pool.allocate ();
       *es->predicate = *predicate;
     }
   else
     {
       if (es->predicate)
-	pool_free (edge_predicate_pool, es->predicate);
+	edge_predicate_pool.remove (es->predicate);
       es->predicate = NULL;
     }
 }
@@ -823,13 +823,13 @@ set_hint_predicate (struct predicate **p, struct predicate new_predicate)
   if (false_predicate_p (&new_predicate) || true_predicate_p (&new_predicate))
     {
       if (*p)
-	pool_free (edge_predicate_pool, *p);
+	edge_predicate_pool.remove (*p);
       *p = NULL;
     }
   else
     {
       if (!*p)
-	*p = (struct predicate *) pool_alloc (edge_predicate_pool);
+	*p = edge_predicate_pool.allocate ();
       **p = new_predicate;
     }
 }
@@ -1044,9 +1044,6 @@ inline_summary_alloc (void)
 
   if (inline_edge_summary_vec.length () <= (unsigned) symtab->edges_max_uid)
     inline_edge_summary_vec.safe_grow_cleared (symtab->edges_max_uid + 1);
-  if (!edge_predicate_pool)
-    edge_predicate_pool = create_alloc_pool ("edge predicates",
-					     sizeof (struct predicate), 10);
 }
 
 /* We are called multiple time for given function; clear
@@ -1061,7 +1058,7 @@ reset_inline_edge_summary (struct cgraph_edge *e)
 
       es->call_stmt_size = es->call_stmt_time = 0;
       if (es->predicate)
-	pool_free (edge_predicate_pool, es->predicate);
+	edge_predicate_pool.remove (es->predicate);
       es->predicate = NULL;
       es->param.release ();
     }
@@ -1086,17 +1083,17 @@ reset_inline_summary (struct cgraph_node *node,
   info->scc_no = 0;
   if (info->loop_iterations)
     {
-      pool_free (edge_predicate_pool, info->loop_iterations);
+      edge_predicate_pool.remove (info->loop_iterations);
       info->loop_iterations = NULL;
     }
   if (info->loop_stride)
     {
-      pool_free (edge_predicate_pool, info->loop_stride);
+      edge_predicate_pool.remove (info->loop_stride);
       info->loop_stride = NULL;
     }
   if (info->array_index)
     {
-      pool_free (edge_predicate_pool, info->array_index);
+      edge_predicate_pool.remove (info->array_index);
       info->array_index = NULL;
     }
   vec_free (info->conds);
@@ -1812,7 +1809,7 @@ set_cond_stmt_execution_predicate (struct ipa_node_params *info,
 	      struct predicate p = add_condition (summary, index, &aggpos,
 						  this_code,
 						  gimple_cond_rhs (last));
-	      e->aux = pool_alloc (edge_predicate_pool);
+	      e->aux = edge_predicate_pool.allocate ();
 	      *(struct predicate *) e->aux = p;
 	    }
 	}
@@ -1845,7 +1842,7 @@ set_cond_stmt_execution_predicate (struct ipa_node_params *info,
     {
       struct predicate p = add_condition (summary, index, &aggpos,
 					  IS_NOT_CONSTANT, NULL_TREE);
-      e->aux = pool_alloc (edge_predicate_pool);
+      e->aux = edge_predicate_pool.allocate ();
       *(struct predicate *) e->aux = p;
     }
 }
@@ -1878,7 +1875,7 @@ set_switch_stmt_execution_predicate (struct ipa_node_params *info,
 
   FOR_EACH_EDGE (e, ei, bb->succs)
     {
-      e->aux = pool_alloc (edge_predicate_pool);
+      e->aux = edge_predicate_pool.allocate ();
       *(struct predicate *) e->aux = false_predicate ();
     }
   n = gimple_switch_num_labels (last);
@@ -1932,7 +1929,7 @@ compute_bb_predicates (struct cgraph_node *node,
 
   /* Entry block is always executable.  */
   ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux
-    = pool_alloc (edge_predicate_pool);
+    = edge_predicate_pool.allocate ();
   *(struct predicate *) ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux
     = true_predicate ();
 
@@ -1968,7 +1965,7 @@ compute_bb_predicates (struct cgraph_node *node,
 	      if (!bb->aux)
 		{
 		  done = false;
-		  bb->aux = pool_alloc (edge_predicate_pool);
+		  bb->aux = edge_predicate_pool.allocate ();
 		  *((struct predicate *) bb->aux) = p;
 		}
 	      else if (!predicates_equal_p (&p, (struct predicate *) bb->aux))
@@ -2864,12 +2861,12 @@ estimate_function_body_sizes (struct cgraph_node *node, bool early)
       edge_iterator ei;
 
       if (bb->aux)
-	pool_free (edge_predicate_pool, bb->aux);
+	edge_predicate_pool.remove ((predicate *)bb->aux);
       bb->aux = NULL;
       FOR_EACH_EDGE (e, ei, bb->succs)
 	{
 	  if (e->aux)
-	    pool_free (edge_predicate_pool, e->aux);
+	    edge_predicate_pool.remove ((predicate *) e->aux);
 	  e->aux = NULL;
 	}
     }
@@ -4460,7 +4457,5 @@ inline_free_summary (void)
   inline_summaries->release ();
   inline_summaries = NULL;
   inline_edge_summary_vec.release ();
-  if (edge_predicate_pool)
-    free_alloc_pool (edge_predicate_pool);
-  edge_predicate_pool = 0;
+  edge_predicate_pool.release ();
 }
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* [PATCH 16/35] Change use to type-based pool allocator in tree-sra.c.
  2015-05-27 14:09 [PATCH 01/35] Introduce new type-based pool allocator mliska
                   ` (25 preceding siblings ...)
  2015-05-27 14:21 ` [PATCH 24/35] Change use to type-based pool allocator in tree-ssa-reassoc.c mliska
@ 2015-05-27 14:21 ` mliska
  2015-05-27 18:11   ` Jeff Law
  2015-05-27 14:21 ` [PATCH 15/35] Change use to type-based pool allocator in dse.c mliska
                   ` (7 subsequent siblings)
  34 siblings, 1 reply; 108+ messages in thread
From: mliska @ 2015-05-27 14:21 UTC (permalink / raw)
  To: gcc-patches

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* tree-sra.c (sra_initialize): Use new type-based pool allocator.
	(sra_deinitialize): Likewise.
	(create_access_1): Likewise.
	(build_accesses_from_assign): Likewise.
	(create_artificial_child_access): Likewise.
---
 gcc/tree-sra.c | 48 +++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 37 insertions(+), 11 deletions(-)

diff --git a/gcc/tree-sra.c b/gcc/tree-sra.c
index 4b0d2a8..b5047e7 100644
--- a/gcc/tree-sra.c
+++ b/gcc/tree-sra.c
@@ -300,13 +300,28 @@ struct access
   /* Set when we discover that this pointer is not safe to dereference in the
      caller.  */
   unsigned grp_not_necessarilly_dereferenced : 1;
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove((access *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<access> pool;
 };
 
 typedef struct access *access_p;
 
 
 /* Alloc pool for allocating access structures.  */
-static alloc_pool access_pool;
+pool_allocator<struct access> access::pool ("SRA accesses", 16);
 
 /* A structure linking lhs and rhs accesses from an aggregate assignment.  They
    are used to propagate subaccesses from rhs to lhs as long as they don't
@@ -315,10 +330,25 @@ struct assign_link
 {
   struct access *lacc, *racc;
   struct assign_link *next;
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove((assign_link *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<assign_link> pool;
 };
 
 /* Alloc pool for allocating assign link structures.  */
-static alloc_pool link_pool;
+pool_allocator<assign_link> assign_link::pool ("SRA links", 16);
 
 /* Base (tree) -> Vector (vec<access_p> *) map.  */
 static hash_map<tree, auto_vec<access_p> > *base_access_vec;
@@ -690,8 +720,6 @@ sra_initialize (void)
   should_scalarize_away_bitmap = BITMAP_ALLOC (NULL);
   cannot_scalarize_away_bitmap = BITMAP_ALLOC (NULL);
   gcc_obstack_init (&name_obstack);
-  access_pool = create_alloc_pool ("SRA accesses", sizeof (struct access), 16);
-  link_pool = create_alloc_pool ("SRA links", sizeof (struct assign_link), 16);
   base_access_vec = new hash_map<tree, auto_vec<access_p> >;
   memset (&sra_stats, 0, sizeof (sra_stats));
   encountered_apply_args = false;
@@ -709,8 +737,8 @@ sra_deinitialize (void)
   candidates = NULL;
   BITMAP_FREE (should_scalarize_away_bitmap);
   BITMAP_FREE (cannot_scalarize_away_bitmap);
-  free_alloc_pool (access_pool);
-  free_alloc_pool (link_pool);
+  access::pool.release ();
+  assign_link::pool.release ();
   obstack_free (&name_obstack, NULL);
 
   delete base_access_vec;
@@ -862,9 +890,8 @@ mark_parm_dereference (tree base, HOST_WIDE_INT dist, gimple stmt)
 static struct access *
 create_access_1 (tree base, HOST_WIDE_INT offset, HOST_WIDE_INT size)
 {
-  struct access *access;
+  struct access *access = new struct access();
 
-  access = (struct access *) pool_alloc (access_pool);
   memset (access, 0, sizeof (struct access));
   access->base = base;
   access->offset = offset;
@@ -1239,7 +1266,7 @@ build_accesses_from_assign (gimple stmt)
     {
       struct assign_link *link;
 
-      link = (struct assign_link *) pool_alloc (link_pool);
+      link = new assign_link;
       memset (link, 0, sizeof (struct assign_link));
 
       link->lacc = lacc;
@@ -2393,13 +2420,12 @@ static struct access *
 create_artificial_child_access (struct access *parent, struct access *model,
 				HOST_WIDE_INT new_offset)
 {
-  struct access *access;
   struct access **child;
   tree expr = parent->base;
 
   gcc_assert (!model->grp_unscalarizable_region);
 
-  access = (struct access *) pool_alloc (access_pool);
+  struct access *access = new struct access ();
   memset (access, 0, sizeof (struct access));
   if (!build_user_friendly_ref_for_offset (&expr, TREE_TYPE (expr), new_offset,
 					   model->type))
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* [PATCH 15/35] Change use to type-based pool allocator in dse.c.
  2015-05-27 14:09 [PATCH 01/35] Introduce new type-based pool allocator mliska
                   ` (26 preceding siblings ...)
  2015-05-27 14:21 ` [PATCH 16/35] Change use to type-based pool allocator in tree-sra.c mliska
@ 2015-05-27 14:21 ` mliska
  2015-05-29 13:38   ` Martin Liška
  2015-05-27 14:21 ` [PATCH 26/35] Change use to type-based pool allocator in tree-ssa-strlen.c mliska
                   ` (6 subsequent siblings)
  34 siblings, 1 reply; 108+ messages in thread
From: mliska @ 2015-05-27 14:21 UTC (permalink / raw)
  To: gcc-patches

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* dse.c (get_group_info): Use new type-based pool allocator.
	(dse_step0): Likewise.
	(free_store_info): Likewise.
	(delete_dead_store_insn): Likewise.
	(free_read_records): Likewise.
	(record_store): Likewise.
	(replace_read): Likewise.
	(check_mem_read_rtx): Likewise.
	(scan_insn): Likewise.
	(dse_step1): Likewise.
	(dse_step7): Likewise.
---
 gcc/dse.c | 201 ++++++++++++++++++++++++++++++++++++++++----------------------
 1 file changed, 129 insertions(+), 72 deletions(-)

diff --git a/gcc/dse.c b/gcc/dse.c
index b3b38d5..5ade9dd 100644
--- a/gcc/dse.c
+++ b/gcc/dse.c
@@ -249,7 +249,7 @@ static struct obstack dse_obstack;
 /* Scratch bitmap for cselib's cselib_expand_value_rtx.  */
 static bitmap scratch = NULL;
 
-struct insn_info;
+struct insn_info_type;
 
 /* This structure holds information about a candidate store.  */
 struct store_info
@@ -316,7 +316,7 @@ struct store_info
   /* Set if this store stores the same constant value as REDUNDANT_REASON
      insn stored.  These aren't eliminated early, because doing that
      might prevent the earlier larger store to be eliminated.  */
-  struct insn_info *redundant_reason;
+  struct insn_info_type *redundant_reason;
 };
 
 /* Return a bitmask with the first N low bits set.  */
@@ -329,12 +329,15 @@ lowpart_bitmask (int n)
 }
 
 typedef struct store_info *store_info_t;
-static alloc_pool cse_store_info_pool;
-static alloc_pool rtx_store_info_pool;
+static pool_allocator<store_info> cse_store_info_pool ("cse_store_info_pool",
+						       100);
+
+static pool_allocator<store_info> rtx_store_info_pool ("rtx_store_info_pool",
+						       100);
 
 /* This structure holds information about a load.  These are only
    built for rtx bases.  */
-struct read_info
+struct read_info_type
 {
   /* The id of the mem group of the base address.  */
   int group_id;
@@ -351,15 +354,30 @@ struct read_info
   rtx mem;
 
   /* The next read_info for this insn.  */
-  struct read_info *next;
+  struct read_info_type *next;
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove((read_info_type *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<read_info_type> pool;
 };
-typedef struct read_info *read_info_t;
-static alloc_pool read_info_pool;
+typedef struct read_info_type *read_info_t;
 
+pool_allocator<read_info_type> read_info_type::pool ("read_info_pool", 100);
 
 /* One of these records is created for each insn.  */
 
-struct insn_info
+struct insn_info_type
 {
   /* Set true if the insn contains a store but the insn itself cannot
      be deleted.  This is set if the insn is a parallel and there is
@@ -433,27 +451,41 @@ struct insn_info
   regset fixed_regs_live;
 
   /* The prev insn in the basic block.  */
-  struct insn_info * prev_insn;
+  struct insn_info_type * prev_insn;
 
   /* The linked list of insns that are in consideration for removal in
      the forwards pass through the basic block.  This pointer may be
      trash as it is not cleared when a wild read occurs.  The only
      time it is guaranteed to be correct is when the traversal starts
      at active_local_stores.  */
-  struct insn_info * next_local_store;
+  struct insn_info_type * next_local_store;
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove((insn_info_type *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<insn_info_type> pool;
 };
+typedef struct insn_info_type *insn_info_t;
 
-typedef struct insn_info *insn_info_t;
-static alloc_pool insn_info_pool;
+pool_allocator<insn_info_type> insn_info_type::pool ("insn_info_pool", 100);
 
 /* The linked list of stores that are under consideration in this
    basic block.  */
 static insn_info_t active_local_stores;
 static int active_local_stores_len;
 
-struct dse_bb_info
+struct dse_bb_info_type
 {
-
   /* Pointer to the insn info for the last insn in the block.  These
      are linked so this is how all of the insns are reached.  During
      scanning this is the current insn being scanned.  */
@@ -507,10 +539,25 @@ struct dse_bb_info
      to assure that shift and/or add sequences that are inserted do not
      accidentally clobber live hard regs.  */
   bitmap regs_live;
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove((dse_bb_info_type *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<dse_bb_info_type> pool;
 };
 
-typedef struct dse_bb_info *bb_info_t;
-static alloc_pool bb_info_pool;
+typedef struct dse_bb_info_type *bb_info_t;
+pool_allocator<dse_bb_info_type> dse_bb_info_type::pool ("bb_info_pool", 100);
 
 /* Table to hold all bb_infos.  */
 static bb_info_t *bb_table;
@@ -578,10 +625,26 @@ struct group_info
      care about.  */
   int *offset_map_n, *offset_map_p;
   int offset_map_size_n, offset_map_size_p;
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove((group_info *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<group_info> pool;
 };
 typedef struct group_info *group_info_t;
 typedef const struct group_info *const_group_info_t;
-static alloc_pool rtx_group_info_pool;
+
+pool_allocator<group_info> group_info::pool ("rtx_group_info_pool", 100);
 
 /* Index into the rtx_group_vec.  */
 static int rtx_group_next_id;
@@ -602,10 +665,27 @@ struct deferred_change
   rtx reg;
 
   struct deferred_change *next;
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove((deferred_change *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<deferred_change> pool;
 };
 
 typedef struct deferred_change *deferred_change_t;
-static alloc_pool deferred_change_pool;
+
+pool_allocator<deferred_change> deferred_change::pool
+  ("deferred_change_pool", 10);
 
 static deferred_change_t deferred_change_list = NULL;
 
@@ -712,8 +792,7 @@ get_group_info (rtx base)
     {
       if (!clear_alias_group)
 	{
-	  clear_alias_group = gi =
-	    (group_info_t) pool_alloc (rtx_group_info_pool);
+	  clear_alias_group = gi = new group_info;
 	  memset (gi, 0, sizeof (struct group_info));
 	  gi->id = rtx_group_next_id++;
 	  gi->store1_n = BITMAP_ALLOC (&dse_bitmap_obstack);
@@ -735,7 +814,7 @@ get_group_info (rtx base)
 
   if (gi == NULL)
     {
-      *slot = gi = (group_info_t) pool_alloc (rtx_group_info_pool);
+      *slot = gi = new group_info;
       gi->rtx_base = base;
       gi->id = rtx_group_next_id++;
       gi->base_mem = gen_rtx_MEM (BLKmode, base);
@@ -776,24 +855,6 @@ dse_step0 (void)
   scratch = BITMAP_ALLOC (&reg_obstack);
   kill_on_calls = BITMAP_ALLOC (&dse_bitmap_obstack);
 
-  rtx_store_info_pool
-    = create_alloc_pool ("rtx_store_info_pool",
-			 sizeof (struct store_info), 100);
-  read_info_pool
-    = create_alloc_pool ("read_info_pool",
-			 sizeof (struct read_info), 100);
-  insn_info_pool
-    = create_alloc_pool ("insn_info_pool",
-			 sizeof (struct insn_info), 100);
-  bb_info_pool
-    = create_alloc_pool ("bb_info_pool",
-			 sizeof (struct dse_bb_info), 100);
-  rtx_group_info_pool
-    = create_alloc_pool ("rtx_group_info_pool",
-			 sizeof (struct group_info), 100);
-  deferred_change_pool
-    = create_alloc_pool ("deferred_change_pool",
-			 sizeof (struct deferred_change), 10);
 
   rtx_group_table = new hash_table<invariant_group_base_hasher> (11);
 
@@ -829,9 +890,9 @@ free_store_info (insn_info_t insn_info)
       if (store_info->is_large)
 	BITMAP_FREE (store_info->positions_needed.large.bmap);
       if (store_info->cse_base)
-	pool_free (cse_store_info_pool, store_info);
+	cse_store_info_pool.remove (store_info);
       else
-	pool_free (rtx_store_info_pool, store_info);
+	rtx_store_info_pool.remove (store_info);
       store_info = next;
     }
 
@@ -948,7 +1009,7 @@ check_for_inc_dec_1 (insn_info_t insn_info)
 bool
 check_for_inc_dec (rtx_insn *insn)
 {
-  struct insn_info insn_info;
+  insn_info_type insn_info;
   rtx note;
 
   insn_info.insn = insn;
@@ -989,7 +1050,7 @@ delete_dead_store_insn (insn_info_t insn_info)
   while (read_info)
     {
       read_info_t next = read_info->next;
-      pool_free (read_info_pool, read_info);
+      delete read_info;
       read_info = next;
     }
   insn_info->read_rec = NULL;
@@ -1113,7 +1174,7 @@ free_read_records (bb_info_t bb_info)
       read_info_t next = (*ptr)->next;
       if ((*ptr)->alias_set == 0)
         {
-          pool_free (read_info_pool, *ptr);
+	  delete *ptr;
           *ptr = next;
         }
       else
@@ -1167,7 +1228,7 @@ const_or_frame_p (rtx x)
 	return true;
       return false;
     }
-  
+
   return false;
 }
 
@@ -1488,7 +1549,7 @@ record_store (rtx body, bb_info_t bb_info)
       if (clear_alias_group->offset_map_size_p < spill_alias_set)
 	clear_alias_group->offset_map_size_p = spill_alias_set;
 
-      store_info = (store_info_t) pool_alloc (rtx_store_info_pool);
+      store_info = rtx_store_info_pool.allocate ();
 
       if (dump_file && (dump_flags & TDF_DETAILS))
 	fprintf (dump_file, " processing spill store %d(%s)\n",
@@ -1503,7 +1564,7 @@ record_store (rtx body, bb_info_t bb_info)
 	= rtx_group_vec[group_id];
       tree expr = MEM_EXPR (mem);
 
-      store_info = (store_info_t) pool_alloc (rtx_store_info_pool);
+      store_info = rtx_store_info_pool.allocate ();
       set_usage_bits (group, offset, width, expr);
 
       if (dump_file && (dump_flags & TDF_DETAILS))
@@ -1516,7 +1577,7 @@ record_store (rtx body, bb_info_t bb_info)
 	insn_info->stack_pointer_based = true;
       insn_info->contains_cselib_groups = true;
 
-      store_info = (store_info_t) pool_alloc (cse_store_info_pool);
+      store_info = cse_store_info_pool.allocate ();
       group_id = -1;
 
       if (dump_file && (dump_flags & TDF_DETAILS))
@@ -2060,8 +2121,7 @@ replace_read (store_info_t store_info, insn_info_t store_insn,
 
   if (validate_change (read_insn->insn, loc, read_reg, 0))
     {
-      deferred_change_t deferred_change =
-	(deferred_change_t) pool_alloc (deferred_change_pool);
+      deferred_change_t change = new deferred_change;
 
       /* Insert this right before the store insn where it will be safe
 	 from later insns that might change it before the read.  */
@@ -2091,15 +2151,15 @@ replace_read (store_info_t store_info, insn_info_t store_insn,
 	 block we can put them back.  */
 
       *loc = read_info->mem;
-      deferred_change->next = deferred_change_list;
-      deferred_change_list = deferred_change;
-      deferred_change->loc = loc;
-      deferred_change->reg = read_reg;
+      change->next = deferred_change_list;
+      deferred_change_list = change;
+      change->loc = loc;
+      change->reg = read_reg;
 
       /* Get rid of the read_info, from the point of view of the
 	 rest of dse, play like this read never happened.  */
       read_insn->read_rec = read_info->next;
-      pool_free (read_info_pool, read_info);
+      delete read_info;
       if (dump_file && (dump_flags & TDF_DETAILS))
 	{
 	  fprintf (dump_file, " -- replaced the loaded MEM with ");
@@ -2165,7 +2225,7 @@ check_mem_read_rtx (rtx *loc, bb_info_t bb_info)
   else
     width = GET_MODE_SIZE (GET_MODE (mem));
 
-  read_info = (read_info_t) pool_alloc (read_info_pool);
+  read_info = new read_info_type;
   read_info->group_id = group_id;
   read_info->mem = mem;
   read_info->alias_set = spill_alias_set;
@@ -2481,9 +2541,9 @@ static void
 scan_insn (bb_info_t bb_info, rtx_insn *insn)
 {
   rtx body;
-  insn_info_t insn_info = (insn_info_t) pool_alloc (insn_info_pool);
+  insn_info_type *insn_info = new insn_info_type;
   int mems_found = 0;
-  memset (insn_info, 0, sizeof (struct insn_info));
+  memset (insn_info, 0, sizeof (struct insn_info_type));
 
   if (dump_file && (dump_flags & TDF_DETAILS))
     fprintf (dump_file, "\n**scanning insn=%d\n",
@@ -2740,9 +2800,9 @@ dse_step1 (void)
   FOR_ALL_BB_FN (bb, cfun)
     {
       insn_info_t ptr;
-      bb_info_t bb_info = (bb_info_t) pool_alloc (bb_info_pool);
+      bb_info_t bb_info = new dse_bb_info_type;
 
-      memset (bb_info, 0, sizeof (struct dse_bb_info));
+      memset (bb_info, 0, sizeof (dse_bb_info_type));
       bitmap_set_bit (all_blocks, bb->index);
       bb_info->regs_live = regs_live;
 
@@ -2756,9 +2816,6 @@ dse_step1 (void)
 	{
 	  rtx_insn *insn;
 
-	  cse_store_info_pool
-	    = create_alloc_pool ("cse_store_info_pool",
-				 sizeof (struct store_info), 100);
 	  active_local_stores = NULL;
 	  active_local_stores_len = 0;
 	  cselib_clear_table ();
@@ -2820,7 +2877,7 @@ dse_step1 (void)
 	      /* There is no reason to validate this change.  That was
 		 done earlier.  */
 	      *deferred_change_list->loc = deferred_change_list->reg;
-	      pool_free (deferred_change_pool, deferred_change_list);
+	      delete deferred_change_list;
 	      deferred_change_list = next;
 	    }
 
@@ -2866,7 +2923,7 @@ dse_step1 (void)
 	      ptr = ptr->prev_insn;
 	    }
 
-	  free_alloc_pool (cse_store_info_pool);
+	  cse_store_info_pool.release ();
 	}
       bb_info->regs_live = NULL;
     }
@@ -3704,12 +3761,12 @@ dse_step7 (void)
   BITMAP_FREE (all_blocks);
   BITMAP_FREE (scratch);
 
-  free_alloc_pool (rtx_store_info_pool);
-  free_alloc_pool (read_info_pool);
-  free_alloc_pool (insn_info_pool);
-  free_alloc_pool (bb_info_pool);
-  free_alloc_pool (rtx_group_info_pool);
-  free_alloc_pool (deferred_change_pool);
+  rtx_store_info_pool.release ();
+  read_info_type::pool.release ();
+  insn_info_type::pool.release ();
+  dse_bb_info_type::pool.release ();
+  group_info::pool.release ();
+  deferred_change::pool.release ();
 }
 
 
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* [PATCH 20/35] Change use to type-based pool allocator in ira-build.c.
  2015-05-27 14:09 [PATCH 01/35] Introduce new type-based pool allocator mliska
                   ` (22 preceding siblings ...)
  2015-05-27 14:20 ` [PATCH 07/35] Change use to type-based pool allocator in var-tracking.c mliska
@ 2015-05-27 14:21 ` mliska
  2015-05-27 18:15   ` Jeff Law
  2015-05-27 14:21 ` [PATCH 30/35] Change use to type-based pool allocator in ipa-inline-analysis.c mliska
                   ` (10 subsequent siblings)
  34 siblings, 1 reply; 108+ messages in thread
From: mliska @ 2015-05-27 14:21 UTC (permalink / raw)
  To: gcc-patches

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* ira-build.c (initiate_cost_vectors): Use new type-based pool allocator.
	(ira_allocate_cost_vector): Likewise.
	(ira_free_cost_vector): Likewise.
	(finish_cost_vectors): Likewise.
---
 gcc/ira-build.c | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/gcc/ira-build.c b/gcc/ira-build.c
index 8b6b956..2de7d34 100644
--- a/gcc/ira-build.c
+++ b/gcc/ira-build.c
@@ -1633,7 +1633,7 @@ finish_copies (void)
 \f
 
 /* Pools for cost vectors.  It is defined only for allocno classes.  */
-static alloc_pool cost_vector_pool[N_REG_CLASSES];
+static pool_allocator<int> * cost_vector_pool[N_REG_CLASSES];
 
 /* The function initiates work with hard register cost vectors.  It
    creates allocation pool for each allocno class.  */
@@ -1646,10 +1646,9 @@ initiate_cost_vectors (void)
   for (i = 0; i < ira_allocno_classes_num; i++)
     {
       aclass = ira_allocno_classes[i];
-      cost_vector_pool[aclass]
-	= create_alloc_pool ("cost vectors",
-			     sizeof (int) * ira_class_hard_regs_num[aclass],
-			     100);
+      cost_vector_pool[aclass] = new pool_allocator<int>
+	("cost vectors", 100,
+	 sizeof (int) * (ira_class_hard_regs_num[aclass] - 1));
     }
 }
 
@@ -1657,7 +1656,7 @@ initiate_cost_vectors (void)
 int *
 ira_allocate_cost_vector (reg_class_t aclass)
 {
-  return (int *) pool_alloc (cost_vector_pool[(int) aclass]);
+  return cost_vector_pool[(int) aclass]->allocate ();
 }
 
 /* Free a cost vector VEC for ACLASS.  */
@@ -1665,7 +1664,7 @@ void
 ira_free_cost_vector (int *vec, reg_class_t aclass)
 {
   ira_assert (vec != NULL);
-  pool_free (cost_vector_pool[(int) aclass], vec);
+  cost_vector_pool[(int) aclass]->remove (vec);
 }
 
 /* Finish work with hard register cost vectors.  Release allocation
@@ -1679,7 +1678,7 @@ finish_cost_vectors (void)
   for (i = 0; i < ira_allocno_classes_num; i++)
     {
       aclass = ira_allocno_classes[i];
-      free_alloc_pool (cost_vector_pool[aclass]);
+      delete cost_vector_pool[aclass];
     }
 }
 
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* [PATCH 22/35] Change use to type-based pool allocator in sched-deps.c.
  2015-05-27 14:09 [PATCH 01/35] Introduce new type-based pool allocator mliska
                   ` (29 preceding siblings ...)
  2015-05-27 14:21 ` [PATCH 18/35] Change use to type-based pool allocator in stmt.c mliska
@ 2015-05-27 14:42 ` mliska
  2015-05-27 18:16   ` Jeff Law
  2015-05-27 14:55 ` [PATCH 17/35] Change use to type-based pool allocator in tree-ssa-math-opts.c mliska
                   ` (3 subsequent siblings)
  34 siblings, 1 reply; 108+ messages in thread
From: mliska @ 2015-05-27 14:42 UTC (permalink / raw)
  To: gcc-patches

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* sched-deps.c (create_dep_node): Use new type-based pool allocator.
	(delete_dep_node): Likewise.
	(create_deps_list): Likewise.
	(free_deps_list): Likewise.
	(sched_deps_init): Likewise.
	(sched_deps_finish): Likewise.
---
 gcc/sched-deps.c | 23 ++++++++++++-----------
 1 file changed, 12 insertions(+), 11 deletions(-)

diff --git a/gcc/sched-deps.c b/gcc/sched-deps.c
index c1cfc1f..30d4630 100644
--- a/gcc/sched-deps.c
+++ b/gcc/sched-deps.c
@@ -334,7 +334,7 @@ dep_link_is_detached_p (dep_link_t link)
 }
 
 /* Pool to hold all dependency nodes (dep_node_t).  */
-static alloc_pool dn_pool;
+static pool_allocator<_dep_node> *dn_pool;
 
 /* Number of dep_nodes out there.  */
 static int dn_pool_diff = 0;
@@ -343,7 +343,7 @@ static int dn_pool_diff = 0;
 static dep_node_t
 create_dep_node (void)
 {
-  dep_node_t n = (dep_node_t) pool_alloc (dn_pool);
+  dep_node_t n = dn_pool->allocate ();
   dep_link_t back = DEP_NODE_BACK (n);
   dep_link_t forw = DEP_NODE_FORW (n);
 
@@ -371,11 +371,11 @@ delete_dep_node (dep_node_t n)
 
   --dn_pool_diff;
 
-  pool_free (dn_pool, n);
+  dn_pool->remove (n);
 }
 
 /* Pool to hold dependencies lists (deps_list_t).  */
-static alloc_pool dl_pool;
+static pool_allocator<_deps_list> *dl_pool;
 
 /* Number of deps_lists out there.  */
 static int dl_pool_diff = 0;
@@ -393,7 +393,7 @@ deps_list_empty_p (deps_list_t l)
 static deps_list_t
 create_deps_list (void)
 {
-  deps_list_t l = (deps_list_t) pool_alloc (dl_pool);
+  deps_list_t l = dl_pool->allocate ();
 
   DEPS_LIST_FIRST (l) = NULL;
   DEPS_LIST_N_LINKS (l) = 0;
@@ -410,7 +410,7 @@ free_deps_list (deps_list_t l)
 
   --dl_pool_diff;
 
-  pool_free (dl_pool, l);
+  dl_pool->remove (l);
 }
 
 /* Return true if there is no dep_nodes and deps_lists out there.
@@ -4075,10 +4075,10 @@ sched_deps_init (bool global_p)
 
   if (global_p)
     {
-      dl_pool = create_alloc_pool ("deps_list", sizeof (struct _deps_list),
+      dl_pool = new pool_allocator<_deps_list> ("deps_list",
                                    /* Allocate lists for one block at a time.  */
                                    insns_in_block);
-      dn_pool = create_alloc_pool ("dep_node", sizeof (struct _dep_node),
+      dn_pool = new pool_allocator<_dep_node> ("dep_node",
                                    /* Allocate nodes for one block at a time.
                                       We assume that average insn has
                                       5 producers.  */
@@ -4128,9 +4128,10 @@ void
 sched_deps_finish (void)
 {
   gcc_assert (deps_pools_are_empty_p ());
-  free_alloc_pool_if_empty (&dn_pool);
-  free_alloc_pool_if_empty (&dl_pool);
-  gcc_assert (dn_pool == NULL && dl_pool == NULL);
+  dn_pool->release_if_empty ();
+  dn_pool = NULL;
+  dl_pool->release_if_empty ();
+  dl_pool = NULL;
 
   h_d_i_d.release ();
   cache_size = 0;
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* [PATCH 17/35] Change use to type-based pool allocator in tree-ssa-math-opts.c.
  2015-05-27 14:09 [PATCH 01/35] Introduce new type-based pool allocator mliska
                   ` (30 preceding siblings ...)
  2015-05-27 14:42 ` [PATCH 22/35] Change use to type-based pool allocator in sched-deps.c mliska
@ 2015-05-27 14:55 ` mliska
  2015-05-27 18:12   ` Jeff Law
  2015-05-27 14:58 ` [PATCH 33/35] Change use to type-based pool allocator in ira-color.c mliska
                   ` (2 subsequent siblings)
  34 siblings, 1 reply; 108+ messages in thread
From: mliska @ 2015-05-27 14:55 UTC (permalink / raw)
  To: gcc-patches

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* tree-ssa-math-opts.c (occ_new): Use new type-based pool allocator.
	(free_bb): Likewise.
	(pass_cse_reciprocals::execute): Likewise.
---
 gcc/tree-ssa-math-opts.c | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/gcc/tree-ssa-math-opts.c b/gcc/tree-ssa-math-opts.c
index 98e2c49..0df755b 100644
--- a/gcc/tree-ssa-math-opts.c
+++ b/gcc/tree-ssa-math-opts.c
@@ -229,7 +229,7 @@ static struct
 static struct occurrence *occ_head;
 
 /* Allocation pool for getting instances of "struct occurrence".  */
-static alloc_pool occ_pool;
+static pool_allocator<occurrence> *occ_pool;
 
 
 
@@ -240,7 +240,7 @@ occ_new (basic_block bb, struct occurrence *children)
 {
   struct occurrence *occ;
 
-  bb->aux = occ = (struct occurrence *) pool_alloc (occ_pool);
+  bb->aux = occ = occ_pool->allocate ();
   memset (occ, 0, sizeof (struct occurrence));
 
   occ->bb = bb;
@@ -468,7 +468,7 @@ free_bb (struct occurrence *occ)
   next = occ->next;
   child = occ->children;
   occ->bb->aux = NULL;
-  pool_free (occ_pool, occ);
+  occ_pool->remove (occ);
 
   /* Now ensure that we don't recurse unless it is necessary.  */
   if (!child)
@@ -572,9 +572,8 @@ pass_cse_reciprocals::execute (function *fun)
   basic_block bb;
   tree arg;
 
-  occ_pool = create_alloc_pool ("dominators for recip",
-				sizeof (struct occurrence),
-				n_basic_blocks_for_fn (fun) / 3 + 1);
+  occ_pool = new pool_allocator<occurrence>
+    ("dominators for recip", n_basic_blocks_for_fn (fun) / 3 + 1);
 
   memset (&reciprocal_stats, 0, sizeof (reciprocal_stats));
   calculate_dominance_info (CDI_DOMINATORS);
@@ -704,7 +703,7 @@ pass_cse_reciprocals::execute (function *fun)
 
   free_dominance_info (CDI_DOMINATORS);
   free_dominance_info (CDI_POST_DOMINATORS);
-  free_alloc_pool (occ_pool);
+  delete occ_pool;
   return 0;
 }
 
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* [PATCH 33/35] Change use to type-based pool allocator in ira-color.c.
  2015-05-27 14:09 [PATCH 01/35] Introduce new type-based pool allocator mliska
                   ` (31 preceding siblings ...)
  2015-05-27 14:55 ` [PATCH 17/35] Change use to type-based pool allocator in tree-ssa-math-opts.c mliska
@ 2015-05-27 14:58 ` mliska
  2015-05-27 18:24   ` Jeff Law
  2015-05-28 11:23   ` Statically-allocated objects with non-trivial ctors (was Re: [PATCH 33/35] Change use to type-based pool allocator in ira-color.c.) David Malcolm
  2015-05-27 15:04 ` [PATCH 13/35] Change use to type-based pool allocator in df-problems.c mliska
  2015-05-27 17:50 ` [PATCH 01/35] Introduce new type-based pool allocator Jeff Law
  34 siblings, 2 replies; 108+ messages in thread
From: mliska @ 2015-05-27 14:58 UTC (permalink / raw)
  To: gcc-patches

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* ira-color.c (init_update_cost_records): Use new type-based pool allocator.
	(get_update_cost_record): Likewise.
	(free_update_cost_record_list): Likewise.
	(finish_update_cost_records): Likewise.
	(initiate_cost_update): Likewise.
---
 gcc/ira-color.c | 19 +++++--------------
 1 file changed, 5 insertions(+), 14 deletions(-)

diff --git a/gcc/ira-color.c b/gcc/ira-color.c
index 4750714..4aec98e 100644
--- a/gcc/ira-color.c
+++ b/gcc/ira-color.c
@@ -1166,16 +1166,8 @@ setup_profitable_hard_regs (void)
    allocnos.  */
 
 /* Pool for update cost records.  */
-static alloc_pool update_cost_record_pool;
-
-/* Initiate update cost records.  */
-static void
-init_update_cost_records (void)
-{
-  update_cost_record_pool
-    = create_alloc_pool ("update cost records",
-			 sizeof (struct update_cost_record), 100);
-}
+static pool_allocator<update_cost_record> update_cost_record_pool
+  ("update cost records", 100);
 
 /* Return new update cost record with given params.  */
 static struct update_cost_record *
@@ -1184,7 +1176,7 @@ get_update_cost_record (int hard_regno, int divisor,
 {
   struct update_cost_record *record;
 
-  record = (struct update_cost_record *) pool_alloc (update_cost_record_pool);
+  record = update_cost_record_pool.allocate ();
   record->hard_regno = hard_regno;
   record->divisor = divisor;
   record->next = next;
@@ -1200,7 +1192,7 @@ free_update_cost_record_list (struct update_cost_record *list)
   while (list != NULL)
     {
       next = list->next;
-      pool_free (update_cost_record_pool, list);
+      update_cost_record_pool.remove (list);
       list = next;
     }
 }
@@ -1209,7 +1201,7 @@ free_update_cost_record_list (struct update_cost_record *list)
 static void
 finish_update_cost_records (void)
 {
-  free_alloc_pool (update_cost_record_pool);
+  update_cost_record_pool.release ();
 }
 
 /* Array whose element value is TRUE if the corresponding hard
@@ -1264,7 +1256,6 @@ initiate_cost_update (void)
     = (struct update_cost_queue_elem *) ira_allocate (size);
   memset (update_cost_queue_elems, 0, size);
   update_cost_check = 0;
-  init_update_cost_records ();
 }
 
 /* Deallocate data used by function update_costs_from_copies.  */
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* [PATCH 13/35] Change use to type-based pool allocator in df-problems.c.
  2015-05-27 14:09 [PATCH 01/35] Introduce new type-based pool allocator mliska
                   ` (32 preceding siblings ...)
  2015-05-27 14:58 ` [PATCH 33/35] Change use to type-based pool allocator in ira-color.c mliska
@ 2015-05-27 15:04 ` mliska
  2015-05-27 18:05   ` Jeff Law
  2015-05-27 17:50 ` [PATCH 01/35] Introduce new type-based pool allocator Jeff Law
  34 siblings, 1 reply; 108+ messages in thread
From: mliska @ 2015-05-27 15:04 UTC (permalink / raw)
  To: gcc-patches

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* df-problems.c (df_chain_create):Use new type-based pool allocator.
	(df_chain_unlink_1) Likewise.
	(df_chain_unlink) Likewise.
	(df_chain_remove_problem) Likewise.
	(df_chain_alloc) Likewise.
	(df_chain_free) Likewise.
	* df.h (struct dataflow) Likewise.
---
 gcc/df-problems.c | 14 +++++++-------
 gcc/df.h          |  2 +-
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/gcc/df-problems.c b/gcc/df-problems.c
index ff08abd..7700157 100644
--- a/gcc/df-problems.c
+++ b/gcc/df-problems.c
@@ -1879,7 +1879,7 @@ struct df_link *
 df_chain_create (df_ref src, df_ref dst)
 {
   struct df_link *head = DF_REF_CHAIN (src);
-  struct df_link *link = (struct df_link *) pool_alloc (df_chain->block_pool);
+  struct df_link *link = df_chain->block_pool->allocate ();
 
   DF_REF_CHAIN (src) = link;
   link->next = head;
@@ -1904,7 +1904,7 @@ df_chain_unlink_1 (df_ref ref, df_ref target)
 	    prev->next = chain->next;
 	  else
 	    DF_REF_CHAIN (ref) = chain->next;
-	  pool_free (df_chain->block_pool, chain);
+	  df_chain->block_pool->remove (chain);
 	  return;
 	}
       prev = chain;
@@ -1924,7 +1924,7 @@ df_chain_unlink (df_ref ref)
       struct df_link *next = chain->next;
       /* Delete the other side if it exists.  */
       df_chain_unlink_1 (chain->ref, ref);
-      pool_free (df_chain->block_pool, chain);
+      df_chain->block_pool->remove (chain);
       chain = next;
     }
   DF_REF_CHAIN (ref) = NULL;
@@ -1956,7 +1956,7 @@ df_chain_remove_problem (void)
 
   /* Wholesale destruction of the old chains.  */
   if (df_chain->block_pool)
-    free_alloc_pool (df_chain->block_pool);
+    delete df_chain->block_pool;
 
   EXECUTE_IF_SET_IN_BITMAP (df_chain->out_of_date_transfer_functions, 0, bb_index, bi)
     {
@@ -2010,8 +2010,8 @@ static void
 df_chain_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
 {
   df_chain_remove_problem ();
-  df_chain->block_pool = create_alloc_pool ("df_chain_block pool",
-					 sizeof (struct df_link), 50);
+  df_chain->block_pool = new pool_allocator<df_link> ("df_chain_block pool",
+						      50);
   df_chain->optional_p = true;
 }
 
@@ -2146,7 +2146,7 @@ df_chain_finalize (bitmap all_blocks)
 static void
 df_chain_free (void)
 {
-  free_alloc_pool (df_chain->block_pool);
+  delete df_chain->block_pool;
   BITMAP_FREE (df_chain->out_of_date_transfer_functions);
   free (df_chain);
 }
diff --git a/gcc/df.h b/gcc/df.h
index 7e233667..8a5b21f 100644
--- a/gcc/df.h
+++ b/gcc/df.h
@@ -305,7 +305,7 @@ struct dataflow
   unsigned int block_info_size;
 
   /* The pool to allocate the block_info from. */
-  alloc_pool block_pool;
+  pool_allocator<df_link> *block_pool;
 
   /* The lr and live problems have their transfer functions recomputed
      only if necessary.  This is possible for them because, the
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 01/35] Introduce new type-based pool allocator.
  2015-05-27 14:09 [PATCH 01/35] Introduce new type-based pool allocator mliska
                   ` (33 preceding siblings ...)
  2015-05-27 15:04 ` [PATCH 13/35] Change use to type-based pool allocator in df-problems.c mliska
@ 2015-05-27 17:50 ` Jeff Law
  2015-05-28 13:27   ` Martin Liška
  34 siblings, 1 reply; 108+ messages in thread
From: Jeff Law @ 2015-05-27 17:50 UTC (permalink / raw)
  To: mliska, gcc-patches

On 05/27/2015 07:56 AM, mliska wrote:
> Hello.
>
> Following patch set attempts to replace old-style pool allocator
> to a type-based one. Moreover, as we utilize classes and structs that are used
> just by a pool allocator, these types have overridden ctors and dtors.
> Thus, using the allocator is much easier and we shouldn't cast types
> back and forth. Another benefit can be achieved in the future, as we will
> be able to call class constructors to correctly register the location
> where memory is allocated (-fgather-detailed-mem-stats).
>
> Patch can bootstrap on x86_64-linux-gnu and ppc64-linux-gnu and
> survives regression tests on x86_64-linux-gnu.
>
> Ready for trunk?
> Thanks,
> Martin
>
> gcc/ChangeLog:
>
> 2015-04-30  Martin Liska  <mliska@suse.cz>
>
> 	* alloc-pool.c (struct alloc_pool_descriptor): Move definition
> 	to header file.
> 	* alloc-pool.h (pool_allocator::pool_allocator): New function.
> 	(pool_allocator::release): Likewise.
> 	(inline pool_allocator::release_if_empty): Likewise.
> 	(inline pool_allocator::~pool_allocator): Likewise.
> 	(pool_allocator::allocate): Likewise.
> 	(pool_allocator::remove): Likewise.
So on a general note, I don't like changing the size of the structure 
based on ENABLE_CHECKING.  If we've got other cases where we do this, 
then I guess it's OK, but if not, I'd prefer not to start doing so.


> ---

> +
> +  /* Align X to 8.  */
> +  size_t align_eight (size_t x)
> +  {
> +    return (((x+7) >> 3) << 3);
> +  }
> +
> +  const char *m_name;
> +#ifdef ENABLE_CHECKING
> +  ALLOC_POOL_ID_TYPE m_id;
> +#endif
> +  size_t m_elts_per_block;
> +
> +  /* These are the elements that have been allocated at least once and freed.  */
> +  allocation_pool_list *m_returned_free_list;
> +
> +  /* These are the elements that have not yet been allocated out of
> +     the last block obtained from XNEWVEC.  */
> +  char* m_virgin_free_list;
> +
> +  /* The number of elements in the virgin_free_list that can be
> +     allocated before needing another block.  */
> +  size_t m_virgin_elts_remaining;
> +  size_t m_elts_allocated;
> +  size_t m_elts_free;
> +  size_t m_blocks_allocated;
> +  allocation_pool_list *m_block_list;
> +  size_t m_block_size;
> +  size_t m_elt_size;
Several fields aren't documented.  They're largely self-explanatory, so 
I won't insist you document those trailing fields.  Your call whether or 
not to add docs for them.


> +
> +  /* Now align the size to a multiple of 4.  */
> +  size = align_eight (size);
Why not just aligned to 4, rather than a multiple of 4?  Presumably the 
extra 4 bytes don't matter in practice?

> +
> +template <typename T>
> +void
> +inline pool_allocator<T>::release_if_empty ()
> +{
> +  if (m_elts_free == m_elts_allocated)
> +    release ();
> +}
Is the release_if_empty all that useful in practice?

So the big issue in my mind continues to be the additional element in 
the structure when ENABLE_CHECKING is on.  As mentioned earlier, if 
we're already doing this elsewhere, then I won't object.  If we aren't, 
then I don't want to start doing so now.

The rest of the stuff are just minor questions, but nothing which would 
in my mind stop this from going forward.

Presumably your testing was with the whole series and they can't go in 
piecemeal, right?


jeff

^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 02/35] Change use to type-based pool allocator in et-forest.c.
  2015-05-27 14:09 ` [PATCH 02/35] Change use to type-based pool allocator in et-forest.c mliska
@ 2015-05-27 17:50   ` Jeff Law
  2015-05-29 13:33     ` Martin Liška
  0 siblings, 1 reply; 108+ messages in thread
From: Jeff Law @ 2015-05-27 17:50 UTC (permalink / raw)
  To: mliska, gcc-patches

On 05/27/2015 07:56 AM, mliska wrote:
> gcc/ChangeLog:
>
> 2015-04-30  Martin Liska  <mliska@suse.cz>
>
> 	* et-forest.c (et_new_occ): Use new type-based pool allocator.
> 	(et_new_tree): Likewise.
> 	(et_free_tree): Likewise.
> 	(et_free_tree_force): Likewise.
> 	(et_free_pools): Likewise.
> 	(et_split): Likewise.
OK.
jeff

^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 03/35] Change use to type-based pool allocator in lra-lives.c.
  2015-05-27 14:00 ` [PATCH 03/35] Change use to type-based pool allocator in lra-lives.c mliska
@ 2015-05-27 17:53   ` Jeff Law
  2015-05-29 13:34     ` Martin Liška
  2015-05-28  0:48   ` Trevor Saunders
  1 sibling, 1 reply; 108+ messages in thread
From: Jeff Law @ 2015-05-27 17:53 UTC (permalink / raw)
  To: mliska, gcc-patches

On 05/27/2015 07:56 AM, mliska wrote:
> gcc/ChangeLog:
>
> 2015-04-30  Martin Liska  <mliska@suse.cz>
>
> 	* lra-lives.c (free_live_range): Use new type-based pool allocator.
> 	(free_live_range_list) Likewise.
> 	(create_live_range) Likewise.
> 	(copy_live_range) Likewise.
> 	(lra_merge_live_ranges) Likewise.
> 	(remove_some_program_points_and_update_live_ranges) Likewise.
> 	(lra_live_ranges_init) Likewise.
> 	(lra_live_ranges_finish) Likewise.
OK.
jeff

^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 04/35] Change use to type-based pool allocator in lra.c.
  2015-05-27 14:00 ` [PATCH 04/35] Change use to type-based pool allocator in lra.c mliska
@ 2015-05-27 17:55   ` Jeff Law
  2015-05-29 13:34     ` Martin Liška
  0 siblings, 1 reply; 108+ messages in thread
From: Jeff Law @ 2015-05-27 17:55 UTC (permalink / raw)
  To: mliska, gcc-patches

On 05/27/2015 07:56 AM, mliska wrote:
> gcc/ChangeLog:
>
> 2015-04-30  Martin Liska  <mliska@suse.cz>
>
> 	* lra.c (init_insn_regs): Use new type-based pool allocator.
> 	(new_insn_reg) Likewise.
> 	(free_insn_reg) Likewise.
> 	(free_insn_regs) Likewise.
> 	(finish_insn_regs) Likewise.
> 	(init_insn_recog_data) Likewise.
> 	(init_reg_info) Likewise.
> 	(finish_reg_info) Likewise.
> 	(lra_free_copies) Likewise.
> 	(lra_create_copy) Likewise.
> 	(invalidate_insn_data_regno_info) Likewise.
OK.
jeff

^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 05/35] Change use to type-based pool allocator in ira-color.c.
  2015-05-27 14:15 ` [PATCH 05/35] Change use to type-based pool allocator in ira-color.c mliska
@ 2015-05-27 17:59   ` Jeff Law
  2015-05-29 13:34     ` Martin Liška
  0 siblings, 1 reply; 108+ messages in thread
From: Jeff Law @ 2015-05-27 17:59 UTC (permalink / raw)
  To: mliska, gcc-patches

On 05/27/2015 07:56 AM, mliska wrote:
> gcc/ChangeLog:
>
> 2015-04-30  Martin Liska  <mliska@suse.cz>
>
> 	* ira-color.c (init_update_cost_records):Use new type-based pool allocator.
> 	(get_update_cost_record) Likewise.
> 	(free_update_cost_record_list) Likewise.
> 	(finish_update_cost_records) Likewise.
> 	(initiate_cost_update) Likewise.
OK.
jeff

^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 09/35] Change use to type-based pool allocator in c-format.c.
  2015-05-27 14:00 ` [PATCH 09/35] Change use to type-based pool allocator in c-format.c mliska
  2015-05-27 14:16   ` Jakub Jelinek
@ 2015-05-27 18:01   ` Jeff Law
  2015-05-29 13:35     ` Martin Liška
  1 sibling, 1 reply; 108+ messages in thread
From: Jeff Law @ 2015-05-27 18:01 UTC (permalink / raw)
  To: mliska, gcc-patches

On 05/27/2015 07:56 AM, mliska wrote:
> gcc/c-family/ChangeLog:
>
> 2015-04-30  Martin Liska  <mliska@suse.cz>
>
> 	* c-format.c (check_format_arg):Use new type-based pool allocator.
> 	(check_format_info_main) Likewise.
OK.
jeff

^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 10/35] Change use to type-based pool allocator in cfg.c.
  2015-05-27 14:00 ` [PATCH 10/35] Change use to type-based pool allocator in cfg.c mliska
@ 2015-05-27 18:01   ` Jeff Law
  2015-05-29 13:34     ` Martin Liška
  0 siblings, 1 reply; 108+ messages in thread
From: Jeff Law @ 2015-05-27 18:01 UTC (permalink / raw)
  To: mliska, gcc-patches

On 05/27/2015 07:56 AM, mliska wrote:
> gcc/ChangeLog:
>
> 2015-04-30  Martin Liska  <mliska@suse.cz>
>
> 	* cfg.c (initialize_original_copy_tables):Use new type-based pool allocator.
> 	(free_original_copy_tables) Likewise.
> 	(copy_original_table_clear) Likewise.
> 	(copy_original_table_set) Likewise.
OK.
jeff

^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 08/35] Change use to type-based pool allocator in asan.c.
  2015-05-27 14:19 ` [PATCH 08/35] Change use to type-based pool allocator in asan.c mliska
@ 2015-05-27 18:01   ` Jeff Law
  0 siblings, 0 replies; 108+ messages in thread
From: Jeff Law @ 2015-05-27 18:01 UTC (permalink / raw)
  To: mliska, gcc-patches

On 05/27/2015 07:56 AM, mliska wrote:
> gcc/ChangeLog:
>
> 2015-04-30  Martin Liska  <mliska@suse.cz>
>
> 	* asan.c (asan_mem_ref_get_alloc_pool):Use new type-based pool allocator.
> 	(asan_mem_ref_new) Likewise.
> 	(free_mem_ref_resources) Likewise.
Presumably the inconsequential whitespace changes are removing trailing 
whitespace or something similar.

OK.
jeff

^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 11/35] Change use to type-based pool allocator in sh.c.
  2015-05-27 14:19 ` [PATCH 11/35] Change use to type-based pool allocator in sh.c mliska
@ 2015-05-27 18:03   ` Jeff Law
  2015-05-29 13:37     ` Martin Liška
  0 siblings, 1 reply; 108+ messages in thread
From: Jeff Law @ 2015-05-27 18:03 UTC (permalink / raw)
  To: mliska, gcc-patches

On 05/27/2015 07:56 AM, mliska wrote:
> gcc/ChangeLog:
>
> 2015-04-30  Martin Liska  <mliska@suse.cz>
>
> 	* config/sh/sh.c (add_constant):Use new type-based pool allocator.
> 	(sh_reorg) Likewise.
OK.
jeff

^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 13/35] Change use to type-based pool allocator in df-problems.c.
  2015-05-27 15:04 ` [PATCH 13/35] Change use to type-based pool allocator in df-problems.c mliska
@ 2015-05-27 18:05   ` Jeff Law
  2015-05-29 13:37     ` Martin Liška
  0 siblings, 1 reply; 108+ messages in thread
From: Jeff Law @ 2015-05-27 18:05 UTC (permalink / raw)
  To: mliska, gcc-patches

On 05/27/2015 07:56 AM, mliska wrote:
> gcc/ChangeLog:
>
> 2015-04-30  Martin Liska  <mliska@suse.cz>
>
> 	* df-problems.c (df_chain_create):Use new type-based pool allocator.
> 	(df_chain_unlink_1) Likewise.
> 	(df_chain_unlink) Likewise.
> 	(df_chain_remove_problem) Likewise.
> 	(df_chain_alloc) Likewise.
> 	(df_chain_free) Likewise.
> 	* df.h (struct dataflow) Likewise.
OK.

As Jakub noted, please double-check your ChangeLogs for proper 
formatting before committing.  There's consistently nits to fix in them.

Jeff

^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 16/35] Change use to type-based pool allocator in tree-sra.c.
  2015-05-27 14:21 ` [PATCH 16/35] Change use to type-based pool allocator in tree-sra.c mliska
@ 2015-05-27 18:11   ` Jeff Law
  2015-05-29 13:39     ` Martin Liška
  0 siblings, 1 reply; 108+ messages in thread
From: Jeff Law @ 2015-05-27 18:11 UTC (permalink / raw)
  To: mliska, gcc-patches

On 05/27/2015 07:56 AM, mliska wrote:
> gcc/ChangeLog:
>
> 2015-04-30  Martin Liska  <mliska@suse.cz>
>
> 	* tree-sra.c (sra_initialize): Use new type-based pool allocator.
> 	(sra_deinitialize) Likewise.
> 	(create_access_1) Likewise.
> 	(build_accesses_from_assign) Likewise.
> 	(create_artificial_child_access) Likewise.
OK.
jeff

^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 17/35] Change use to type-based pool allocator in tree-ssa-math-opts.c.
  2015-05-27 14:55 ` [PATCH 17/35] Change use to type-based pool allocator in tree-ssa-math-opts.c mliska
@ 2015-05-27 18:12   ` Jeff Law
  2015-05-29 13:39     ` Martin Liška
  0 siblings, 1 reply; 108+ messages in thread
From: Jeff Law @ 2015-05-27 18:12 UTC (permalink / raw)
  To: mliska, gcc-patches

On 05/27/2015 07:56 AM, mliska wrote:
> gcc/ChangeLog:
>
> 2015-04-30  Martin Liska  <mliska@suse.cz>
>
> 	* tree-ssa-math-opts.c (occ_new): Use new type-based pool allocator.
> 	(free_bb): Likewise.
> 	(pass_cse_reciprocals::execute): Likewise.
OK.
jeff

^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 19/35] Change use to type-based pool allocator in sel-sched-ir.c.
  2015-05-27 14:07 ` [PATCH 19/35] Change use to type-based pool allocator in sel-sched-ir.c mliska
@ 2015-05-27 18:12   ` Jeff Law
  2015-05-29 13:40     ` Martin Liška
  0 siblings, 1 reply; 108+ messages in thread
From: Jeff Law @ 2015-05-27 18:12 UTC (permalink / raw)
  To: mliska, gcc-patches

On 05/27/2015 07:56 AM, mliska wrote:
> gcc/ChangeLog:
>
> 2015-04-30  Martin Liska  <mliska@suse.cz>
>
> 	* sel-sched-ir.c (alloc_sched_pools): Use new type-based pool allocator.
> 	(free_sched_pools): Likewise.
> 	* sel-sched-ir.h (_list_alloc): Likewise.
> 	(_list_remove): Likewise.
OK
jeff

^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 18/35] Change use to type-based pool allocator in stmt.c.
  2015-05-27 14:21 ` [PATCH 18/35] Change use to type-based pool allocator in stmt.c mliska
@ 2015-05-27 18:13   ` Jeff Law
  2015-05-29 13:39     ` Martin Liška
  0 siblings, 1 reply; 108+ messages in thread
From: Jeff Law @ 2015-05-27 18:13 UTC (permalink / raw)
  To: mliska, gcc-patches

On 05/27/2015 07:56 AM, mliska wrote:
> gcc/ChangeLog:
>
> 2015-04-30  Martin Liska  <mliska@suse.cz>
>
> 	* stmt.c (add_case_node): Use new type-based pool allocator.
> 	(expand_case): Likewise.
> 	(expand_sjlj_dispatch_table): Likewise.
OK.
jeff

^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 21/35] Change use to type-based pool allocator in regcprop.c.
  2015-05-27 14:17 ` [PATCH 21/35] Change use to type-based pool allocator in regcprop.c mliska
@ 2015-05-27 18:14   ` Jeff Law
  2015-05-29 13:40     ` Martin Liška
  0 siblings, 1 reply; 108+ messages in thread
From: Jeff Law @ 2015-05-27 18:14 UTC (permalink / raw)
  To: mliska, gcc-patches

On 05/27/2015 07:56 AM, mliska wrote:
> gcc/ChangeLog:
>
> 2015-04-30  Martin Liska  <mliska@suse.cz>
>
> 	* regcprop.c (free_debug_insn_changes): Use new type-based pool allocator.
> 	(replace_oldest_value_reg): Likewise.
> 	(pass_cprop_hardreg::execute): Likewise.
OK.
jeff

^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 24/35] Change use to type-based pool allocator in tree-ssa-reassoc.c.
  2015-05-27 14:21 ` [PATCH 24/35] Change use to type-based pool allocator in tree-ssa-reassoc.c mliska
@ 2015-05-27 18:15   ` Jeff Law
  2015-05-29 13:41     ` Martin Liška
  0 siblings, 1 reply; 108+ messages in thread
From: Jeff Law @ 2015-05-27 18:15 UTC (permalink / raw)
  To: mliska, gcc-patches

On 05/27/2015 07:56 AM, mliska wrote:
> gcc/ChangeLog:
>
> 2015-04-30  Martin Liska  <mliska@suse.cz>
>
> 	* tree-ssa-reassoc.c (add_to_ops_vec): Use new type-based pool allocator.
> 	(add_repeat_to_ops_vec): Likewise.
> 	(get_ops): Likewise.
> 	(maybe_optimize_range_tests): Likewise.
> 	(init_reassoc): Likewise.
> 	(fini_reassoc): Likewise.
OK.
jeff

^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 20/35] Change use to type-based pool allocator in ira-build.c.
  2015-05-27 14:21 ` [PATCH 20/35] Change use to type-based pool allocator in ira-build.c mliska
@ 2015-05-27 18:15   ` Jeff Law
  2015-05-29 13:39     ` Martin Liška
  0 siblings, 1 reply; 108+ messages in thread
From: Jeff Law @ 2015-05-27 18:15 UTC (permalink / raw)
  To: mliska, gcc-patches

On 05/27/2015 07:56 AM, mliska wrote:
> gcc/ChangeLog:
>
> 2015-04-30  Martin Liska  <mliska@suse.cz>
>
> 	* ira-build.c (initiate_cost_vectors): Use new type-based pool allocator.
> 	(ira_allocate_cost_vector): Likewise.
> 	(ira_free_cost_vector): Likewise.
> 	(finish_cost_vectors): Likewise.
OK.
jeff

^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 25/35] Change use to type-based pool allocator in tree-ssa-sccvn.c.
  2015-05-27 14:19 ` [PATCH 25/35] Change use to type-based pool allocator in tree-ssa-sccvn.c mliska
@ 2015-05-27 18:16   ` Jeff Law
  2015-05-29 13:41     ` Martin Liška
  0 siblings, 1 reply; 108+ messages in thread
From: Jeff Law @ 2015-05-27 18:16 UTC (permalink / raw)
  To: mliska, gcc-patches

On 05/27/2015 07:56 AM, mliska wrote:
> gcc/ChangeLog:
>
> 2015-04-30  Martin Liska  <mliska@suse.cz>
>
> 	* tree-ssa-sccvn.c (vn_reference_insert): Use new type-based pool allocator.
> 	(vn_reference_insert_pieces): Likewise.
> 	(vn_phi_insert): Likewise.
> 	(visit_reference_op_call): Likewise.
> 	(copy_phi): Likewise.
> 	(copy_reference): Likewise.
> 	(process_scc): Likewise.
> 	(allocate_vn_table): Likewise.
> 	(free_vn_table): Likewise.
OK.
jeff


^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 22/35] Change use to type-based pool allocator in sched-deps.c.
  2015-05-27 14:42 ` [PATCH 22/35] Change use to type-based pool allocator in sched-deps.c mliska
@ 2015-05-27 18:16   ` Jeff Law
  2015-05-29 13:40     ` Martin Liška
  0 siblings, 1 reply; 108+ messages in thread
From: Jeff Law @ 2015-05-27 18:16 UTC (permalink / raw)
  To: mliska, gcc-patches

On 05/27/2015 07:56 AM, mliska wrote:
> gcc/ChangeLog:
>
> 2015-04-30  Martin Liska  <mliska@suse.cz>
>
> 	* sched-deps.c (create_dep_node): Use new type-based pool allocator.
> 	(delete_dep_node): Likewise.
> 	(create_deps_list): Likewise.
> 	(free_deps_list): Likewise.
> 	(sched_deps_init): Likewise.
> 	(sched_deps_finish): Likewise.
OK.

First use of the release_if_empty API that I've seen in these patches.

jeff

^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 26/35] Change use to type-based pool allocator in tree-ssa-strlen.c.
  2015-05-27 14:21 ` [PATCH 26/35] Change use to type-based pool allocator in tree-ssa-strlen.c mliska
@ 2015-05-27 18:17   ` Jeff Law
  2015-05-29 13:42   ` Martin Liška
  1 sibling, 0 replies; 108+ messages in thread
From: Jeff Law @ 2015-05-27 18:17 UTC (permalink / raw)
  To: mliska, gcc-patches

On 05/27/2015 07:56 AM, mliska wrote:
> gcc/ChangeLog:
>
> 2015-04-30  Martin Liska  <mliska@suse.cz>
>
> 	* tree-ssa-strlen.c (new_strinfo): Use new type-based pool allocator.
> 	(free_strinfo): Likewise.
> 	(pass_strlen::execute): Likewise.
OK.
jeff

^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 28/35] Change use to type-based pool allocator in ipa-profile.c.
  2015-05-27 14:17 ` [PATCH 28/35] Change use to type-based pool allocator in ipa-profile.c mliska
@ 2015-05-27 18:18   ` Jeff Law
  2015-05-29 13:42     ` Martin Liška
  0 siblings, 1 reply; 108+ messages in thread
From: Jeff Law @ 2015-05-27 18:18 UTC (permalink / raw)
  To: mliska, gcc-patches

On 05/27/2015 07:56 AM, mliska wrote:
> gcc/ChangeLog:
>
> 2015-04-30  Martin Liska  <mliska@suse.cz>
>
> 	* ipa-profile.c (account_time_size): Use new type-based pool allocator.
> 	(ipa_profile_generate_summary): Likewise.
> 	(ipa_profile_read_summary): Likewise.
> 	(ipa_profile): Likewise.
OK.
jeff

^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 27/35] Change use to type-based pool allocator in tree-ssa-structalias.c.
  2015-05-27 14:18 ` [PATCH 27/35] Change use to type-based pool allocator in tree-ssa-structalias.c mliska
@ 2015-05-27 18:20   ` Jeff Law
  2015-05-29 13:42     ` Martin Liška
  0 siblings, 1 reply; 108+ messages in thread
From: Jeff Law @ 2015-05-27 18:20 UTC (permalink / raw)
  To: mliska, gcc-patches

On 05/27/2015 07:56 AM, mliska wrote:
> gcc/ChangeLog:
>
> 2015-04-30  Martin Liska  <mliska@suse.cz>
>
> 	* tree-ssa-structalias.c (new_var_info): Use new type-based pool allocator.
> 	(new_constraint): Likewise.
> 	(init_alias_vars): Likewise.
> 	(delete_points_to_sets): Likewise.
> ---
OK.
Jeff


^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 29/35] Change use to type-based pool allocator in ipa-prop.c.
  2015-05-27 14:20 ` [PATCH 29/35] Change use to type-based pool allocator in ipa-prop.c mliska
@ 2015-05-27 18:22   ` Jeff Law
  2015-05-29 13:42     ` Martin Liška
  0 siblings, 1 reply; 108+ messages in thread
From: Jeff Law @ 2015-05-27 18:22 UTC (permalink / raw)
  To: mliska, gcc-patches

On 05/27/2015 07:56 AM, mliska wrote:
> gcc/ChangeLog:
>
> 2015-04-30  Martin Liska  <mliska@suse.cz>
>
> 	* ipa-prop.c (ipa_set_jf_constant): Use new type-based pool allocator.
> 	(ipa_edge_duplication_hook): Likewise.
> 	(ipa_free_all_structures_after_ipa_cp): Likewise.
> 	(ipa_free_all_structures_after_iinln): Likewise.
OK.
Jeff

^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 33/35] Change use to type-based pool allocator in ira-color.c.
  2015-05-27 14:58 ` [PATCH 33/35] Change use to type-based pool allocator in ira-color.c mliska
@ 2015-05-27 18:24   ` Jeff Law
  2015-05-28 11:23   ` Statically-allocated objects with non-trivial ctors (was Re: [PATCH 33/35] Change use to type-based pool allocator in ira-color.c.) David Malcolm
  1 sibling, 0 replies; 108+ messages in thread
From: Jeff Law @ 2015-05-27 18:24 UTC (permalink / raw)
  To: mliska, gcc-patches

On 05/27/2015 07:56 AM, mliska wrote:
> gcc/ChangeLog:
>
> 2015-04-30  Martin Liska  <mliska@suse.cz>
>
> 	* ira-color.c (init_update_cost_records): Use new type-based pool allocator.
> 	(get_update_cost_record): Likewise.
> 	(free_update_cost_record_list): Likewise.
> 	(finish_update_cost_records): Likewise.
> 	(initiate_cost_update): Likewise.
OK.
Jeff

^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 23/35] Change use to type-based pool allocator in tree-ssa-pre.c.
  2015-05-27 14:17 ` [PATCH 23/35] Change use to type-based pool allocator in tree-ssa-pre.c mliska
@ 2015-05-27 18:59   ` Jeff Law
  2015-05-29 13:41     ` Martin Liška
  0 siblings, 1 reply; 108+ messages in thread
From: Jeff Law @ 2015-05-27 18:59 UTC (permalink / raw)
  To: mliska, gcc-patches

On 05/27/2015 07:56 AM, mliska wrote:
> gcc/ChangeLog:
>
> 2015-04-30  Martin Liska  <mliska@suse.cz>
>
> 	* tree-ssa-pre.c (get_or_alloc_expr_for_name): Use new type-based pool allocator.
> 	(bitmap_set_new): Likewise.
> 	(get_or_alloc_expr_for_constant): Likewise.
> 	(get_or_alloc_expr_for): Likewise.
> 	(phi_translate_1): Likewise.
> 	(compute_avail): Likewise.
> 	(init_pre): Likewise.
> 	(fini_pre): Likewise.
OK.
Jeff

^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 32/35] Change use to type-based pool allocator in ira-build.c.
  2015-05-27 14:17 ` [PATCH 32/35] Change use to type-based pool allocator in ira-build.c mliska
@ 2015-05-27 19:34   ` Jeff Law
  2015-05-29 13:44     ` Martin Liška
  0 siblings, 1 reply; 108+ messages in thread
From: Jeff Law @ 2015-05-27 19:34 UTC (permalink / raw)
  To: mliska, gcc-patches

On 05/27/2015 07:56 AM, mliska wrote:
> gcc/ChangeLog:
>
> 2015-04-30  Martin Liska  <mliska@suse.cz>
>
> 	* ira-build.c (finish_allocnos): Use new type-based pool allocator.
> 	(finish_prefs): Likewise.
> 	(finish_copies): Likewise.
Is this a partial duplicate of patch #34?  Something seems amiss here.


jeff

^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 35/35] Remove old pool allocator.
  2015-05-27 14:17 ` [PATCH 35/35] Remove old pool allocator mliska
@ 2015-05-27 19:40   ` Jeff Law
  2015-05-29 14:11     ` Martin Liška
  0 siblings, 1 reply; 108+ messages in thread
From: Jeff Law @ 2015-05-27 19:40 UTC (permalink / raw)
  To: mliska, gcc-patches

On 05/27/2015 07:56 AM, mliska wrote:
> gcc/ChangeLog:
>
> 2015-04-30  Martin Liska  <mliska@suse.cz>
>
> 	* alloc-pool.c (create_alloc_pool): Remove.
> 	(empty_alloc_pool): Likewise.
> 	(free_alloc_pool): Likewise.
> 	(free_alloc_pool_if_empty): Likewise.
> 	(pool_alloc): Likewise.
> 	(pool_free): Likewise.
> 	* alloc-pool.h: Remove old declarations.
So, the remaining patches to use the type based pool allocator are OK as 
long as they have the same overall structure as the patches that have 
already been OK.   You've got something goofy in #32/#34, which I'll 
assume you'll sort out sensibly.

OK.

jeff

^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 03/35] Change use to type-based pool allocator in lra-lives.c.
  2015-05-27 14:00 ` [PATCH 03/35] Change use to type-based pool allocator in lra-lives.c mliska
  2015-05-27 17:53   ` Jeff Law
@ 2015-05-28  0:48   ` Trevor Saunders
  1 sibling, 0 replies; 108+ messages in thread
From: Trevor Saunders @ 2015-05-28  0:48 UTC (permalink / raw)
  To: mliska; +Cc: gcc-patches

On Wed, May 27, 2015 at 03:56:44PM +0200, mliska wrote:
>  copy_live_range (lra_live_range_t r)
>  {
> -  lra_live_range_t p;
> -
> -  p = (lra_live_range_t) pool_alloc (live_range_pool);
> +  lra_live_range_t p = new lra_live_range;
>    *p = *r;

I think the default copy ctor should be fine so you could just do
new lra_live_range (*r);

Trev

^ permalink raw reply	[flat|nested] 108+ messages in thread

* Statically-allocated objects with non-trivial ctors (was Re: [PATCH 33/35] Change use to type-based pool allocator in ira-color.c.)
  2015-05-27 14:58 ` [PATCH 33/35] Change use to type-based pool allocator in ira-color.c mliska
  2015-05-27 18:24   ` Jeff Law
@ 2015-05-28 11:23   ` David Malcolm
  2015-05-28 17:38     ` Jeff Law
  2015-05-29  5:16     ` Trevor Saunders
  1 sibling, 2 replies; 108+ messages in thread
From: David Malcolm @ 2015-05-28 11:23 UTC (permalink / raw)
  To: mliska; +Cc: gcc-patches

On Wed, 2015-05-27 at 15:56 +0200, mliska wrote:
> gcc/ChangeLog:
> 
> 2015-04-30  Martin Liska  <mliska@suse.cz>
> 
> 	* ira-color.c (init_update_cost_records): Use new type-based pool allocator.
> 	(get_update_cost_record): Likewise.
> 	(free_update_cost_record_list): Likewise.
> 	(finish_update_cost_records): Likewise.
> 	(initiate_cost_update): Likewise.
> ---
>  gcc/ira-color.c | 19 +++++--------------
>  1 file changed, 5 insertions(+), 14 deletions(-)
> 
> diff --git a/gcc/ira-color.c b/gcc/ira-color.c
> index 4750714..4aec98e 100644
> --- a/gcc/ira-color.c
> +++ b/gcc/ira-color.c
> @@ -1166,16 +1166,8 @@ setup_profitable_hard_regs (void)
>     allocnos.  */
>  
>  /* Pool for update cost records.  */
> -static alloc_pool update_cost_record_pool;
> -
> -/* Initiate update cost records.  */
> -static void
> -init_update_cost_records (void)
> -{
> -  update_cost_record_pool
> -    = create_alloc_pool ("update cost records",
> -			 sizeof (struct update_cost_record), 100);
> -}
> +static pool_allocator<update_cost_record> update_cost_record_pool
> +  ("update cost records", 100);

Am I right in thinking that this is a statically-allocated object with a
non-trivial constructor?  i.e. that this constructor has to run before
"main" is entered?

Do our coding guidelines allow for this?  (I've been burned by this
before, on a buggy C++ runtime that didn't manage to support these).
I'm a little nervous about this, touching global state before
"main" (e.g. from the point-of-view of the JIT), though I don't know yet
if this is just a gut reaction, or if there's a valid concern here (I'm
officially on holiday this week, so I haven't had a chance to dig deeply
into these patches yet, sorry).

[...snip...]

> @@ -1264,7 +1256,6 @@ initiate_cost_update (void)
>      = (struct update_cost_queue_elem *) ira_allocate (size);
>    memset (update_cost_queue_elems, 0, size);
>    update_cost_check = 0;
> -  init_update_cost_records ();
>  }

(for reference, this is where the manually-coded initialization call was
made)

Hope this is constructive
Dave

^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 01/35] Introduce new type-based pool allocator.
  2015-05-27 17:50 ` [PATCH 01/35] Introduce new type-based pool allocator Jeff Law
@ 2015-05-28 13:27   ` Martin Liška
  2015-05-28 18:04     ` Jeff Law
  0 siblings, 1 reply; 108+ messages in thread
From: Martin Liška @ 2015-05-28 13:27 UTC (permalink / raw)
  To: Jeff Law, gcc-patches

On 05/27/2015 07:44 PM, Jeff Law wrote:
> On 05/27/2015 07:56 AM, mliska wrote:
>> Hello.
>>
>> Following patch set attempts to replace old-style pool allocator
>> to a type-based one. Moreover, as we utilize  classes and structs that are used
>> just by a pool allocator, these types have overwritten ctors and dtors.
>> Thus, using the allocator is much easier and we shouldn't cast types
>> back and forth. Another benefit can be achieved in future, as we will
>> be able to call a class constructors to correctly register a location,
>> where a memory is allocated (-fgather-detailed-mem-stats).
>>
>> Patch can bootstrap on x86_64-linux-gnu and ppc64-linux-gnu and
>> survives regression tests on x86_64-linux-gnu.
>>
>> Ready for trunk?
>> Thanks,
>> Martin
>>
>> gcc/ChangeLog:
>>
>> 2015-04-30  Martin Liska  <mliska@suse.cz>
>>
>>     * alloc-pool.c (struct alloc_pool_descriptor): Move definition
>>     to header file.
>>     * alloc-pool.h (pool_allocator::pool_allocator): New function.
>>     (pool_allocator::release): Likewise.
>>     (inline pool_allocator::release_if_empty): Likewise.
>>     (inline pool_allocator::~pool_allocator): Likewise.
>>     (pool_allocator::allocate): Likewise.
>>     (pool_allocator::remove): Likewise.
> So on a general note, I don't like changing the size of the structure based on ENABLE_CHECKING.  If we've got other cases where we do this, then I guess it's OK, but if not, I'd prefer not to start doing so.

Hello.

This mechanism has been just adapted. I find it quite useful as we have examples in source code where we
allocate same struct/class types from a various pool. For debugging purpose, it helps to identify if
release operation is called for a correct pool.

>
>
>> ---
>
>> +
>> +  /* Align X to 8.  */
>> +  size_t align_eight (size_t x)
>> +  {
>> +    return (((x+7) >> 3) << 3);
>> +  }
>> +
>> +  const char *m_name;
>> +#ifdef ENABLE_CHECKING
>> +  ALLOC_POOL_ID_TYPE m_id;
>> +#endif
>> +  size_t m_elts_per_block;
>> +
>> +  /* These are the elements that have been allocated at least once and freed.  */
>> +  allocation_pool_list *m_returned_free_list;
>> +
>> +  /* These are the elements that have not yet been allocated out of
>> +     the last block obtained from XNEWVEC.  */
>> +  char* m_virgin_free_list;
>> +
>> +  /* The number of elements in the virgin_free_list that can be
>> +     allocated before needing another block.  */
>> +  size_t m_virgin_elts_remaining;
>> +  size_t m_elts_allocated;
>> +  size_t m_elts_free;
>> +  size_t m_blocks_allocated;
>> +  allocation_pool_list *m_block_list;
>> +  size_t m_block_size;
>> +  size_t m_elt_size;
> Several fields aren't documented.  They're largely self-explanatory, so I won't insist you document those trailing fields.  Your call whether or not to add docs for them.

Ok, even though they are self-explanatory, I'm going to document these fields.

>
>
>> +
>> +  /* Now align the size to a multiple of 4.  */
>> +  size = align_eight (size);
> Why not just aligned to 4, rather than a multiple of 4?  Presumably the extra 4 bytes don't matter in practice?

Also adapted constant, hope it's chosen as the best.

>
>> +
>> +template <typename T>
>> +void
>> +inline pool_allocator<T>::release_if_empty ()
>> +{
>> +  if (m_elts_free == m_elts_allocated)
>> +    release ();
>> +}
> Is the release_if_empty all that useful in practice?

Yes, 02/x uses that feature.

>
> So the big issue in my mind continues to be the additional element in the structure when ENABLE_CHECKING is on.  As mentioned earlier, if we're already doing this elsewhere, then I won't object.  If we aren't, then I don't want to start doing so now.
>
> The rest of the stuff are just minor questions, but nothing which would in my mind stop this from going forward.
>
> Presumably your testing was with the whole series and they can't go in piecemeal, right?

Right, regression tests were run just once for the whole series, but I've tested that every individual patch can be applied and the compiler can be successfully built.
Anyway, I would like to commit all these patches at once (one by one).
Thus, I'm going to wait for approval for the whole series before I'll commit the set.

Thanks,
Martin

>
>
> jeff

^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: Statically-allocated objects with non-trivial ctors (was Re: [PATCH 33/35] Change use to type-based pool allocator in ira-color.c.)
  2015-05-28 11:23   ` Statically-allocated objects with non-trivial ctors (was Re: [PATCH 33/35] Change use to type-based pool allocator in ira-color.c.) David Malcolm
@ 2015-05-28 17:38     ` Jeff Law
  2015-05-28 18:30       ` Richard Biener
  2015-05-29  5:16     ` Trevor Saunders
  1 sibling, 1 reply; 108+ messages in thread
From: Jeff Law @ 2015-05-28 17:38 UTC (permalink / raw)
  To: David Malcolm, mliska; +Cc: gcc-patches

On 05/28/2015 04:42 AM, David Malcolm wrote:
>
> Am I right in thinking that this is a statically-allocated object with a
> non-trivial constructor?  i.e. that this constructor has to run before
> "main" is entered?
>
> Do our coding guidelines allow for this?  (I've been burned by this
> before, on a buggy C++ runtime that didn't manage to support these).
> I'm a little nervous about this, touching global state before
> "main" (e.g. from the point-of-view of the JIT), though I don't know yet
> if this is just a gut reaction, or if there's a valid concern here (I'm
> officially on holiday this week, so I haven't had a chance to dig deeply
> into these patches yet, sorry).
That idiom is used in various places by Martin's patches.   I didn't see 
a strong rhyme or reason behind why it was used over allocating 
something in automatic or heap storage.

As to supporting it, I'm not terribly concerned about other buggy C++ 
runtimes.  GCC bootstraps with GCC, which means we've got our C++ 
runtime.  The only worry becomes the low level bits that we build our 
static ctor/dtor support on top of -- and I haven't seen major problems 
with that for eons.

jeff

^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 01/35] Introduce new type-based pool allocator.
  2015-05-28 13:27   ` Martin Liška
@ 2015-05-28 18:04     ` Jeff Law
  2015-05-29 13:33       ` Martin Liška
  0 siblings, 1 reply; 108+ messages in thread
From: Jeff Law @ 2015-05-28 18:04 UTC (permalink / raw)
  To: Martin Liška, gcc-patches

On 05/28/2015 06:49 AM, Martin Liška wrote:
.
>
> This mechanism has been just adapted. I find it quite useful as we have
> examples in source code where we
> allocate same struct/class types from a various pool. For debugging
> purpose, it helps to identify if
> release operation is called for a correct pool.
I saw that you were following existing practice for the pools in the 
removal patch. I still don't like it as it makes mixing and matching 
objects harder when debugging gcc and if the structure is exposed for 
plugins, then we've got an unnecessary ABI plugin breakage.

I certainly understand how it's useful -- I'm not questioning that.  I'm 
questioning changing the size of structures on ENABLE_CHECKING.

My first inclination would be to include all that stuff unconditionally. 
  If that's too much overhead, then perhaps include the structure 
member, but not bother with any of the bookkeeping except for 
ENABLE_CHECKING.


> Anyway, I would like to commit all these patches at once (one by one).
> Thus, I'm going to wait for approval for the whole series before I'll
> commit the set.
Quite reasonable -- I was mostly trying to make sure I understood the 
testing situation.

I think at this point the whole series is approved, so you can move forward.

jeff

^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: Statically-allocated objects with non-trivial ctors (was Re: [PATCH 33/35] Change use to type-based pool allocator in ira-color.c.)
  2015-05-28 17:38     ` Jeff Law
@ 2015-05-28 18:30       ` Richard Biener
  2015-05-28 18:34         ` Jakub Jelinek
  0 siblings, 1 reply; 108+ messages in thread
From: Richard Biener @ 2015-05-28 18:30 UTC (permalink / raw)
  To: Jeff Law, David Malcolm, mliska; +Cc: gcc-patches

On May 28, 2015 7:06:36 PM GMT+02:00, Jeff Law <law@redhat.com> wrote:
>On 05/28/2015 04:42 AM, David Malcolm wrote:
>>
>> Am I right in thinking that this is a statically-allocated object
>with a
>> non-trivial constructor?  i.e. that this constructor has to run
>before
>> "main" is entered?
>>
>> Do our coding guidelines allow for this?  (I've been burned by this
>> before, on a buggy C++ runtime that didn't manage to support these).
>> I'm a little nervous about this, touching global state before
>> "main" (e.g. from the point-of-view of the JIT), though I don't know
>yet
>> if this is just a gut reaction, or if there's a valid concern here
>(I'm
>> officially on holiday this week, so I haven't had a chance to dig
>deeply
>> into these patches yet, sorry).
>That idiom is used in various places by Martin's patches.   I didn't
>see 
>a strong rhyme or reason behind why it was used over allocating 
>something in automatic or heap storage.
>
>As to supporting it, I'm not terribly concerned about other buggy C++ 
>runtimes.  GCC bootstraps with GCC, which means we've got our C++ 
>runtime.  The only worry becomes the low level bits that we build our 
>static ctor/dtor support on top of -- and I haven't seen major problems
>
>with that for eons.

But we've been trying to avoid this. And the jit might not be too happy about it either.

>jeff


^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: Statically-allocated objects with non-trivial ctors (was Re: [PATCH 33/35] Change use to type-based pool allocator in ira-color.c.)
  2015-05-28 18:30       ` Richard Biener
@ 2015-05-28 18:34         ` Jakub Jelinek
  2015-05-28 19:25           ` Martin Liška
  0 siblings, 1 reply; 108+ messages in thread
From: Jakub Jelinek @ 2015-05-28 18:34 UTC (permalink / raw)
  To: Richard Biener; +Cc: Jeff Law, David Malcolm, mliska, gcc-patches

On Thu, May 28, 2015 at 07:57:39PM +0200, Richard Biener wrote:
> But we've been trying to avoid this. And the jit might not be too happy about it either.

Yeah, we should certainly try to avoid them, especially if it would affect
many variables having to be constructed.

	Jakub

^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: Statically-allocated objects with non-trivial ctors (was Re: [PATCH 33/35] Change use to type-based pool allocator in ira-color.c.)
  2015-05-28 18:34         ` Jakub Jelinek
@ 2015-05-28 19:25           ` Martin Liška
  2015-05-28 20:42             ` Trevor Saunders
  0 siblings, 1 reply; 108+ messages in thread
From: Martin Liška @ 2015-05-28 19:25 UTC (permalink / raw)
  To: gcc-patches

On 05/28/2015 08:03 PM, Jakub Jelinek wrote:
> On Thu, May 28, 2015 at 07:57:39PM +0200, Richard Biener wrote:
>> But we've been trying to avoid this. And the jit might not be too happy about it either.
>
> Yeah, we should certainly try to avoid them, especially if it would affect
> many variables having to be constructed.
>
> 	Jakub
>

Ok, thus I will do it as before my modifications:

static pool_allocator <update_cost_record> *update_cost_record_pool = NULL;

/* Initiate update cost records.  */
static void
init_update_cost_records (void)
{
  update_cost_record_pool = new pool_allocator <update_cost_record>
    ("update cost records", 100);
}

I'm going to migrate rest of patches that use the same construct.

Thanks,
Martin

^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: Statically-allocated objects with non-trivial ctors (was Re: [PATCH 33/35] Change use to type-based pool allocator in ira-color.c.)
  2015-05-28 19:25           ` Martin Liška
@ 2015-05-28 20:42             ` Trevor Saunders
  0 siblings, 0 replies; 108+ messages in thread
From: Trevor Saunders @ 2015-05-28 20:42 UTC (permalink / raw)
  To: Martin Liška; +Cc: gcc-patches

On Thu, May 28, 2015 at 08:47:16PM +0200, Martin Liška wrote:
> On 05/28/2015 08:03 PM, Jakub Jelinek wrote:
> >On Thu, May 28, 2015 at 07:57:39PM +0200, Richard Biener wrote:
> >>But we've been trying to avoid this. And the jit might not be too happy about it either.
> >
> >Yeah, we should certainly try to avoid them, especially if it would affect
> >many variables having to be constructed.
> >
> >	Jakub
> >
> 
> Ok, thus I will do it as before my modifications:
> 
> static pool_allocator <update_cost_record> *update_cost_record_pool = NULL;
> 
> /* Initiate update cost records.  */
> static void
> init_update_cost_records (void)
> {
>  update_cost_record_pool = new pool_allocator <update_cost_record>
>    ("update cost records", 100);
> }
> 
> I'm going to migrate rest of patches that use the same construct.


Hrm, why not just change pool_allocator so it does the first allocation
on the first alloc and just initializes everything to null / 0?  Then
the ctor would be close to trivial.  Then if you really care about the
stuff gcc doesn't optimize away you could add a special class
static_pool_allocator (you might also need to hack in a way to get the
c++ fe to do constexpr / defaulted functions).

Trev

> 
> Thanks,
> Martin
> 

^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: Statically-allocated objects with non-trivial ctors (was Re: [PATCH 33/35] Change use to type-based pool allocator in ira-color.c.)
  2015-05-28 11:23   ` Statically-allocated objects with non-trivial ctors (was Re: [PATCH 33/35] Change use to type-based pool allocator in ira-color.c.) David Malcolm
  2015-05-28 17:38     ` Jeff Law
@ 2015-05-29  5:16     ` Trevor Saunders
  1 sibling, 0 replies; 108+ messages in thread
From: Trevor Saunders @ 2015-05-29  5:16 UTC (permalink / raw)
  To: David Malcolm; +Cc: mliska, gcc-patches

On Thu, May 28, 2015 at 06:42:57AM -0400, David Malcolm wrote:
> On Wed, 2015-05-27 at 15:56 +0200, mliska wrote:
> > gcc/ChangeLog:
> > 
> > 2015-04-30  Martin Liska  <mliska@suse.cz>
> > 
> > 	* ira-color.c (init_update_cost_records): Use new type-based pool allocator.
> > 	(get_update_cost_record): Likewise.
> > 	(free_update_cost_record_list): Likewise.
> > 	(finish_update_cost_records): Likewise.
> > 	(initiate_cost_update): Likewise.
> > ---
> >  gcc/ira-color.c | 19 +++++--------------
> >  1 file changed, 5 insertions(+), 14 deletions(-)
> > 
> > diff --git a/gcc/ira-color.c b/gcc/ira-color.c
> > index 4750714..4aec98e 100644
> > --- a/gcc/ira-color.c
> > +++ b/gcc/ira-color.c
> > @@ -1166,16 +1166,8 @@ setup_profitable_hard_regs (void)
> >     allocnos.  */
> >  
> >  /* Pool for update cost records.  */
> > -static alloc_pool update_cost_record_pool;
> > -
> > -/* Initiate update cost records.  */
> > -static void
> > -init_update_cost_records (void)
> > -{
> > -  update_cost_record_pool
> > -    = create_alloc_pool ("update cost records",
> > -			 sizeof (struct update_cost_record), 100);
> > -}
> > +static pool_allocator<update_cost_record> update_cost_record_pool
> > +  ("update cost records", 100);
> 
> Am I right in thinking that this is a statically-allocated object with a
> non-trivial constructor?  i.e. that this constructor has to run before
> "main" is entered?

yes though I think it'd be pretty easy to make it basically trivial but
with a static initializer because gcc doesn't optimize them well, and
with a bit more work we could probably get rid of the static initializer
without actually fixing gcc.

> Do our coding guidelines allow for this?  (I've been burned by this
> before, on a buggy C++ runtime that didn't manage to support these).

I'm pretty sure there already are some iirc the pretty printers are one
example.

> I'm a little nervous about this, touching global state before
> "main" (e.g. from the point-of-view of the JIT), though I don't know yet
> if this is just a gut reaction, or if there's a valid concern here (I'm

afaik it should work fine.  Of course this is global data which isn't
great, but that's a preexisting problem.

Trev

^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 01/35] Introduce new type-based pool allocator.
  2015-05-28 18:04     ` Jeff Law
@ 2015-05-29 13:33       ` Martin Liška
  2015-05-30  5:14         ` Jeff Law
  2015-06-02 10:10         ` Andreas Schwab
  0 siblings, 2 replies; 108+ messages in thread
From: Martin Liška @ 2015-05-29 13:33 UTC (permalink / raw)
  To: Jeff Law, gcc-patches

[-- Attachment #1: Type: text/plain, Size: 1866 bytes --]

On 05/28/2015 07:15 PM, Jeff Law wrote:
> On 05/28/2015 06:49 AM, Martin Liška wrote:
> .
>>
>> This mechanism has been just adapted. I find it quite useful as we have
>> examples in source code where we
>> allocate same struct/class types from a various pool. For debugging
>> purpose, it helps to identify if
>> release operation is called for a correct pool.
> I saw that you were following existing practice for the pools in the removal patch. I still don't like it as it makes mixing and matching objects harder when debugging gcc and if the structure is exposed for plugins, then we've got an unnecessary ABI plugin breakage.
>
> I certainly understand how it's useful -- I'm not questioning that.  I'm questioning changing the size of structures on ENABLE_CHECKING.
>
> My first inclination would be to include all that stuff unconditionally.  If that's too much overhead, then perhaps include the structure member, but not bother with any of the bookkeeping except for ENABLE_CHECKING.

Hi.

Updated version of patch removes ENABLE_CHECKING in the struct definition.

News in the patchset I'm going to re-send:
+ Changelog entries are fixed for spaces
+ Each patch passes ./contrib/check_GNU_style.sh script
+ pool_allocator::pool_allocator is a trivial constructor and first allocation launches initialization
+ some patches are squashed as were mentioned multiple time (ira-color.c, ira-build.c)

The patch set survives x86_64-linux-pc bootstrap, I'm going to re-run regression tests.

Thanks,
Martin

>
>
>> Anyway, I would like to commit all these patches at once (one by one).
>> Thus, I'm going to wait for approval for the whole series before I'll
>> commit the set.
> Quite reasonable -- I was mostly trying to make sure I understood the testing situation.
>
> I think at this point the whole series is approved, so you can move forward.
>
> jeff
>


[-- Attachment #2: 0001-Introduce-new-type-based-pool-allocator.patch --]
[-- Type: text/x-patch, Size: 14998 bytes --]

From b551d2349b342776213f518de0835ab54fa9fa03 Mon Sep 17 00:00:00 2001
From: mliska <mliska@suse.cz>
Date: Wed, 27 May 2015 15:56:44 +0200
Subject: [PATCH 01/32] Introduce new type-based pool allocator.

Hello.

Following patch set attempts to replace old-style pool allocator
to a type-based one. Moreover, as we utilize  classes and structs that are used
just by a pool allocator, these types have overwritten ctors and dtors.
Thus, using the allocator is much easier and we shouldn't cast types
back and forth. Another benefit can be achieved in future, as we will
be able to call a class constructors to correctly register a location,
where a memory is allocated (-fgather-detailed-mem-stats).

Patch can bootstrap on x86_64-linux-gnu and ppc64-linux-gnu and
survives regression tests on x86_64-linux-gnu.

Ready for trunk?
Thanks,
Martin

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* alloc-pool.c (struct alloc_pool_descriptor): Move definition
	to header file.
	* alloc-pool.h (pool_allocator::pool_allocator): New function.
	(pool_allocator::release): Likewise.
	(inline pool_allocator::release_if_empty): Likewise.
	(inline pool_allocator::~pool_allocator): Likewise.
	(pool_allocator::allocate): Likewise.
	(pool_allocator::remove): Likewise.
---
 gcc/alloc-pool.c |  33 +----
 gcc/alloc-pool.h | 380 ++++++++++++++++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 383 insertions(+), 30 deletions(-)

diff --git a/gcc/alloc-pool.c b/gcc/alloc-pool.c
index 81909d8..0bea7a6 100644
--- a/gcc/alloc-pool.c
+++ b/gcc/alloc-pool.c
@@ -25,6 +25,8 @@ along with GCC; see the file COPYING3.  If not see
 #include "hash-table.h"
 #include "hash-map.h"
 
+ALLOC_POOL_ID_TYPE last_id;
+
 #define align_eight(x) (((x+7) >> 3) << 3)
 
 /* The internal allocation object.  */
@@ -58,36 +60,10 @@ typedef struct allocation_object_def
 #define USER_PTR_FROM_ALLOCATION_OBJECT_PTR(X)				\
    ((void *) (((allocation_object *) (X))->u.data))
 
-#ifdef ENABLE_CHECKING
-/* Last used ID.  */
-static ALLOC_POOL_ID_TYPE last_id;
-#endif
-
-/* Store information about each particular alloc_pool.  Note that this
-   will underestimate the amount the amount of storage used by a small amount:
-   1) The overhead in a pool is not accounted for.
-   2) The unallocated elements in a block are not accounted for.  Note
-   that this can at worst case be one element smaller that the block
-   size for that pool.  */
-struct alloc_pool_descriptor
-{
-  /* Number of pools allocated.  */
-  unsigned long created;
-  /* Gross allocated storage.  */
-  unsigned long allocated;
-  /* Amount of currently active storage. */
-  unsigned long current;
-  /* Peak amount of storage used.  */
-  unsigned long peak;
-  /* Size of element in the pool.  */
-  int elt_size;
-};
-
 /* Hashtable mapping alloc_pool names to descriptors.  */
-static hash_map<const char *, alloc_pool_descriptor> *alloc_pool_hash;
+hash_map<const char *, alloc_pool_descriptor> *alloc_pool_hash;
 
-/* For given name, return descriptor, create new if needed.  */
-static struct alloc_pool_descriptor *
+struct alloc_pool_descriptor *
 allocate_pool_descriptor (const char *name)
 {
   if (!alloc_pool_hash)
@@ -96,6 +72,7 @@ allocate_pool_descriptor (const char *name)
   return &alloc_pool_hash->get_or_insert (name);
 }
 
+
 /* Create a pool of things of size SIZE, with NUM in each block we
    allocate.  */
 
diff --git a/gcc/alloc-pool.h b/gcc/alloc-pool.h
index 0c30711..ec671dc 100644
--- a/gcc/alloc-pool.h
+++ b/gcc/alloc-pool.h
@@ -20,6 +20,8 @@ along with GCC; see the file COPYING3.  If not see
 #ifndef ALLOC_POOL_H
 #define ALLOC_POOL_H
 
+#include "hash-map.h"
+
 typedef unsigned long ALLOC_POOL_ID_TYPE;
 
 typedef struct alloc_pool_list_def
@@ -31,9 +33,7 @@ typedef struct alloc_pool_list_def
 typedef struct alloc_pool_def
 {
   const char *name;
-#ifdef ENABLE_CHECKING
   ALLOC_POOL_ID_TYPE id;
-#endif
   size_t elts_per_block;
 
   /* These are the elements that have been allocated at least once and freed.  */
@@ -63,4 +63,380 @@ extern void free_alloc_pool_if_empty (alloc_pool *);
 extern void *pool_alloc (alloc_pool) ATTRIBUTE_MALLOC;
 extern void pool_free (alloc_pool, void *);
 extern void dump_alloc_pool_statistics (void);
+
+typedef unsigned long ALLOC_POOL_ID_TYPE;
+
+/* Type based memory pool allocator.  */
+template <typename T>
+class pool_allocator
+{
+public:
+  /* Default constructor for pool allocator called NAME.  Each block
+     has NUM elements.  The allocator support EXTRA_SIZE and can
+     potentially IGNORE_TYPE_SIZE.  */
+  pool_allocator (const char *name, size_t num, size_t extra_size = 0,
+		  bool ignore_type_size = false);
+
+  /* Default destructor.  */
+  ~pool_allocator ();
+
+  /* Release internal data structures.  */
+  void release ();
+
+  /* Release internal data structures if the pool has not allocated
+     an object.  */
+  void release_if_empty ();
+
+  /* Allocate a new object.  */
+  T *allocate () ATTRIBUTE_MALLOC;
+
+  /* Release OBJECT that must come from the pool.  */
+  void remove (T *object);
+
+private:
+  struct allocation_pool_list
+  {
+    allocation_pool_list *next;
+  };
+
+  /* Initialize a pool allocator.  */
+  void initialize ();
+
+  template <typename U>
+  struct allocation_object
+  {
+    /* The ID of alloc pool which the object was allocated from.  */
+    ALLOC_POOL_ID_TYPE id;
+
+    union
+      {
+	/* The data of the object.  */
+	char data[1];
+
+	/* Because we want any type of data to be well aligned after the ID,
+	   the following elements are here.  They are never accessed so
+	   the allocated object may be even smaller than this structure.
+	   We do not care about alignment for floating-point types.  */
+	char *align_p;
+	int64_t align_i;
+      } u;
+
+    static inline allocation_object<U> *get_instance (void *data_ptr)
+    {
+      return (allocation_object<U> *)(((char *)(data_ptr))
+				      - offsetof (allocation_object<U>,
+						  u.data));
+    }
+
+    static inline U *get_data (void *instance_ptr)
+    {
+      return (U*)(((allocation_object<U> *) instance_ptr)->u.data);
+    }
+  };
+
+  /* Align X to 8.  */
+  size_t align_eight (size_t x)
+  {
+    return (((x+7) >> 3) << 3);
+  }
+
+  const char *m_name;
+  ALLOC_POOL_ID_TYPE m_id;
+  size_t m_elts_per_block;
+
+  /* These are the elements that have been allocated at least once
+     and freed.  */
+  allocation_pool_list *m_returned_free_list;
+
+  /* These are the elements that have not yet been allocated out of
+     the last block obtained from XNEWVEC.  */
+  char* m_virgin_free_list;
+
+  /* The number of elements in the virgin_free_list that can be
+     allocated before needing another block.  */
+  size_t m_virgin_elts_remaining;
+  /* The number of elements that are allocated.  */
+  size_t m_elts_allocated;
+  /* The number of elements that are released.  */
+  size_t m_elts_free;
+  /* The number of allocated blocks.  */
+  size_t m_blocks_allocated;
+  /* List of blocks that are used to allocate new objects.  */
+  allocation_pool_list *m_block_list;
+  /* The number of elements in a block.  */
+  size_t m_block_size;
+  /* Size of a pool elements in bytes.  */
+  size_t m_elt_size;
+  /* Flag whether we should ignore the size of a type.  */
+  bool m_ignore_type_size;
+  /* Extra size in bytes that should be allocated for each element.  */
+  size_t m_extra_size;
+  /* Flag if a pool allocator is initialized.  */
+  bool m_initialized;
+};
+
+/* Last used ID.  */
+extern ALLOC_POOL_ID_TYPE last_id;
+
+/* Store information about each particular alloc_pool.  Note that this
+   will underestimate the amount of storage used by a small amount:
+   1) The overhead in a pool is not accounted for.
+   2) The unallocated elements in a block are not accounted for.  Note
+   that this can at worst case be one element smaller that the block
+   size for that pool.  */
+struct alloc_pool_descriptor
+{
+  /* Number of pools allocated.  */
+  unsigned long created;
+  /* Gross allocated storage.  */
+  unsigned long allocated;
+  /* Amount of currently active storage.  */
+  unsigned long current;
+  /* Peak amount of storage used.  */
+  unsigned long peak;
+  /* Size of element in the pool.  */
+  int elt_size;
+};
+
+
+/* Hashtable mapping alloc_pool names to descriptors.  */
+extern hash_map<const char *, alloc_pool_descriptor> *alloc_pool_hash;
+
+/* For given name, return descriptor, create new if needed.  */
+alloc_pool_descriptor *
+allocate_pool_descriptor (const char *name);
+
+template <typename T>
+inline
+pool_allocator<T>::pool_allocator (const char *name, size_t num,
+				   size_t extra_size, bool ignore_type_size):
+  m_name (name), m_elts_per_block (num), m_returned_free_list (NULL),
+  m_virgin_free_list (NULL), m_virgin_elts_remaining (0), m_elts_allocated (0),
+  m_elts_free (0), m_blocks_allocated (0), m_block_list (NULL),
+  m_ignore_type_size (ignore_type_size), m_extra_size (extra_size),
+  m_initialized (false) {}
+
+/* Initialize a pool allocator.  */
+
+template <typename T>
+void
+pool_allocator<T>::initialize ()
+{
+  gcc_checking_assert (!m_initialized);
+  m_initialized = true;
+
+  size_t header_size;
+  size_t size = (m_ignore_type_size ? 0 : sizeof (T)) + m_extra_size;
+
+  gcc_checking_assert (m_name);
+
+  /* Make size large enough to store the list header.  */
+  if (size < sizeof (allocation_pool_list*))
+    size = sizeof (allocation_pool_list*);
+
+  /* Now align the size to a multiple of 8.  */
+  size = align_eight (size);
+
+  /* Add the aligned size of ID.  */
+  size += offsetof (allocation_object<T>, u.data);
+
+  /* Um, we can't really allocate 0 elements per block.  */
+  gcc_checking_assert (m_elts_per_block);
+
+  m_elt_size = size;
+
+  if (GATHER_STATISTICS)
+    {
+      alloc_pool_descriptor *desc = allocate_pool_descriptor (m_name);
+      desc->elt_size = size;
+      desc->created++;
+    }
+
+  /* List header size should be a multiple of 8.  */
+  header_size = align_eight (sizeof (allocation_pool_list));
+
+  m_block_size = (size * m_elts_per_block) + header_size;
+
+#ifdef ENABLE_CHECKING
+  /* Increase the last used ID and use it for this pool.
+     ID == 0 is used for free elements of pool so skip it.  */
+  last_id++;
+  if (last_id == 0)
+    last_id++;
+
+  m_id = last_id;
+#endif
+
+}
+
+/* Free all memory allocated for the given memory pool.  */
+template <typename T>
+inline void
+pool_allocator<T>::release ()
+{
+  if (!m_initialized)
+    return;
+
+  allocation_pool_list *block, *next_block;
+
+  /* Free each block allocated to the pool.  */
+  for (block = m_block_list; block != NULL; block = next_block)
+    {
+      next_block = block->next;
+      free (block);
+    }
+
+  if (GATHER_STATISTICS && false)
+    {
+      alloc_pool_descriptor *desc = allocate_pool_descriptor (m_name);
+      desc->current -= (m_elts_allocated - m_elts_free) * m_elt_size;
+    }
+
+  m_returned_free_list = NULL;
+  m_virgin_free_list = NULL;
+  m_virgin_elts_remaining = 0;
+  m_elts_allocated = 0;
+  m_elts_free = 0;
+  m_blocks_allocated = 0;
+  m_block_list = NULL;
+}
+
+template <typename T>
+void
+inline pool_allocator<T>::release_if_empty ()
+{
+  if (m_elts_free == m_elts_allocated)
+    release ();
+}
+
+template <typename T>
+inline pool_allocator<T>::~pool_allocator ()
+{
+  release ();
+}
+
+/* Allocates one element from the pool specified.  */
+template <typename T>
+inline T *
+pool_allocator<T>::allocate ()
+{
+  if (!m_initialized)
+    initialize ();
+
+  allocation_pool_list *header;
+#ifdef ENABLE_VALGRIND_ANNOTATIONS
+  int size;
+#endif
+
+  if (GATHER_STATISTICS)
+    {
+      alloc_pool_descriptor *desc = allocate_pool_descriptor (m_name);
+
+      desc->allocated += m_elt_size;
+      desc->current += m_elt_size;
+      if (desc->peak < desc->current)
+	desc->peak = desc->current;
+    }
+
+#ifdef ENABLE_VALGRIND_ANNOTATIONS
+  size = m_elt_size - offsetof (allocation_object<T>, u.data);
+#endif
+
+  /* If there are no more free elements, make some more!.  */
+  if (!m_returned_free_list)
+    {
+      char *block;
+      if (!m_virgin_elts_remaining)
+	{
+	  allocation_pool_list *block_header;
+
+	  /* Make the block.  */
+	  block = XNEWVEC (char, m_block_size);
+	  block_header = (allocation_pool_list*) block;
+	  block += align_eight (sizeof (allocation_pool_list));
+
+	  /* Throw it on the block list.  */
+	  block_header->next = m_block_list;
+	  m_block_list = block_header;
+
+	  /* Make the block available for allocation.  */
+	  m_virgin_free_list = block;
+	  m_virgin_elts_remaining = m_elts_per_block;
+
+	  /* Also update the number of elements we have free/allocated, and
+	     increment the allocated block count.  */
+	  m_elts_allocated += m_elts_per_block;
+	  m_elts_free += m_elts_per_block;
+	  m_blocks_allocated += 1;
+	}
+
+      /* We now know that we can take the first elt off the virgin list and
+	 put it on the returned list.  */
+      block = m_virgin_free_list;
+      header = (allocation_pool_list*) allocation_object<T>::get_data (block);
+      header->next = NULL;
+#ifdef ENABLE_CHECKING
+      /* Mark the element to be free.  */
+      ((allocation_object<T> *) block)->id = 0;
+#endif
+      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (header,size));
+      m_returned_free_list = header;
+      m_virgin_free_list += m_elt_size;
+      m_virgin_elts_remaining--;
+
+    }
+
+  /* Pull the first free element from the free list, and return it.  */
+  header = m_returned_free_list;
+  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (header, sizeof (*header)));
+  m_returned_free_list = header->next;
+  m_elts_free--;
+
+#ifdef ENABLE_CHECKING
+  /* Set the ID for element.  */
+  allocation_object<T>::get_instance (header)->id = m_id;
+#endif
+  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (header, size));
+
+  /* Call default constructor.  */
+  return (T *)(header);
+}
+
+/* Puts PTR back on POOL's free list.  */
+template <typename T>
+void
+pool_allocator<T>::remove (T *object)
+{
+  gcc_checking_assert (m_initialized);
+
+  allocation_pool_list *header;
+  int size;
+  size = m_elt_size - offsetof (allocation_object<T>, u.data);
+
+#ifdef ENABLE_CHECKING
+  gcc_assert (object
+	      /* Check if we free more than we allocated, which is Bad (TM).  */
+	      && m_elts_free < m_elts_allocated
+	      /* Check whether the PTR was allocated from POOL.  */
+	      && m_id == allocation_object<T>::get_instance (object)->id);
+
+  memset (object, 0xaf, size);
+
+  /* Mark the element to be free.  */
+  allocation_object<T>::get_instance (object)->id = 0;
+#endif
+
+  header = (allocation_pool_list*) object;
+  header->next = m_returned_free_list;
+  m_returned_free_list = header;
+  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
+  m_elts_free++;
+
+  if (GATHER_STATISTICS)
+    {
+      alloc_pool_descriptor *desc = allocate_pool_descriptor (m_name);
+      desc->current -= m_elt_size;
+    }
+}
+
 #endif
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 02/35] Change use to type-based pool allocator in et-forest.c.
  2015-05-27 17:50   ` Jeff Law
@ 2015-05-29 13:33     ` Martin Liška
  0 siblings, 0 replies; 108+ messages in thread
From: Martin Liška @ 2015-05-29 13:33 UTC (permalink / raw)
  To: Jeff Law, gcc-patches

[-- Attachment #1: Type: text/plain, Size: 407 bytes --]

On 05/27/2015 07:45 PM, Jeff Law wrote:
> On 05/27/2015 07:56 AM, mliska wrote:
>> gcc/ChangeLog:
>>
>> 2015-04-30  Martin Liska  <mliska@suse.cz>
>>
>>     * et-forest.c (et_new_occ): Use new type-based pool allocator.
>>     (et_new_tree): Likewise.
>>     (et_free_tree): Likewise.
>>     (et_free_tree_force): Likewise.
>>     (et_free_pools): Likewise.
>>     (et_split): Likewise.
> OK.
> jeff
>

v2.

[-- Attachment #2: 0002-Change-use-to-type-based-pool-allocator-in-et-forest.patch --]
[-- Type: text/x-patch, Size: 4667 bytes --]

From 2d1aa571663a6f503e2722d19e64ba18b2db0403 Mon Sep 17 00:00:00 2001
From: mliska <mliska@suse.cz>
Date: Wed, 27 May 2015 15:56:44 +0200
Subject: [PATCH 02/32] Change use to type-based pool allocator in et-forest.c.

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* et-forest.c (et_new_occ): Use new type-based pool allocator.
	(et_new_tree): Likewise.
	(et_free_tree): Likewise.
	(et_free_tree_force): Likewise.
	(et_free_pools): Likewise.
	(et_split): Likewise.
---
 gcc/dominance.c |  1 +
 gcc/et-forest.c | 48 +++++++++++++++++++++++++++++-------------------
 gcc/et-forest.h | 15 +++++++++++++++
 3 files changed, 45 insertions(+), 19 deletions(-)

diff --git a/gcc/dominance.c b/gcc/dominance.c
index 09c8c90..f3c99ba 100644
--- a/gcc/dominance.c
+++ b/gcc/dominance.c
@@ -51,6 +51,7 @@
 #include "cfganal.h"
 #include "basic-block.h"
 #include "diagnostic-core.h"
+#include "alloc-pool.h"
 #include "et-forest.h"
 #include "timevar.h"
 #include "hash-map.h"
diff --git a/gcc/et-forest.c b/gcc/et-forest.c
index da6b7d7..4e55b63 100644
--- a/gcc/et-forest.c
+++ b/gcc/et-forest.c
@@ -25,8 +25,8 @@ License along with libiberty; see the file COPYING3.  If not see
 #include "config.h"
 #include "system.h"
 #include "coretypes.h"
-#include "et-forest.h"
 #include "alloc-pool.h"
+#include "et-forest.h"
 
 /* We do not enable this with ENABLE_CHECKING, since it is awfully slow.  */
 #undef DEBUG_ET
@@ -59,10 +59,26 @@ struct et_occ
 				   on the path to the root.  */
   struct et_occ *min_occ;	/* The occurrence in the subtree with the minimal
 				   depth.  */
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove ((et_occ *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<et_occ> pool;
+
 };
 
-static alloc_pool et_nodes;
-static alloc_pool et_occurrences;
+pool_allocator<et_node> et_node::pool ("et_nodes pool", 300);
+pool_allocator<et_occ> et_occ::pool ("et_occ pool", 300);
 
 /* Changes depth of OCC to D.  */
 
@@ -449,11 +465,7 @@ et_splay (struct et_occ *occ)
 static struct et_occ *
 et_new_occ (struct et_node *node)
 {
-  struct et_occ *nw;
-
-  if (!et_occurrences)
-    et_occurrences = create_alloc_pool ("et_occ pool", sizeof (struct et_occ), 300);
-  nw = (struct et_occ *) pool_alloc (et_occurrences);
+  et_occ *nw = new et_occ;
 
   nw->of = node;
   nw->parent = NULL;
@@ -474,9 +486,7 @@ et_new_tree (void *data)
 {
   struct et_node *nw;
 
-  if (!et_nodes)
-    et_nodes = create_alloc_pool ("et_node pool", sizeof (struct et_node), 300);
-  nw = (struct et_node *) pool_alloc (et_nodes);
+  nw = new et_node;
 
   nw->data = data;
   nw->father = NULL;
@@ -501,8 +511,8 @@ et_free_tree (struct et_node *t)
   if (t->father)
     et_split (t);
 
-  pool_free (et_occurrences, t->rightmost_occ);
-  pool_free (et_nodes, t);
+  delete t->rightmost_occ;
+  delete t;
 }
 
 /* Releases et tree T without maintaining other nodes.  */
@@ -510,10 +520,10 @@ et_free_tree (struct et_node *t)
 void
 et_free_tree_force (struct et_node *t)
 {
-  pool_free (et_occurrences, t->rightmost_occ);
+  delete t->rightmost_occ;
   if (t->parent_occ)
-    pool_free (et_occurrences, t->parent_occ);
-  pool_free (et_nodes, t);
+    delete t->parent_occ;
+  delete t;
 }
 
 /* Release the alloc pools, if they are empty.  */
@@ -521,8 +531,8 @@ et_free_tree_force (struct et_node *t)
 void
 et_free_pools (void)
 {
-  free_alloc_pool_if_empty (&et_occurrences);
-  free_alloc_pool_if_empty (&et_nodes);
+  et_occ::pool.release_if_empty ();
+  et_node::pool.release_if_empty ();
 }
 
 /* Sets father of et tree T to FATHER.  */
@@ -614,7 +624,7 @@ et_split (struct et_node *t)
   rmost->depth = 0;
   rmost->min = 0;
 
-  pool_free (et_occurrences, p_occ);
+  delete p_occ;
 
   /* Update the tree.  */
   if (father->son == t)
diff --git a/gcc/et-forest.h b/gcc/et-forest.h
index b507c64..15c582d 100644
--- a/gcc/et-forest.h
+++ b/gcc/et-forest.h
@@ -66,6 +66,21 @@ struct et_node
 
   struct et_occ *rightmost_occ;	/* The rightmost occurrence.  */
   struct et_occ *parent_occ;	/* The occurrence of the parent node.  */
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove ((et_node *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<et_node> pool;
 };
 
 struct et_node *et_new_tree (void *data);
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 04/35] Change use to type-based pool allocator in lra.c.
  2015-05-27 17:55   ` Jeff Law
@ 2015-05-29 13:34     ` Martin Liška
  0 siblings, 0 replies; 108+ messages in thread
From: Martin Liška @ 2015-05-29 13:34 UTC (permalink / raw)
  To: gcc-patches

[-- Attachment #1: Type: text/plain, Size: 601 bytes --]

On 05/27/2015 07:50 PM, Jeff Law wrote:
> On 05/27/2015 07:56 AM, mliska wrote:
>> gcc/ChangeLog:
>>
>> 2015-04-30  Martin Liska  <mliska@suse.cz>
>>
>>     * lra.c (init_insn_regs): Use new type-based pool allocator.
>>     (new_insn_reg) Likewise.
>>     (free_insn_reg) Likewise.
>>     (free_insn_regs) Likewise.
>>     (finish_insn_regs) Likewise.
>>     (init_insn_recog_data) Likewise.
>>     (init_reg_info) Likewise.
>>     (finish_reg_info) Likewise.
>>     (lra_free_copies) Likewise.
>>     (lra_create_copy) Likewise.
>>     (invalidate_insn_data_regno_info) Likewise.
> OK.
> jeff
>

v2

[-- Attachment #2: 0004-Change-use-to-type-based-pool-allocator-in-lra.c.patch --]
[-- Type: text/x-patch, Size: 5909 bytes --]

From d4b64952961e379f6d26d609369dc0de4fd0236f Mon Sep 17 00:00:00 2001
From: mliska <mliska@suse.cz>
Date: Wed, 27 May 2015 15:56:45 +0200
Subject: [PATCH 04/32] Change use to type-based pool allocator in lra.c.

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* lra.c (init_insn_regs): Use new type-based pool allocator.
	(new_insn_reg) Likewise.
	(free_insn_reg) Likewise.
	(free_insn_regs) Likewise.
	(finish_insn_regs) Likewise.
	(init_insn_recog_data) Likewise.
	(init_reg_info) Likewise.
	(finish_reg_info) Likewise.
	(lra_free_copies) Likewise.
	(lra_create_copy) Likewise.
	(invalidate_insn_data_regno_info) Likewise.
---
 gcc/lra-int.h | 31 +++++++++++++++++++++++++++++++
 gcc/lra.c     | 40 ++++++++++------------------------------
 2 files changed, 41 insertions(+), 30 deletions(-)

diff --git a/gcc/lra-int.h b/gcc/lra-int.h
index 42e4a54..25bd3ce 100644
--- a/gcc/lra-int.h
+++ b/gcc/lra-int.h
@@ -84,6 +84,22 @@ struct lra_copy
   int regno1, regno2;
   /* Next copy with correspondingly REGNO1 and REGNO2.	*/
   lra_copy_t regno1_next, regno2_next;
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove ((lra_copy *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<lra_copy> pool;
+
 };
 
 /* Common info about a register (pseudo or hard register).  */
@@ -191,6 +207,21 @@ struct lra_insn_reg
   int regno;
   /* Next reg info of the same insn.  */
   struct lra_insn_reg *next;
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove ((lra_insn_reg *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<lra_insn_reg> pool;
 };
 
 /* Static part (common info for insns with the same ICODE) of LRA
diff --git a/gcc/lra.c b/gcc/lra.c
index 7440668..456f618 100644
--- a/gcc/lra.c
+++ b/gcc/lra.c
@@ -550,15 +550,7 @@ lra_update_dups (lra_insn_recog_data_t id, signed char *nops)
    insns.  */
 
 /* Pools for insn reg info.  */
-static alloc_pool insn_reg_pool;
-
-/* Initiate pool for insn reg info.  */
-static void
-init_insn_regs (void)
-{
-  insn_reg_pool
-    = create_alloc_pool ("insn regs", sizeof (struct lra_insn_reg), 100);
-}
+pool_allocator<lra_insn_reg> lra_insn_reg::pool ("insn regs", 100);
 
 /* Create LRA insn related info about a reference to REGNO in INSN with
    TYPE (in/out/inout), biggest reference mode MODE, flag that it is
@@ -570,9 +562,7 @@ new_insn_reg (rtx_insn *insn, int regno, enum op_type type,
 	      machine_mode mode,
 	      bool subreg_p, bool early_clobber, struct lra_insn_reg *next)
 {
-  struct lra_insn_reg *ir;
-
-  ir = (struct lra_insn_reg *) pool_alloc (insn_reg_pool);
+  lra_insn_reg *ir = new lra_insn_reg ();
   ir->type = type;
   ir->biggest_mode = mode;
   if (GET_MODE_SIZE (mode) > GET_MODE_SIZE (lra_reg_info[regno].biggest_mode)
@@ -585,13 +575,6 @@ new_insn_reg (rtx_insn *insn, int regno, enum op_type type,
   return ir;
 }
 
-/* Free insn reg info IR.  */
-static void
-free_insn_reg (struct lra_insn_reg *ir)
-{
-  pool_free (insn_reg_pool, ir);
-}
-
 /* Free insn reg info list IR.	*/
 static void
 free_insn_regs (struct lra_insn_reg *ir)
@@ -601,7 +584,7 @@ free_insn_regs (struct lra_insn_reg *ir)
   for (; ir != NULL; ir = next_ir)
     {
       next_ir = ir->next;
-      free_insn_reg (ir);
+      delete ir;
     }
 }
 
@@ -609,7 +592,7 @@ free_insn_regs (struct lra_insn_reg *ir)
 static void
 finish_insn_regs (void)
 {
-  free_alloc_pool (insn_reg_pool);
+  lra_insn_reg::pool.release ();
 }
 
 \f
@@ -737,7 +720,6 @@ init_insn_recog_data (void)
 {
   lra_insn_recog_data_len = 0;
   lra_insn_recog_data = NULL;
-  init_insn_regs ();
 }
 
 /* Expand, if necessary, LRA data about insns.	*/
@@ -791,6 +773,8 @@ finish_insn_recog_data (void)
     if ((data = lra_insn_recog_data[i]) != NULL)
       free_insn_recog_data (data);
   finish_insn_regs ();
+  lra_copy::pool.release ();
+  lra_insn_reg::pool.release ();
   free (lra_insn_recog_data);
 }
 
@@ -1310,7 +1294,7 @@ get_new_reg_value (void)
 }
 
 /* Pools for copies.  */
-static alloc_pool copy_pool;
+pool_allocator<lra_copy> lra_copy::pool ("lra copies", 100);
 
 /* Vec referring to pseudo copies.  */
 static vec<lra_copy_t> copy_vec;
@@ -1350,8 +1334,6 @@ init_reg_info (void)
   lra_reg_info = XNEWVEC (struct lra_reg, reg_info_size);
   for (i = 0; i < reg_info_size; i++)
     initialize_lra_reg_info_element (i);
-  copy_pool
-    = create_alloc_pool ("lra copies", sizeof (struct lra_copy), 100);
   copy_vec.create (100);
 }
 
@@ -1366,8 +1348,6 @@ finish_reg_info (void)
     bitmap_clear (&lra_reg_info[i].insn_bitmap);
   free (lra_reg_info);
   reg_info_size = 0;
-  free_alloc_pool (copy_pool);
-  copy_vec.release ();
 }
 
 /* Expand common reg info if it is necessary.  */
@@ -1394,7 +1374,7 @@ lra_free_copies (void)
     {
       cp = copy_vec.pop ();
       lra_reg_info[cp->regno1].copies = lra_reg_info[cp->regno2].copies = NULL;
-      pool_free (copy_pool, cp);
+      delete cp;
     }
 }
 
@@ -1416,7 +1396,7 @@ lra_create_copy (int regno1, int regno2, int freq)
       regno2 = regno1;
       regno1 = temp;
     }
-  cp = (lra_copy_t) pool_alloc (copy_pool);
+  cp = new lra_copy ();
   copy_vec.safe_push (cp);
   cp->regno1_dest_p = regno1_dest_p;
   cp->freq = freq;
@@ -1585,7 +1565,7 @@ invalidate_insn_data_regno_info (lra_insn_recog_data_t data, rtx_insn *insn,
     {
       i = ir->regno;
       next_ir = ir->next;
-      free_insn_reg (ir);
+      delete ir;
       bitmap_clear_bit (&lra_reg_info[i].insn_bitmap, uid);
       if (i >= FIRST_PSEUDO_REGISTER && ! debug_p)
 	{
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 10/35] Change use to type-based pool allocator in cfg.c.
  2015-05-27 18:01   ` Jeff Law
@ 2015-05-29 13:34     ` Martin Liška
  0 siblings, 0 replies; 108+ messages in thread
From: Martin Liška @ 2015-05-29 13:34 UTC (permalink / raw)
  To: Jeff Law, gcc-patches

[-- Attachment #1: Type: text/plain, Size: 386 bytes --]

On 05/27/2015 07:57 PM, Jeff Law wrote:
> On 05/27/2015 07:56 AM, mliska wrote:
>> gcc/ChangeLog:
>>
>> 2015-04-30  Martin Liska  <mliska@suse.cz>
>>
>>     * cfg.c (initialize_original_copy_tables):Use new type-based pool allocator.
>>     (free_original_copy_tables) Likewise.
>>     (copy_original_table_clear) Likewise.
>>     (copy_original_table_set) Likewise.
> OK.
> jeff
>

v2

[-- Attachment #2: 0009-Change-use-to-type-based-pool-allocator-in-cfg.c.patch --]
[-- Type: text/x-patch, Size: 2377 bytes --]

From 6facfc84e89ec3a887e7db342493e7656fb29dc4 Mon Sep 17 00:00:00 2001
From: mliska <mliska@suse.cz>
Date: Wed, 27 May 2015 15:56:47 +0200
Subject: [PATCH 09/32] Change use to type-based pool allocator in cfg.c.

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* cfg.c (initialize_original_copy_tables):Use new type-based pool allocator.
	(free_original_copy_tables) Likewise.
	(copy_original_table_clear) Likewise.
	(copy_original_table_set) Likewise.
---
 gcc/cfg.c | 17 +++++++----------
 1 file changed, 7 insertions(+), 10 deletions(-)

diff --git a/gcc/cfg.c b/gcc/cfg.c
index cdcc01c..ddfecdc 100644
--- a/gcc/cfg.c
+++ b/gcc/cfg.c
@@ -1066,18 +1066,16 @@ static hash_table<bb_copy_hasher> *bb_copy;
 
 /* And between loops and copies.  */
 static hash_table<bb_copy_hasher> *loop_copy;
-static alloc_pool original_copy_bb_pool;
-
+static pool_allocator<htab_bb_copy_original_entry> *original_copy_bb_pool;
 
 /* Initialize the data structures to maintain mapping between blocks
    and its copies.  */
 void
 initialize_original_copy_tables (void)
 {
-  gcc_assert (!original_copy_bb_pool);
-  original_copy_bb_pool
-    = create_alloc_pool ("original_copy",
-			 sizeof (struct htab_bb_copy_original_entry), 10);
+
+  original_copy_bb_pool = new pool_allocator<htab_bb_copy_original_entry>
+    ("original_copy", 10);
   bb_original = new hash_table<bb_copy_hasher> (10);
   bb_copy = new hash_table<bb_copy_hasher> (10);
   loop_copy = new hash_table<bb_copy_hasher> (10);
@@ -1095,7 +1093,7 @@ free_original_copy_tables (void)
   bb_copy = NULL;
   delete loop_copy;
   loop_copy = NULL;
-  free_alloc_pool (original_copy_bb_pool);
+  delete original_copy_bb_pool;
   original_copy_bb_pool = NULL;
 }
 
@@ -1117,7 +1115,7 @@ copy_original_table_clear (hash_table<bb_copy_hasher> *tab, unsigned obj)
 
   elt = *slot;
   tab->clear_slot (slot);
-  pool_free (original_copy_bb_pool, elt);
+  original_copy_bb_pool->remove (elt);
 }
 
 /* Sets the value associated with OBJ in table TAB to VAL.
@@ -1137,8 +1135,7 @@ copy_original_table_set (hash_table<bb_copy_hasher> *tab,
   slot = tab->find_slot (&key, INSERT);
   if (!*slot)
     {
-      *slot = (struct htab_bb_copy_original_entry *)
-		pool_alloc (original_copy_bb_pool);
+      *slot = original_copy_bb_pool->allocate ();
       (*slot)->index1 = obj;
     }
   (*slot)->index2 = val;
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 05/35] Change use to type-based pool allocator in ira-color.c.
  2015-05-27 17:59   ` Jeff Law
@ 2015-05-29 13:34     ` Martin Liška
  0 siblings, 0 replies; 108+ messages in thread
From: Martin Liška @ 2015-05-29 13:34 UTC (permalink / raw)
  To: Jeff Law, gcc-patches

[-- Attachment #1: Type: text/plain, Size: 428 bytes --]

On 05/27/2015 07:51 PM, Jeff Law wrote:
> On 05/27/2015 07:56 AM, mliska wrote:
>> gcc/ChangeLog:
>>
>> 2015-04-30  Martin Liska  <mliska@suse.cz>
>>
>>     * ira-color.c (init_update_cost_records):Use new type-based pool allocator.
>>     (get_update_cost_record) Likewise.
>>     (free_update_cost_record_list) Likewise.
>>     (finish_update_cost_records) Likewise.
>>     (initiate_cost_update) Likewise.
> OK.
> jeff
>

v2

[-- Attachment #2: 0005-Change-use-to-type-based-pool-allocator-in-ira-color.patch --]
[-- Type: text/x-patch, Size: 3082 bytes --]

From b2142d4d3939a81405f562a0970f7223069f130f Mon Sep 17 00:00:00 2001
From: mliska <mliska@suse.cz>
Date: Wed, 27 May 2015 15:56:46 +0200
Subject: [PATCH 05/32] Change use to type-based pool allocator in ira-color.c.

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* ira-color.c (init_update_cost_records):Use new type-based pool allocator.
	(get_update_cost_record) Likewise.
	(free_update_cost_record_list) Likewise.
	(finish_update_cost_records) Likewise.
	(initiate_cost_update) Likewise.
---
 gcc/ira-color.c | 34 ++++++++++++++++++++--------------
 1 file changed, 20 insertions(+), 14 deletions(-)

diff --git a/gcc/ira-color.c b/gcc/ira-color.c
index 4750714..543440d 100644
--- a/gcc/ira-color.c
+++ b/gcc/ira-color.c
@@ -123,6 +123,21 @@ struct update_cost_record
   int divisor;
   /* Next record for given allocno.  */
   struct update_cost_record *next;
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove ((update_cost_record *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<update_cost_record> pool;
 };
 
 /* To decrease footprint of ira_allocno structure we store all data
@@ -1166,16 +1181,8 @@ setup_profitable_hard_regs (void)
    allocnos.  */
 
 /* Pool for update cost records.  */
-static alloc_pool update_cost_record_pool;
-
-/* Initiate update cost records.  */
-static void
-init_update_cost_records (void)
-{
-  update_cost_record_pool
-    = create_alloc_pool ("update cost records",
-			 sizeof (struct update_cost_record), 100);
-}
+static pool_allocator<update_cost_record> update_cost_record_pool
+  ("update cost records", 100);
 
 /* Return new update cost record with given params.  */
 static struct update_cost_record *
@@ -1184,7 +1191,7 @@ get_update_cost_record (int hard_regno, int divisor,
 {
   struct update_cost_record *record;
 
-  record = (struct update_cost_record *) pool_alloc (update_cost_record_pool);
+  record = update_cost_record_pool.allocate ();
   record->hard_regno = hard_regno;
   record->divisor = divisor;
   record->next = next;
@@ -1200,7 +1207,7 @@ free_update_cost_record_list (struct update_cost_record *list)
   while (list != NULL)
     {
       next = list->next;
-      pool_free (update_cost_record_pool, list);
+      update_cost_record_pool.remove (list);
       list = next;
     }
 }
@@ -1209,7 +1216,7 @@ free_update_cost_record_list (struct update_cost_record *list)
 static void
 finish_update_cost_records (void)
 {
-  free_alloc_pool (update_cost_record_pool);
+  update_cost_record_pool.release ();
 }
 
 /* Array whose element value is TRUE if the corresponding hard
@@ -1264,7 +1271,6 @@ initiate_cost_update (void)
     = (struct update_cost_queue_elem *) ira_allocate (size);
   memset (update_cost_queue_elems, 0, size);
   update_cost_check = 0;
-  init_update_cost_records ();
 }
 
 /* Deallocate data used by function update_costs_from_copies.  */
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 03/35] Change use to type-based pool allocator in lra-lives.c.
  2015-05-27 17:53   ` Jeff Law
@ 2015-05-29 13:34     ` Martin Liška
  0 siblings, 0 replies; 108+ messages in thread
From: Martin Liška @ 2015-05-29 13:34 UTC (permalink / raw)
  To: gcc-patches

[-- Attachment #1: Type: text/plain, Size: 549 bytes --]

On 05/27/2015 07:47 PM, Jeff Law wrote:
> On 05/27/2015 07:56 AM, mliska wrote:
>> gcc/ChangeLog:
>>
>> 2015-04-30  Martin Liska  <mliska@suse.cz>
>>
>>     * lra-lives.c (free_live_range): Use new type-based pool allocator.
>>     (free_live_range_list) Likewise.
>>     (create_live_range) Likewise.
>>     (copy_live_range) Likewise.
>>     (lra_merge_live_ranges) Likewise.
>>     (remove_some_program_points_and_update_live_ranges) Likewise.
>>     (lra_live_ranges_init) Likewise.
>>     (lra_live_ranges_finish) Likewise.
> OK.
> jeff
>

v2.

[-- Attachment #2: 0003-Change-use-to-type-based-pool-allocator-in-lra-lives.patch --]
[-- Type: text/x-patch, Size: 5747 bytes --]

From f44535e764581a661d42b6d8a03bdf0de78d9789 Mon Sep 17 00:00:00 2001
From: mliska <mliska@suse.cz>
Date: Wed, 27 May 2015 15:56:44 +0200
Subject: [PATCH 03/32] Change use to type-based pool allocator in lra-lives.c.

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* lra-lives.c (free_live_range): Use new type-based pool allocator.
	(free_live_range_list) Likewise.
	(create_live_range) Likewise.
	(copy_live_range) Likewise.
	(lra_merge_live_ranges) Likewise.
	(remove_some_program_points_and_update_live_ranges) Likewise.
	(lra_live_ranges_init) Likewise.
	(lra_live_ranges_finish) Likewise.
---
 gcc/lra-coalesce.c |  1 +
 gcc/lra-int.h      | 15 +++++++++++++++
 gcc/lra-lives.c    | 35 ++++++++++-------------------------
 gcc/lra-spills.c   |  1 +
 gcc/lra.c          |  1 +
 5 files changed, 28 insertions(+), 25 deletions(-)

diff --git a/gcc/lra-coalesce.c b/gcc/lra-coalesce.c
index 045691d..b385603 100644
--- a/gcc/lra-coalesce.c
+++ b/gcc/lra-coalesce.c
@@ -84,6 +84,7 @@ along with GCC; see the file COPYING3.	If not see
 #include "except.h"
 #include "timevar.h"
 #include "ira.h"
+#include "alloc-pool.h"
 #include "lra-int.h"
 #include "df.h"
 
diff --git a/gcc/lra-int.h b/gcc/lra-int.h
index 12923ee..42e4a54 100644
--- a/gcc/lra-int.h
+++ b/gcc/lra-int.h
@@ -54,6 +54,21 @@ struct lra_live_range
   lra_live_range_t next;
   /* Pointer to structures with the same start.	 */
   lra_live_range_t start_next;
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove ((lra_live_range *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<lra_live_range> pool;
 };
 
 typedef struct lra_copy *lra_copy_t;
diff --git a/gcc/lra-lives.c b/gcc/lra-lives.c
index 085411e..4dfe30f 100644
--- a/gcc/lra-lives.c
+++ b/gcc/lra-lives.c
@@ -121,14 +121,7 @@ static sparseset unused_set, dead_set;
 static bitmap_head temp_bitmap;
 
 /* Pool for pseudo live ranges.	 */
-static alloc_pool live_range_pool;
-
-/* Free live range LR.	*/
-static void
-free_live_range (lra_live_range_t lr)
-{
-  pool_free (live_range_pool, lr);
-}
+pool_allocator <lra_live_range> lra_live_range::pool ("live ranges", 100);
 
 /* Free live range list LR.  */
 static void
@@ -139,7 +132,7 @@ free_live_range_list (lra_live_range_t lr)
   while (lr != NULL)
     {
       next = lr->next;
-      free_live_range (lr);
+      delete lr;
       lr = next;
     }
 }
@@ -148,9 +141,7 @@ free_live_range_list (lra_live_range_t lr)
 static lra_live_range_t
 create_live_range (int regno, int start, int finish, lra_live_range_t next)
 {
-  lra_live_range_t p;
-
-  p = (lra_live_range_t) pool_alloc (live_range_pool);
+  lra_live_range_t p = new lra_live_range;
   p->regno = regno;
   p->start = start;
   p->finish = finish;
@@ -162,11 +153,7 @@ create_live_range (int regno, int start, int finish, lra_live_range_t next)
 static lra_live_range_t
 copy_live_range (lra_live_range_t r)
 {
-  lra_live_range_t p;
-
-  p = (lra_live_range_t) pool_alloc (live_range_pool);
-  *p = *r;
-  return p;
+  return new lra_live_range (*r);
 }
 
 /* Copy live range list given by its head R and return the result.  */
@@ -209,7 +196,7 @@ lra_merge_live_ranges (lra_live_range_t r1, lra_live_range_t r2)
 	  r1->start = r2->start;
 	  lra_live_range_t temp = r2;
 	  r2 = r2->next;
-	  pool_free (live_range_pool, temp);
+	  delete temp;
 	}
       else
 	{
@@ -480,7 +467,7 @@ live_con_fun_n (edge e)
   basic_block dest = e->dest;
   bitmap bb_liveout = df_get_live_out (bb);
   bitmap dest_livein = df_get_live_in (dest);
-  
+
   return bitmap_ior_and_compl_into (bb_liveout,
 				    dest_livein, &all_hard_regs_bitmap);
 }
@@ -1024,7 +1011,7 @@ process_bb_lives (basic_block bb, int &curr_point, bool dead_insn_p)
       if (sparseset_bit_p (pseudos_live_through_calls, j))
 	check_pseudos_live_through_calls (j);
     }
-  
+
   if (need_curr_point_incr)
     next_program_point (curr_point, freq);
 
@@ -1109,7 +1096,7 @@ remove_some_program_points_and_update_live_ranges (void)
 		}
 	      prev_r->start = r->start;
 	      prev_r->next = next_r;
-	      free_live_range (r);
+	      delete r;
 	    }
 	}
     }
@@ -1252,7 +1239,7 @@ lra_create_live_ranges_1 (bool all_p, bool dead_insn_p)
 	}
     }
   lra_free_copies ();
- 
+
   /* Under some circumstances, we can have functions without pseudo
      registers.  For such functions, lra_live_max_point will be 0,
      see e.g. PR55604, and there's nothing more to do for us here.  */
@@ -1380,8 +1367,6 @@ lra_clear_live_ranges (void)
 void
 lra_live_ranges_init (void)
 {
-  live_range_pool = create_alloc_pool ("live ranges",
-				       sizeof (struct lra_live_range), 100);
   bitmap_initialize (&temp_bitmap, &reg_obstack);
   initiate_live_solver ();
 }
@@ -1392,5 +1377,5 @@ lra_live_ranges_finish (void)
 {
   finish_live_solver ();
   bitmap_clear (&temp_bitmap);
-  free_alloc_pool (live_range_pool);
+  lra_live_range::pool.release ();
 }
diff --git a/gcc/lra-spills.c b/gcc/lra-spills.c
index 19ece20..caece9a 100644
--- a/gcc/lra-spills.c
+++ b/gcc/lra-spills.c
@@ -98,6 +98,7 @@ along with GCC; see the file COPYING3.	If not see
 #include "except.h"
 #include "timevar.h"
 #include "target.h"
+#include "alloc-pool.h"
 #include "lra-int.h"
 #include "ira.h"
 #include "df.h"
diff --git a/gcc/lra.c b/gcc/lra.c
index 7c33636..7440668 100644
--- a/gcc/lra.c
+++ b/gcc/lra.c
@@ -149,6 +149,7 @@ along with GCC; see the file COPYING3.	If not see
 #include "timevar.h"
 #include "target.h"
 #include "ira.h"
+#include "alloc-pool.h"
 #include "lra-int.h"
 #include "df.h"
 
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 07/35] Change use to type-based pool allocator in var-tracking.c.
  2015-05-27 14:20 ` [PATCH 07/35] Change use to type-based pool allocator in var-tracking.c mliska
@ 2015-05-29 13:34   ` Martin Liška
  0 siblings, 0 replies; 108+ messages in thread
From: Martin Liška @ 2015-05-29 13:34 UTC (permalink / raw)
  To: gcc-patches

[-- Attachment #1: Type: text/plain, Size: 21706 bytes --]

On 05/27/2015 03:56 PM, mliska wrote:
> gcc/ChangeLog:
>
> 2015-04-30  Martin Liska  <mliska@suse.cz>
>
> 	* var-tracking.c (variable_htab_free): Use new type-based pool allocator.
> 	(attrs_list_clear) Likewise.
> 	(attrs_list_insert) Likewise.
> 	(attrs_list_copy) Likewise.
> 	(shared_hash_unshare) Likewise.
> 	(shared_hash_destroy) Likewise.
> 	(unshare_variable) Likewise.
> 	(var_reg_delete_and_set) Likewise.
> 	(var_reg_delete) Likewise.
> 	(var_regno_delete) Likewise.
> 	(drop_overlapping_mem_locs) Likewise.
> 	(variable_union) Likewise.
> 	(insert_into_intersection) Likewise.
> 	(canonicalize_values_star) Likewise.
> 	(variable_merge_over_cur) Likewise.
> 	(dataflow_set_merge) Likewise.
> 	(remove_duplicate_values) Likewise.
> 	(variable_post_merge_new_vals) Likewise.
> 	(dataflow_set_preserve_mem_locs) Likewise.
> 	(dataflow_set_remove_mem_locs) Likewise.
> 	(variable_from_dropped) Likewise.
> 	(variable_was_changed) Likewise.
> 	(set_slot_part) Likewise.
> 	(clobber_slot_part) Likewise.
> 	(delete_slot_part) Likewise.
> 	(loc_exp_insert_dep) Likewise.
> 	(notify_dependents_of_changed_value) Likewise.
> 	(emit_notes_for_differences_1) Likewise.
> 	(vt_emit_notes) Likewise.
> 	(vt_initialize) Likewise.
> 	(vt_finalize) Likewise.
> ---
>   gcc/var-tracking.c | 201 ++++++++++++++++++++++++++++++++---------------------
>   1 file changed, 122 insertions(+), 79 deletions(-)
>
> diff --git a/gcc/var-tracking.c b/gcc/var-tracking.c
> index 0db4358..f7afed1 100644
> --- a/gcc/var-tracking.c
> +++ b/gcc/var-tracking.c
> @@ -282,6 +282,21 @@ typedef struct attrs_def
>
>     /* Offset from start of DECL.  */
>     HOST_WIDE_INT offset;
> +
> +  /* Pool allocation new operator.  */
> +  inline void *operator new (size_t)
> +  {
> +    return pool.allocate ();
> +  }
> +
> +  /* Delete operator utilizing pool allocation.  */
> +  inline void operator delete (void *ptr)
> +  {
> +    pool.remove((attrs_def *) ptr);
> +  }
> +
> +  /* Memory allocation pool.  */
> +  static pool_allocator<attrs_def> pool;
>   } *attrs;
>
>   /* Structure for chaining the locations.  */
> @@ -298,6 +313,21 @@ typedef struct location_chain_def
>
>     /* Initialized? */
>     enum var_init_status init;
> +
> +  /* Pool allocation new operator.  */
> +  inline void *operator new (size_t)
> +  {
> +    return pool.allocate ();
> +  }
> +
> +  /* Delete operator utilizing pool allocation.  */
> +  inline void operator delete (void *ptr)
> +  {
> +    pool.remove((location_chain_def *) ptr);
> +  }
> +
> +  /* Memory allocation pool.  */
> +  static pool_allocator<location_chain_def> pool;
>   } *location_chain;
>
>   /* A vector of loc_exp_dep holds the active dependencies of a one-part
> @@ -315,6 +345,21 @@ typedef struct loc_exp_dep_s
>     /* A pointer to the pointer to this entry (head or prev's next) in
>        the doubly-linked list.  */
>     struct loc_exp_dep_s **pprev;
> +
> +  /* Pool allocation new operator.  */
> +  inline void *operator new (size_t)
> +  {
> +    return pool.allocate ();
> +  }
> +
> +  /* Delete operator utilizing pool allocation.  */
> +  inline void operator delete (void *ptr)
> +  {
> +    pool.remove((loc_exp_dep_s *) ptr);
> +  }
> +
> +  /* Memory allocation pool.  */
> +  static pool_allocator<loc_exp_dep_s> pool;
>   } loc_exp_dep;
>
>
> @@ -554,6 +599,21 @@ typedef struct shared_hash_def
>
>     /* Actual hash table.  */
>     variable_table_type *htab;
> +
> +  /* Pool allocation new operator.  */
> +  inline void *operator new (size_t)
> +  {
> +    return pool.allocate ();
> +  }
> +
> +  /* Delete operator utilizing pool allocation.  */
> +  inline void operator delete (void *ptr)
> +  {
> +    pool.remove((shared_hash_def *) ptr);
> +  }
> +
> +  /* Memory allocation pool.  */
> +  static pool_allocator<shared_hash_def> pool;
>   } *shared_hash;
>
>   /* Structure holding the IN or OUT set for a basic block.  */
> @@ -598,22 +658,28 @@ typedef struct variable_tracking_info_def
>   } *variable_tracking_info;
>
>   /* Alloc pool for struct attrs_def.  */
> -static alloc_pool attrs_pool;
> +pool_allocator<attrs_def> attrs_def::pool ("attrs_def pool", 1024);
>
>   /* Alloc pool for struct variable_def with MAX_VAR_PARTS entries.  */
> -static alloc_pool var_pool;
> +
> +static pool_allocator<variable_def> var_pool
> +  ("variable_def pool", 64,
> +   (MAX_VAR_PARTS - 1) * sizeof (((variable)NULL)->var_part[0]));
>
>   /* Alloc pool for struct variable_def with a single var_part entry.  */
> -static alloc_pool valvar_pool;
> +static pool_allocator<variable_def> valvar_pool
> +  ("small variable_def pool", 256);
>
>   /* Alloc pool for struct location_chain_def.  */
> -static alloc_pool loc_chain_pool;
> +pool_allocator<location_chain_def> location_chain_def::pool
> +  ("location_chain_def pool", 1024);
>
>   /* Alloc pool for struct shared_hash_def.  */
> -static alloc_pool shared_hash_pool;
> +pool_allocator<shared_hash_def> shared_hash_def::pool
> +  ("shared_hash_def pool", 256);
>
>   /* Alloc pool for struct loc_exp_dep_s for NOT_ONEPART variables.  */
> -static alloc_pool loc_exp_dep_pool;
> +pool_allocator<loc_exp_dep> loc_exp_dep::pool ("loc_exp_dep pool", 64);
>
>   /* Changed variables, notes will be emitted for them.  */
>   static variable_table_type *changed_variables;
> @@ -784,7 +850,7 @@ stack_adjust_offset_pre_post (rtx pattern, HOST_WIDE_INT *pre,
>   	*post += INTVAL (XEXP (src, 1));
>         else
>   	*post -= INTVAL (XEXP (src, 1));
> -      return;	
> +      return;
>       }
>     HOST_WIDE_INT res[2] = { 0, 0 };
>     for_each_inc_dec (pattern, stack_adjust_offset_pre_post_cb, res);
> @@ -1374,7 +1440,7 @@ dv_onepart_p (decl_or_value dv)
>   }
>
>   /* Return the variable pool to be used for a dv of type ONEPART.  */
> -static inline alloc_pool
> +static inline pool_allocator <variable_def> &
>   onepart_pool (onepart_enum_t onepart)
>   {
>     return onepart ? valvar_pool : var_pool;
> @@ -1457,7 +1523,7 @@ variable_htab_free (void *elem)
>         for (node = var->var_part[i].loc_chain; node; node = next)
>   	{
>   	  next = node->next;
> -	  pool_free (loc_chain_pool, node);
> +	  delete node;
>   	}
>         var->var_part[i].loc_chain = NULL;
>       }
> @@ -1472,7 +1538,7 @@ variable_htab_free (void *elem)
>         if (var->onepart == ONEPART_DEXPR)
>   	set_dv_changed (var->dv, true);
>       }
> -  pool_free (onepart_pool (var->onepart), var);
> +  onepart_pool (var->onepart).remove (var);
>   }
>
>   /* Initialize the set (array) SET of attrs to empty lists.  */
> @@ -1496,7 +1562,7 @@ attrs_list_clear (attrs *listp)
>     for (list = *listp; list; list = next)
>       {
>         next = list->next;
> -      pool_free (attrs_pool, list);
> +      delete list;
>       }
>     *listp = NULL;
>   }
> @@ -1518,9 +1584,7 @@ static void
>   attrs_list_insert (attrs *listp, decl_or_value dv,
>   		   HOST_WIDE_INT offset, rtx loc)
>   {
> -  attrs list;
> -
> -  list = (attrs) pool_alloc (attrs_pool);
> +  attrs list = new attrs_def;
>     list->loc = loc;
>     list->dv = dv;
>     list->offset = offset;
> @@ -1533,12 +1597,10 @@ attrs_list_insert (attrs *listp, decl_or_value dv,
>   static void
>   attrs_list_copy (attrs *dstp, attrs src)
>   {
> -  attrs n;
> -
>     attrs_list_clear (dstp);
>     for (; src; src = src->next)
>       {
> -      n = (attrs) pool_alloc (attrs_pool);
> +      attrs n = new attrs_def;
>         n->loc = src->loc;
>         n->dv = src->dv;
>         n->offset = src->offset;
> @@ -1612,7 +1674,7 @@ shared_var_p (variable var, shared_hash vars)
>   static shared_hash
>   shared_hash_unshare (shared_hash vars)
>   {
> -  shared_hash new_vars = (shared_hash) pool_alloc (shared_hash_pool);
> +  shared_hash new_vars = new shared_hash_def;
>     gcc_assert (vars->refcount > 1);
>     new_vars->refcount = 1;
>     new_vars->htab = new variable_table_type (vars->htab->elements () + 3);
> @@ -1640,7 +1702,7 @@ shared_hash_destroy (shared_hash vars)
>     if (--vars->refcount == 0)
>       {
>         delete vars->htab;
> -      pool_free (shared_hash_pool, vars);
> +      delete vars;
>       }
>   }
>
> @@ -1738,7 +1800,7 @@ unshare_variable (dataflow_set *set, variable_def **slot, variable var,
>     variable new_var;
>     int i;
>
> -  new_var = (variable) pool_alloc (onepart_pool (var->onepart));
> +  new_var = onepart_pool (var->onepart).allocate ();
>     new_var->dv = var->dv;
>     new_var->refcount = 1;
>     var->refcount--;
> @@ -1771,7 +1833,7 @@ unshare_variable (dataflow_set *set, variable_def **slot, variable var,
>   	{
>   	  location_chain new_lc;
>
> -	  new_lc = (location_chain) pool_alloc (loc_chain_pool);
> +	  new_lc = new location_chain_def;
>   	  new_lc->next = NULL;
>   	  if (node->init > initialized)
>   	    new_lc->init = node->init;
> @@ -1936,7 +1998,7 @@ var_reg_delete_and_set (dataflow_set *set, rtx loc, bool modify,
>         if (dv_as_opaque (node->dv) != decl || node->offset != offset)
>   	{
>   	  delete_variable_part (set, node->loc, node->dv, node->offset);
> -	  pool_free (attrs_pool, node);
> +	  delete node;
>   	  *nextp = next;
>   	}
>         else
> @@ -1977,7 +2039,7 @@ var_reg_delete (dataflow_set *set, rtx loc, bool clobber)
>         if (clobber || !dv_onepart_p (node->dv))
>   	{
>   	  delete_variable_part (set, node->loc, node->dv, node->offset);
> -	  pool_free (attrs_pool, node);
> +	  delete node;
>   	  *nextp = next;
>   	}
>         else
> @@ -1997,7 +2059,7 @@ var_regno_delete (dataflow_set *set, int regno)
>       {
>         next = node->next;
>         delete_variable_part (set, node->loc, node->dv, node->offset);
> -      pool_free (attrs_pool, node);
> +      delete node;
>       }
>     *reg = NULL;
>   }
> @@ -2047,7 +2109,7 @@ get_addr_from_global_cache (rtx const loc)
>     rtx x;
>
>     gcc_checking_assert (GET_CODE (loc) == VALUE);
> -
> +
>     bool existed;
>     rtx *slot = &global_get_addr_cache->get_or_insert (loc, &existed);
>     if (existed)
> @@ -2085,14 +2147,14 @@ get_addr_from_local_cache (dataflow_set *set, rtx const loc)
>     location_chain l;
>
>     gcc_checking_assert (GET_CODE (loc) == VALUE);
> -
> +
>     bool existed;
>     rtx *slot = &local_get_addr_cache->get_or_insert (loc, &existed);
>     if (existed)
>       return *slot;
>
>     x = get_addr_from_global_cache (loc);
> -
> +
>     /* Tentative, avoiding infinite recursion.  */
>     *slot = x;
>
> @@ -2304,7 +2366,7 @@ drop_overlapping_mem_locs (variable_def **slot, overlapping_mems *coms)
>   	      if (VAR_LOC_1PAUX (var))
>   		VAR_LOC_FROM (var) = NULL;
>   	    }
> -	  pool_free (loc_chain_pool, loc);
> +	  delete loc;
>   	}
>
>         if (!var->var_part[0].loc_chain)
> @@ -2538,7 +2600,7 @@ val_reset (dataflow_set *set, decl_or_value dv)
>     if (var->onepart == ONEPART_VALUE)
>       {
>         rtx x = dv_as_value (dv);
> -
> +
>         /* Relationships in the global cache don't change, so reset the
>   	 local cache entry only.  */
>         rtx *slot = local_get_addr_cache->get (x);
> @@ -2807,7 +2869,7 @@ variable_union (variable src, dataflow_set *set)
>   		  goto restart_onepart_unshared;
>   		}
>
> -	      *nodep = nnode = (location_chain) pool_alloc (loc_chain_pool);
> +	      *nodep = nnode = new location_chain_def;
>   	      nnode->loc = snode->loc;
>   	      nnode->init = snode->init;
>   	      if (!snode->set_src || MEM_P (snode->set_src))
> @@ -2927,7 +2989,7 @@ variable_union (variable src, dataflow_set *set)
>   		    location_chain new_node;
>
>   		    /* Copy the location from SRC.  */
> -		    new_node = (location_chain) pool_alloc (loc_chain_pool);
> +		    new_node = new location_chain_def;
>   		    new_node->loc = node->loc;
>   		    new_node->init = node->init;
>   		    if (!node->set_src || MEM_P (node->set_src))
> @@ -2982,7 +3044,7 @@ variable_union (variable src, dataflow_set *set)
>   		      location_chain new_node;
>
>   		      /* Copy the location from SRC.  */
> -		      new_node = (location_chain) pool_alloc (loc_chain_pool);
> +		      new_node = new location_chain_def;
>   		      new_node->loc = node->loc;
>   		      new_node->init = node->init;
>   		      if (!node->set_src || MEM_P (node->set_src))
> @@ -3078,7 +3140,7 @@ variable_union (variable src, dataflow_set *set)
>   	    {
>   	      location_chain new_lc;
>
> -	      new_lc = (location_chain) pool_alloc (loc_chain_pool);
> +	      new_lc = new location_chain_def;
>   	      new_lc->next = NULL;
>   	      new_lc->init = node->init;
>   	      if (!node->set_src || MEM_P (node->set_src))
> @@ -3296,7 +3358,7 @@ insert_into_intersection (location_chain *nodep, rtx loc,
>       else if (r > 0)
>         break;
>
> -  node = (location_chain) pool_alloc (loc_chain_pool);
> +  node = new location_chain_def;
>
>     node->loc = loc;
>     node->set_src = NULL;
> @@ -3817,7 +3879,7 @@ canonicalize_values_star (variable_def **slot, dataflow_set *set)
>   		    if (dv_as_opaque (list->dv) == dv_as_opaque (cdv))
>   		      {
>   			*listp = list->next;
> -			pool_free (attrs_pool, list);
> +			delete list;
>   			list = *listp;
>   			break;
>   		      }
> @@ -3835,7 +3897,7 @@ canonicalize_values_star (variable_def **slot, dataflow_set *set)
>   		    if (dv_as_opaque (list->dv) == dv_as_opaque (dv))
>   		      {
>   			*listp = list->next;
> -			pool_free (attrs_pool, list);
> +			delete list;
>   			list = *listp;
>   			break;
>   		      }
> @@ -4016,7 +4078,7 @@ variable_merge_over_cur (variable s1var, struct dfset_merge *dsm)
>   	{
>   	  if (node)
>   	    {
> -	      dvar = (variable) pool_alloc (onepart_pool (onepart));
> +	      dvar = onepart_pool (onepart).allocate ();
>   	      dvar->dv = dv;
>   	      dvar->refcount = 1;
>   	      dvar->n_var_parts = 1;
> @@ -4152,8 +4214,7 @@ variable_merge_over_cur (variable s1var, struct dfset_merge *dsm)
>   							  INSERT);
>   		  if (!*slot)
>   		    {
> -		      variable var = (variable) pool_alloc (onepart_pool
> -							    (ONEPART_VALUE));
> +		      variable var = onepart_pool (ONEPART_VALUE).allocate ();
>   		      var->dv = dv;
>   		      var->refcount = 1;
>   		      var->n_var_parts = 1;
> @@ -4240,7 +4301,7 @@ dataflow_set_merge (dataflow_set *dst, dataflow_set *src2)
>     dataflow_set_init (dst);
>     dst->stack_adjust = cur.stack_adjust;
>     shared_hash_destroy (dst->vars);
> -  dst->vars = (shared_hash) pool_alloc (shared_hash_pool);
> +  dst->vars = new shared_hash_def;
>     dst->vars->refcount = 1;
>     dst->vars->htab = new variable_table_type (MAX (src1_elems, src2_elems));
>
> @@ -4366,7 +4427,7 @@ remove_duplicate_values (variable var)
>   	    {
>   	      /* Remove duplicate value node.  */
>   	      *nodep = node->next;
> -	      pool_free (loc_chain_pool, node);
> +	      delete node;
>   	      continue;
>   	    }
>   	  else
> @@ -4519,7 +4580,7 @@ variable_post_merge_new_vals (variable_def **slot, dfset_post_merge *dfpm)
>   		 to be added when we bring perm in.  */
>   	      att = *curp;
>   	      *curp = att->next;
> -	      pool_free (attrs_pool, att);
> +	      delete att;
>   	    }
>   	}
>
> @@ -4779,7 +4840,7 @@ dataflow_set_preserve_mem_locs (variable_def **slot, dataflow_set *set)
>   		}
>   	    }
>   	  *locp = loc->next;
> -	  pool_free (loc_chain_pool, loc);
> +	  delete loc;
>   	}
>
>         if (!var->var_part[0].loc_chain)
> @@ -4851,7 +4912,7 @@ dataflow_set_remove_mem_locs (variable_def **slot, dataflow_set *set)
>   	      if (VAR_LOC_1PAUX (var))
>   		VAR_LOC_FROM (var) = NULL;
>   	    }
> -	  pool_free (loc_chain_pool, loc);
> +	  delete loc;
>   	}
>
>         if (!var->var_part[0].loc_chain)
> @@ -7302,7 +7363,7 @@ variable_from_dropped (decl_or_value dv, enum insert_option insert)
>
>     gcc_checking_assert (onepart == ONEPART_VALUE || onepart == ONEPART_DEXPR);
>
> -  empty_var = (variable) pool_alloc (onepart_pool (onepart));
> +  empty_var = onepart_pool (onepart).allocate ();
>     empty_var->dv = dv;
>     empty_var->refcount = 1;
>     empty_var->n_var_parts = 0;
> @@ -7406,7 +7467,7 @@ variable_was_changed (variable var, dataflow_set *set)
>
>   	  if (!empty_var)
>   	    {
> -	      empty_var = (variable) pool_alloc (onepart_pool (onepart));
> +	      empty_var = onepart_pool (onepart).allocate ();
>   	      empty_var->dv = var->dv;
>   	      empty_var->refcount = 1;
>   	      empty_var->n_var_parts = 0;
> @@ -7530,7 +7591,7 @@ set_slot_part (dataflow_set *set, rtx loc, variable_def **slot,
>     if (!var)
>       {
>         /* Create new variable information.  */
> -      var = (variable) pool_alloc (onepart_pool (onepart));
> +      var = onepart_pool (onepart).allocate ();
>         var->dv = dv;
>         var->refcount = 1;
>         var->n_var_parts = 1;
> @@ -7725,7 +7786,7 @@ set_slot_part (dataflow_set *set, rtx loc, variable_def **slot,
>   		set_src = node->set_src;
>   	      if (var->var_part[pos].cur_loc == node->loc)
>   		var->var_part[pos].cur_loc = NULL;
> -	      pool_free (loc_chain_pool, node);
> +	      delete node;
>   	      *nextp = next;
>   	      break;
>   	    }
> @@ -7737,7 +7798,7 @@ set_slot_part (dataflow_set *set, rtx loc, variable_def **slot,
>       }
>
>     /* Add the location to the beginning.  */
> -  node = (location_chain) pool_alloc (loc_chain_pool);
> +  node = new location_chain_def;
>     node->loc = loc;
>     node->init = initialized;
>     node->set_src = set_src;
> @@ -7819,7 +7880,7 @@ clobber_slot_part (dataflow_set *set, rtx loc, variable_def **slot,
>   		      if (dv_as_opaque (anode->dv) == dv_as_opaque (var->dv)
>   			  && anode->offset == offset)
>   			{
> -			  pool_free (attrs_pool, anode);
> +			  delete anode;
>   			  *anextp = anext;
>   			}
>   		      else
> @@ -7919,7 +7980,7 @@ delete_slot_part (dataflow_set *set, rtx loc, variable_def **slot,
>   		  if (pos == 0 && var->onepart && VAR_LOC_1PAUX (var))
>   		    VAR_LOC_FROM (var) = NULL;
>   		}
> -	      pool_free (loc_chain_pool, node);
> +	      delete node;
>   	      *nextp = next;
>   	      break;
>   	    }
> @@ -8080,7 +8141,7 @@ loc_exp_insert_dep (variable var, rtx x, variable_table_type *vars)
>       return;
>
>     if (var->onepart == NOT_ONEPART)
> -    led = (loc_exp_dep *) pool_alloc (loc_exp_dep_pool);
> +    led = new loc_exp_dep;
>     else
>       {
>         loc_exp_dep empty;
> @@ -8888,7 +8949,7 @@ notify_dependents_of_changed_value (rtx val, variable_table_type *htab,
>   	  break;
>
>   	case NOT_ONEPART:
> -	  pool_free (loc_exp_dep_pool, led);
> +	  delete led;
>   	  ivar = htab->find_with_hash (ldv, dv_htab_hash (ldv));
>   	  if (ivar)
>   	    {
> @@ -9010,7 +9071,7 @@ emit_notes_for_differences_1 (variable_def **slot, variable_table_type *new_vars
>
>         if (!empty_var)
>   	{
> -	  empty_var = (variable) pool_alloc (onepart_pool (old_var->onepart));
> +	  empty_var = onepart_pool (old_var->onepart).allocate ();
>   	  empty_var->dv = old_var->dv;
>   	  empty_var->refcount = 0;
>   	  empty_var->n_var_parts = 0;
> @@ -9451,8 +9512,6 @@ vt_emit_notes (void)
>     if (MAY_HAVE_DEBUG_INSNS)
>       {
>         dropped_values = new variable_table_type (cselib_get_next_uid () * 2);
> -      loc_exp_dep_pool = create_alloc_pool ("loc_exp_dep pool",
> -					    sizeof (loc_exp_dep), 64);
>       }
>
>     dataflow_set_init (&cur);
> @@ -9871,18 +9930,7 @@ vt_initialize (void)
>
>     alloc_aux_for_blocks (sizeof (struct variable_tracking_info_def));
>
> -  attrs_pool = create_alloc_pool ("attrs_def pool",
> -				  sizeof (struct attrs_def), 1024);
> -  var_pool = create_alloc_pool ("variable_def pool",
> -				sizeof (struct variable_def)
> -				+ (MAX_VAR_PARTS - 1)
> -				* sizeof (((variable)NULL)->var_part[0]), 64);
> -  loc_chain_pool = create_alloc_pool ("location_chain_def pool",
> -				      sizeof (struct location_chain_def),
> -				      1024);
> -  shared_hash_pool = create_alloc_pool ("shared_hash_def pool",
> -					sizeof (struct shared_hash_def), 256);
> -  empty_shared_hash = (shared_hash) pool_alloc (shared_hash_pool);
> +  empty_shared_hash = new shared_hash_def;
>     empty_shared_hash->refcount = 1;
>     empty_shared_hash->htab = new variable_table_type (1);
>     changed_variables = new variable_table_type (10);
> @@ -9901,15 +9949,12 @@ vt_initialize (void)
>       {
>         cselib_init (CSELIB_RECORD_MEMORY | CSELIB_PRESERVE_CONSTANTS);
>         scratch_regs = BITMAP_ALLOC (NULL);
> -      valvar_pool = create_alloc_pool ("small variable_def pool",
> -				       sizeof (struct variable_def), 256);
>         preserved_values.create (256);
>         global_get_addr_cache = new hash_map<rtx, rtx>;
>       }
>     else
>       {
>         scratch_regs = NULL;
> -      valvar_pool = NULL;
>         global_get_addr_cache = NULL;
>       }
>
> @@ -10243,20 +10288,18 @@ vt_finalize (void)
>     empty_shared_hash->htab = NULL;
>     delete changed_variables;
>     changed_variables = NULL;
> -  free_alloc_pool (attrs_pool);
> -  free_alloc_pool (var_pool);
> -  free_alloc_pool (loc_chain_pool);
> -  free_alloc_pool (shared_hash_pool);
> +  attrs_def::pool.release ();
> +  var_pool.release ();
> +  location_chain_def::pool.release ();
> +  shared_hash_def::pool.release ();
>
>     if (MAY_HAVE_DEBUG_INSNS)
>       {
>         if (global_get_addr_cache)
>   	delete global_get_addr_cache;
>         global_get_addr_cache = NULL;
> -      if (loc_exp_dep_pool)
> -	free_alloc_pool (loc_exp_dep_pool);
> -      loc_exp_dep_pool = NULL;
> -      free_alloc_pool (valvar_pool);
> +      loc_exp_dep::pool.release ();
> +      valvar_pool.release ();
>         preserved_values.release ();
>         cselib_finish ();
>         BITMAP_FREE (scratch_regs);
>

v2

[-- Attachment #2: 0006-Change-use-to-type-based-pool-allocator-in-var-track.patch --]
[-- Type: text/x-patch, Size: 20453 bytes --]

From 97a6b142995428ab6e9259082c1c572bbb584bf1 Mon Sep 17 00:00:00 2001
From: mliska <mliska@suse.cz>
Date: Wed, 27 May 2015 15:56:46 +0200
Subject: [PATCH 06/32] Change use to type-based pool allocator in
 var-tracking.c.

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* var-tracking.c (variable_htab_free): Use new type-based pool allocator.
	(attrs_list_clear) Likewise.
	(attrs_list_insert) Likewise.
	(attrs_list_copy) Likewise.
	(shared_hash_unshare) Likewise.
	(shared_hash_destroy) Likewise.
	(unshare_variable) Likewise.
	(var_reg_delete_and_set) Likewise.
	(var_reg_delete) Likewise.
	(var_regno_delete) Likewise.
	(drop_overlapping_mem_locs) Likewise.
	(variable_union) Likewise.
	(insert_into_intersection) Likewise.
	(canonicalize_values_star) Likewise.
	(variable_merge_over_cur) Likewise.
	(dataflow_set_merge) Likewise.
	(remove_duplicate_values) Likewise.
	(variable_post_merge_new_vals) Likewise.
	(dataflow_set_preserve_mem_locs) Likewise.
	(dataflow_set_remove_mem_locs) Likewise.
	(variable_from_dropped) Likewise.
	(variable_was_changed) Likewise.
	(set_slot_part) Likewise.
	(clobber_slot_part) Likewise.
	(delete_slot_part) Likewise.
	(loc_exp_insert_dep) Likewise.
	(notify_dependents_of_changed_value) Likewise.
	(emit_notes_for_differences_1) Likewise.
	(vt_emit_notes) Likewise.
	(vt_initialize) Likewise.
	(vt_finalize) Likewise.
---
 gcc/var-tracking.c | 201 ++++++++++++++++++++++++++++++++---------------------
 1 file changed, 122 insertions(+), 79 deletions(-)

diff --git a/gcc/var-tracking.c b/gcc/var-tracking.c
index 0db4358..0b24007 100644
--- a/gcc/var-tracking.c
+++ b/gcc/var-tracking.c
@@ -282,6 +282,21 @@ typedef struct attrs_def
 
   /* Offset from start of DECL.  */
   HOST_WIDE_INT offset;
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove ((attrs_def *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<attrs_def> pool;
 } *attrs;
 
 /* Structure for chaining the locations.  */
@@ -298,6 +313,21 @@ typedef struct location_chain_def
 
   /* Initialized? */
   enum var_init_status init;
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove ((location_chain_def *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<location_chain_def> pool;
 } *location_chain;
 
 /* A vector of loc_exp_dep holds the active dependencies of a one-part
@@ -315,6 +345,21 @@ typedef struct loc_exp_dep_s
   /* A pointer to the pointer to this entry (head or prev's next) in
      the doubly-linked list.  */
   struct loc_exp_dep_s **pprev;
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove ((loc_exp_dep_s *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<loc_exp_dep_s> pool;
 } loc_exp_dep;
 
 
@@ -554,6 +599,21 @@ typedef struct shared_hash_def
 
   /* Actual hash table.  */
   variable_table_type *htab;
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove ((shared_hash_def *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<shared_hash_def> pool;
 } *shared_hash;
 
 /* Structure holding the IN or OUT set for a basic block.  */
@@ -598,22 +658,28 @@ typedef struct variable_tracking_info_def
 } *variable_tracking_info;
 
 /* Alloc pool for struct attrs_def.  */
-static alloc_pool attrs_pool;
+pool_allocator<attrs_def> attrs_def::pool ("attrs_def pool", 1024);
 
 /* Alloc pool for struct variable_def with MAX_VAR_PARTS entries.  */
-static alloc_pool var_pool;
+
+static pool_allocator<variable_def> var_pool
+  ("variable_def pool", 64,
+   (MAX_VAR_PARTS - 1) * sizeof (((variable)NULL)->var_part[0]));
 
 /* Alloc pool for struct variable_def with a single var_part entry.  */
-static alloc_pool valvar_pool;
+static pool_allocator<variable_def> valvar_pool
+  ("small variable_def pool", 256);
 
 /* Alloc pool for struct location_chain_def.  */
-static alloc_pool loc_chain_pool;
+pool_allocator<location_chain_def> location_chain_def::pool
+  ("location_chain_def pool", 1024);
 
 /* Alloc pool for struct shared_hash_def.  */
-static alloc_pool shared_hash_pool;
+pool_allocator<shared_hash_def> shared_hash_def::pool
+  ("shared_hash_def pool", 256);
 
 /* Alloc pool for struct loc_exp_dep_s for NOT_ONEPART variables.  */
-static alloc_pool loc_exp_dep_pool;
+pool_allocator<loc_exp_dep> loc_exp_dep::pool ("loc_exp_dep pool", 64);
 
 /* Changed variables, notes will be emitted for them.  */
 static variable_table_type *changed_variables;
@@ -784,7 +850,7 @@ stack_adjust_offset_pre_post (rtx pattern, HOST_WIDE_INT *pre,
 	*post += INTVAL (XEXP (src, 1));
       else
 	*post -= INTVAL (XEXP (src, 1));
-      return;	
+      return;
     }
   HOST_WIDE_INT res[2] = { 0, 0 };
   for_each_inc_dec (pattern, stack_adjust_offset_pre_post_cb, res);
@@ -1374,7 +1440,7 @@ dv_onepart_p (decl_or_value dv)
 }
 
 /* Return the variable pool to be used for a dv of type ONEPART.  */
-static inline alloc_pool
+static inline pool_allocator <variable_def> &
 onepart_pool (onepart_enum_t onepart)
 {
   return onepart ? valvar_pool : var_pool;
@@ -1457,7 +1523,7 @@ variable_htab_free (void *elem)
       for (node = var->var_part[i].loc_chain; node; node = next)
 	{
 	  next = node->next;
-	  pool_free (loc_chain_pool, node);
+	  delete node;
 	}
       var->var_part[i].loc_chain = NULL;
     }
@@ -1472,7 +1538,7 @@ variable_htab_free (void *elem)
       if (var->onepart == ONEPART_DEXPR)
 	set_dv_changed (var->dv, true);
     }
-  pool_free (onepart_pool (var->onepart), var);
+  onepart_pool (var->onepart).remove (var);
 }
 
 /* Initialize the set (array) SET of attrs to empty lists.  */
@@ -1496,7 +1562,7 @@ attrs_list_clear (attrs *listp)
   for (list = *listp; list; list = next)
     {
       next = list->next;
-      pool_free (attrs_pool, list);
+      delete list;
     }
   *listp = NULL;
 }
@@ -1518,9 +1584,7 @@ static void
 attrs_list_insert (attrs *listp, decl_or_value dv,
 		   HOST_WIDE_INT offset, rtx loc)
 {
-  attrs list;
-
-  list = (attrs) pool_alloc (attrs_pool);
+  attrs list = new attrs_def;
   list->loc = loc;
   list->dv = dv;
   list->offset = offset;
@@ -1533,12 +1597,10 @@ attrs_list_insert (attrs *listp, decl_or_value dv,
 static void
 attrs_list_copy (attrs *dstp, attrs src)
 {
-  attrs n;
-
   attrs_list_clear (dstp);
   for (; src; src = src->next)
     {
-      n = (attrs) pool_alloc (attrs_pool);
+      attrs n = new attrs_def;
       n->loc = src->loc;
       n->dv = src->dv;
       n->offset = src->offset;
@@ -1612,7 +1674,7 @@ shared_var_p (variable var, shared_hash vars)
 static shared_hash
 shared_hash_unshare (shared_hash vars)
 {
-  shared_hash new_vars = (shared_hash) pool_alloc (shared_hash_pool);
+  shared_hash new_vars = new shared_hash_def;
   gcc_assert (vars->refcount > 1);
   new_vars->refcount = 1;
   new_vars->htab = new variable_table_type (vars->htab->elements () + 3);
@@ -1640,7 +1702,7 @@ shared_hash_destroy (shared_hash vars)
   if (--vars->refcount == 0)
     {
       delete vars->htab;
-      pool_free (shared_hash_pool, vars);
+      delete vars;
     }
 }
 
@@ -1738,7 +1800,7 @@ unshare_variable (dataflow_set *set, variable_def **slot, variable var,
   variable new_var;
   int i;
 
-  new_var = (variable) pool_alloc (onepart_pool (var->onepart));
+  new_var = onepart_pool (var->onepart).allocate ();
   new_var->dv = var->dv;
   new_var->refcount = 1;
   var->refcount--;
@@ -1771,7 +1833,7 @@ unshare_variable (dataflow_set *set, variable_def **slot, variable var,
 	{
 	  location_chain new_lc;
 
-	  new_lc = (location_chain) pool_alloc (loc_chain_pool);
+	  new_lc = new location_chain_def;
 	  new_lc->next = NULL;
 	  if (node->init > initialized)
 	    new_lc->init = node->init;
@@ -1936,7 +1998,7 @@ var_reg_delete_and_set (dataflow_set *set, rtx loc, bool modify,
       if (dv_as_opaque (node->dv) != decl || node->offset != offset)
 	{
 	  delete_variable_part (set, node->loc, node->dv, node->offset);
-	  pool_free (attrs_pool, node);
+	  delete node;
 	  *nextp = next;
 	}
       else
@@ -1977,7 +2039,7 @@ var_reg_delete (dataflow_set *set, rtx loc, bool clobber)
       if (clobber || !dv_onepart_p (node->dv))
 	{
 	  delete_variable_part (set, node->loc, node->dv, node->offset);
-	  pool_free (attrs_pool, node);
+	  delete node;
 	  *nextp = next;
 	}
       else
@@ -1997,7 +2059,7 @@ var_regno_delete (dataflow_set *set, int regno)
     {
       next = node->next;
       delete_variable_part (set, node->loc, node->dv, node->offset);
-      pool_free (attrs_pool, node);
+      delete node;
     }
   *reg = NULL;
 }
@@ -2047,7 +2109,7 @@ get_addr_from_global_cache (rtx const loc)
   rtx x;
 
   gcc_checking_assert (GET_CODE (loc) == VALUE);
-  
+
   bool existed;
   rtx *slot = &global_get_addr_cache->get_or_insert (loc, &existed);
   if (existed)
@@ -2085,14 +2147,14 @@ get_addr_from_local_cache (dataflow_set *set, rtx const loc)
   location_chain l;
 
   gcc_checking_assert (GET_CODE (loc) == VALUE);
-  
+
   bool existed;
   rtx *slot = &local_get_addr_cache->get_or_insert (loc, &existed);
   if (existed)
     return *slot;
 
   x = get_addr_from_global_cache (loc);
-  
+
   /* Tentative, avoiding infinite recursion.  */
   *slot = x;
 
@@ -2304,7 +2366,7 @@ drop_overlapping_mem_locs (variable_def **slot, overlapping_mems *coms)
 	      if (VAR_LOC_1PAUX (var))
 		VAR_LOC_FROM (var) = NULL;
 	    }
-	  pool_free (loc_chain_pool, loc);
+	  delete loc;
 	}
 
       if (!var->var_part[0].loc_chain)
@@ -2538,7 +2600,7 @@ val_reset (dataflow_set *set, decl_or_value dv)
   if (var->onepart == ONEPART_VALUE)
     {
       rtx x = dv_as_value (dv);
-      
+
       /* Relationships in the global cache don't change, so reset the
 	 local cache entry only.  */
       rtx *slot = local_get_addr_cache->get (x);
@@ -2807,7 +2869,7 @@ variable_union (variable src, dataflow_set *set)
 		  goto restart_onepart_unshared;
 		}
 
-	      *nodep = nnode = (location_chain) pool_alloc (loc_chain_pool);
+	      *nodep = nnode = new location_chain_def;
 	      nnode->loc = snode->loc;
 	      nnode->init = snode->init;
 	      if (!snode->set_src || MEM_P (snode->set_src))
@@ -2927,7 +2989,7 @@ variable_union (variable src, dataflow_set *set)
 		    location_chain new_node;
 
 		    /* Copy the location from SRC.  */
-		    new_node = (location_chain) pool_alloc (loc_chain_pool);
+		    new_node = new location_chain_def;
 		    new_node->loc = node->loc;
 		    new_node->init = node->init;
 		    if (!node->set_src || MEM_P (node->set_src))
@@ -2982,7 +3044,7 @@ variable_union (variable src, dataflow_set *set)
 		      location_chain new_node;
 
 		      /* Copy the location from SRC.  */
-		      new_node = (location_chain) pool_alloc (loc_chain_pool);
+		      new_node = new location_chain_def;
 		      new_node->loc = node->loc;
 		      new_node->init = node->init;
 		      if (!node->set_src || MEM_P (node->set_src))
@@ -3078,7 +3140,7 @@ variable_union (variable src, dataflow_set *set)
 	    {
 	      location_chain new_lc;
 
-	      new_lc = (location_chain) pool_alloc (loc_chain_pool);
+	      new_lc = new location_chain_def;
 	      new_lc->next = NULL;
 	      new_lc->init = node->init;
 	      if (!node->set_src || MEM_P (node->set_src))
@@ -3296,7 +3358,7 @@ insert_into_intersection (location_chain *nodep, rtx loc,
     else if (r > 0)
       break;
 
-  node = (location_chain) pool_alloc (loc_chain_pool);
+  node = new location_chain_def;
 
   node->loc = loc;
   node->set_src = NULL;
@@ -3817,7 +3879,7 @@ canonicalize_values_star (variable_def **slot, dataflow_set *set)
 		    if (dv_as_opaque (list->dv) == dv_as_opaque (cdv))
 		      {
 			*listp = list->next;
-			pool_free (attrs_pool, list);
+			delete list;
 			list = *listp;
 			break;
 		      }
@@ -3835,7 +3897,7 @@ canonicalize_values_star (variable_def **slot, dataflow_set *set)
 		    if (dv_as_opaque (list->dv) == dv_as_opaque (dv))
 		      {
 			*listp = list->next;
-			pool_free (attrs_pool, list);
+			delete list;
 			list = *listp;
 			break;
 		      }
@@ -4016,7 +4078,7 @@ variable_merge_over_cur (variable s1var, struct dfset_merge *dsm)
 	{
 	  if (node)
 	    {
-	      dvar = (variable) pool_alloc (onepart_pool (onepart));
+	      dvar = onepart_pool (onepart).allocate ();
 	      dvar->dv = dv;
 	      dvar->refcount = 1;
 	      dvar->n_var_parts = 1;
@@ -4152,8 +4214,7 @@ variable_merge_over_cur (variable s1var, struct dfset_merge *dsm)
 							  INSERT);
 		  if (!*slot)
 		    {
-		      variable var = (variable) pool_alloc (onepart_pool
-							    (ONEPART_VALUE));
+		      variable var = onepart_pool (ONEPART_VALUE).allocate ();
 		      var->dv = dv;
 		      var->refcount = 1;
 		      var->n_var_parts = 1;
@@ -4240,7 +4301,7 @@ dataflow_set_merge (dataflow_set *dst, dataflow_set *src2)
   dataflow_set_init (dst);
   dst->stack_adjust = cur.stack_adjust;
   shared_hash_destroy (dst->vars);
-  dst->vars = (shared_hash) pool_alloc (shared_hash_pool);
+  dst->vars = new shared_hash_def;
   dst->vars->refcount = 1;
   dst->vars->htab = new variable_table_type (MAX (src1_elems, src2_elems));
 
@@ -4366,7 +4427,7 @@ remove_duplicate_values (variable var)
 	    {
 	      /* Remove duplicate value node.  */
 	      *nodep = node->next;
-	      pool_free (loc_chain_pool, node);
+	      delete node;
 	      continue;
 	    }
 	  else
@@ -4519,7 +4580,7 @@ variable_post_merge_new_vals (variable_def **slot, dfset_post_merge *dfpm)
 		 to be added when we bring perm in.  */
 	      att = *curp;
 	      *curp = att->next;
-	      pool_free (attrs_pool, att);
+	      delete att;
 	    }
 	}
 
@@ -4779,7 +4840,7 @@ dataflow_set_preserve_mem_locs (variable_def **slot, dataflow_set *set)
 		}
 	    }
 	  *locp = loc->next;
-	  pool_free (loc_chain_pool, loc);
+	  delete loc;
 	}
 
       if (!var->var_part[0].loc_chain)
@@ -4851,7 +4912,7 @@ dataflow_set_remove_mem_locs (variable_def **slot, dataflow_set *set)
 	      if (VAR_LOC_1PAUX (var))
 		VAR_LOC_FROM (var) = NULL;
 	    }
-	  pool_free (loc_chain_pool, loc);
+	  delete loc;
 	}
 
       if (!var->var_part[0].loc_chain)
@@ -7302,7 +7363,7 @@ variable_from_dropped (decl_or_value dv, enum insert_option insert)
 
   gcc_checking_assert (onepart == ONEPART_VALUE || onepart == ONEPART_DEXPR);
 
-  empty_var = (variable) pool_alloc (onepart_pool (onepart));
+  empty_var = onepart_pool (onepart).allocate ();
   empty_var->dv = dv;
   empty_var->refcount = 1;
   empty_var->n_var_parts = 0;
@@ -7406,7 +7467,7 @@ variable_was_changed (variable var, dataflow_set *set)
 
 	  if (!empty_var)
 	    {
-	      empty_var = (variable) pool_alloc (onepart_pool (onepart));
+	      empty_var = onepart_pool (onepart).allocate ();
 	      empty_var->dv = var->dv;
 	      empty_var->refcount = 1;
 	      empty_var->n_var_parts = 0;
@@ -7530,7 +7591,7 @@ set_slot_part (dataflow_set *set, rtx loc, variable_def **slot,
   if (!var)
     {
       /* Create new variable information.  */
-      var = (variable) pool_alloc (onepart_pool (onepart));
+      var = onepart_pool (onepart).allocate ();
       var->dv = dv;
       var->refcount = 1;
       var->n_var_parts = 1;
@@ -7725,7 +7786,7 @@ set_slot_part (dataflow_set *set, rtx loc, variable_def **slot,
 		set_src = node->set_src;
 	      if (var->var_part[pos].cur_loc == node->loc)
 		var->var_part[pos].cur_loc = NULL;
-	      pool_free (loc_chain_pool, node);
+	      delete node;
 	      *nextp = next;
 	      break;
 	    }
@@ -7737,7 +7798,7 @@ set_slot_part (dataflow_set *set, rtx loc, variable_def **slot,
     }
 
   /* Add the location to the beginning.  */
-  node = (location_chain) pool_alloc (loc_chain_pool);
+  node = new location_chain_def;
   node->loc = loc;
   node->init = initialized;
   node->set_src = set_src;
@@ -7819,7 +7880,7 @@ clobber_slot_part (dataflow_set *set, rtx loc, variable_def **slot,
 		      if (dv_as_opaque (anode->dv) == dv_as_opaque (var->dv)
 			  && anode->offset == offset)
 			{
-			  pool_free (attrs_pool, anode);
+			  delete anode;
 			  *anextp = anext;
 			}
 		      else
@@ -7919,7 +7980,7 @@ delete_slot_part (dataflow_set *set, rtx loc, variable_def **slot,
 		  if (pos == 0 && var->onepart && VAR_LOC_1PAUX (var))
 		    VAR_LOC_FROM (var) = NULL;
 		}
-	      pool_free (loc_chain_pool, node);
+	      delete node;
 	      *nextp = next;
 	      break;
 	    }
@@ -8080,7 +8141,7 @@ loc_exp_insert_dep (variable var, rtx x, variable_table_type *vars)
     return;
 
   if (var->onepart == NOT_ONEPART)
-    led = (loc_exp_dep *) pool_alloc (loc_exp_dep_pool);
+    led = new loc_exp_dep;
   else
     {
       loc_exp_dep empty;
@@ -8888,7 +8949,7 @@ notify_dependents_of_changed_value (rtx val, variable_table_type *htab,
 	  break;
 
 	case NOT_ONEPART:
-	  pool_free (loc_exp_dep_pool, led);
+	  delete led;
 	  ivar = htab->find_with_hash (ldv, dv_htab_hash (ldv));
 	  if (ivar)
 	    {
@@ -9010,7 +9071,7 @@ emit_notes_for_differences_1 (variable_def **slot, variable_table_type *new_vars
 
       if (!empty_var)
 	{
-	  empty_var = (variable) pool_alloc (onepart_pool (old_var->onepart));
+	  empty_var = onepart_pool (old_var->onepart).allocate ();
 	  empty_var->dv = old_var->dv;
 	  empty_var->refcount = 0;
 	  empty_var->n_var_parts = 0;
@@ -9451,8 +9512,6 @@ vt_emit_notes (void)
   if (MAY_HAVE_DEBUG_INSNS)
     {
       dropped_values = new variable_table_type (cselib_get_next_uid () * 2);
-      loc_exp_dep_pool = create_alloc_pool ("loc_exp_dep pool",
-					    sizeof (loc_exp_dep), 64);
     }
 
   dataflow_set_init (&cur);
@@ -9871,18 +9930,7 @@ vt_initialize (void)
 
   alloc_aux_for_blocks (sizeof (struct variable_tracking_info_def));
 
-  attrs_pool = create_alloc_pool ("attrs_def pool",
-				  sizeof (struct attrs_def), 1024);
-  var_pool = create_alloc_pool ("variable_def pool",
-				sizeof (struct variable_def)
-				+ (MAX_VAR_PARTS - 1)
-				* sizeof (((variable)NULL)->var_part[0]), 64);
-  loc_chain_pool = create_alloc_pool ("location_chain_def pool",
-				      sizeof (struct location_chain_def),
-				      1024);
-  shared_hash_pool = create_alloc_pool ("shared_hash_def pool",
-					sizeof (struct shared_hash_def), 256);
-  empty_shared_hash = (shared_hash) pool_alloc (shared_hash_pool);
+  empty_shared_hash = new shared_hash_def;
   empty_shared_hash->refcount = 1;
   empty_shared_hash->htab = new variable_table_type (1);
   changed_variables = new variable_table_type (10);
@@ -9901,15 +9949,12 @@ vt_initialize (void)
     {
       cselib_init (CSELIB_RECORD_MEMORY | CSELIB_PRESERVE_CONSTANTS);
       scratch_regs = BITMAP_ALLOC (NULL);
-      valvar_pool = create_alloc_pool ("small variable_def pool",
-				       sizeof (struct variable_def), 256);
       preserved_values.create (256);
       global_get_addr_cache = new hash_map<rtx, rtx>;
     }
   else
     {
       scratch_regs = NULL;
-      valvar_pool = NULL;
       global_get_addr_cache = NULL;
     }
 
@@ -10243,20 +10288,18 @@ vt_finalize (void)
   empty_shared_hash->htab = NULL;
   delete changed_variables;
   changed_variables = NULL;
-  free_alloc_pool (attrs_pool);
-  free_alloc_pool (var_pool);
-  free_alloc_pool (loc_chain_pool);
-  free_alloc_pool (shared_hash_pool);
+  attrs_def::pool.release ();
+  var_pool.release ();
+  location_chain_def::pool.release ();
+  shared_hash_def::pool.release ();
 
   if (MAY_HAVE_DEBUG_INSNS)
     {
       if (global_get_addr_cache)
 	delete global_get_addr_cache;
       global_get_addr_cache = NULL;
-      if (loc_exp_dep_pool)
-	free_alloc_pool (loc_exp_dep_pool);
-      loc_exp_dep_pool = NULL;
-      free_alloc_pool (valvar_pool);
+      loc_exp_dep::pool.release ();
+      valvar_pool.release ();
       preserved_values.release ();
       cselib_finish ();
       BITMAP_FREE (scratch_regs);
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 09/35] Change use to type-based pool allocator in c-format.c.
  2015-05-27 18:01   ` Jeff Law
@ 2015-05-29 13:35     ` Martin Liška
  0 siblings, 0 replies; 108+ messages in thread
From: Martin Liška @ 2015-05-29 13:35 UTC (permalink / raw)
  To: Jeff Law, gcc-patches

[-- Attachment #1: Type: text/plain, Size: 294 bytes --]

On 05/27/2015 07:55 PM, Jeff Law wrote:
> On 05/27/2015 07:56 AM, mliska wrote:
>> gcc/c-family/ChangeLog:
>>
>> 2015-04-30  Martin Liska  <mliska@suse.cz>
>>
>>     * c-format.c (check_format_arg):Use new type-based pool allocator.
>>     (check_format_info_main) Likewise.
> OK.
> jeff
>

v2

[-- Attachment #2: 0008-Change-use-to-type-based-pool-allocator-in-c-format..patch --]
[-- Type: text/x-patch, Size: 4382 bytes --]

From c9d58f9d3e3b16b9ed588a1009efbaf1cb4ad72e Mon Sep 17 00:00:00 2001
From: mliska <mliska@suse.cz>
Date: Wed, 27 May 2015 15:56:47 +0200
Subject: [PATCH 08/32] Change use to type-based pool allocator in c-format.c.

gcc/c-family/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* c-format.c (check_format_arg): Use new type-based pool allocator.
	(check_format_info_main): Likewise.
---
 gcc/c-family/c-format.c | 26 ++++++++++++--------------
 1 file changed, 12 insertions(+), 14 deletions(-)

diff --git a/gcc/c-family/c-format.c b/gcc/c-family/c-format.c
index 145bbfd..a6c2500 100644
--- a/gcc/c-family/c-format.c
+++ b/gcc/c-family/c-format.c
@@ -191,7 +191,7 @@ handle_format_arg_attribute (tree *node, tree ARG_UNUSED (name),
   if (prototype_p (type))
     {
       /* The format arg can be any string reference valid for the language and
-         target.  We cannot be more specific in this case.  */
+	target.  We cannot be more specific in this case.  */
       if (!check_format_string (type, format_num, flags, no_add_attrs, -1))
 	return NULL_TREE;
     }
@@ -1031,7 +1031,8 @@ static void check_format_arg (void *, tree, unsigned HOST_WIDE_INT);
 static void check_format_info_main (format_check_results *,
 				    function_format_info *,
 				    const char *, int, tree,
-                                    unsigned HOST_WIDE_INT, alloc_pool);
+				    unsigned HOST_WIDE_INT,
+				    pool_allocator<format_wanted_type> &);
 
 static void init_dollar_format_checking (int, tree);
 static int maybe_read_dollar_number (const char **, int,
@@ -1518,7 +1519,6 @@ check_format_arg (void *ctx, tree format_tree,
   const char *format_chars;
   tree array_size = 0;
   tree array_init;
-  alloc_pool fwt_pool;
 
   if (TREE_CODE (format_tree) == VAR_DECL)
     {
@@ -1587,7 +1587,7 @@ check_format_arg (void *ctx, tree format_tree,
     {
       bool objc_str = (info->format_type == gcc_objc_string_format_type);
       /* We cannot examine this string here - but we can check that it is
-         a valid type.  */
+	 a valid type.  */
       if (TREE_CODE (format_tree) != CONST_DECL
 	  || !((objc_str && objc_string_ref_type_p (TREE_TYPE (format_tree)))
 		|| (*targetcm.string_object_ref_type_p) 
@@ -1605,9 +1605,9 @@ check_format_arg (void *ctx, tree format_tree,
 	  ++arg_num;
 	}
       /* So, we have a valid literal string object and one or more params.
-         We need to use an external helper to parse the string into format
-         info.  For Objective-C variants we provide the resource within the
-         objc tree, for target variants, via a hook.  */
+	 We need to use an external helper to parse the string into format
+	 info.  For Objective-C variants we provide the resource within the
+	 objc tree, for target variants, via a hook.  */
       if (objc_str)
 	objc_check_format_arg (format_tree, params);
       else if (targetcm.check_string_object_format_arg)
@@ -1694,11 +1694,9 @@ check_format_arg (void *ctx, tree format_tree,
      will decrement it if it finds there are extra arguments, but this way
      need not adjust it for every return.  */
   res->number_other++;
-  fwt_pool = create_alloc_pool ("format_wanted_type pool",
-                                sizeof (format_wanted_type), 10);
+  pool_allocator <format_wanted_type> fwt_pool ("format_wanted_type pool", 10);
   check_format_info_main (res, info, format_chars, format_length,
-                          params, arg_num, fwt_pool);
-  free_alloc_pool (fwt_pool);
+			  params, arg_num, fwt_pool);
 }
 
 
@@ -1713,7 +1711,8 @@ static void
 check_format_info_main (format_check_results *res,
 			function_format_info *info, const char *format_chars,
 			int format_length, tree params,
-                        unsigned HOST_WIDE_INT arg_num, alloc_pool fwt_pool)
+			unsigned HOST_WIDE_INT arg_num,
+			pool_allocator<format_wanted_type> &fwt_pool)
 {
   const char *orig_format_chars = format_chars;
   tree first_fillin_param = params;
@@ -2424,8 +2423,7 @@ check_format_info_main (format_check_results *res,
 	      fci = fci->chain;
 	      if (fci)
 		{
-                  wanted_type_ptr = (format_wanted_type *)
-                      pool_alloc (fwt_pool);
+		  wanted_type_ptr = fwt_pool.allocate ();
 		  arg_num++;
 		  wanted_type = *fci->types[length_chars_val].type;
 		  wanted_type_name = fci->types[length_chars_val].name;
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 11/35] Change use to type-based pool allocator in sh.c.
  2015-05-27 18:03   ` Jeff Law
@ 2015-05-29 13:37     ` Martin Liška
  0 siblings, 0 replies; 108+ messages in thread
From: Martin Liška @ 2015-05-29 13:37 UTC (permalink / raw)
  To: Jeff Law, gcc-patches

[-- Attachment #1: Type: text/plain, Size: 271 bytes --]

On 05/27/2015 07:59 PM, Jeff Law wrote:
> On 05/27/2015 07:56 AM, mliska wrote:
>> gcc/ChangeLog:
>>
>> 2015-04-30  Martin Liska  <mliska@suse.cz>
>>
>>     * config/sh/sh.c (add_constant):Use new type-based pool allocator.
>>     (sh_reorg) Likewise.
> OK.
> jeff
>

v2

[-- Attachment #2: 0010-Change-use-to-type-based-pool-allocator-in-sh.c.patch --]
[-- Type: text/x-patch, Size: 2909 bytes --]

From 68f0b72993d882d5dfe4096806f2bb78c87a37c4 Mon Sep 17 00:00:00 2001
From: mliska <mliska@suse.cz>
Date: Wed, 27 May 2015 15:56:47 +0200
Subject: [PATCH 10/32] Change use to type-based pool allocator in sh.c.

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* config/sh/sh.c (add_constant): Use new type-based pool allocator.
	(sh_reorg): Likewise.
---
 gcc/config/sh/sh.c | 30 ++++++++++++++++++++++--------
 1 file changed, 22 insertions(+), 8 deletions(-)

diff --git a/gcc/config/sh/sh.c b/gcc/config/sh/sh.c
index bc1ce24..285aa18 100644
--- a/gcc/config/sh/sh.c
+++ b/gcc/config/sh/sh.c
@@ -4648,14 +4648,31 @@ gen_datalabel_ref (rtx sym)
 }
 
 \f
-static alloc_pool label_ref_list_pool;
-
 typedef struct label_ref_list_d
 {
   rtx_code_label *label;
   struct label_ref_list_d *next;
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove ((label_ref_list_d *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<label_ref_list_d> pool;
+
 } *label_ref_list_t;
 
+pool_allocator<label_ref_list_d> label_ref_list_d::pool
+  ("label references list", 30);
+
 /* The SH cannot load a large constant into a register, constants have to
    come from a pc relative load.  The reference of a pc relative load
    instruction must be less than 1k in front of the instruction.  This
@@ -4775,7 +4792,7 @@ add_constant (rtx x, machine_mode mode, rtx last_value)
 		}
 	      if (lab && pool_window_label)
 		{
-		  newref = (label_ref_list_t) pool_alloc (label_ref_list_pool);
+		  newref = new label_ref_list_d;
 		  newref->label = pool_window_label;
 		  ref = pool_vector[pool_window_last].wend;
 		  newref->next = ref;
@@ -4804,7 +4821,7 @@ add_constant (rtx x, machine_mode mode, rtx last_value)
   pool_vector[pool_size].part_of_sequence_p = (lab == 0);
   if (lab && pool_window_label)
     {
-      newref = (label_ref_list_t) pool_alloc (label_ref_list_pool);
+      newref = new label_ref_list_d;
       newref->label = pool_window_label;
       ref = pool_vector[pool_window_last].wend;
       newref->next = ref;
@@ -6359,9 +6376,6 @@ sh_reorg (void)
 
   /* Scan the function looking for move instructions which have to be
      changed to pc-relative loads and insert the literal tables.  */
-  label_ref_list_pool = create_alloc_pool ("label references list",
-					   sizeof (struct label_ref_list_d),
-					   30);
   mdep_reorg_phase = SH_FIXUP_PCLOAD;
   for (insn = first, num_mova = 0; insn; insn = NEXT_INSN (insn))
     {
@@ -6553,7 +6567,7 @@ sh_reorg (void)
 	  insn = barrier;
 	}
     }
-  free_alloc_pool (label_ref_list_pool);
+  label_ref_list_d::pool.release ();
   for (insn = first; insn; insn = NEXT_INSN (insn))
     PUT_MODE (insn, VOIDmode);
 
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 13/35] Change use to type-based pool allocator in df-problems.c.
  2015-05-27 18:05   ` Jeff Law
@ 2015-05-29 13:37     ` Martin Liška
  0 siblings, 0 replies; 108+ messages in thread
From: Martin Liška @ 2015-05-29 13:37 UTC (permalink / raw)
  To: Jeff Law, gcc-patches

[-- Attachment #1: Type: text/plain, Size: 611 bytes --]

On 05/27/2015 08:01 PM, Jeff Law wrote:
> On 05/27/2015 07:56 AM, mliska wrote:
>> gcc/ChangeLog:
>>
>> 2015-04-30  Martin Liska  <mliska@suse.cz>
>>
>>     * df-problems.c (df_chain_create):Use new type-based pool allocator.
>>     (df_chain_unlink_1) Likewise.
>>     (df_chain_unlink) Likewise.
>>     (df_chain_remove_problem) Likewise.
>>     (df_chain_alloc) Likewise.
>>     (df_chain_free) Likewise.
>>     * df.h (struct dataflow) Likewise.
> OK.
>
> As Jakub noted, please double-check your ChangeLogs for proper formatting before committing.  There's consistently nits to fix in them.
>
> Jeff
>

v2

[-- Attachment #2: 0012-Change-use-to-type-based-pool-allocator-in-df-proble.patch --]
[-- Type: text/x-patch, Size: 3084 bytes --]

From 66961142fc65ce20b326bb027c590a146736d2d3 Mon Sep 17 00:00:00 2001
From: mliska <mliska@suse.cz>
Date: Wed, 27 May 2015 15:56:48 +0200
Subject: [PATCH 12/32] Change use to type-based pool allocator in
 df-problems.c.

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* df-problems.c (df_chain_create): Use new type-based pool allocator.
	(df_chain_unlink_1): Likewise.
	(df_chain_unlink): Likewise.
	(df_chain_remove_problem): Likewise.
	(df_chain_alloc): Likewise.
	(df_chain_free): Likewise.
	* df.h (struct dataflow): Likewise.
---
 gcc/df-problems.c | 14 +++++++-------
 gcc/df.h          |  2 +-
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/gcc/df-problems.c b/gcc/df-problems.c
index ff08abd..7700157 100644
--- a/gcc/df-problems.c
+++ b/gcc/df-problems.c
@@ -1879,7 +1879,7 @@ struct df_link *
 df_chain_create (df_ref src, df_ref dst)
 {
   struct df_link *head = DF_REF_CHAIN (src);
-  struct df_link *link = (struct df_link *) pool_alloc (df_chain->block_pool);
+  struct df_link *link = df_chain->block_pool->allocate ();
 
   DF_REF_CHAIN (src) = link;
   link->next = head;
@@ -1904,7 +1904,7 @@ df_chain_unlink_1 (df_ref ref, df_ref target)
 	    prev->next = chain->next;
 	  else
 	    DF_REF_CHAIN (ref) = chain->next;
-	  pool_free (df_chain->block_pool, chain);
+	  df_chain->block_pool->remove (chain);
 	  return;
 	}
       prev = chain;
@@ -1924,7 +1924,7 @@ df_chain_unlink (df_ref ref)
       struct df_link *next = chain->next;
       /* Delete the other side if it exists.  */
       df_chain_unlink_1 (chain->ref, ref);
-      pool_free (df_chain->block_pool, chain);
+      df_chain->block_pool->remove (chain);
       chain = next;
     }
   DF_REF_CHAIN (ref) = NULL;
@@ -1956,7 +1956,7 @@ df_chain_remove_problem (void)
 
   /* Wholesale destruction of the old chains.  */
   if (df_chain->block_pool)
-    free_alloc_pool (df_chain->block_pool);
+    delete df_chain->block_pool;
 
   EXECUTE_IF_SET_IN_BITMAP (df_chain->out_of_date_transfer_functions, 0, bb_index, bi)
     {
@@ -2010,8 +2010,8 @@ static void
 df_chain_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
 {
   df_chain_remove_problem ();
-  df_chain->block_pool = create_alloc_pool ("df_chain_block pool",
-					 sizeof (struct df_link), 50);
+  df_chain->block_pool = new pool_allocator<df_link> ("df_chain_block pool",
+						      50);
   df_chain->optional_p = true;
 }
 
@@ -2146,7 +2146,7 @@ df_chain_finalize (bitmap all_blocks)
 static void
 df_chain_free (void)
 {
-  free_alloc_pool (df_chain->block_pool);
+  delete df_chain->block_pool;
   BITMAP_FREE (df_chain->out_of_date_transfer_functions);
   free (df_chain);
 }
diff --git a/gcc/df.h b/gcc/df.h
index 7e233667..8a5b21f 100644
--- a/gcc/df.h
+++ b/gcc/df.h
@@ -305,7 +305,7 @@ struct dataflow
   unsigned int block_info_size;
 
   /* The pool to allocate the block_info from. */
-  alloc_pool block_pool;
+  pool_allocator<df_link> *block_pool;
 
   /* The lr and live problems have their transfer functions recomputed
      only if necessary.  This is possible for them because, the
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 12/35] Change use to type-based pool allocator in cselib.c.
  2015-05-27 14:00 ` [PATCH 12/35] Change use to type-based pool allocator in cselib.c mliska
@ 2015-05-29 13:38   ` Martin Liška
  0 siblings, 0 replies; 108+ messages in thread
From: Martin Liška @ 2015-05-29 13:38 UTC (permalink / raw)
  To: gcc-patches

[-- Attachment #1: Type: text/plain, Size: 10601 bytes --]

On 05/27/2015 03:56 PM, mliska wrote:
> gcc/ChangeLog:
>
> 2015-04-30  Martin Liska  <mliska@suse.cz>
>
> 	* cselib.c (new_elt_list): Use new type-based pool allocator.
> 	(new_elt_loc_list): Likewise.
> 	(unchain_one_elt_list): Likewise.
> 	(unchain_one_elt_loc_list): Likewise.
> 	(unchain_one_value): Likewise.
> 	(new_cselib_val): Likewise.
> 	(cselib_init): Likewise.
> 	(cselib_finish): Likewise.
> ---
>   gcc/alias.c          |  1 +
>   gcc/cfgcleanup.c     |  1 +
>   gcc/cprop.c          |  1 +
>   gcc/cselib.c         | 63 ++++++++++++++++++++++++++++++++--------------------
>   gcc/cselib.h         | 33 ++++++++++++++++++++++++++-
>   gcc/gcse.c           |  1 +
>   gcc/postreload.c     |  1 +
>   gcc/print-rtl.c      |  1 +
>   gcc/sel-sched-dump.c |  1 +
>   9 files changed, 78 insertions(+), 25 deletions(-)
>
> diff --git a/gcc/alias.c b/gcc/alias.c
> index aa7dc21..bc8e2b4 100644
> --- a/gcc/alias.c
> +++ b/gcc/alias.c
> @@ -53,6 +53,7 @@ along with GCC; see the file COPYING3.  If not see
>   #include "tm_p.h"
>   #include "regs.h"
>   #include "diagnostic-core.h"
> +#include "alloc-pool.h"
>   #include "cselib.h"
>   #include "hash-map.h"
>   #include "langhooks.h"
> diff --git a/gcc/cfgcleanup.c b/gcc/cfgcleanup.c
> index aff64ef..fc2ed31 100644
> --- a/gcc/cfgcleanup.c
> +++ b/gcc/cfgcleanup.c
> @@ -50,6 +50,7 @@ along with GCC; see the file COPYING3.  If not see
>   #include "flags.h"
>   #include "recog.h"
>   #include "diagnostic-core.h"
> +#include "alloc-pool.h"
>   #include "cselib.h"
>   #include "params.h"
>   #include "tm_p.h"
> diff --git a/gcc/cprop.c b/gcc/cprop.c
> index 57c44ef..41ca201 100644
> --- a/gcc/cprop.c
> +++ b/gcc/cprop.c
> @@ -63,6 +63,7 @@ along with GCC; see the file COPYING3.  If not see
>   #include "expr.h"
>   #include "except.h"
>   #include "params.h"
> +#include "alloc-pool.h"
>   #include "cselib.h"
>   #include "intl.h"
>   #include "obstack.h"
> diff --git a/gcc/cselib.c b/gcc/cselib.c
> index 7a50f50..8de85bc 100644
> --- a/gcc/cselib.c
> +++ b/gcc/cselib.c
> @@ -46,6 +46,7 @@ along with GCC; see the file COPYING3.  If not see
>   #include "ggc.h"
>   #include "hash-table.h"
>   #include "dumpfile.h"
> +#include "alloc-pool.h"
>   #include "cselib.h"
>   #include "predict.h"
>   #include "basic-block.h"
> @@ -56,9 +57,25 @@ along with GCC; see the file COPYING3.  If not see
>   #include "bitmap.h"
>
>   /* A list of cselib_val structures.  */
> -struct elt_list {
> -    struct elt_list *next;
> -    cselib_val *elt;
> +struct elt_list
> +{
> +  struct elt_list *next;
> +  cselib_val *elt;
> +
> +  /* Pool allocation new operator.  */
> +  inline void *operator new (size_t)
> +  {
> +    return pool.allocate ();
> +  }
> +
> +  /* Delete operator utilizing pool allocation.  */
> +  inline void operator delete (void *ptr)
> +  {
> +    pool.remove((elt_list *) ptr);
> +  }
> +
> +  /* Memory allocation pool.  */
> +  static pool_allocator<elt_list> pool;
>   };
>
>   static bool cselib_record_memory;
> @@ -260,7 +277,13 @@ static unsigned int cfa_base_preserved_regno = INVALID_REGNUM;
>      May or may not contain the useless values - the list is compacted
>      each time memory is invalidated.  */
>   static cselib_val *first_containing_mem = &dummy_val;
> -static alloc_pool elt_loc_list_pool, elt_list_pool, cselib_val_pool, value_pool;
> +
> +pool_allocator<elt_list> elt_list::pool ("elt_list", 10);
> +pool_allocator<elt_loc_list> elt_loc_list::pool ("elt_loc_list", 10);
> +pool_allocator<cselib_val> cselib_val::pool ("cselib_val_list", 10);
> +
> +static pool_allocator<rtx_def> value_pool ("value", 100, RTX_CODE_SIZE (VALUE),
> +					   true);
>
>   /* If nonnull, cselib will call this function before freeing useless
>      VALUEs.  A VALUE is deemed useless if its "locs" field is null.  */
> @@ -288,8 +311,7 @@ void (*cselib_record_sets_hook) (rtx_insn *insn, struct cselib_set *sets,
>   static inline struct elt_list *
>   new_elt_list (struct elt_list *next, cselib_val *elt)
>   {
> -  struct elt_list *el;
> -  el = (struct elt_list *) pool_alloc (elt_list_pool);
> +  elt_list *el = new elt_list ();
>     el->next = next;
>     el->elt = elt;
>     return el;
> @@ -373,14 +395,14 @@ new_elt_loc_list (cselib_val *val, rtx loc)
>   	}
>
>         /* Chain LOC back to VAL.  */
> -      el = (struct elt_loc_list *) pool_alloc (elt_loc_list_pool);
> +      el = new elt_loc_list;
>         el->loc = val->val_rtx;
>         el->setting_insn = cselib_current_insn;
>         el->next = NULL;
>         CSELIB_VAL_PTR (loc)->locs = el;
>       }
>
> -  el = (struct elt_loc_list *) pool_alloc (elt_loc_list_pool);
> +  el = new elt_loc_list;
>     el->loc = loc;
>     el->setting_insn = cselib_current_insn;
>     el->next = next;
> @@ -420,7 +442,7 @@ unchain_one_elt_list (struct elt_list **pl)
>     struct elt_list *l = *pl;
>
>     *pl = l->next;
> -  pool_free (elt_list_pool, l);
> +  delete l;
>   }
>
>   /* Likewise for elt_loc_lists.  */
> @@ -431,7 +453,7 @@ unchain_one_elt_loc_list (struct elt_loc_list **pl)
>     struct elt_loc_list *l = *pl;
>
>     *pl = l->next;
> -  pool_free (elt_loc_list_pool, l);
> +  delete l;
>   }
>
>   /* Likewise for cselib_vals.  This also frees the addr_list associated with
> @@ -443,7 +465,7 @@ unchain_one_value (cselib_val *v)
>     while (v->addr_list)
>       unchain_one_elt_list (&v->addr_list);
>
> -  pool_free (cselib_val_pool, v);
> +  delete v;
>   }
>
>   /* Remove all entries from the hash table.  Also used during
> @@ -1306,7 +1328,7 @@ cselib_hash_rtx (rtx x, int create, machine_mode memmode)
>   static inline cselib_val *
>   new_cselib_val (unsigned int hash, machine_mode mode, rtx x)
>   {
> -  cselib_val *e = (cselib_val *) pool_alloc (cselib_val_pool);
> +  cselib_val *e = new cselib_val;
>
>     gcc_assert (hash);
>     gcc_assert (next_uid);
> @@ -1318,7 +1340,7 @@ new_cselib_val (unsigned int hash, machine_mode mode, rtx x)
>        precisely when we can have VALUE RTXen (when cselib is active)
>        so we don't need to put them in garbage collected memory.
>        ??? Why should a VALUE be an RTX in the first place?  */
> -  e->val_rtx = (rtx) pool_alloc (value_pool);
> +  e->val_rtx = value_pool.allocate ();
>     memset (e->val_rtx, 0, RTX_HDR_SIZE);
>     PUT_CODE (e->val_rtx, VALUE);
>     PUT_MODE (e->val_rtx, mode);
> @@ -2729,13 +2751,6 @@ cselib_process_insn (rtx_insn *insn)
>   void
>   cselib_init (int record_what)
>   {
> -  elt_list_pool = create_alloc_pool ("elt_list",
> -				     sizeof (struct elt_list), 10);
> -  elt_loc_list_pool = create_alloc_pool ("elt_loc_list",
> -				         sizeof (struct elt_loc_list), 10);
> -  cselib_val_pool = create_alloc_pool ("cselib_val_list",
> -				       sizeof (cselib_val), 10);
> -  value_pool = create_alloc_pool ("value", RTX_CODE_SIZE (VALUE), 100);
>     cselib_record_memory = record_what & CSELIB_RECORD_MEMORY;
>     cselib_preserve_constants = record_what & CSELIB_PRESERVE_CONSTANTS;
>     cselib_any_perm_equivs = false;
> @@ -2777,10 +2792,10 @@ cselib_finish (void)
>     cselib_any_perm_equivs = false;
>     cfa_base_preserved_val = NULL;
>     cfa_base_preserved_regno = INVALID_REGNUM;
> -  free_alloc_pool (elt_list_pool);
> -  free_alloc_pool (elt_loc_list_pool);
> -  free_alloc_pool (cselib_val_pool);
> -  free_alloc_pool (value_pool);
> +  elt_list::pool.release ();
> +  elt_loc_list::pool.release ();
> +  cselib_val::pool.release ();
> +  value_pool.release ();
>     cselib_clear_table ();
>     delete cselib_hash_table;
>     cselib_hash_table = NULL;
> diff --git a/gcc/cselib.h b/gcc/cselib.h
> index 082bf54..5fe9076 100644
> --- a/gcc/cselib.h
> +++ b/gcc/cselib.h
> @@ -21,7 +21,8 @@ along with GCC; see the file COPYING3.  If not see
>   #define GCC_CSELIB_H
>
>   /* Describe a value.  */
> -struct cselib_val {
> +struct cselib_val
> +{
>     /* The hash value.  */
>     unsigned int hash;
>
> @@ -40,6 +41,21 @@ struct cselib_val {
>     struct elt_list *addr_list;
>
>     struct cselib_val *next_containing_mem;
> +
> +  /* Pool allocation new operator.  */
> +  inline void *operator new (size_t)
> +  {
> +    return pool.allocate ();
> +  }
> +
> +  /* Delete operator utilizing pool allocation.  */
> +  inline void operator delete (void *ptr)
> +  {
> +    pool.remove((cselib_val *) ptr);
> +  }
> +
> +  /* Memory allocation pool.  */
> +  static pool_allocator<cselib_val> pool;
>   };
>
>   /* A list of rtl expressions that hold the same value.  */
> @@ -50,6 +66,21 @@ struct elt_loc_list {
>     rtx loc;
>     /* The insn that made the equivalence.  */
>     rtx_insn *setting_insn;
> +
> +  /* Pool allocation new operator.  */
> +  inline void *operator new (size_t)
> +  {
> +    return pool.allocate ();
> +  }
> +
> +  /* Delete operator utilizing pool allocation.  */
> +  inline void operator delete (void *ptr)
> +  {
> +    pool.remove((elt_loc_list *) ptr);
> +  }
> +
> +  /* Memory allocation pool.  */
> +  static pool_allocator<elt_loc_list> pool;
>   };
>
>   /* Describe a single set that is part of an insn.  */
> diff --git a/gcc/gcse.c b/gcc/gcse.c
> index efbe4f4..28476fb 100644
> --- a/gcc/gcse.c
> +++ b/gcc/gcse.c
> @@ -180,6 +180,7 @@ along with GCC; see the file COPYING3.  If not see
>   #include "except.h"
>   #include "ggc.h"
>   #include "params.h"
> +#include "alloc-pool.h"
>   #include "cselib.h"
>   #include "intl.h"
>   #include "obstack.h"
> diff --git a/gcc/postreload.c b/gcc/postreload.c
> index 4d3c26f..06c4973 100644
> --- a/gcc/postreload.c
> +++ b/gcc/postreload.c
> @@ -63,6 +63,7 @@ along with GCC; see the file COPYING3.  If not see
>   #include "basic-block.h"
>   #include "reload.h"
>   #include "recog.h"
> +#include "alloc-pool.h"
>   #include "cselib.h"
>   #include "diagnostic-core.h"
>   #include "except.h"
> diff --git a/gcc/print-rtl.c b/gcc/print-rtl.c
> index 882f808..5e8838a 100644
> --- a/gcc/print-rtl.c
> +++ b/gcc/print-rtl.c
> @@ -52,6 +52,7 @@ along with GCC; see the file COPYING3.  If not see
>   #include "basic-block.h"
>   #include "diagnostic.h"
>   #include "tree-pretty-print.h"
> +#include "alloc-pool.h"
>   #include "cselib.h"
>   #include "dumpfile.h"	/* for dump_flags */
>   #include "dwarf2out.h"
> diff --git a/gcc/sel-sched-dump.c b/gcc/sel-sched-dump.c
> index 6f174a5..943fdd0 100644
> --- a/gcc/sel-sched-dump.c
> +++ b/gcc/sel-sched-dump.c
> @@ -40,6 +40,7 @@ along with GCC; see the file COPYING3.  If not see
>   #include "insn-config.h"
>   #include "insn-attr.h"
>   #include "params.h"
> +#include "alloc-pool.h"
>   #include "cselib.h"
>   #include "target.h"
>
>

v2

[-- Attachment #2: 0011-Change-use-to-type-based-pool-allocator-in-cselib.c.patch --]
[-- Type: text/x-patch, Size: 10050 bytes --]

From ceb3d9a77a044a194f99f417ec2f909b3f7e1a64 Mon Sep 17 00:00:00 2001
From: mliska <mliska@suse.cz>
Date: Wed, 27 May 2015 15:56:48 +0200
Subject: [PATCH 11/32] Change use to type-based pool allocator in cselib.c.

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* cselib.c (new_elt_list): Use new type-based pool allocator.
	(new_elt_loc_list): Likewise.
	(unchain_one_elt_list): Likewise.
	(unchain_one_elt_loc_list): Likewise.
	(unchain_one_value): Likewise.
	(new_cselib_val): Likewise.
	(cselib_init): Likewise.
	(cselib_finish): Likewise.
---
 gcc/alias.c          |  1 +
 gcc/cfgcleanup.c     |  1 +
 gcc/cprop.c          |  1 +
 gcc/cselib.c         | 63 ++++++++++++++++++++++++++++++++--------------------
 gcc/cselib.h         | 33 ++++++++++++++++++++++++++-
 gcc/gcse.c           |  1 +
 gcc/postreload.c     |  1 +
 gcc/print-rtl.c      |  1 +
 gcc/sel-sched-dump.c |  1 +
 9 files changed, 78 insertions(+), 25 deletions(-)

diff --git a/gcc/alias.c b/gcc/alias.c
index aa7dc21..bc8e2b4 100644
--- a/gcc/alias.c
+++ b/gcc/alias.c
@@ -53,6 +53,7 @@ along with GCC; see the file COPYING3.  If not see
 #include "tm_p.h"
 #include "regs.h"
 #include "diagnostic-core.h"
+#include "alloc-pool.h"
 #include "cselib.h"
 #include "hash-map.h"
 #include "langhooks.h"
diff --git a/gcc/cfgcleanup.c b/gcc/cfgcleanup.c
index aff64ef..fc2ed31 100644
--- a/gcc/cfgcleanup.c
+++ b/gcc/cfgcleanup.c
@@ -50,6 +50,7 @@ along with GCC; see the file COPYING3.  If not see
 #include "flags.h"
 #include "recog.h"
 #include "diagnostic-core.h"
+#include "alloc-pool.h"
 #include "cselib.h"
 #include "params.h"
 #include "tm_p.h"
diff --git a/gcc/cprop.c b/gcc/cprop.c
index 57c44ef..41ca201 100644
--- a/gcc/cprop.c
+++ b/gcc/cprop.c
@@ -63,6 +63,7 @@ along with GCC; see the file COPYING3.  If not see
 #include "expr.h"
 #include "except.h"
 #include "params.h"
+#include "alloc-pool.h"
 #include "cselib.h"
 #include "intl.h"
 #include "obstack.h"
diff --git a/gcc/cselib.c b/gcc/cselib.c
index 7a50f50..624d0a9 100644
--- a/gcc/cselib.c
+++ b/gcc/cselib.c
@@ -46,6 +46,7 @@ along with GCC; see the file COPYING3.  If not see
 #include "ggc.h"
 #include "hash-table.h"
 #include "dumpfile.h"
+#include "alloc-pool.h"
 #include "cselib.h"
 #include "predict.h"
 #include "basic-block.h"
@@ -56,9 +57,25 @@ along with GCC; see the file COPYING3.  If not see
 #include "bitmap.h"
 
 /* A list of cselib_val structures.  */
-struct elt_list {
-    struct elt_list *next;
-    cselib_val *elt;
+struct elt_list
+{
+  struct elt_list *next;
+  cselib_val *elt;
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove ((elt_list *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<elt_list> pool;
 };
 
 static bool cselib_record_memory;
@@ -260,7 +277,13 @@ static unsigned int cfa_base_preserved_regno = INVALID_REGNUM;
    May or may not contain the useless values - the list is compacted
    each time memory is invalidated.  */
 static cselib_val *first_containing_mem = &dummy_val;
-static alloc_pool elt_loc_list_pool, elt_list_pool, cselib_val_pool, value_pool;
+
+pool_allocator<elt_list> elt_list::pool ("elt_list", 10);
+pool_allocator<elt_loc_list> elt_loc_list::pool ("elt_loc_list", 10);
+pool_allocator<cselib_val> cselib_val::pool ("cselib_val_list", 10);
+
+static pool_allocator<rtx_def> value_pool ("value", 100, RTX_CODE_SIZE (VALUE),
+					   true);
 
 /* If nonnull, cselib will call this function before freeing useless
    VALUEs.  A VALUE is deemed useless if its "locs" field is null.  */
@@ -288,8 +311,7 @@ void (*cselib_record_sets_hook) (rtx_insn *insn, struct cselib_set *sets,
 static inline struct elt_list *
 new_elt_list (struct elt_list *next, cselib_val *elt)
 {
-  struct elt_list *el;
-  el = (struct elt_list *) pool_alloc (elt_list_pool);
+  elt_list *el = new elt_list ();
   el->next = next;
   el->elt = elt;
   return el;
@@ -373,14 +395,14 @@ new_elt_loc_list (cselib_val *val, rtx loc)
 	}
 
       /* Chain LOC back to VAL.  */
-      el = (struct elt_loc_list *) pool_alloc (elt_loc_list_pool);
+      el = new elt_loc_list;
       el->loc = val->val_rtx;
       el->setting_insn = cselib_current_insn;
       el->next = NULL;
       CSELIB_VAL_PTR (loc)->locs = el;
     }
 
-  el = (struct elt_loc_list *) pool_alloc (elt_loc_list_pool);
+  el = new elt_loc_list;
   el->loc = loc;
   el->setting_insn = cselib_current_insn;
   el->next = next;
@@ -420,7 +442,7 @@ unchain_one_elt_list (struct elt_list **pl)
   struct elt_list *l = *pl;
 
   *pl = l->next;
-  pool_free (elt_list_pool, l);
+  delete l;
 }
 
 /* Likewise for elt_loc_lists.  */
@@ -431,7 +453,7 @@ unchain_one_elt_loc_list (struct elt_loc_list **pl)
   struct elt_loc_list *l = *pl;
 
   *pl = l->next;
-  pool_free (elt_loc_list_pool, l);
+  delete l;
 }
 
 /* Likewise for cselib_vals.  This also frees the addr_list associated with
@@ -443,7 +465,7 @@ unchain_one_value (cselib_val *v)
   while (v->addr_list)
     unchain_one_elt_list (&v->addr_list);
 
-  pool_free (cselib_val_pool, v);
+  delete v;
 }
 
 /* Remove all entries from the hash table.  Also used during
@@ -1306,7 +1328,7 @@ cselib_hash_rtx (rtx x, int create, machine_mode memmode)
 static inline cselib_val *
 new_cselib_val (unsigned int hash, machine_mode mode, rtx x)
 {
-  cselib_val *e = (cselib_val *) pool_alloc (cselib_val_pool);
+  cselib_val *e = new cselib_val;
 
   gcc_assert (hash);
   gcc_assert (next_uid);
@@ -1318,7 +1340,7 @@ new_cselib_val (unsigned int hash, machine_mode mode, rtx x)
      precisely when we can have VALUE RTXen (when cselib is active)
      so we don't need to put them in garbage collected memory.
      ??? Why should a VALUE be an RTX in the first place?  */
-  e->val_rtx = (rtx) pool_alloc (value_pool);
+  e->val_rtx = value_pool.allocate ();
   memset (e->val_rtx, 0, RTX_HDR_SIZE);
   PUT_CODE (e->val_rtx, VALUE);
   PUT_MODE (e->val_rtx, mode);
@@ -2729,13 +2751,6 @@ cselib_process_insn (rtx_insn *insn)
 void
 cselib_init (int record_what)
 {
-  elt_list_pool = create_alloc_pool ("elt_list",
-				     sizeof (struct elt_list), 10);
-  elt_loc_list_pool = create_alloc_pool ("elt_loc_list",
-				         sizeof (struct elt_loc_list), 10);
-  cselib_val_pool = create_alloc_pool ("cselib_val_list",
-				       sizeof (cselib_val), 10);
-  value_pool = create_alloc_pool ("value", RTX_CODE_SIZE (VALUE), 100);
   cselib_record_memory = record_what & CSELIB_RECORD_MEMORY;
   cselib_preserve_constants = record_what & CSELIB_PRESERVE_CONSTANTS;
   cselib_any_perm_equivs = false;
@@ -2777,10 +2792,10 @@ cselib_finish (void)
   cselib_any_perm_equivs = false;
   cfa_base_preserved_val = NULL;
   cfa_base_preserved_regno = INVALID_REGNUM;
-  free_alloc_pool (elt_list_pool);
-  free_alloc_pool (elt_loc_list_pool);
-  free_alloc_pool (cselib_val_pool);
-  free_alloc_pool (value_pool);
+  elt_list::pool.release ();
+  elt_loc_list::pool.release ();
+  cselib_val::pool.release ();
+  value_pool.release ();
   cselib_clear_table ();
   delete cselib_hash_table;
   cselib_hash_table = NULL;
diff --git a/gcc/cselib.h b/gcc/cselib.h
index 082bf54..cdd06ad 100644
--- a/gcc/cselib.h
+++ b/gcc/cselib.h
@@ -21,7 +21,8 @@ along with GCC; see the file COPYING3.  If not see
 #define GCC_CSELIB_H
 
 /* Describe a value.  */
-struct cselib_val {
+struct cselib_val
+{
   /* The hash value.  */
   unsigned int hash;
 
@@ -40,6 +41,21 @@ struct cselib_val {
   struct elt_list *addr_list;
 
   struct cselib_val *next_containing_mem;
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove ((cselib_val *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<cselib_val> pool;
 };
 
 /* A list of rtl expressions that hold the same value.  */
@@ -50,6 +66,21 @@ struct elt_loc_list {
   rtx loc;
   /* The insn that made the equivalence.  */
   rtx_insn *setting_insn;
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove ((elt_loc_list *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<elt_loc_list> pool;
 };
 
 /* Describe a single set that is part of an insn.  */
diff --git a/gcc/gcse.c b/gcc/gcse.c
index efbe4f4..28476fb 100644
--- a/gcc/gcse.c
+++ b/gcc/gcse.c
@@ -180,6 +180,7 @@ along with GCC; see the file COPYING3.  If not see
 #include "except.h"
 #include "ggc.h"
 #include "params.h"
+#include "alloc-pool.h"
 #include "cselib.h"
 #include "intl.h"
 #include "obstack.h"
diff --git a/gcc/postreload.c b/gcc/postreload.c
index 4d3c26f..06c4973 100644
--- a/gcc/postreload.c
+++ b/gcc/postreload.c
@@ -63,6 +63,7 @@ along with GCC; see the file COPYING3.  If not see
 #include "basic-block.h"
 #include "reload.h"
 #include "recog.h"
+#include "alloc-pool.h"
 #include "cselib.h"
 #include "diagnostic-core.h"
 #include "except.h"
diff --git a/gcc/print-rtl.c b/gcc/print-rtl.c
index 882f808..5e8838a 100644
--- a/gcc/print-rtl.c
+++ b/gcc/print-rtl.c
@@ -52,6 +52,7 @@ along with GCC; see the file COPYING3.  If not see
 #include "basic-block.h"
 #include "diagnostic.h"
 #include "tree-pretty-print.h"
+#include "alloc-pool.h"
 #include "cselib.h"
 #include "dumpfile.h"	/* for dump_flags */
 #include "dwarf2out.h"
diff --git a/gcc/sel-sched-dump.c b/gcc/sel-sched-dump.c
index 6f174a5..943fdd0 100644
--- a/gcc/sel-sched-dump.c
+++ b/gcc/sel-sched-dump.c
@@ -40,6 +40,7 @@ along with GCC; see the file COPYING3.  If not see
 #include "insn-config.h"
 #include "insn-attr.h"
 #include "params.h"
+#include "alloc-pool.h"
 #include "cselib.h"
 #include "target.h"
 
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 15/35] Change use to type-based pool allocator in dse.c.
  2015-05-27 14:21 ` [PATCH 15/35] Change use to type-based pool allocator in dse.c mliska
@ 2015-05-29 13:38   ` Martin Liška
  0 siblings, 0 replies; 108+ messages in thread
From: Martin Liška @ 2015-05-29 13:38 UTC (permalink / raw)
  To: gcc-patches

[-- Attachment #1: Type: text/plain, Size: 15441 bytes --]

On 05/27/2015 03:56 PM, mliska wrote:
> gcc/ChangeLog:
>
> 2015-04-30  Martin Liska  <mliska@suse.cz>
>
> 	* dse.c (get_group_info): Use new type-based pool allocator.
> 	(dse_step0): Likewise.
> 	(free_store_info): Likewise.
> 	(delete_dead_store_insn): Likewise.
> 	(free_read_records): Likewise.
> 	(record_store): Likewise.
> 	(replace_read): Likewise.
> 	(check_mem_read_rtx): Likewise.
> 	(scan_insn): Likewise.
> 	(dse_step1): Likewise.
> 	(dse_step7): Likewise.
> ---
>   gcc/dse.c | 201 ++++++++++++++++++++++++++++++++++++++++----------------------
>   1 file changed, 129 insertions(+), 72 deletions(-)
>
> diff --git a/gcc/dse.c b/gcc/dse.c
> index b3b38d5..5ade9dd 100644
> --- a/gcc/dse.c
> +++ b/gcc/dse.c
> @@ -249,7 +249,7 @@ static struct obstack dse_obstack;
>   /* Scratch bitmap for cselib's cselib_expand_value_rtx.  */
>   static bitmap scratch = NULL;
>
> -struct insn_info;
> +struct insn_info_type;
>
>   /* This structure holds information about a candidate store.  */
>   struct store_info
> @@ -316,7 +316,7 @@ struct store_info
>     /* Set if this store stores the same constant value as REDUNDANT_REASON
>        insn stored.  These aren't eliminated early, because doing that
>        might prevent the earlier larger store to be eliminated.  */
> -  struct insn_info *redundant_reason;
> +  struct insn_info_type *redundant_reason;
>   };
>
>   /* Return a bitmask with the first N low bits set.  */
> @@ -329,12 +329,15 @@ lowpart_bitmask (int n)
>   }
>
>   typedef struct store_info *store_info_t;
> -static alloc_pool cse_store_info_pool;
> -static alloc_pool rtx_store_info_pool;
> +static pool_allocator<store_info> cse_store_info_pool ("cse_store_info_pool",
> +						       100);
> +
> +static pool_allocator<store_info> rtx_store_info_pool ("rtx_store_info_pool",
> +						       100);
>
>   /* This structure holds information about a load.  These are only
>      built for rtx bases.  */
> -struct read_info
> +struct read_info_type
>   {
>     /* The id of the mem group of the base address.  */
>     int group_id;
> @@ -351,15 +354,30 @@ struct read_info
>     rtx mem;
>
>     /* The next read_info for this insn.  */
> -  struct read_info *next;
> +  struct read_info_type *next;
> +
> +  /* Pool allocation new operator.  */
> +  inline void *operator new (size_t)
> +  {
> +    return pool.allocate ();
> +  }
> +
> +  /* Delete operator utilizing pool allocation.  */
> +  inline void operator delete (void *ptr)
> +  {
> +    pool.remove((read_info_type *) ptr);
> +  }
> +
> +  /* Memory allocation pool.  */
> +  static pool_allocator<read_info_type> pool;
>   };
> -typedef struct read_info *read_info_t;
> -static alloc_pool read_info_pool;
> +typedef struct read_info_type *read_info_t;
>
> +pool_allocator<read_info_type> read_info_type::pool ("read_info_pool", 100);
>
>   /* One of these records is created for each insn.  */
>
> -struct insn_info
> +struct insn_info_type
>   {
>     /* Set true if the insn contains a store but the insn itself cannot
>        be deleted.  This is set if the insn is a parallel and there is
> @@ -433,27 +451,41 @@ struct insn_info
>     regset fixed_regs_live;
>
>     /* The prev insn in the basic block.  */
> -  struct insn_info * prev_insn;
> +  struct insn_info_type * prev_insn;
>
>     /* The linked list of insns that are in consideration for removal in
>        the forwards pass through the basic block.  This pointer may be
>        trash as it is not cleared when a wild read occurs.  The only
>        time it is guaranteed to be correct is when the traversal starts
>        at active_local_stores.  */
> -  struct insn_info * next_local_store;
> +  struct insn_info_type * next_local_store;
> +
> +  /* Pool allocation new operator.  */
> +  inline void *operator new (size_t)
> +  {
> +    return pool.allocate ();
> +  }
> +
> +  /* Delete operator utilizing pool allocation.  */
> +  inline void operator delete (void *ptr)
> +  {
> +    pool.remove((insn_info_type *) ptr);
> +  }
> +
> +  /* Memory allocation pool.  */
> +  static pool_allocator<insn_info_type> pool;
>   };
> +typedef struct insn_info_type *insn_info_t;
>
> -typedef struct insn_info *insn_info_t;
> -static alloc_pool insn_info_pool;
> +pool_allocator<insn_info_type> insn_info_type::pool ("insn_info_pool", 100);
>
>   /* The linked list of stores that are under consideration in this
>      basic block.  */
>   static insn_info_t active_local_stores;
>   static int active_local_stores_len;
>
> -struct dse_bb_info
> +struct dse_bb_info_type
>   {
> -
>     /* Pointer to the insn info for the last insn in the block.  These
>        are linked so this is how all of the insns are reached.  During
>        scanning this is the current insn being scanned.  */
> @@ -507,10 +539,25 @@ struct dse_bb_info
>        to assure that shift and/or add sequences that are inserted do not
>        accidentally clobber live hard regs.  */
>     bitmap regs_live;
> +
> +  /* Pool allocation new operator.  */
> +  inline void *operator new (size_t)
> +  {
> +    return pool.allocate ();
> +  }
> +
> +  /* Delete operator utilizing pool allocation.  */
> +  inline void operator delete (void *ptr)
> +  {
> +    pool.remove((dse_bb_info_type *) ptr);
> +  }
> +
> +  /* Memory allocation pool.  */
> +  static pool_allocator<dse_bb_info_type> pool;
>   };
>
> -typedef struct dse_bb_info *bb_info_t;
> -static alloc_pool bb_info_pool;
> +typedef struct dse_bb_info_type *bb_info_t;
> +pool_allocator<dse_bb_info_type> dse_bb_info_type::pool ("bb_info_pool", 100);
>
>   /* Table to hold all bb_infos.  */
>   static bb_info_t *bb_table;
> @@ -578,10 +625,26 @@ struct group_info
>        care about.  */
>     int *offset_map_n, *offset_map_p;
>     int offset_map_size_n, offset_map_size_p;
> +
> +  /* Pool allocation new operator.  */
> +  inline void *operator new (size_t)
> +  {
> +    return pool.allocate ();
> +  }
> +
> +  /* Delete operator utilizing pool allocation.  */
> +  inline void operator delete (void *ptr)
> +  {
> +    pool.remove((group_info *) ptr);
> +  }
> +
> +  /* Memory allocation pool.  */
> +  static pool_allocator<group_info> pool;
>   };
>   typedef struct group_info *group_info_t;
>   typedef const struct group_info *const_group_info_t;
> -static alloc_pool rtx_group_info_pool;
> +
> +pool_allocator<group_info> group_info::pool ("rtx_group_info_pool", 100);
>
>   /* Index into the rtx_group_vec.  */
>   static int rtx_group_next_id;
> @@ -602,10 +665,27 @@ struct deferred_change
>     rtx reg;
>
>     struct deferred_change *next;
> +
> +  /* Pool allocation new operator.  */
> +  inline void *operator new (size_t)
> +  {
> +    return pool.allocate ();
> +  }
> +
> +  /* Delete operator utilizing pool allocation.  */
> +  inline void operator delete (void *ptr)
> +  {
> +    pool.remove((deferred_change *) ptr);
> +  }
> +
> +  /* Memory allocation pool.  */
> +  static pool_allocator<deferred_change> pool;
>   };
>
>   typedef struct deferred_change *deferred_change_t;
> -static alloc_pool deferred_change_pool;
> +
> +pool_allocator<deferred_change> deferred_change::pool
> +  ("deferred_change_pool", 10);
>
>   static deferred_change_t deferred_change_list = NULL;
>
> @@ -712,8 +792,7 @@ get_group_info (rtx base)
>       {
>         if (!clear_alias_group)
>   	{
> -	  clear_alias_group = gi =
> -	    (group_info_t) pool_alloc (rtx_group_info_pool);
> +	  clear_alias_group = gi = new group_info;
>   	  memset (gi, 0, sizeof (struct group_info));
>   	  gi->id = rtx_group_next_id++;
>   	  gi->store1_n = BITMAP_ALLOC (&dse_bitmap_obstack);
> @@ -735,7 +814,7 @@ get_group_info (rtx base)
>
>     if (gi == NULL)
>       {
> -      *slot = gi = (group_info_t) pool_alloc (rtx_group_info_pool);
> +      *slot = gi = new group_info;
>         gi->rtx_base = base;
>         gi->id = rtx_group_next_id++;
>         gi->base_mem = gen_rtx_MEM (BLKmode, base);
> @@ -776,24 +855,6 @@ dse_step0 (void)
>     scratch = BITMAP_ALLOC (&reg_obstack);
>     kill_on_calls = BITMAP_ALLOC (&dse_bitmap_obstack);
>
> -  rtx_store_info_pool
> -    = create_alloc_pool ("rtx_store_info_pool",
> -			 sizeof (struct store_info), 100);
> -  read_info_pool
> -    = create_alloc_pool ("read_info_pool",
> -			 sizeof (struct read_info), 100);
> -  insn_info_pool
> -    = create_alloc_pool ("insn_info_pool",
> -			 sizeof (struct insn_info), 100);
> -  bb_info_pool
> -    = create_alloc_pool ("bb_info_pool",
> -			 sizeof (struct dse_bb_info), 100);
> -  rtx_group_info_pool
> -    = create_alloc_pool ("rtx_group_info_pool",
> -			 sizeof (struct group_info), 100);
> -  deferred_change_pool
> -    = create_alloc_pool ("deferred_change_pool",
> -			 sizeof (struct deferred_change), 10);
>
>     rtx_group_table = new hash_table<invariant_group_base_hasher> (11);
>
> @@ -829,9 +890,9 @@ free_store_info (insn_info_t insn_info)
>         if (store_info->is_large)
>   	BITMAP_FREE (store_info->positions_needed.large.bmap);
>         if (store_info->cse_base)
> -	pool_free (cse_store_info_pool, store_info);
> +	cse_store_info_pool.remove (store_info);
>         else
> -	pool_free (rtx_store_info_pool, store_info);
> +	rtx_store_info_pool.remove (store_info);
>         store_info = next;
>       }
>
> @@ -948,7 +1009,7 @@ check_for_inc_dec_1 (insn_info_t insn_info)
>   bool
>   check_for_inc_dec (rtx_insn *insn)
>   {
> -  struct insn_info insn_info;
> +  insn_info_type insn_info;
>     rtx note;
>
>     insn_info.insn = insn;
> @@ -989,7 +1050,7 @@ delete_dead_store_insn (insn_info_t insn_info)
>     while (read_info)
>       {
>         read_info_t next = read_info->next;
> -      pool_free (read_info_pool, read_info);
> +      delete read_info;
>         read_info = next;
>       }
>     insn_info->read_rec = NULL;
> @@ -1113,7 +1174,7 @@ free_read_records (bb_info_t bb_info)
>         read_info_t next = (*ptr)->next;
>         if ((*ptr)->alias_set == 0)
>           {
> -          pool_free (read_info_pool, *ptr);
> +	  delete *ptr;
>             *ptr = next;
>           }
>         else
> @@ -1167,7 +1228,7 @@ const_or_frame_p (rtx x)
>   	return true;
>         return false;
>       }
> -
> +
>     return false;
>   }
>
> @@ -1488,7 +1549,7 @@ record_store (rtx body, bb_info_t bb_info)
>         if (clear_alias_group->offset_map_size_p < spill_alias_set)
>   	clear_alias_group->offset_map_size_p = spill_alias_set;
>
> -      store_info = (store_info_t) pool_alloc (rtx_store_info_pool);
> +      store_info = rtx_store_info_pool.allocate ();
>
>         if (dump_file && (dump_flags & TDF_DETAILS))
>   	fprintf (dump_file, " processing spill store %d(%s)\n",
> @@ -1503,7 +1564,7 @@ record_store (rtx body, bb_info_t bb_info)
>   	= rtx_group_vec[group_id];
>         tree expr = MEM_EXPR (mem);
>
> -      store_info = (store_info_t) pool_alloc (rtx_store_info_pool);
> +      store_info = rtx_store_info_pool.allocate ();
>         set_usage_bits (group, offset, width, expr);
>
>         if (dump_file && (dump_flags & TDF_DETAILS))
> @@ -1516,7 +1577,7 @@ record_store (rtx body, bb_info_t bb_info)
>   	insn_info->stack_pointer_based = true;
>         insn_info->contains_cselib_groups = true;
>
> -      store_info = (store_info_t) pool_alloc (cse_store_info_pool);
> +      store_info = cse_store_info_pool.allocate ();
>         group_id = -1;
>
>         if (dump_file && (dump_flags & TDF_DETAILS))
> @@ -2060,8 +2121,7 @@ replace_read (store_info_t store_info, insn_info_t store_insn,
>
>     if (validate_change (read_insn->insn, loc, read_reg, 0))
>       {
> -      deferred_change_t deferred_change =
> -	(deferred_change_t) pool_alloc (deferred_change_pool);
> +      deferred_change_t change = new deferred_change;
>
>         /* Insert this right before the store insn where it will be safe
>   	 from later insns that might change it before the read.  */
> @@ -2091,15 +2151,15 @@ replace_read (store_info_t store_info, insn_info_t store_insn,
>   	 block we can put them back.  */
>
>         *loc = read_info->mem;
> -      deferred_change->next = deferred_change_list;
> -      deferred_change_list = deferred_change;
> -      deferred_change->loc = loc;
> -      deferred_change->reg = read_reg;
> +      change->next = deferred_change_list;
> +      deferred_change_list = change;
> +      change->loc = loc;
> +      change->reg = read_reg;
>
>         /* Get rid of the read_info, from the point of view of the
>   	 rest of dse, play like this read never happened.  */
>         read_insn->read_rec = read_info->next;
> -      pool_free (read_info_pool, read_info);
> +      delete read_info;
>         if (dump_file && (dump_flags & TDF_DETAILS))
>   	{
>   	  fprintf (dump_file, " -- replaced the loaded MEM with ");
> @@ -2165,7 +2225,7 @@ check_mem_read_rtx (rtx *loc, bb_info_t bb_info)
>     else
>       width = GET_MODE_SIZE (GET_MODE (mem));
>
> -  read_info = (read_info_t) pool_alloc (read_info_pool);
> +  read_info = new read_info_type;
>     read_info->group_id = group_id;
>     read_info->mem = mem;
>     read_info->alias_set = spill_alias_set;
> @@ -2481,9 +2541,9 @@ static void
>   scan_insn (bb_info_t bb_info, rtx_insn *insn)
>   {
>     rtx body;
> -  insn_info_t insn_info = (insn_info_t) pool_alloc (insn_info_pool);
> +  insn_info_type *insn_info = new insn_info_type;
>     int mems_found = 0;
> -  memset (insn_info, 0, sizeof (struct insn_info));
> +  memset (insn_info, 0, sizeof (struct insn_info_type));
>
>     if (dump_file && (dump_flags & TDF_DETAILS))
>       fprintf (dump_file, "\n**scanning insn=%d\n",
> @@ -2740,9 +2800,9 @@ dse_step1 (void)
>     FOR_ALL_BB_FN (bb, cfun)
>       {
>         insn_info_t ptr;
> -      bb_info_t bb_info = (bb_info_t) pool_alloc (bb_info_pool);
> +      bb_info_t bb_info = new dse_bb_info_type;
>
> -      memset (bb_info, 0, sizeof (struct dse_bb_info));
> +      memset (bb_info, 0, sizeof (dse_bb_info_type));
>         bitmap_set_bit (all_blocks, bb->index);
>         bb_info->regs_live = regs_live;
>
> @@ -2756,9 +2816,6 @@ dse_step1 (void)
>   	{
>   	  rtx_insn *insn;
>
> -	  cse_store_info_pool
> -	    = create_alloc_pool ("cse_store_info_pool",
> -				 sizeof (struct store_info), 100);
>   	  active_local_stores = NULL;
>   	  active_local_stores_len = 0;
>   	  cselib_clear_table ();
> @@ -2820,7 +2877,7 @@ dse_step1 (void)
>   	      /* There is no reason to validate this change.  That was
>   		 done earlier.  */
>   	      *deferred_change_list->loc = deferred_change_list->reg;
> -	      pool_free (deferred_change_pool, deferred_change_list);
> +	      delete deferred_change_list;
>   	      deferred_change_list = next;
>   	    }
>
> @@ -2866,7 +2923,7 @@ dse_step1 (void)
>   	      ptr = ptr->prev_insn;
>   	    }
>
> -	  free_alloc_pool (cse_store_info_pool);
> +	  cse_store_info_pool.release ();
>   	}
>         bb_info->regs_live = NULL;
>       }
> @@ -3704,12 +3761,12 @@ dse_step7 (void)
>     BITMAP_FREE (all_blocks);
>     BITMAP_FREE (scratch);
>
> -  free_alloc_pool (rtx_store_info_pool);
> -  free_alloc_pool (read_info_pool);
> -  free_alloc_pool (insn_info_pool);
> -  free_alloc_pool (bb_info_pool);
> -  free_alloc_pool (rtx_group_info_pool);
> -  free_alloc_pool (deferred_change_pool);
> +  rtx_store_info_pool.release ();
> +  read_info_type::pool.release ();
> +  insn_info_type::pool.release ();
> +  dse_bb_info_type::pool.release ();
> +  group_info::pool.release ();
> +  deferred_change::pool.release ();
>   }
>
>
>

v2

[-- Attachment #2: 0014-Change-use-to-type-based-pool-allocator-in-dse.c.patch --]
[-- Type: text/x-patch, Size: 14651 bytes --]

From fdce35bba20e7a6f6d97197b448a68d2d9a61ec6 Mon Sep 17 00:00:00 2001
From: mliska <mliska@suse.cz>
Date: Wed, 27 May 2015 15:56:49 +0200
Subject: [PATCH 14/32] Change use to type-based pool allocator in dse.c.

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* dse.c (get_group_info): Use new type-based pool allocator.
	(dse_step0): Likewise.
	(free_store_info): Likewise.
	(delete_dead_store_insn): Likewise.
	(free_read_records): Likewise.
	(record_store): Likewise.
	(replace_read): Likewise.
	(check_mem_read_rtx): Likewise.
	(scan_insn): Likewise.
	(dse_step1): Likewise.
	(dse_step7): Likewise.
---
 gcc/dse.c | 201 ++++++++++++++++++++++++++++++++++++++++----------------------
 1 file changed, 129 insertions(+), 72 deletions(-)

diff --git a/gcc/dse.c b/gcc/dse.c
index b3b38d5..fae63af 100644
--- a/gcc/dse.c
+++ b/gcc/dse.c
@@ -249,7 +249,7 @@ static struct obstack dse_obstack;
 /* Scratch bitmap for cselib's cselib_expand_value_rtx.  */
 static bitmap scratch = NULL;
 
-struct insn_info;
+struct insn_info_type;
 
 /* This structure holds information about a candidate store.  */
 struct store_info
@@ -316,7 +316,7 @@ struct store_info
   /* Set if this store stores the same constant value as REDUNDANT_REASON
      insn stored.  These aren't eliminated early, because doing that
      might prevent the earlier larger store to be eliminated.  */
-  struct insn_info *redundant_reason;
+  struct insn_info_type *redundant_reason;
 };
 
 /* Return a bitmask with the first N low bits set.  */
@@ -329,12 +329,15 @@ lowpart_bitmask (int n)
 }
 
 typedef struct store_info *store_info_t;
-static alloc_pool cse_store_info_pool;
-static alloc_pool rtx_store_info_pool;
+static pool_allocator<store_info> cse_store_info_pool ("cse_store_info_pool",
+						       100);
+
+static pool_allocator<store_info> rtx_store_info_pool ("rtx_store_info_pool",
+						       100);
 
 /* This structure holds information about a load.  These are only
    built for rtx bases.  */
-struct read_info
+struct read_info_type
 {
   /* The id of the mem group of the base address.  */
   int group_id;
@@ -351,15 +354,30 @@ struct read_info
   rtx mem;
 
   /* The next read_info for this insn.  */
-  struct read_info *next;
+  struct read_info_type *next;
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove ((read_info_type *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<read_info_type> pool;
 };
-typedef struct read_info *read_info_t;
-static alloc_pool read_info_pool;
+typedef struct read_info_type *read_info_t;
 
+pool_allocator<read_info_type> read_info_type::pool ("read_info_pool", 100);
 
 /* One of these records is created for each insn.  */
 
-struct insn_info
+struct insn_info_type
 {
   /* Set true if the insn contains a store but the insn itself cannot
      be deleted.  This is set if the insn is a parallel and there is
@@ -433,27 +451,41 @@ struct insn_info
   regset fixed_regs_live;
 
   /* The prev insn in the basic block.  */
-  struct insn_info * prev_insn;
+  struct insn_info_type * prev_insn;
 
   /* The linked list of insns that are in consideration for removal in
      the forwards pass through the basic block.  This pointer may be
      trash as it is not cleared when a wild read occurs.  The only
      time it is guaranteed to be correct is when the traversal starts
      at active_local_stores.  */
-  struct insn_info * next_local_store;
+  struct insn_info_type * next_local_store;
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove ((insn_info_type *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<insn_info_type> pool;
 };
+typedef struct insn_info_type *insn_info_t;
 
-typedef struct insn_info *insn_info_t;
-static alloc_pool insn_info_pool;
+pool_allocator<insn_info_type> insn_info_type::pool ("insn_info_pool", 100);
 
 /* The linked list of stores that are under consideration in this
    basic block.  */
 static insn_info_t active_local_stores;
 static int active_local_stores_len;
 
-struct dse_bb_info
+struct dse_bb_info_type
 {
-
   /* Pointer to the insn info for the last insn in the block.  These
      are linked so this is how all of the insns are reached.  During
      scanning this is the current insn being scanned.  */
@@ -507,10 +539,25 @@ struct dse_bb_info
      to assure that shift and/or add sequences that are inserted do not
      accidentally clobber live hard regs.  */
   bitmap regs_live;
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove ((dse_bb_info_type *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<dse_bb_info_type> pool;
 };
 
-typedef struct dse_bb_info *bb_info_t;
-static alloc_pool bb_info_pool;
+typedef struct dse_bb_info_type *bb_info_t;
+pool_allocator<dse_bb_info_type> dse_bb_info_type::pool ("bb_info_pool", 100);
 
 /* Table to hold all bb_infos.  */
 static bb_info_t *bb_table;
@@ -578,10 +625,26 @@ struct group_info
      care about.  */
   int *offset_map_n, *offset_map_p;
   int offset_map_size_n, offset_map_size_p;
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove ((group_info *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<group_info> pool;
 };
 typedef struct group_info *group_info_t;
 typedef const struct group_info *const_group_info_t;
-static alloc_pool rtx_group_info_pool;
+
+pool_allocator<group_info> group_info::pool ("rtx_group_info_pool", 100);
 
 /* Index into the rtx_group_vec.  */
 static int rtx_group_next_id;
@@ -602,10 +665,27 @@ struct deferred_change
   rtx reg;
 
   struct deferred_change *next;
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove ((deferred_change *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<deferred_change> pool;
 };
 
 typedef struct deferred_change *deferred_change_t;
-static alloc_pool deferred_change_pool;
+
+pool_allocator<deferred_change> deferred_change::pool
+  ("deferred_change_pool", 10);
 
 static deferred_change_t deferred_change_list = NULL;
 
@@ -712,8 +792,7 @@ get_group_info (rtx base)
     {
       if (!clear_alias_group)
 	{
-	  clear_alias_group = gi =
-	    (group_info_t) pool_alloc (rtx_group_info_pool);
+	  clear_alias_group = gi = new group_info;
 	  memset (gi, 0, sizeof (struct group_info));
 	  gi->id = rtx_group_next_id++;
 	  gi->store1_n = BITMAP_ALLOC (&dse_bitmap_obstack);
@@ -735,7 +814,7 @@ get_group_info (rtx base)
 
   if (gi == NULL)
     {
-      *slot = gi = (group_info_t) pool_alloc (rtx_group_info_pool);
+      *slot = gi = new group_info;
       gi->rtx_base = base;
       gi->id = rtx_group_next_id++;
       gi->base_mem = gen_rtx_MEM (BLKmode, base);
@@ -776,24 +855,6 @@ dse_step0 (void)
   scratch = BITMAP_ALLOC (&reg_obstack);
   kill_on_calls = BITMAP_ALLOC (&dse_bitmap_obstack);
 
-  rtx_store_info_pool
-    = create_alloc_pool ("rtx_store_info_pool",
-			 sizeof (struct store_info), 100);
-  read_info_pool
-    = create_alloc_pool ("read_info_pool",
-			 sizeof (struct read_info), 100);
-  insn_info_pool
-    = create_alloc_pool ("insn_info_pool",
-			 sizeof (struct insn_info), 100);
-  bb_info_pool
-    = create_alloc_pool ("bb_info_pool",
-			 sizeof (struct dse_bb_info), 100);
-  rtx_group_info_pool
-    = create_alloc_pool ("rtx_group_info_pool",
-			 sizeof (struct group_info), 100);
-  deferred_change_pool
-    = create_alloc_pool ("deferred_change_pool",
-			 sizeof (struct deferred_change), 10);
 
   rtx_group_table = new hash_table<invariant_group_base_hasher> (11);
 
@@ -829,9 +890,9 @@ free_store_info (insn_info_t insn_info)
       if (store_info->is_large)
 	BITMAP_FREE (store_info->positions_needed.large.bmap);
       if (store_info->cse_base)
-	pool_free (cse_store_info_pool, store_info);
+	cse_store_info_pool.remove (store_info);
       else
-	pool_free (rtx_store_info_pool, store_info);
+	rtx_store_info_pool.remove (store_info);
       store_info = next;
     }
 
@@ -948,7 +1009,7 @@ check_for_inc_dec_1 (insn_info_t insn_info)
 bool
 check_for_inc_dec (rtx_insn *insn)
 {
-  struct insn_info insn_info;
+  insn_info_type insn_info;
   rtx note;
 
   insn_info.insn = insn;
@@ -989,7 +1050,7 @@ delete_dead_store_insn (insn_info_t insn_info)
   while (read_info)
     {
       read_info_t next = read_info->next;
-      pool_free (read_info_pool, read_info);
+      delete read_info;
       read_info = next;
     }
   insn_info->read_rec = NULL;
@@ -1113,7 +1174,7 @@ free_read_records (bb_info_t bb_info)
       read_info_t next = (*ptr)->next;
       if ((*ptr)->alias_set == 0)
         {
-          pool_free (read_info_pool, *ptr);
+	  delete *ptr;
           *ptr = next;
         }
       else
@@ -1167,7 +1228,7 @@ const_or_frame_p (rtx x)
 	return true;
       return false;
     }
-  
+
   return false;
 }
 
@@ -1488,7 +1549,7 @@ record_store (rtx body, bb_info_t bb_info)
       if (clear_alias_group->offset_map_size_p < spill_alias_set)
 	clear_alias_group->offset_map_size_p = spill_alias_set;
 
-      store_info = (store_info_t) pool_alloc (rtx_store_info_pool);
+      store_info = rtx_store_info_pool.allocate ();
 
       if (dump_file && (dump_flags & TDF_DETAILS))
 	fprintf (dump_file, " processing spill store %d(%s)\n",
@@ -1503,7 +1564,7 @@ record_store (rtx body, bb_info_t bb_info)
 	= rtx_group_vec[group_id];
       tree expr = MEM_EXPR (mem);
 
-      store_info = (store_info_t) pool_alloc (rtx_store_info_pool);
+      store_info = rtx_store_info_pool.allocate ();
       set_usage_bits (group, offset, width, expr);
 
       if (dump_file && (dump_flags & TDF_DETAILS))
@@ -1516,7 +1577,7 @@ record_store (rtx body, bb_info_t bb_info)
 	insn_info->stack_pointer_based = true;
       insn_info->contains_cselib_groups = true;
 
-      store_info = (store_info_t) pool_alloc (cse_store_info_pool);
+      store_info = cse_store_info_pool.allocate ();
       group_id = -1;
 
       if (dump_file && (dump_flags & TDF_DETAILS))
@@ -2060,8 +2121,7 @@ replace_read (store_info_t store_info, insn_info_t store_insn,
 
   if (validate_change (read_insn->insn, loc, read_reg, 0))
     {
-      deferred_change_t deferred_change =
-	(deferred_change_t) pool_alloc (deferred_change_pool);
+      deferred_change_t change = new deferred_change;
 
       /* Insert this right before the store insn where it will be safe
 	 from later insns that might change it before the read.  */
@@ -2091,15 +2151,15 @@ replace_read (store_info_t store_info, insn_info_t store_insn,
 	 block we can put them back.  */
 
       *loc = read_info->mem;
-      deferred_change->next = deferred_change_list;
-      deferred_change_list = deferred_change;
-      deferred_change->loc = loc;
-      deferred_change->reg = read_reg;
+      change->next = deferred_change_list;
+      deferred_change_list = change;
+      change->loc = loc;
+      change->reg = read_reg;
 
       /* Get rid of the read_info, from the point of view of the
 	 rest of dse, play like this read never happened.  */
       read_insn->read_rec = read_info->next;
-      pool_free (read_info_pool, read_info);
+      delete read_info;
       if (dump_file && (dump_flags & TDF_DETAILS))
 	{
 	  fprintf (dump_file, " -- replaced the loaded MEM with ");
@@ -2165,7 +2225,7 @@ check_mem_read_rtx (rtx *loc, bb_info_t bb_info)
   else
     width = GET_MODE_SIZE (GET_MODE (mem));
 
-  read_info = (read_info_t) pool_alloc (read_info_pool);
+  read_info = new read_info_type;
   read_info->group_id = group_id;
   read_info->mem = mem;
   read_info->alias_set = spill_alias_set;
@@ -2481,9 +2541,9 @@ static void
 scan_insn (bb_info_t bb_info, rtx_insn *insn)
 {
   rtx body;
-  insn_info_t insn_info = (insn_info_t) pool_alloc (insn_info_pool);
+  insn_info_type *insn_info = new insn_info_type;
   int mems_found = 0;
-  memset (insn_info, 0, sizeof (struct insn_info));
+  memset (insn_info, 0, sizeof (struct insn_info_type));
 
   if (dump_file && (dump_flags & TDF_DETAILS))
     fprintf (dump_file, "\n**scanning insn=%d\n",
@@ -2740,9 +2800,9 @@ dse_step1 (void)
   FOR_ALL_BB_FN (bb, cfun)
     {
       insn_info_t ptr;
-      bb_info_t bb_info = (bb_info_t) pool_alloc (bb_info_pool);
+      bb_info_t bb_info = new dse_bb_info_type;
 
-      memset (bb_info, 0, sizeof (struct dse_bb_info));
+      memset (bb_info, 0, sizeof (dse_bb_info_type));
       bitmap_set_bit (all_blocks, bb->index);
       bb_info->regs_live = regs_live;
 
@@ -2756,9 +2816,6 @@ dse_step1 (void)
 	{
 	  rtx_insn *insn;
 
-	  cse_store_info_pool
-	    = create_alloc_pool ("cse_store_info_pool",
-				 sizeof (struct store_info), 100);
 	  active_local_stores = NULL;
 	  active_local_stores_len = 0;
 	  cselib_clear_table ();
@@ -2820,7 +2877,7 @@ dse_step1 (void)
 	      /* There is no reason to validate this change.  That was
 		 done earlier.  */
 	      *deferred_change_list->loc = deferred_change_list->reg;
-	      pool_free (deferred_change_pool, deferred_change_list);
+	      delete deferred_change_list;
 	      deferred_change_list = next;
 	    }
 
@@ -2866,7 +2923,7 @@ dse_step1 (void)
 	      ptr = ptr->prev_insn;
 	    }
 
-	  free_alloc_pool (cse_store_info_pool);
+	  cse_store_info_pool.release ();
 	}
       bb_info->regs_live = NULL;
     }
@@ -3704,12 +3761,12 @@ dse_step7 (void)
   BITMAP_FREE (all_blocks);
   BITMAP_FREE (scratch);
 
-  free_alloc_pool (rtx_store_info_pool);
-  free_alloc_pool (read_info_pool);
-  free_alloc_pool (insn_info_pool);
-  free_alloc_pool (bb_info_pool);
-  free_alloc_pool (rtx_group_info_pool);
-  free_alloc_pool (deferred_change_pool);
+  rtx_store_info_pool.release ();
+  read_info_type::pool.release ();
+  insn_info_type::pool.release ();
+  dse_bb_info_type::pool.release ();
+  group_info::pool.release ();
+  deferred_change::pool.release ();
 }
 
 
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 14/35] Change use to type-based pool allocator in df-scan.c.
  2015-05-27 14:19 ` [PATCH 14/35] Change use to type-based pool allocator in df-scan.c mliska
@ 2015-05-29 13:38   ` Martin Liška
  0 siblings, 0 replies; 108+ messages in thread
From: Martin Liška @ 2015-05-29 13:38 UTC (permalink / raw)
  To: gcc-patches

[-- Attachment #1: Type: text/plain, Size: 9870 bytes --]

On 05/27/2015 03:56 PM, mliska wrote:
> gcc/ChangeLog:
>
> 2015-04-30  Martin Liska  <mliska@suse.cz>
>
> 	* df-scan.c (struct df_scan_problem_data): Use new type-based pool allocator.
> 	(df_scan_free_internal): Likewise.
> 	(df_scan_alloc): Likewise.
> 	(df_grow_reg_info): Likewise.
> 	(df_free_ref): Likewise.
> 	(df_insn_create_insn_record): Likewise.
> 	(df_mw_hardreg_chain_delete): Likewise.
> 	(df_insn_info_delete): Likewise.
> 	(df_free_collection_rec): Likewise.
> 	(df_mw_hardreg_chain_delete_eq_uses): Likewise.
> 	(df_sort_and_compress_mws): Likewise.
> 	(df_ref_create_structure): Likewise.
> 	(df_ref_record): Likewise.
> ---
>   gcc/df-scan.c | 94 +++++++++++++++++++++++++++++------------------------------
>   1 file changed, 46 insertions(+), 48 deletions(-)
>
> diff --git a/gcc/df-scan.c b/gcc/df-scan.c
> index e32eaf5..4646bcf 100644
> --- a/gcc/df-scan.c
> +++ b/gcc/df-scan.c
> @@ -159,15 +159,18 @@ static const unsigned int copy_all = copy_defs | copy_uses | copy_eq_uses
>      it gets run.  It also has no need for the iterative solver.
>   ----------------------------------------------------------------------------*/
>
> +#define SCAN_PROBLEM_DATA_BLOCK_SIZE 512
> +
>   /* Problem data for the scanning dataflow function.  */
>   struct df_scan_problem_data
>   {
> -  alloc_pool ref_base_pool;
> -  alloc_pool ref_artificial_pool;
> -  alloc_pool ref_regular_pool;
> -  alloc_pool insn_pool;
> -  alloc_pool reg_pool;
> -  alloc_pool mw_reg_pool;
> +  pool_allocator<df_base_ref> *ref_base_pool;
> +  pool_allocator<df_artificial_ref> *ref_artificial_pool;
> +  pool_allocator<df_regular_ref> *ref_regular_pool;
> +  pool_allocator<df_insn_info> *insn_pool;
> +  pool_allocator<df_reg_info> *reg_pool;
> +  pool_allocator<df_mw_hardreg> *mw_reg_pool;
> +
>     bitmap_obstack reg_bitmaps;
>     bitmap_obstack insn_bitmaps;
>   };
> @@ -218,12 +221,12 @@ df_scan_free_internal (void)
>     bitmap_clear (&df->insns_to_rescan);
>     bitmap_clear (&df->insns_to_notes_rescan);
>
> -  free_alloc_pool (problem_data->ref_base_pool);
> -  free_alloc_pool (problem_data->ref_artificial_pool);
> -  free_alloc_pool (problem_data->ref_regular_pool);
> -  free_alloc_pool (problem_data->insn_pool);
> -  free_alloc_pool (problem_data->reg_pool);
> -  free_alloc_pool (problem_data->mw_reg_pool);
> +  delete problem_data->ref_base_pool;
> +  delete problem_data->ref_artificial_pool;
> +  delete problem_data->ref_regular_pool;
> +  delete problem_data->insn_pool;
> +  delete problem_data->reg_pool;
> +  delete problem_data->mw_reg_pool;
>     bitmap_obstack_release (&problem_data->reg_bitmaps);
>     bitmap_obstack_release (&problem_data->insn_bitmaps);
>     free (df_scan->problem_data);
> @@ -264,7 +267,6 @@ df_scan_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
>   {
>     struct df_scan_problem_data *problem_data;
>     unsigned int insn_num = get_max_uid () + 1;
> -  unsigned int block_size = 512;
>     basic_block bb;
>
>     /* Given the number of pools, this is really faster than tearing
> @@ -276,24 +278,18 @@ df_scan_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
>     df_scan->problem_data = problem_data;
>     df_scan->computed = true;
>
> -  problem_data->ref_base_pool
> -    = create_alloc_pool ("df_scan ref base",
> -			 sizeof (struct df_base_ref), block_size);
> -  problem_data->ref_artificial_pool
> -    = create_alloc_pool ("df_scan ref artificial",
> -			 sizeof (struct df_artificial_ref), block_size);
> -  problem_data->ref_regular_pool
> -    = create_alloc_pool ("df_scan ref regular",
> -			 sizeof (struct df_regular_ref), block_size);
> -  problem_data->insn_pool
> -    = create_alloc_pool ("df_scan insn",
> -			 sizeof (struct df_insn_info), block_size);
> -  problem_data->reg_pool
> -    = create_alloc_pool ("df_scan reg",
> -			 sizeof (struct df_reg_info), block_size);
> -  problem_data->mw_reg_pool
> -    = create_alloc_pool ("df_scan mw_reg",
> -			 sizeof (struct df_mw_hardreg), block_size / 16);
> +  problem_data->ref_base_pool = new pool_allocator<df_base_ref>
> +    ("df_scan ref base", SCAN_PROBLEM_DATA_BLOCK_SIZE);
> +  problem_data->ref_artificial_pool = new pool_allocator<df_artificial_ref>
> +    ("df_scan ref artificial", SCAN_PROBLEM_DATA_BLOCK_SIZE);
> +  problem_data->ref_regular_pool = new pool_allocator<df_regular_ref>
> +    ("df_scan ref regular", SCAN_PROBLEM_DATA_BLOCK_SIZE);
> +  problem_data->insn_pool = new pool_allocator<df_insn_info>
> +    ("df_scan insn", SCAN_PROBLEM_DATA_BLOCK_SIZE);
> +  problem_data->reg_pool = new pool_allocator<df_reg_info>
> +    ("df_scan reg", SCAN_PROBLEM_DATA_BLOCK_SIZE);
> +  problem_data->mw_reg_pool = new pool_allocator<df_mw_hardreg>
> +    ("df_scan mw_reg", SCAN_PROBLEM_DATA_BLOCK_SIZE / 16);
>
>     bitmap_obstack_initialize (&problem_data->reg_bitmaps);
>     bitmap_obstack_initialize (&problem_data->insn_bitmaps);
> @@ -519,13 +515,14 @@ df_grow_reg_info (void)
>       {
>         struct df_reg_info *reg_info;
>
> -      reg_info = (struct df_reg_info *) pool_alloc (problem_data->reg_pool);
> +      // TODO
> +      reg_info = problem_data->reg_pool->allocate ();
>         memset (reg_info, 0, sizeof (struct df_reg_info));
>         df->def_regs[i] = reg_info;
> -      reg_info = (struct df_reg_info *) pool_alloc (problem_data->reg_pool);
> +      reg_info = problem_data->reg_pool->allocate ();
>         memset (reg_info, 0, sizeof (struct df_reg_info));
>         df->use_regs[i] = reg_info;
> -      reg_info = (struct df_reg_info *) pool_alloc (problem_data->reg_pool);
> +      reg_info = problem_data->reg_pool->allocate ();
>         memset (reg_info, 0, sizeof (struct df_reg_info));
>         df->eq_use_regs[i] = reg_info;
>         df->def_info.begin[i] = 0;
> @@ -740,15 +737,17 @@ df_free_ref (df_ref ref)
>     switch (DF_REF_CLASS (ref))
>       {
>       case DF_REF_BASE:
> -      pool_free (problem_data->ref_base_pool, ref);
> +      problem_data->ref_base_pool->remove ((df_base_ref *) (ref));
>         break;
>
>       case DF_REF_ARTIFICIAL:
> -      pool_free (problem_data->ref_artificial_pool, ref);
> +      problem_data->ref_artificial_pool->remove
> +	((df_artificial_ref *) (ref));
>         break;
>
>       case DF_REF_REGULAR:
> -      pool_free (problem_data->ref_regular_pool, ref);
> +      problem_data->ref_regular_pool->remove
> +	((df_regular_ref *) (ref));
>         break;
>       }
>   }
> @@ -851,7 +850,7 @@ df_insn_create_insn_record (rtx_insn *insn)
>     insn_rec = DF_INSN_INFO_GET (insn);
>     if (!insn_rec)
>       {
> -      insn_rec = (struct df_insn_info *) pool_alloc (problem_data->insn_pool);
> +      insn_rec = problem_data->insn_pool->allocate ();
>         DF_INSN_INFO_SET (insn, insn_rec);
>       }
>     memset (insn_rec, 0, sizeof (struct df_insn_info));
> @@ -899,7 +898,7 @@ df_mw_hardreg_chain_delete (struct df_mw_hardreg *hardregs)
>     for (; hardregs; hardregs = next)
>       {
>         next = DF_MWS_NEXT (hardregs);
> -      pool_free (problem_data->mw_reg_pool, hardregs);
> +      problem_data->mw_reg_pool->remove (hardregs);
>       }
>   }
>
> @@ -940,7 +939,7 @@ df_insn_info_delete (unsigned int uid)
>         df_ref_chain_delete (insn_info->uses);
>         df_ref_chain_delete (insn_info->eq_uses);
>
> -      pool_free (problem_data->insn_pool, insn_info);
> +      problem_data->insn_pool->remove (insn_info);
>         DF_INSN_UID_SET (uid, NULL);
>       }
>   }
> @@ -1024,7 +1023,7 @@ df_free_collection_rec (struct df_collection_rec *collection_rec)
>     FOR_EACH_VEC_ELT (collection_rec->eq_use_vec, ix, ref)
>       df_free_ref (ref);
>     FOR_EACH_VEC_ELT (collection_rec->mw_vec, ix, mw)
> -    pool_free (problem_data->mw_reg_pool, mw);
> +    problem_data->mw_reg_pool->remove (mw);
>
>     collection_rec->def_vec.release ();
>     collection_rec->use_vec.release ();
> @@ -1949,7 +1948,7 @@ df_mw_hardreg_chain_delete_eq_uses (struct df_insn_info *insn_info)
>         if (mw->flags & DF_REF_IN_NOTE)
>   	{
>   	  *mw_ptr = DF_MWS_NEXT (mw);
> -	  pool_free (problem_data->mw_reg_pool, mw);
> +	  problem_data->mw_reg_pool->remove (mw);
>   	}
>         else
>   	mw_ptr = &DF_MWS_NEXT (mw);
> @@ -2296,8 +2295,7 @@ df_sort_and_compress_mws (vec<df_mw_hardreg_ptr, va_heap> *mw_vec)
>         while (i + dist + 1 < count
>   	     && df_mw_equal_p ((*mw_vec)[i], (*mw_vec)[i + dist + 1]))
>   	{
> -	  pool_free (problem_data->mw_reg_pool,
> -		     (*mw_vec)[i + dist + 1]);
> +	  problem_data->mw_reg_pool->remove ((*mw_vec)[i + dist + 1]);
>   	  dist++;
>   	}
>         /* Copy it down to the next position.  */
> @@ -2525,18 +2523,18 @@ df_ref_create_structure (enum df_ref_class cl,
>     switch (cl)
>       {
>       case DF_REF_BASE:
> -      this_ref = (df_ref) pool_alloc (problem_data->ref_base_pool);
> +      this_ref = (df_ref) (problem_data->ref_base_pool->allocate ());
>         gcc_checking_assert (loc == NULL);
>         break;
>
>       case DF_REF_ARTIFICIAL:
> -      this_ref = (df_ref) pool_alloc (problem_data->ref_artificial_pool);
> +      this_ref = (df_ref) (problem_data->ref_artificial_pool->allocate ());
>         this_ref->artificial_ref.bb = bb;
>         gcc_checking_assert (loc == NULL);
>         break;
>
>       case DF_REF_REGULAR:
> -      this_ref = (df_ref) pool_alloc (problem_data->ref_regular_pool);
> +      this_ref = (df_ref) (problem_data->ref_regular_pool->allocate ());
>         this_ref->regular_ref.loc = loc;
>         gcc_checking_assert (loc);
>         break;
> @@ -2638,7 +2636,7 @@ df_ref_record (enum df_ref_class cl,
>   	    ref_flags |= DF_REF_PARTIAL;
>   	  ref_flags |= DF_REF_MW_HARDREG;
>
> -	  hardreg = (struct df_mw_hardreg *) pool_alloc (problem_data->mw_reg_pool);
> +	  hardreg = problem_data->mw_reg_pool->allocate ();
>   	  hardreg->type = ref_type;
>   	  hardreg->flags = ref_flags;
>   	  hardreg->mw_reg = reg;
>

v2.

[-- Attachment #2: 0013-Change-use-to-type-based-pool-allocator-in-df-scan.c.patch --]
[-- Type: text/x-patch, Size: 9512 bytes --]

From 2168bcd7c3f63a06372d4b2de3959397cf9105af Mon Sep 17 00:00:00 2001
From: mliska <mliska@suse.cz>
Date: Wed, 27 May 2015 15:56:48 +0200
Subject: [PATCH 13/32] Change use to type-based pool allocator in df-scan.c.

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* df-scan.c (struct df_scan_problem_data): Use new type-based pool allocator.
	(df_scan_free_internal): Likewise.
	(df_scan_alloc): Likewise.
	(df_grow_reg_info): Likewise.
	(df_free_ref): Likewise.
	(df_insn_create_insn_record): Likewise.
	(df_mw_hardreg_chain_delete): Likewise.
	(df_insn_info_delete): Likewise.
	(df_free_collection_rec): Likewise.
	(df_mw_hardreg_chain_delete_eq_uses): Likewise.
	(df_sort_and_compress_mws): Likewise.
	(df_ref_create_structure): Likewise.
	(df_ref_record): Likewise.
---
 gcc/df-scan.c | 94 +++++++++++++++++++++++++++++------------------------------
 1 file changed, 46 insertions(+), 48 deletions(-)

diff --git a/gcc/df-scan.c b/gcc/df-scan.c
index e32eaf5..4646bcf 100644
--- a/gcc/df-scan.c
+++ b/gcc/df-scan.c
@@ -159,15 +159,18 @@ static const unsigned int copy_all = copy_defs | copy_uses | copy_eq_uses
    it gets run.  It also has no need for the iterative solver.
 ----------------------------------------------------------------------------*/
 
+#define SCAN_PROBLEM_DATA_BLOCK_SIZE 512
+
 /* Problem data for the scanning dataflow function.  */
 struct df_scan_problem_data
 {
-  alloc_pool ref_base_pool;
-  alloc_pool ref_artificial_pool;
-  alloc_pool ref_regular_pool;
-  alloc_pool insn_pool;
-  alloc_pool reg_pool;
-  alloc_pool mw_reg_pool;
+  pool_allocator<df_base_ref> *ref_base_pool;
+  pool_allocator<df_artificial_ref> *ref_artificial_pool;
+  pool_allocator<df_regular_ref> *ref_regular_pool;
+  pool_allocator<df_insn_info> *insn_pool;
+  pool_allocator<df_reg_info> *reg_pool;
+  pool_allocator<df_mw_hardreg> *mw_reg_pool;
+
   bitmap_obstack reg_bitmaps;
   bitmap_obstack insn_bitmaps;
 };
@@ -218,12 +221,12 @@ df_scan_free_internal (void)
   bitmap_clear (&df->insns_to_rescan);
   bitmap_clear (&df->insns_to_notes_rescan);
 
-  free_alloc_pool (problem_data->ref_base_pool);
-  free_alloc_pool (problem_data->ref_artificial_pool);
-  free_alloc_pool (problem_data->ref_regular_pool);
-  free_alloc_pool (problem_data->insn_pool);
-  free_alloc_pool (problem_data->reg_pool);
-  free_alloc_pool (problem_data->mw_reg_pool);
+  delete problem_data->ref_base_pool;
+  delete problem_data->ref_artificial_pool;
+  delete problem_data->ref_regular_pool;
+  delete problem_data->insn_pool;
+  delete problem_data->reg_pool;
+  delete problem_data->mw_reg_pool;
   bitmap_obstack_release (&problem_data->reg_bitmaps);
   bitmap_obstack_release (&problem_data->insn_bitmaps);
   free (df_scan->problem_data);
@@ -264,7 +267,6 @@ df_scan_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
 {
   struct df_scan_problem_data *problem_data;
   unsigned int insn_num = get_max_uid () + 1;
-  unsigned int block_size = 512;
   basic_block bb;
 
   /* Given the number of pools, this is really faster than tearing
@@ -276,24 +278,18 @@ df_scan_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
   df_scan->problem_data = problem_data;
   df_scan->computed = true;
 
-  problem_data->ref_base_pool
-    = create_alloc_pool ("df_scan ref base",
-			 sizeof (struct df_base_ref), block_size);
-  problem_data->ref_artificial_pool
-    = create_alloc_pool ("df_scan ref artificial",
-			 sizeof (struct df_artificial_ref), block_size);
-  problem_data->ref_regular_pool
-    = create_alloc_pool ("df_scan ref regular",
-			 sizeof (struct df_regular_ref), block_size);
-  problem_data->insn_pool
-    = create_alloc_pool ("df_scan insn",
-			 sizeof (struct df_insn_info), block_size);
-  problem_data->reg_pool
-    = create_alloc_pool ("df_scan reg",
-			 sizeof (struct df_reg_info), block_size);
-  problem_data->mw_reg_pool
-    = create_alloc_pool ("df_scan mw_reg",
-			 sizeof (struct df_mw_hardreg), block_size / 16);
+  problem_data->ref_base_pool = new pool_allocator<df_base_ref>
+    ("df_scan ref base", SCAN_PROBLEM_DATA_BLOCK_SIZE);
+  problem_data->ref_artificial_pool = new pool_allocator<df_artificial_ref>
+    ("df_scan ref artificial", SCAN_PROBLEM_DATA_BLOCK_SIZE);
+  problem_data->ref_regular_pool = new pool_allocator<df_regular_ref>
+    ("df_scan ref regular", SCAN_PROBLEM_DATA_BLOCK_SIZE);
+  problem_data->insn_pool = new pool_allocator<df_insn_info>
+    ("df_scan insn", SCAN_PROBLEM_DATA_BLOCK_SIZE);
+  problem_data->reg_pool = new pool_allocator<df_reg_info>
+    ("df_scan reg", SCAN_PROBLEM_DATA_BLOCK_SIZE);
+  problem_data->mw_reg_pool = new pool_allocator<df_mw_hardreg>
+    ("df_scan mw_reg", SCAN_PROBLEM_DATA_BLOCK_SIZE / 16);
 
   bitmap_obstack_initialize (&problem_data->reg_bitmaps);
   bitmap_obstack_initialize (&problem_data->insn_bitmaps);
@@ -519,13 +515,14 @@ df_grow_reg_info (void)
     {
       struct df_reg_info *reg_info;
 
-      reg_info = (struct df_reg_info *) pool_alloc (problem_data->reg_pool);
+      // TODO
+      reg_info = problem_data->reg_pool->allocate ();
       memset (reg_info, 0, sizeof (struct df_reg_info));
       df->def_regs[i] = reg_info;
-      reg_info = (struct df_reg_info *) pool_alloc (problem_data->reg_pool);
+      reg_info = problem_data->reg_pool->allocate ();
       memset (reg_info, 0, sizeof (struct df_reg_info));
       df->use_regs[i] = reg_info;
-      reg_info = (struct df_reg_info *) pool_alloc (problem_data->reg_pool);
+      reg_info = problem_data->reg_pool->allocate ();
       memset (reg_info, 0, sizeof (struct df_reg_info));
       df->eq_use_regs[i] = reg_info;
       df->def_info.begin[i] = 0;
@@ -740,15 +737,17 @@ df_free_ref (df_ref ref)
   switch (DF_REF_CLASS (ref))
     {
     case DF_REF_BASE:
-      pool_free (problem_data->ref_base_pool, ref);
+      problem_data->ref_base_pool->remove ((df_base_ref *) (ref));
       break;
 
     case DF_REF_ARTIFICIAL:
-      pool_free (problem_data->ref_artificial_pool, ref);
+      problem_data->ref_artificial_pool->remove
+	((df_artificial_ref *) (ref));
       break;
 
     case DF_REF_REGULAR:
-      pool_free (problem_data->ref_regular_pool, ref);
+      problem_data->ref_regular_pool->remove
+	((df_regular_ref *) (ref));
       break;
     }
 }
@@ -851,7 +850,7 @@ df_insn_create_insn_record (rtx_insn *insn)
   insn_rec = DF_INSN_INFO_GET (insn);
   if (!insn_rec)
     {
-      insn_rec = (struct df_insn_info *) pool_alloc (problem_data->insn_pool);
+      insn_rec = problem_data->insn_pool->allocate ();
       DF_INSN_INFO_SET (insn, insn_rec);
     }
   memset (insn_rec, 0, sizeof (struct df_insn_info));
@@ -899,7 +898,7 @@ df_mw_hardreg_chain_delete (struct df_mw_hardreg *hardregs)
   for (; hardregs; hardregs = next)
     {
       next = DF_MWS_NEXT (hardregs);
-      pool_free (problem_data->mw_reg_pool, hardregs);
+      problem_data->mw_reg_pool->remove (hardregs);
     }
 }
 
@@ -940,7 +939,7 @@ df_insn_info_delete (unsigned int uid)
       df_ref_chain_delete (insn_info->uses);
       df_ref_chain_delete (insn_info->eq_uses);
 
-      pool_free (problem_data->insn_pool, insn_info);
+      problem_data->insn_pool->remove (insn_info);
       DF_INSN_UID_SET (uid, NULL);
     }
 }
@@ -1024,7 +1023,7 @@ df_free_collection_rec (struct df_collection_rec *collection_rec)
   FOR_EACH_VEC_ELT (collection_rec->eq_use_vec, ix, ref)
     df_free_ref (ref);
   FOR_EACH_VEC_ELT (collection_rec->mw_vec, ix, mw)
-    pool_free (problem_data->mw_reg_pool, mw);
+    problem_data->mw_reg_pool->remove (mw);
 
   collection_rec->def_vec.release ();
   collection_rec->use_vec.release ();
@@ -1949,7 +1948,7 @@ df_mw_hardreg_chain_delete_eq_uses (struct df_insn_info *insn_info)
       if (mw->flags & DF_REF_IN_NOTE)
 	{
 	  *mw_ptr = DF_MWS_NEXT (mw);
-	  pool_free (problem_data->mw_reg_pool, mw);
+	  problem_data->mw_reg_pool->remove (mw);
 	}
       else
 	mw_ptr = &DF_MWS_NEXT (mw);
@@ -2296,8 +2295,7 @@ df_sort_and_compress_mws (vec<df_mw_hardreg_ptr, va_heap> *mw_vec)
       while (i + dist + 1 < count
 	     && df_mw_equal_p ((*mw_vec)[i], (*mw_vec)[i + dist + 1]))
 	{
-	  pool_free (problem_data->mw_reg_pool,
-		     (*mw_vec)[i + dist + 1]);
+	  problem_data->mw_reg_pool->remove ((*mw_vec)[i + dist + 1]);
 	  dist++;
 	}
       /* Copy it down to the next position.  */
@@ -2525,18 +2523,18 @@ df_ref_create_structure (enum df_ref_class cl,
   switch (cl)
     {
     case DF_REF_BASE:
-      this_ref = (df_ref) pool_alloc (problem_data->ref_base_pool);
+      this_ref = (df_ref) (problem_data->ref_base_pool->allocate ());
       gcc_checking_assert (loc == NULL);
       break;
 
     case DF_REF_ARTIFICIAL:
-      this_ref = (df_ref) pool_alloc (problem_data->ref_artificial_pool);
+      this_ref = (df_ref) (problem_data->ref_artificial_pool->allocate ());
       this_ref->artificial_ref.bb = bb;
       gcc_checking_assert (loc == NULL);
       break;
 
     case DF_REF_REGULAR:
-      this_ref = (df_ref) pool_alloc (problem_data->ref_regular_pool);
+      this_ref = (df_ref) (problem_data->ref_regular_pool->allocate ());
       this_ref->regular_ref.loc = loc;
       gcc_checking_assert (loc);
       break;
@@ -2638,7 +2636,7 @@ df_ref_record (enum df_ref_class cl,
 	    ref_flags |= DF_REF_PARTIAL;
 	  ref_flags |= DF_REF_MW_HARDREG;
 
-	  hardreg = (struct df_mw_hardreg *) pool_alloc (problem_data->mw_reg_pool);
+	  hardreg = problem_data->mw_reg_pool->allocate ();
 	  hardreg->type = ref_type;
 	  hardreg->flags = ref_flags;
 	  hardreg->mw_reg = reg;
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 16/35] Change use to type-based pool allocator in tree-sra.c.
  2015-05-27 18:11   ` Jeff Law
@ 2015-05-29 13:39     ` Martin Liška
  0 siblings, 0 replies; 108+ messages in thread
From: Martin Liška @ 2015-05-29 13:39 UTC (permalink / raw)
  To: gcc-patches

[-- Attachment #1: Type: text/plain, Size: 409 bytes --]

On 05/27/2015 08:02 PM, Jeff Law wrote:
> On 05/27/2015 07:56 AM, mliska wrote:
>> gcc/ChangeLog:
>>
>> 2015-04-30  Martin Liska  <mliska@suse.cz>
>>
>>     * tree-sra.c (sra_initialize): Use new type-based pool allocator.
>>     (sra_deinitialize) Likewise.
>>     (create_access_1) Likewise.
>>     (build_accesses_from_assign) Likewise.
>>     (create_artificial_child_access) Likewise.
> OK.
> jeff
>

v2

[-- Attachment #2: 0015-Change-use-to-type-based-pool-allocator-in-tree-sra..patch --]
[-- Type: text/x-patch, Size: 4444 bytes --]

From a9d3c0a7d4c596d6b5b550b14d0754eae748e2b4 Mon Sep 17 00:00:00 2001
From: mliska <mliska@suse.cz>
Date: Wed, 27 May 2015 15:56:49 +0200
Subject: [PATCH 15/32] Change use to type-based pool allocator in tree-sra.c.

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* tree-sra.c (sra_initialize): Use new type-based pool allocator.
	(sra_deinitialize): Likewise.
	(create_access_1): Likewise.
	(build_accesses_from_assign): Likewise.
	(create_artificial_child_access): Likewise.
---
 gcc/tree-sra.c | 48 +++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 37 insertions(+), 11 deletions(-)

diff --git a/gcc/tree-sra.c b/gcc/tree-sra.c
index 4b0d2a8..3c3c320 100644
--- a/gcc/tree-sra.c
+++ b/gcc/tree-sra.c
@@ -300,13 +300,28 @@ struct access
   /* Set when we discover that this pointer is not safe to dereference in the
      caller.  */
   unsigned grp_not_necessarilly_dereferenced : 1;
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove ((access *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<access> pool;
 };
 
 typedef struct access *access_p;
 
 
 /* Alloc pool for allocating access structures.  */
-static alloc_pool access_pool;
+pool_allocator<struct access> access::pool ("SRA accesses", 16);
 
 /* A structure linking lhs and rhs accesses from an aggregate assignment.  They
    are used to propagate subaccesses from rhs to lhs as long as they don't
@@ -315,10 +330,25 @@ struct assign_link
 {
   struct access *lacc, *racc;
   struct assign_link *next;
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove ((assign_link *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<assign_link> pool;
 };
 
 /* Alloc pool for allocating assign link structures.  */
-static alloc_pool link_pool;
+pool_allocator<assign_link> assign_link::pool ("SRA links", 16);
 
 /* Base (tree) -> Vector (vec<access_p> *) map.  */
 static hash_map<tree, auto_vec<access_p> > *base_access_vec;
@@ -690,8 +720,6 @@ sra_initialize (void)
   should_scalarize_away_bitmap = BITMAP_ALLOC (NULL);
   cannot_scalarize_away_bitmap = BITMAP_ALLOC (NULL);
   gcc_obstack_init (&name_obstack);
-  access_pool = create_alloc_pool ("SRA accesses", sizeof (struct access), 16);
-  link_pool = create_alloc_pool ("SRA links", sizeof (struct assign_link), 16);
   base_access_vec = new hash_map<tree, auto_vec<access_p> >;
   memset (&sra_stats, 0, sizeof (sra_stats));
   encountered_apply_args = false;
@@ -709,8 +737,8 @@ sra_deinitialize (void)
   candidates = NULL;
   BITMAP_FREE (should_scalarize_away_bitmap);
   BITMAP_FREE (cannot_scalarize_away_bitmap);
-  free_alloc_pool (access_pool);
-  free_alloc_pool (link_pool);
+  access::pool.release ();
+  assign_link::pool.release ();
   obstack_free (&name_obstack, NULL);
 
   delete base_access_vec;
@@ -862,9 +890,8 @@ mark_parm_dereference (tree base, HOST_WIDE_INT dist, gimple stmt)
 static struct access *
 create_access_1 (tree base, HOST_WIDE_INT offset, HOST_WIDE_INT size)
 {
-  struct access *access;
+  struct access *access = new struct access ();
 
-  access = (struct access *) pool_alloc (access_pool);
   memset (access, 0, sizeof (struct access));
   access->base = base;
   access->offset = offset;
@@ -1239,7 +1266,7 @@ build_accesses_from_assign (gimple stmt)
     {
       struct assign_link *link;
 
-      link = (struct assign_link *) pool_alloc (link_pool);
+      link = new assign_link;
       memset (link, 0, sizeof (struct assign_link));
 
       link->lacc = lacc;
@@ -2393,13 +2420,12 @@ static struct access *
 create_artificial_child_access (struct access *parent, struct access *model,
 				HOST_WIDE_INT new_offset)
 {
-  struct access *access;
   struct access **child;
   tree expr = parent->base;
 
   gcc_assert (!model->grp_unscalarizable_region);
 
-  access = (struct access *) pool_alloc (access_pool);
+  struct access *access = new struct access ();
   memset (access, 0, sizeof (struct access));
   if (!build_user_friendly_ref_for_offset (&expr, TREE_TYPE (expr), new_offset,
 					   model->type))
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 20/35] Change use to type-based pool allocator in ira-build.c.
  2015-05-27 18:15   ` Jeff Law
@ 2015-05-29 13:39     ` Martin Liška
  0 siblings, 0 replies; 108+ messages in thread
From: Martin Liška @ 2015-05-29 13:39 UTC (permalink / raw)
  To: Jeff Law, gcc-patches

[-- Attachment #1: Type: text/plain, Size: 376 bytes --]

On 05/27/2015 08:12 PM, Jeff Law wrote:
> On 05/27/2015 07:56 AM, mliska wrote:
>> gcc/ChangeLog:
>>
>> 2015-04-30  Martin Liska  <mliska@suse.cz>
>>
>>     * ira-build.c (initiate_cost_vectors): Use new type-based pool allocator.
>>     (ira_allocate_cost_vector): Likewise.
>>     (ira_free_cost_vector): Likewise.
>>     (finish_cost_vectors): Likewise.
> OK.
> jeff
>

v2

[-- Attachment #2: 0019-Change-use-to-type-based-pool-allocator-in-ira-build.patch --]
[-- Type: text/x-patch, Size: 2301 bytes --]

From 3df359fb77e6e60341ef5f9dec2898708245f5ee Mon Sep 17 00:00:00 2001
From: mliska <mliska@suse.cz>
Date: Wed, 27 May 2015 15:56:51 +0200
Subject: [PATCH 19/32] Change use to type-based pool allocator in ira-build.c.

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* ira-build.c (initiate_cost_vectors): Use new type-based pool allocator.
	(ira_allocate_cost_vector): Likewise.
	(ira_free_cost_vector): Likewise.
	(finish_cost_vectors): Likewise.
---
 gcc/ira-build.c | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/gcc/ira-build.c b/gcc/ira-build.c
index 8b6b956..2de7d34 100644
--- a/gcc/ira-build.c
+++ b/gcc/ira-build.c
@@ -1633,7 +1633,7 @@ finish_copies (void)
 \f
 
 /* Pools for cost vectors.  It is defined only for allocno classes.  */
-static alloc_pool cost_vector_pool[N_REG_CLASSES];
+static pool_allocator<int> * cost_vector_pool[N_REG_CLASSES];
 
 /* The function initiates work with hard register cost vectors.  It
    creates allocation pool for each allocno class.  */
@@ -1646,10 +1646,9 @@ initiate_cost_vectors (void)
   for (i = 0; i < ira_allocno_classes_num; i++)
     {
       aclass = ira_allocno_classes[i];
-      cost_vector_pool[aclass]
-	= create_alloc_pool ("cost vectors",
-			     sizeof (int) * ira_class_hard_regs_num[aclass],
-			     100);
+      cost_vector_pool[aclass] = new pool_allocator<int>
+	("cost vectors", 100,
+	 sizeof (int) * (ira_class_hard_regs_num[aclass] - 1));
     }
 }
 
@@ -1657,7 +1656,7 @@ initiate_cost_vectors (void)
 int *
 ira_allocate_cost_vector (reg_class_t aclass)
 {
-  return (int *) pool_alloc (cost_vector_pool[(int) aclass]);
+  return cost_vector_pool[(int) aclass]->allocate ();
 }
 
 /* Free a cost vector VEC for ACLASS.  */
@@ -1665,7 +1664,7 @@ void
 ira_free_cost_vector (int *vec, reg_class_t aclass)
 {
   ira_assert (vec != NULL);
-  pool_free (cost_vector_pool[(int) aclass], vec);
+  cost_vector_pool[(int) aclass]->remove (vec);
 }
 
 /* Finish work with hard register cost vectors.  Release allocation
@@ -1679,7 +1678,7 @@ finish_cost_vectors (void)
   for (i = 0; i < ira_allocno_classes_num; i++)
     {
       aclass = ira_allocno_classes[i];
-      free_alloc_pool (cost_vector_pool[aclass]);
+      delete cost_vector_pool[aclass];
     }
 }
 
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 18/35] Change use to type-based pool allocator in stmt.c.
  2015-05-27 18:13   ` Jeff Law
@ 2015-05-29 13:39     ` Martin Liška
  0 siblings, 0 replies; 108+ messages in thread
From: Martin Liška @ 2015-05-29 13:39 UTC (permalink / raw)
  To: Jeff Law, gcc-patches

[-- Attachment #1: Type: text/plain, Size: 316 bytes --]

On 05/27/2015 08:09 PM, Jeff Law wrote:
> On 05/27/2015 07:56 AM, mliska wrote:
>> gcc/ChangeLog:
>>
>> 2015-04-30  Martin Liska  <mliska@suse.cz>
>>
>>     * stmt.c (add_case_node): Use new type-based pool allocator.
>>     (expand_case): Likewise.
>>     (expand_sjlj_dispatch_table): Likewise.
> OK.
> jeff
>

v2

[-- Attachment #2: 0017-Change-use-to-type-based-pool-allocator-in-stmt.c.patch --]
[-- Type: text/x-patch, Size: 3114 bytes --]

From 7d6b26873043b383aaa0b382f4edbcf43424ea54 Mon Sep 17 00:00:00 2001
From: mliska <mliska@suse.cz>
Date: Wed, 27 May 2015 15:56:50 +0200
Subject: [PATCH 17/32] Change use to type-based pool allocator in stmt.c.

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* stmt.c (add_case_node): Use new type-based pool allocator.
	(expand_case): Likewise.
	(expand_sjlj_dispatch_table): Likewise.
---
 gcc/stmt.c | 16 +++++-----------
 1 file changed, 5 insertions(+), 11 deletions(-)

diff --git a/gcc/stmt.c b/gcc/stmt.c
index 303df72..a04f69b 100644
--- a/gcc/stmt.c
+++ b/gcc/stmt.c
@@ -748,7 +748,7 @@ do_jump_if_equal (machine_mode mode, rtx op0, rtx op1, rtx_code_label *label,
 
 static struct case_node *
 add_case_node (struct case_node *head, tree low, tree high,
-               tree label, int prob, alloc_pool case_node_pool)
+	       tree label, int prob, pool_allocator<case_node> &case_node_pool)
 {
   struct case_node *r;
 
@@ -756,7 +756,7 @@ add_case_node (struct case_node *head, tree low, tree high,
   gcc_checking_assert (high && (TREE_TYPE (low) == TREE_TYPE (high)));
 
   /* Add this label to the chain.  */
-  r = (struct case_node *) pool_alloc (case_node_pool);
+  r = case_node_pool.allocate ();
   r->low = low;
   r->high = high;
   r->code_label = label;
@@ -1160,7 +1160,7 @@ expand_case (gswitch *stmt)
   struct case_node *case_list = 0;
 
   /* A pool for case nodes.  */
-  alloc_pool case_node_pool;
+  pool_allocator<case_node> case_node_pool ("struct case_node pool", 100);
 
   /* An ERROR_MARK occurs for various reasons including invalid data type.
      ??? Can this still happen, with GIMPLE and all?  */
@@ -1171,9 +1171,6 @@ expand_case (gswitch *stmt)
      expressions being INTEGER_CST.  */
   gcc_assert (TREE_CODE (index_expr) != INTEGER_CST);
   
-  case_node_pool = create_alloc_pool ("struct case_node pool",
-				      sizeof (struct case_node),
-				      100);
 
   do_pending_stack_adjust ();
 
@@ -1273,7 +1270,6 @@ expand_case (gswitch *stmt)
   reorder_insns (NEXT_INSN (before_case), get_last_insn (), before_case);
 
   free_temp_slots ();
-  free_alloc_pool (case_node_pool);
 }
 
 /* Expand the dispatch to a short decrement chain if there are few cases
@@ -1340,9 +1336,8 @@ expand_sjlj_dispatch_table (rtx dispatch_index,
     {
       /* Similar to expand_case, but much simpler.  */
       struct case_node *case_list = 0;
-      alloc_pool case_node_pool = create_alloc_pool ("struct sjlj_case pool",
-						     sizeof (struct case_node),
-						     ncases);
+      pool_allocator<case_node> case_node_pool ("struct sjlj_case pool",
+						ncases);
       tree index_expr = make_tree (index_type, dispatch_index);
       tree minval = build_int_cst (index_type, 0);
       tree maxval = CASE_LOW (dispatch_table.last ());
@@ -1362,7 +1357,6 @@ expand_sjlj_dispatch_table (rtx dispatch_index,
 				minval, maxval, range,
                                 BLOCK_FOR_INSN (before_case));
       emit_label (default_label);
-      free_alloc_pool (case_node_pool);
     }
 
   /* Dispatching something not handled?  Trap!  */
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 17/35] Change use to type-based pool allocator in tree-ssa-math-opts.c.
  2015-05-27 18:12   ` Jeff Law
@ 2015-05-29 13:39     ` Martin Liška
  0 siblings, 0 replies; 108+ messages in thread
From: Martin Liška @ 2015-05-29 13:39 UTC (permalink / raw)
  To: gcc-patches

[-- Attachment #1: Type: text/plain, Size: 323 bytes --]

On 05/27/2015 08:03 PM, Jeff Law wrote:
> On 05/27/2015 07:56 AM, mliska wrote:
>> gcc/ChangeLog:
>>
>> 2015-04-30  Martin Liska  <mliska@suse.cz>
>>
>>     * tree-ssa-math-opts.c (occ_new): Use new type-based pool allocator.
>>     (free_bb): Likewise.
>>     (pass_cse_reciprocals::execute): Likewise.
> OK.
> jeff
>

v2

[-- Attachment #2: 0016-Change-use-to-type-based-pool-allocator-in-tree-ssa-.patch --]
[-- Type: text/x-patch, Size: 2139 bytes --]

From 3fffec068dfb9ee2b8c23a6b95db33afd8dc6c90 Mon Sep 17 00:00:00 2001
From: mliska <mliska@suse.cz>
Date: Wed, 27 May 2015 15:56:49 +0200
Subject: [PATCH 16/32] Change use to type-based pool allocator in
 tree-ssa-math-opts.c.

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* tree-ssa-math-opts.c (occ_new): Use new type-based pool allocator.
	(free_bb): Likewise.
	(pass_cse_reciprocals::execute): Likewise.
---
 gcc/tree-ssa-math-opts.c | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/gcc/tree-ssa-math-opts.c b/gcc/tree-ssa-math-opts.c
index 98e2c49..0df755b 100644
--- a/gcc/tree-ssa-math-opts.c
+++ b/gcc/tree-ssa-math-opts.c
@@ -229,7 +229,7 @@ static struct
 static struct occurrence *occ_head;
 
 /* Allocation pool for getting instances of "struct occurrence".  */
-static alloc_pool occ_pool;
+static pool_allocator<occurrence> *occ_pool;
 
 
 
@@ -240,7 +240,7 @@ occ_new (basic_block bb, struct occurrence *children)
 {
   struct occurrence *occ;
 
-  bb->aux = occ = (struct occurrence *) pool_alloc (occ_pool);
+  bb->aux = occ = occ_pool->allocate ();
   memset (occ, 0, sizeof (struct occurrence));
 
   occ->bb = bb;
@@ -468,7 +468,7 @@ free_bb (struct occurrence *occ)
   next = occ->next;
   child = occ->children;
   occ->bb->aux = NULL;
-  pool_free (occ_pool, occ);
+  occ_pool->remove (occ);
 
   /* Now ensure that we don't recurse unless it is necessary.  */
   if (!child)
@@ -572,9 +572,8 @@ pass_cse_reciprocals::execute (function *fun)
   basic_block bb;
   tree arg;
 
-  occ_pool = create_alloc_pool ("dominators for recip",
-				sizeof (struct occurrence),
-				n_basic_blocks_for_fn (fun) / 3 + 1);
+  occ_pool = new pool_allocator<occurrence>
+    ("dominators for recip", n_basic_blocks_for_fn (fun) / 3 + 1);
 
   memset (&reciprocal_stats, 0, sizeof (reciprocal_stats));
   calculate_dominance_info (CDI_DOMINATORS);
@@ -704,7 +703,7 @@ pass_cse_reciprocals::execute (function *fun)
 
   free_dominance_info (CDI_DOMINATORS);
   free_dominance_info (CDI_POST_DOMINATORS);
-  free_alloc_pool (occ_pool);
+  delete occ_pool;
   return 0;
 }
 
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 21/35] Change use to type-based pool allocator in regcprop.c.
  2015-05-27 18:14   ` Jeff Law
@ 2015-05-29 13:40     ` Martin Liška
  0 siblings, 0 replies; 108+ messages in thread
From: Martin Liška @ 2015-05-29 13:40 UTC (permalink / raw)
  To: gcc-patches

[-- Attachment #1: Type: text/plain, Size: 344 bytes --]

On 05/27/2015 08:11 PM, Jeff Law wrote:
> On 05/27/2015 07:56 AM, mliska wrote:
>> gcc/ChangeLog:
>>
>> 2015-04-30  Martin Liska  <mliska@suse.cz>
>>
>>     * regcprop.c (free_debug_insn_changes): Use new type-based pool allocator.
>>     (replace_oldest_value_reg): Likewise.
>>     (pass_cprop_hardreg::execute): Likewise.
> OK.
> jeff
>

v2

[-- Attachment #2: 0020-Change-use-to-type-based-pool-allocator-in-regcprop..patch --]
[-- Type: text/x-patch, Size: 2933 bytes --]

From 572893e8d7330b43da2373c7e7ab40a51ce6a40c Mon Sep 17 00:00:00 2001
From: mliska <mliska@suse.cz>
Date: Wed, 27 May 2015 15:56:51 +0200
Subject: [PATCH 20/32] Change use to type-based pool allocator in regcprop.c.

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* regcprop.c (free_debug_insn_changes): Use new type-based pool allocator.
	(replace_oldest_value_reg): Likewise.
	(pass_cprop_hardreg::execute): Likewise.
---
 gcc/regcprop.c | 31 +++++++++++++++++++++----------
 1 file changed, 21 insertions(+), 10 deletions(-)

diff --git a/gcc/regcprop.c b/gcc/regcprop.c
index 7d7a9a09..9bac11a 100644
--- a/gcc/regcprop.c
+++ b/gcc/regcprop.c
@@ -62,6 +62,21 @@ struct queued_debug_insn_change
   rtx_insn *insn;
   rtx *loc;
   rtx new_rtx;
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove ((queued_debug_insn_change *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<queued_debug_insn_change> pool;
 };
 
 /* For each register, we have a list of registers that contain the same
@@ -85,7 +100,9 @@ struct value_data
   unsigned int n_debug_insn_changes;
 };
 
-static alloc_pool debug_insn_changes_pool;
+pool_allocator<queued_debug_insn_change> queued_debug_insn_change::pool
+  ("debug insn changes pool", 256);
+
 static bool skip_debug_insn_p;
 
 static void kill_value_one_regno (unsigned, struct value_data *);
@@ -124,7 +141,7 @@ free_debug_insn_changes (struct value_data *vd, unsigned int regno)
     {
       next = cur->next;
       --vd->n_debug_insn_changes;
-      pool_free (debug_insn_changes_pool, cur);
+      delete cur;
     }
   vd->e[regno].debug_insn_changes = NULL;
 }
@@ -495,8 +512,7 @@ replace_oldest_value_reg (rtx *loc, enum reg_class cl, rtx_insn *insn,
 	    fprintf (dump_file, "debug_insn %u: queued replacing reg %u with %u\n",
 		     INSN_UID (insn), REGNO (*loc), REGNO (new_rtx));
 
-	  change = (struct queued_debug_insn_change *)
-		   pool_alloc (debug_insn_changes_pool);
+	  change = new queued_debug_insn_change;
 	  change->next = vd->e[REGNO (new_rtx)].debug_insn_changes;
 	  change->insn = insn;
 	  change->loc = loc;
@@ -1244,11 +1260,6 @@ pass_cprop_hardreg::execute (function *fun)
   visited = sbitmap_alloc (last_basic_block_for_fn (fun));
   bitmap_clear (visited);
 
-  if (MAY_HAVE_DEBUG_INSNS)
-    debug_insn_changes_pool
-      = create_alloc_pool ("debug insn changes pool",
-			   sizeof (struct queued_debug_insn_change), 256);
-
   FOR_EACH_BB_FN (bb, fun)
     {
       bitmap_set_bit (visited, bb->index);
@@ -1308,7 +1319,7 @@ pass_cprop_hardreg::execute (function *fun)
 		}
 	  }
 
-      free_alloc_pool (debug_insn_changes_pool);
+      queued_debug_insn_change::pool.release ();
     }
 
   sbitmap_free (visited);
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 19/35] Change use to type-based pool allocator in sel-sched-ir.c.
  2015-05-27 18:12   ` Jeff Law
@ 2015-05-29 13:40     ` Martin Liška
  0 siblings, 0 replies; 108+ messages in thread
From: Martin Liška @ 2015-05-29 13:40 UTC (permalink / raw)
  To: gcc-patches

[-- Attachment #1: Type: text/plain, Size: 367 bytes --]

On 05/27/2015 08:04 PM, Jeff Law wrote:
> On 05/27/2015 07:56 AM, mliska wrote:
>> gcc/ChangeLog:
>>
>> 2015-04-30  Martin Liska  <mliska@suse.cz>
>>
>>     * sel-sched-ir.c (alloc_sched_pools): Use new type-based pool allocator.
>>     (free_sched_pools): Likewise.
>>     * sel-sched-ir.h (_list_alloc): Likewise.
>>     (_list_remove): Likewise.
> OK
> jeff
>

v2

[-- Attachment #2: 0018-Change-use-to-type-based-pool-allocator-in-sel-sched.patch --]
[-- Type: text/x-patch, Size: 2347 bytes --]

From 9e5b4f84bb12652353c92827371645ca97be2a72 Mon Sep 17 00:00:00 2001
From: mliska <mliska@suse.cz>
Date: Wed, 27 May 2015 15:56:50 +0200
Subject: [PATCH 18/32] Change use to type-based pool allocator in
 sel-sched-ir.c.

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* sel-sched-ir.c (alloc_sched_pools): Use new type-based pool allocator.
	(free_sched_pools): Likewise.
	* sel-sched-ir.h (_list_alloc): Likewise.
	(_list_remove): Likewise.
---
 gcc/sel-sched-ir.c | 7 ++-----
 gcc/sel-sched-ir.h | 6 +++---
 2 files changed, 5 insertions(+), 8 deletions(-)

diff --git a/gcc/sel-sched-ir.c b/gcc/sel-sched-ir.c
index 94f6c43..ffaba56 100644
--- a/gcc/sel-sched-ir.c
+++ b/gcc/sel-sched-ir.c
@@ -70,7 +70,7 @@ vec<sel_region_bb_info_def>
     sel_region_bb_info = vNULL;
 
 /* A pool for allocating all lists.  */
-alloc_pool sched_lists_pool;
+pool_allocator<_list_node> sched_lists_pool ("sel-sched-lists", 500);
 
 /* This contains information about successors for compute_av_set.  */
 struct succs_info current_succs;
@@ -5030,9 +5030,6 @@ alloc_sched_pools (void)
   succs_info_pool.size = succs_size;
   succs_info_pool.top = -1;
   succs_info_pool.max_top = -1;
-
-  sched_lists_pool = create_alloc_pool ("sel-sched-lists",
-                                        sizeof (struct _list_node), 500);
 }
 
 /* Free the pools.  */
@@ -5041,7 +5038,7 @@ free_sched_pools (void)
 {
   int i;
 
-  free_alloc_pool (sched_lists_pool);
+  sched_lists_pool.release ();
   gcc_assert (succs_info_pool.top == -1);
   for (i = 0; i <= succs_info_pool.max_top; i++)
     {
diff --git a/gcc/sel-sched-ir.h b/gcc/sel-sched-ir.h
index 91ce92f..3707a87 100644
--- a/gcc/sel-sched-ir.h
+++ b/gcc/sel-sched-ir.h
@@ -364,12 +364,12 @@ struct _list_node
 /* _list_t functions.
    All of _*list_* functions are used through accessor macros, thus
    we can't move them in sel-sched-ir.c.  */
-extern alloc_pool sched_lists_pool;
+extern pool_allocator<_list_node> sched_lists_pool;
 
 static inline _list_t
 _list_alloc (void)
 {
-  return (_list_t) pool_alloc (sched_lists_pool);
+  return sched_lists_pool.allocate ();
 }
 
 static inline void
@@ -395,7 +395,7 @@ _list_remove (_list_t *lp)
   _list_t n = *lp;
 
   *lp = _LIST_NEXT (n);
-  pool_free (sched_lists_pool, n);
+  sched_lists_pool.remove (n);
 }
 
 static inline void
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 22/35] Change use to type-based pool allocator in sched-deps.c.
  2015-05-27 18:16   ` Jeff Law
@ 2015-05-29 13:40     ` Martin Liška
  0 siblings, 0 replies; 108+ messages in thread
From: Martin Liška @ 2015-05-29 13:40 UTC (permalink / raw)
  To: gcc-patches

[-- Attachment #1: Type: text/plain, Size: 504 bytes --]

On 05/27/2015 08:14 PM, Jeff Law wrote:
> On 05/27/2015 07:56 AM, mliska wrote:
>> gcc/ChangeLog:
>>
>> 2015-04-30  Martin Liska  <mliska@suse.cz>
>>
>>     * sched-deps.c (create_dep_node): Use new type-based pool allocator.
>>     (delete_dep_node): Likewise.
>>     (create_deps_list): Likewise.
>>     (free_deps_list): Likewise.
>>     (sched_deps_init): Likewise.
>>     (sched_deps_finish): Likewise.
> OK.
>
> First use of the release_if_empty API that I've seen in these patches.
>
> jeff
>

v2

[-- Attachment #2: 0021-Change-use-to-type-based-pool-allocator-in-sched-dep.patch --]
[-- Type: text/x-patch, Size: 3145 bytes --]

From a9216021be50a7b5719fd75c8786ac6831010b05 Mon Sep 17 00:00:00 2001
From: mliska <mliska@suse.cz>
Date: Wed, 27 May 2015 15:56:52 +0200
Subject: [PATCH 21/32] Change use to type-based pool allocator in
 sched-deps.c.

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* sched-deps.c (create_dep_node): Use new type-based pool allocator.
	(delete_dep_node): Likewise.
	(create_deps_list): Likewise.
	(free_deps_list): Likewise.
	(sched_deps_init): Likewise.
	(sched_deps_finish): Likewise.
---
 gcc/sched-deps.c | 23 ++++++++++++-----------
 1 file changed, 12 insertions(+), 11 deletions(-)

diff --git a/gcc/sched-deps.c b/gcc/sched-deps.c
index c1cfc1f..30d4630 100644
--- a/gcc/sched-deps.c
+++ b/gcc/sched-deps.c
@@ -334,7 +334,7 @@ dep_link_is_detached_p (dep_link_t link)
 }
 
 /* Pool to hold all dependency nodes (dep_node_t).  */
-static alloc_pool dn_pool;
+static pool_allocator<_dep_node> *dn_pool;
 
 /* Number of dep_nodes out there.  */
 static int dn_pool_diff = 0;
@@ -343,7 +343,7 @@ static int dn_pool_diff = 0;
 static dep_node_t
 create_dep_node (void)
 {
-  dep_node_t n = (dep_node_t) pool_alloc (dn_pool);
+  dep_node_t n = dn_pool->allocate ();
   dep_link_t back = DEP_NODE_BACK (n);
   dep_link_t forw = DEP_NODE_FORW (n);
 
@@ -371,11 +371,11 @@ delete_dep_node (dep_node_t n)
 
   --dn_pool_diff;
 
-  pool_free (dn_pool, n);
+  dn_pool->remove (n);
 }
 
 /* Pool to hold dependencies lists (deps_list_t).  */
-static alloc_pool dl_pool;
+static pool_allocator<_deps_list> *dl_pool;
 
 /* Number of deps_lists out there.  */
 static int dl_pool_diff = 0;
@@ -393,7 +393,7 @@ deps_list_empty_p (deps_list_t l)
 static deps_list_t
 create_deps_list (void)
 {
-  deps_list_t l = (deps_list_t) pool_alloc (dl_pool);
+  deps_list_t l = dl_pool->allocate ();
 
   DEPS_LIST_FIRST (l) = NULL;
   DEPS_LIST_N_LINKS (l) = 0;
@@ -410,7 +410,7 @@ free_deps_list (deps_list_t l)
 
   --dl_pool_diff;
 
-  pool_free (dl_pool, l);
+  dl_pool->remove (l);
 }
 
 /* Return true if there is no dep_nodes and deps_lists out there.
@@ -4075,10 +4075,10 @@ sched_deps_init (bool global_p)
 
   if (global_p)
     {
-      dl_pool = create_alloc_pool ("deps_list", sizeof (struct _deps_list),
+      dl_pool = new pool_allocator<_deps_list> ("deps_list",
                                    /* Allocate lists for one block at a time.  */
                                    insns_in_block);
-      dn_pool = create_alloc_pool ("dep_node", sizeof (struct _dep_node),
+      dn_pool = new pool_allocator<_dep_node> ("dep_node",
                                    /* Allocate nodes for one block at a time.
                                       We assume that average insn has
                                       5 producers.  */
@@ -4128,9 +4128,10 @@ void
 sched_deps_finish (void)
 {
   gcc_assert (deps_pools_are_empty_p ());
-  free_alloc_pool_if_empty (&dn_pool);
-  free_alloc_pool_if_empty (&dl_pool);
-  gcc_assert (dn_pool == NULL && dl_pool == NULL);
+  dn_pool->release_if_empty ();
+  dn_pool = NULL;
+  dl_pool->release_if_empty ();
+  dl_pool = NULL;
 
   h_d_i_d.release ();
   cache_size = 0;
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 25/35] Change use to type-based pool allocator in tree-ssa-sccvn.c.
  2015-05-27 18:16   ` Jeff Law
@ 2015-05-29 13:41     ` Martin Liška
  0 siblings, 0 replies; 108+ messages in thread
From: Martin Liška @ 2015-05-29 13:41 UTC (permalink / raw)
  To: gcc-patches

[-- Attachment #1: Type: text/plain, Size: 548 bytes --]

On 05/27/2015 08:13 PM, Jeff Law wrote:
> On 05/27/2015 07:56 AM, mliska wrote:
>> gcc/ChangeLog:
>>
>> 2015-04-30  Martin Liska  <mliska@suse.cz>
>>
>>     * tree-ssa-sccvn.c (vn_reference_insert): Use new type-based pool allocator.
>>     (vn_reference_insert_pieces): Likewise.
>>     (vn_phi_insert): Likewise.
>>     (visit_reference_op_call): Likewise.
>>     (copy_phi): Likewise.
>>     (copy_reference): Likewise.
>>     (process_scc): Likewise.
>>     (allocate_vn_table): Likewise.
>>     (free_vn_table): Likewise.
> OK.
> jeff
>
>

v2

[-- Attachment #2: 0024-Change-use-to-type-based-pool-allocator-in-tree-ssa-.patch --]
[-- Type: text/x-patch, Size: 4821 bytes --]

From a82fef12303a5382582e3967ab48c2a077830a90 Mon Sep 17 00:00:00 2001
From: mliska <mliska@suse.cz>
Date: Wed, 27 May 2015 15:56:52 +0200
Subject: [PATCH 24/32] Change use to type-based pool allocator in
 tree-ssa-sccvn.c.

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* tree-ssa-sccvn.c (vn_reference_insert): Use new type-based pool allocator.
	(vn_reference_insert_pieces): Likewise.
	(vn_phi_insert): Likewise.
	(visit_reference_op_call): Likewise.
	(copy_phi): Likewise.
	(copy_reference): Likewise.
	(process_scc): Likewise.
	(allocate_vn_table): Likewise.
	(free_vn_table): Likewise.
---
 gcc/tree-ssa-sccvn.c | 33 +++++++++++++++------------------
 1 file changed, 15 insertions(+), 18 deletions(-)

diff --git a/gcc/tree-ssa-sccvn.c b/gcc/tree-ssa-sccvn.c
index 03be480..98b0cc5 100644
--- a/gcc/tree-ssa-sccvn.c
+++ b/gcc/tree-ssa-sccvn.c
@@ -289,8 +289,8 @@ typedef struct vn_tables_s
   vn_phi_table_type *phis;
   vn_reference_table_type *references;
   struct obstack nary_obstack;
-  alloc_pool phis_pool;
-  alloc_pool references_pool;
+  pool_allocator<vn_phi_s> *phis_pool;
+  pool_allocator<vn_reference_s> *references_pool;
 } *vn_tables_t;
 
 
@@ -2285,7 +2285,7 @@ vn_reference_insert (tree op, tree result, tree vuse, tree vdef)
   vn_reference_t vr1;
   bool tem;
 
-  vr1 = (vn_reference_t) pool_alloc (current_info->references_pool);
+  vr1 = current_info->references_pool->allocate ();
   if (TREE_CODE (result) == SSA_NAME)
     vr1->value_id = VN_INFO (result)->value_id;
   else
@@ -2330,7 +2330,7 @@ vn_reference_insert_pieces (tree vuse, alias_set_type set, tree type,
   vn_reference_s **slot;
   vn_reference_t vr1;
 
-  vr1 = (vn_reference_t) pool_alloc (current_info->references_pool);
+  vr1 = current_info->references_pool->allocate ();
   vr1->value_id = value_id;
   vr1->vuse = vuse ? SSA_VAL (vuse) : NULL_TREE;
   vr1->operands = valueize_refs (operands);
@@ -2756,7 +2756,7 @@ static vn_phi_t
 vn_phi_insert (gimple phi, tree result)
 {
   vn_phi_s **slot;
-  vn_phi_t vp1 = (vn_phi_t) pool_alloc (current_info->phis_pool);
+  vn_phi_t vp1 = current_info->phis_pool->allocate ();
   unsigned i;
   vec<tree> args = vNULL;
 
@@ -2999,7 +2999,7 @@ visit_reference_op_call (tree lhs, gcall *stmt)
 	changed |= set_ssa_val_to (vdef, vdef);
       if (lhs)
 	changed |= set_ssa_val_to (lhs, lhs);
-      vr2 = (vn_reference_t) pool_alloc (current_info->references_pool);
+      vr2 = current_info->references_pool->allocate ();
       vr2->vuse = vr1.vuse;
       /* As we are not walking the virtual operand chain we know the
 	 shared_lookup_references are still original so we can re-use
@@ -3873,7 +3873,7 @@ copy_nary (vn_nary_op_t onary, vn_tables_t info)
 static void
 copy_phi (vn_phi_t ophi, vn_tables_t info)
 {
-  vn_phi_t phi = (vn_phi_t) pool_alloc (info->phis_pool);
+  vn_phi_t phi = info->phis_pool->allocate ();
   vn_phi_s **slot;
   memcpy (phi, ophi, sizeof (*phi));
   ophi->phiargs.create (0);
@@ -3889,7 +3889,7 @@ copy_reference (vn_reference_t oref, vn_tables_t info)
 {
   vn_reference_t ref;
   vn_reference_s **slot;
-  ref = (vn_reference_t) pool_alloc (info->references_pool);
+  ref = info->references_pool->allocate ();
   memcpy (ref, oref, sizeof (*ref));
   oref->operands.create (0);
   slot = info->references->find_slot_with_hash (ref, ref->hashcode, INSERT);
@@ -3954,8 +3954,8 @@ process_scc (vec<tree> scc)
       optimistic_info->references->empty ();
       obstack_free (&optimistic_info->nary_obstack, NULL);
       gcc_obstack_init (&optimistic_info->nary_obstack);
-      empty_alloc_pool (optimistic_info->phis_pool);
-      empty_alloc_pool (optimistic_info->references_pool);
+      optimistic_info->phis_pool->release ();
+      optimistic_info->references_pool->release ();
       FOR_EACH_VEC_ELT (scc, i, var)
 	VN_INFO (var)->expr = NULL_TREE;
       FOR_EACH_VEC_ELT (scc, i, var)
@@ -4132,12 +4132,9 @@ allocate_vn_table (vn_tables_t table)
   table->references = new vn_reference_table_type (23);
 
   gcc_obstack_init (&table->nary_obstack);
-  table->phis_pool = create_alloc_pool ("VN phis",
-					sizeof (struct vn_phi_s),
-					30);
-  table->references_pool = create_alloc_pool ("VN references",
-					      sizeof (struct vn_reference_s),
-					      30);
+  table->phis_pool = new pool_allocator<vn_phi_s> ("VN phis", 30);
+  table->references_pool = new pool_allocator<vn_reference_s> ("VN references",
+							       30);
 }
 
 /* Free a value number table.  */
@@ -4152,8 +4149,8 @@ free_vn_table (vn_tables_t table)
   delete table->references;
   table->references = NULL;
   obstack_free (&table->nary_obstack, NULL);
-  free_alloc_pool (table->phis_pool);
-  free_alloc_pool (table->references_pool);
+  delete table->phis_pool;
+  delete table->references_pool;
 }
 
 static void
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 23/35] Change use to type-based pool allocator in tree-ssa-pre.c.
  2015-05-27 18:59   ` Jeff Law
@ 2015-05-29 13:41     ` Martin Liška
  0 siblings, 0 replies; 108+ messages in thread
From: Martin Liška @ 2015-05-29 13:41 UTC (permalink / raw)
  To: gcc-patches

[-- Attachment #1: Type: text/plain, Size: 514 bytes --]

On 05/27/2015 08:18 PM, Jeff Law wrote:
> On 05/27/2015 07:56 AM, mliska wrote:
>> gcc/ChangeLog:
>>
>> 2015-04-30  Martin Liska  <mliska@suse.cz>
>>
>>     * tree-ssa-pre.c (get_or_alloc_expr_for_name): Use new type-based pool allocator.
>>     (bitmap_set_new): Likewise.
>>     (get_or_alloc_expr_for_constant): Likewise.
>>     (get_or_alloc_expr_for): Likewise.
>>     (phi_translate_1): Likewise.
>>     (compute_avail): Likewise.
>>     (init_pre): Likewise.
>>     (fini_pre): Likewise.
> OK.
> Jeff
>

v2

[-- Attachment #2: 0022-Change-use-to-type-based-pool-allocator-in-tree-ssa-.patch --]
[-- Type: text/x-patch, Size: 5426 bytes --]

From 041bfe4c8ae1cc45874631b6656fb38c8e026e05 Mon Sep 17 00:00:00 2001
From: mliska <mliska@suse.cz>
Date: Wed, 27 May 2015 15:56:52 +0200
Subject: [PATCH 22/32] Change use to type-based pool allocator in
 tree-ssa-pre.c.

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* tree-ssa-pre.c (get_or_alloc_expr_for_name): Use new type-based pool allocator.
	(bitmap_set_new): Likewise.
	(get_or_alloc_expr_for_constant): Likewise.
	(get_or_alloc_expr_for): Likewise.
	(phi_translate_1): Likewise.
	(compute_avail): Likewise.
	(init_pre): Likewise.
	(fini_pre): Likewise.
---
 gcc/tree-ssa-pre.c | 32 ++++++++++++++------------------
 1 file changed, 14 insertions(+), 18 deletions(-)

diff --git a/gcc/tree-ssa-pre.c b/gcc/tree-ssa-pre.c
index d857d84..082dbaf 100644
--- a/gcc/tree-ssa-pre.c
+++ b/gcc/tree-ssa-pre.c
@@ -380,7 +380,7 @@ clear_expression_ids (void)
   expressions.release ();
 }
 
-static alloc_pool pre_expr_pool;
+static pool_allocator<pre_expr_d> pre_expr_pool ("pre_expr nodes", 30);
 
 /* Given an SSA_NAME NAME, get or create a pre_expr to represent it.  */
 
@@ -398,7 +398,7 @@ get_or_alloc_expr_for_name (tree name)
   if (result_id != 0)
     return expression_for_id (result_id);
 
-  result = (pre_expr) pool_alloc (pre_expr_pool);
+  result = pre_expr_pool.allocate ();
   result->kind = NAME;
   PRE_EXPR_NAME (result) = name;
   alloc_expression_id (result);
@@ -519,7 +519,7 @@ static unsigned int get_expr_value_id (pre_expr);
 /* We can add and remove elements and entries to and from sets
    and hash tables, so we use alloc pools for them.  */
 
-static alloc_pool bitmap_set_pool;
+static pool_allocator<bitmap_set> bitmap_set_pool ("Bitmap sets", 30);
 static bitmap_obstack grand_bitmap_obstack;
 
 /* Set of blocks with statements that have had their EH properties changed.  */
@@ -635,7 +635,7 @@ add_to_value (unsigned int v, pre_expr e)
 static bitmap_set_t
 bitmap_set_new (void)
 {
-  bitmap_set_t ret = (bitmap_set_t) pool_alloc (bitmap_set_pool);
+  bitmap_set_t ret = bitmap_set_pool.allocate ();
   bitmap_initialize (&ret->expressions, &grand_bitmap_obstack);
   bitmap_initialize (&ret->values, &grand_bitmap_obstack);
   return ret;
@@ -1125,7 +1125,7 @@ get_or_alloc_expr_for_constant (tree constant)
   if (result_id != 0)
     return expression_for_id (result_id);
 
-  newexpr = (pre_expr) pool_alloc (pre_expr_pool);
+  newexpr = pre_expr_pool.allocate ();
   newexpr->kind = CONSTANT;
   PRE_EXPR_CONSTANT (newexpr) = constant;
   alloc_expression_id (newexpr);
@@ -1176,13 +1176,13 @@ get_or_alloc_expr_for (tree t)
       vn_nary_op_lookup (t, &result);
       if (result != NULL)
 	{
-	  pre_expr e = (pre_expr) pool_alloc (pre_expr_pool);
+	  pre_expr e = pre_expr_pool.allocate ();
 	  e->kind = NARY;
 	  PRE_EXPR_NARY (e) = result;
 	  result_id = lookup_expression_id (e);
 	  if (result_id != 0)
 	    {
-	      pool_free (pre_expr_pool, e);
+	      pre_expr_pool.remove (e);
 	      e = expression_for_id (result_id);
 	      return e;
 	    }
@@ -1526,7 +1526,7 @@ phi_translate_1 (pre_expr expr, bitmap_set_t set1, bitmap_set_t set2,
 	    if (result && is_gimple_min_invariant (result))
 	      return get_or_alloc_expr_for_constant (result);
 
-	    expr = (pre_expr) pool_alloc (pre_expr_pool);
+	    expr = pre_expr_pool.allocate ();
 	    expr->kind = NARY;
 	    expr->id = 0;
 	    if (nary)
@@ -1688,7 +1688,7 @@ phi_translate_1 (pre_expr expr, bitmap_set_t set1, bitmap_set_t set2,
 		return NULL;
 	      }
 
-	    expr = (pre_expr) pool_alloc (pre_expr_pool);
+	    expr = pre_expr_pool.allocate ();
 	    expr->kind = REFERENCE;
 	    expr->id = 0;
 
@@ -3795,7 +3795,7 @@ compute_avail (void)
 		    || gimple_bb (SSA_NAME_DEF_STMT
 				    (gimple_vuse (stmt))) != block)
 		  {
-		    result = (pre_expr) pool_alloc (pre_expr_pool);
+		    result = pre_expr_pool.allocate ();
 		    result->kind = REFERENCE;
 		    result->id = 0;
 		    PRE_EXPR_REFERENCE (result) = ref;
@@ -3835,7 +3835,7 @@ compute_avail (void)
 			  && vn_nary_may_trap (nary))
 			continue;
 
-		      result = (pre_expr) pool_alloc (pre_expr_pool);
+		      result = pre_expr_pool.allocate ();
 		      result->kind = NARY;
 		      result->id = 0;
 		      PRE_EXPR_NARY (result) = nary;
@@ -3876,7 +3876,7 @@ compute_avail (void)
 			    continue;
 			}
 
-		      result = (pre_expr) pool_alloc (pre_expr_pool);
+		      result = pre_expr_pool.allocate ();
 		      result->kind = REFERENCE;
 		      result->id = 0;
 		      PRE_EXPR_REFERENCE (result) = ref;
@@ -4779,10 +4779,6 @@ init_pre (void)
   bitmap_obstack_initialize (&grand_bitmap_obstack);
   phi_translate_table = new hash_table<expr_pred_trans_d> (5110);
   expression_to_id = new hash_table<pre_expr_d> (num_ssa_names * 3);
-  bitmap_set_pool = create_alloc_pool ("Bitmap sets",
-				       sizeof (struct bitmap_set), 30);
-  pre_expr_pool = create_alloc_pool ("pre_expr nodes",
-				     sizeof (struct pre_expr_d), 30);
   FOR_ALL_BB_FN (bb, cfun)
     {
       EXP_GEN (bb) = bitmap_set_new ();
@@ -4802,8 +4798,8 @@ fini_pre ()
   value_expressions.release ();
   BITMAP_FREE (inserted_exprs);
   bitmap_obstack_release (&grand_bitmap_obstack);
-  free_alloc_pool (bitmap_set_pool);
-  free_alloc_pool (pre_expr_pool);
+  bitmap_set_pool.release ();
+  pre_expr_pool.release ();
   delete phi_translate_table;
   phi_translate_table = NULL;
   delete expression_to_id;
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 24/35] Change use to type-based pool allocator in tree-ssa-reassoc.c.
  2015-05-27 18:15   ` Jeff Law
@ 2015-05-29 13:41     ` Martin Liška
  0 siblings, 0 replies; 108+ messages in thread
From: Martin Liška @ 2015-05-29 13:41 UTC (permalink / raw)
  To: Jeff Law, gcc-patches

[-- Attachment #1: Type: text/plain, Size: 433 bytes --]

On 05/27/2015 08:12 PM, Jeff Law wrote:
> On 05/27/2015 07:56 AM, mliska wrote:
>> gcc/ChangeLog:
>>
>> 2015-04-30  Martin Liska  <mliska@suse.cz>
>>
>>     * tree-ssa-reassoc.c (add_to_ops_vec): Use new type-based pool allocator.
>>     (add_repeat_to_ops_vec): Likewise.
>>     (get_ops): Likewise.
>>     (maybe_optimize_range_tests): Likewise.
>>     (init_reassoc): Likewise.
>>     (fini_reassoc): Likewise.
> OK.
> jeff
>

v2

[-- Attachment #2: 0023-Change-use-to-type-based-pool-allocator-in-tree-ssa-.patch --]
[-- Type: text/x-patch, Size: 3413 bytes --]

From de04d44798779d4ff12d39144db747936990773e Mon Sep 17 00:00:00 2001
From: mliska <mliska@suse.cz>
Date: Wed, 27 May 2015 15:56:52 +0200
Subject: [PATCH 23/32] Change use to type-based pool allocator in
 tree-ssa-reassoc.c.

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* tree-ssa-reassoc.c (add_to_ops_vec): Use new type-based pool allocator.
	(add_repeat_to_ops_vec): Likewise.
	(get_ops): Likewise.
	(maybe_optimize_range_tests): Likewise.
	(init_reassoc): Likewise.
	(fini_reassoc): Likewise.
---
 gcc/tree-ssa-reassoc.c | 19 ++++++++-----------
 1 file changed, 8 insertions(+), 11 deletions(-)

diff --git a/gcc/tree-ssa-reassoc.c b/gcc/tree-ssa-reassoc.c
index 0c67379..c1a7f4b9 100644
--- a/gcc/tree-ssa-reassoc.c
+++ b/gcc/tree-ssa-reassoc.c
@@ -235,7 +235,8 @@ typedef struct operand_entry
   unsigned int count;
 } *operand_entry_t;
 
-static alloc_pool operand_entry_pool;
+static pool_allocator<operand_entry> operand_entry_pool ("operand entry pool",
+							 30);
 
 /* This is used to assign a unique ID to each struct operand_entry
    so that qsort results are identical on different hosts.  */
@@ -619,7 +620,7 @@ sort_by_operand_rank (const void *pa, const void *pb)
 static void
 add_to_ops_vec (vec<operand_entry_t> *ops, tree op)
 {
-  operand_entry_t oe = (operand_entry_t) pool_alloc (operand_entry_pool);
+  operand_entry_t oe = operand_entry_pool.allocate ();
 
   oe->op = op;
   oe->rank = get_rank (op);
@@ -635,7 +636,7 @@ static void
 add_repeat_to_ops_vec (vec<operand_entry_t> *ops, tree op,
 		       HOST_WIDE_INT repeat)
 {
-  operand_entry_t oe = (operand_entry_t) pool_alloc (operand_entry_pool);
+  operand_entry_t oe = operand_entry_pool.allocate ();
 
   oe->op = op;
   oe->rank = get_rank (op);
@@ -2990,7 +2991,7 @@ get_ops (tree var, enum tree_code code, vec<operand_entry_t> *ops,
 	&& !get_ops (rhs[i], code, ops, loop)
 	&& has_single_use (rhs[i]))
       {
-	operand_entry_t oe = (operand_entry_t) pool_alloc (operand_entry_pool);
+	operand_entry_t oe = operand_entry_pool.allocate ();
 
 	oe->op = rhs[i];
 	oe->rank = code;
@@ -3223,8 +3224,7 @@ maybe_optimize_range_tests (gimple stmt)
 	      && has_single_use (rhs))
 	    {
 	      /* Otherwise, push the _234 range test itself.  */
-	      operand_entry_t oe
-		= (operand_entry_t) pool_alloc (operand_entry_pool);
+	      operand_entry_t oe = operand_entry_pool.allocate ();
 
 	      oe->op = rhs;
 	      oe->rank = code;
@@ -3256,8 +3256,7 @@ maybe_optimize_range_tests (gimple stmt)
 			   loop_containing_stmt (stmt))))
 	{
 	  /* Or push the GIMPLE_COND stmt itself.  */
-	  operand_entry_t oe
-	    = (operand_entry_t) pool_alloc (operand_entry_pool);
+	  operand_entry_t oe = operand_entry_pool.allocate ();
 
 	  oe->op = NULL;
 	  oe->rank = (e->flags & EDGE_TRUE_VALUE)
@@ -5035,8 +5034,6 @@ init_reassoc (void)
 
   memset (&reassociate_stats, 0, sizeof (reassociate_stats));
 
-  operand_entry_pool = create_alloc_pool ("operand entry pool",
-					  sizeof (struct operand_entry), 30);
   next_operand_entry_id = 0;
 
   /* Reverse RPO (Reverse Post Order) will give us something where
@@ -5085,7 +5082,7 @@ fini_reassoc (void)
 			    reassociate_stats.pows_created);
 
   delete operand_rank;
-  free_alloc_pool (operand_entry_pool);
+  operand_entry_pool.release ();
   free (bb_rank);
   plus_negates.release ();
   free_dominance_info (CDI_POST_DOMINATORS);
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 29/35] Change use to type-based pool allocator in ipa-prop.c.
  2015-05-27 18:22   ` Jeff Law
@ 2015-05-29 13:42     ` Martin Liška
  0 siblings, 0 replies; 108+ messages in thread
From: Martin Liška @ 2015-05-29 13:42 UTC (permalink / raw)
  To: Jeff Law, gcc-patches

[-- Attachment #1: Type: text/plain, Size: 406 bytes --]

On 05/27/2015 08:16 PM, Jeff Law wrote:
> On 05/27/2015 07:56 AM, mliska wrote:
>> gcc/ChangeLog:
>>
>> 2015-04-30  Martin Liska  <mliska@suse.cz>
>>
>>     * ipa-prop.c (ipa_set_jf_constant): Use new type-based pool allocator.
>>     (ipa_edge_duplication_hook): Likewise.
>>     (ipa_free_all_structures_after_ipa_cp): Likewise.
>>     (ipa_free_all_structures_after_iinln): Likewise.
> OK.
> Jeff
>

v2

[-- Attachment #2: 0028-Change-use-to-type-based-pool-allocator-in-ipa-prop..patch --]
[-- Type: text/x-patch, Size: 3429 bytes --]

From 5de5c0c01ba9c1c3997b3348c022f4c3013fcefc Mon Sep 17 00:00:00 2001
From: mliska <mliska@suse.cz>
Date: Wed, 27 May 2015 15:56:54 +0200
Subject: [PATCH 28/32] Change use to type-based pool allocator in ipa-prop.c.

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* ipa-prop.c (ipa_set_jf_constant): Use new type-based pool allocator.
	(ipa_edge_duplication_hook): Likewise.
	(ipa_free_all_structures_after_ipa_cp): Likewise.
	(ipa_free_all_structures_after_iinln): Likewise.
---
 gcc/ipa-prop.c | 23 +++++++----------------
 1 file changed, 7 insertions(+), 16 deletions(-)

diff --git a/gcc/ipa-prop.c b/gcc/ipa-prop.c
index 26be5f2..80ce6b8 100644
--- a/gcc/ipa-prop.c
+++ b/gcc/ipa-prop.c
@@ -176,7 +176,8 @@ struct ipa_cst_ref_desc
 
 /* Allocation pool for reference descriptions.  */
 
-static alloc_pool ipa_refdesc_pool;
+static pool_allocator<ipa_cst_ref_desc> ipa_refdesc_pool
+  ("IPA-PROP ref descriptions", 32);
 
 /* Return true if DECL_FUNCTION_SPECIFIC_OPTIMIZATION of the decl associated
    with NODE should prevent us from analyzing it for the purposes of IPA-CP.  */
@@ -508,11 +509,8 @@ ipa_set_jf_constant (struct ipa_jump_func *jfunc, tree constant,
       && TREE_CODE (TREE_OPERAND (constant, 0)) == FUNCTION_DECL)
     {
       struct ipa_cst_ref_desc *rdesc;
-      if (!ipa_refdesc_pool)
-	ipa_refdesc_pool = create_alloc_pool ("IPA-PROP ref descriptions",
-					sizeof (struct ipa_cst_ref_desc), 32);
 
-      rdesc = (struct ipa_cst_ref_desc *) pool_alloc (ipa_refdesc_pool);
+      rdesc = ipa_refdesc_pool.allocate ();
       rdesc->cs = cs;
       rdesc->next_duplicate = NULL;
       rdesc->refcount = 1;
@@ -3517,9 +3515,7 @@ ipa_edge_duplication_hook (struct cgraph_edge *src, struct cgraph_edge *dst,
 	      gcc_checking_assert (ref);
 	      dst->caller->clone_reference (ref, ref->stmt);
 
-	      gcc_checking_assert (ipa_refdesc_pool);
-	      struct ipa_cst_ref_desc *dst_rdesc
-		= (struct ipa_cst_ref_desc *) pool_alloc (ipa_refdesc_pool);
+	      struct ipa_cst_ref_desc *dst_rdesc = ipa_refdesc_pool.allocate ();
 	      dst_rdesc->cs = dst;
 	      dst_rdesc->refcount = src_rdesc->refcount;
 	      dst_rdesc->next_duplicate = NULL;
@@ -3527,10 +3523,7 @@ ipa_edge_duplication_hook (struct cgraph_edge *src, struct cgraph_edge *dst,
 	    }
 	  else if (src_rdesc->cs == src)
 	    {
-	      struct ipa_cst_ref_desc *dst_rdesc;
-	      gcc_checking_assert (ipa_refdesc_pool);
-	      dst_rdesc
-		= (struct ipa_cst_ref_desc *) pool_alloc (ipa_refdesc_pool);
+	      struct ipa_cst_ref_desc *dst_rdesc = ipa_refdesc_pool.allocate ();
 	      dst_rdesc->cs = dst;
 	      dst_rdesc->refcount = src_rdesc->refcount;
 	      dst_rdesc->next_duplicate = src_rdesc->next_duplicate;
@@ -3681,8 +3674,7 @@ ipa_free_all_structures_after_ipa_cp (void)
       free_alloc_pool (ipcp_poly_ctx_values_pool);
       free_alloc_pool (ipcp_agg_lattice_pool);
       ipa_unregister_cgraph_hooks ();
-      if (ipa_refdesc_pool)
-	free_alloc_pool (ipa_refdesc_pool);
+      ipa_refdesc_pool.release ();
     }
 }
 
@@ -3703,8 +3695,7 @@ ipa_free_all_structures_after_iinln (void)
     free_alloc_pool (ipcp_poly_ctx_values_pool);
   if (ipcp_agg_lattice_pool)
     free_alloc_pool (ipcp_agg_lattice_pool);
-  if (ipa_refdesc_pool)
-    free_alloc_pool (ipa_refdesc_pool);
+  ipa_refdesc_pool.release ();
 }
 
 /* Print ipa_tree_map data structures of all functions in the
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 27/35] Change use to type-based pool allocator in tree-ssa-structalias.c.
  2015-05-27 18:20   ` Jeff Law
@ 2015-05-29 13:42     ` Martin Liška
  0 siblings, 0 replies; 108+ messages in thread
From: Martin Liška @ 2015-05-29 13:42 UTC (permalink / raw)
  To: gcc-patches

[-- Attachment #1: Type: text/plain, Size: 374 bytes --]

On 05/27/2015 08:15 PM, Jeff Law wrote:
> On 05/27/2015 07:56 AM, mliska wrote:
>> gcc/ChangeLog:
>>
>> 2015-04-30  Martin Liska  <mliska@suse.cz>
>>
>>     * tree-ssa-structalias.c (new_var_info): Use new type-based pool allocator.
>>     (new_constraint): Likewise.
>>     (init_alias_vars): Likewise.
>>     (delete_points_to_sets): Likewise.
>> ---
> OK.
> Jeff
>
>

v2

[-- Attachment #2: 0026-Change-use-to-type-based-pool-allocator-in-tree-ssa-.patch --]
[-- Type: text/x-patch, Size: 2834 bytes --]

From 7fbd298945fdb725099dc3a91f981212c5c1f3c7 Mon Sep 17 00:00:00 2001
From: mliska <mliska@suse.cz>
Date: Wed, 27 May 2015 15:56:54 +0200
Subject: [PATCH 26/32] Change use to type-based pool allocator in
 tree-ssa-structalias.c.

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* tree-ssa-structalias.c (new_var_info): Use new type-based pool allocator.
	(new_constraint): Likewise.
	(init_alias_vars): Likewise.
	(delete_points_to_sets): Likewise.
---
 gcc/tree-ssa-structalias.c | 17 +++++++----------
 1 file changed, 7 insertions(+), 10 deletions(-)

diff --git a/gcc/tree-ssa-structalias.c b/gcc/tree-ssa-structalias.c
index d6a9f67..e802d78 100644
--- a/gcc/tree-ssa-structalias.c
+++ b/gcc/tree-ssa-structalias.c
@@ -354,7 +354,8 @@ static varinfo_t lookup_vi_for_tree (tree);
 static inline bool type_can_have_subvars (const_tree);
 
 /* Pool of variable info structures.  */
-static alloc_pool variable_info_pool;
+static pool_allocator<variable_info> variable_info_pool
+  ("Variable info pool", 30);
 
 /* Map varinfo to final pt_solution.  */
 static hash_map<varinfo_t, pt_solution *> *final_solutions;
@@ -395,7 +396,7 @@ static varinfo_t
 new_var_info (tree t, const char *name)
 {
   unsigned index = varmap.length ();
-  varinfo_t ret = (varinfo_t) pool_alloc (variable_info_pool);
+  varinfo_t ret = variable_info_pool.allocate ();
 
   ret->id = index;
   ret->name = name;
@@ -554,7 +555,7 @@ struct constraint
 /* List of constraints that we use to build the constraint graph from.  */
 
 static vec<constraint_t> constraints;
-static alloc_pool constraint_pool;
+static pool_allocator<constraint> constraint_pool ("Constraint pool", 30);
 
 /* The constraint graph is represented as an array of bitmaps
    containing successor nodes.  */
@@ -676,7 +677,7 @@ static constraint_t
 new_constraint (const struct constraint_expr lhs,
 		const struct constraint_expr rhs)
 {
-  constraint_t ret = (constraint_t) pool_alloc (constraint_pool);
+  constraint_t ret = constraint_pool.allocate ();
   ret->lhs = lhs;
   ret->rhs = rhs;
   return ret;
@@ -6681,10 +6682,6 @@ init_alias_vars (void)
   bitmap_obstack_initialize (&oldpta_obstack);
   bitmap_obstack_initialize (&predbitmap_obstack);
 
-  constraint_pool = create_alloc_pool ("Constraint pool",
-				       sizeof (struct constraint), 30);
-  variable_info_pool = create_alloc_pool ("Variable info pool",
-					  sizeof (struct variable_info), 30);
   constraints.create (8);
   varmap.create (8);
   vi_for_tree = new hash_map<tree, varinfo_t>;
@@ -6964,8 +6961,8 @@ delete_points_to_sets (void)
   free (graph);
 
   varmap.release ();
-  free_alloc_pool (variable_info_pool);
-  free_alloc_pool (constraint_pool);
+  variable_info_pool.release ();
+  constraint_pool.release ();
 
   obstack_free (&fake_var_decl_obstack, NULL);
 
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 28/35] Change use to type-based pool allocator in ipa-profile.c.
  2015-05-27 18:18   ` Jeff Law
@ 2015-05-29 13:42     ` Martin Liška
  0 siblings, 0 replies; 108+ messages in thread
From: Martin Liška @ 2015-05-29 13:42 UTC (permalink / raw)
  To: gcc-patches

[-- Attachment #1: Type: text/plain, Size: 374 bytes --]

On 05/27/2015 08:15 PM, Jeff Law wrote:
> On 05/27/2015 07:56 AM, mliska wrote:
>> gcc/ChangeLog:
>>
>> 2015-04-30  Martin Liska  <mliska@suse.cz>
>>
>>     * ipa-profile.c (account_time_size): Use new type-based pool allocator.
>>     (ipa_profile_generate_summary): Likewise.
>>     (ipa_profile_read_summary): Likewise.
>>     (ipa_profile): Likewise.
> OK.
> jeff
>

v2

[-- Attachment #2: 0027-Change-use-to-type-based-pool-allocator-in-ipa-profi.patch --]
[-- Type: text/x-patch, Size: 2171 bytes --]

From 25e42b4ede0c9d2d6a2ba015166805611957fc53 Mon Sep 17 00:00:00 2001
From: mliska <mliska@suse.cz>
Date: Wed, 27 May 2015 15:56:54 +0200
Subject: [PATCH 27/32] Change use to type-based pool allocator in
 ipa-profile.c.

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* ipa-profile.c (account_time_size): Use new type-based pool allocator.
	(ipa_profile_generate_summary): Likewise.
	(ipa_profile_read_summary): Likewise.
	(ipa_profile): Likewise.
---
 gcc/ipa-profile.c | 11 ++++-------
 1 file changed, 4 insertions(+), 7 deletions(-)

diff --git a/gcc/ipa-profile.c b/gcc/ipa-profile.c
index e0d4266..7c967f9 100644
--- a/gcc/ipa-profile.c
+++ b/gcc/ipa-profile.c
@@ -107,7 +107,8 @@ struct histogram_entry
    duplicate entries.  */
 
 vec<histogram_entry *> histogram;
-static alloc_pool histogram_pool;
+static pool_allocator<histogram_entry> histogram_pool
+  ("IPA histogram", 10);
 
 /* Hashtable support for storing SSA names hashed by their SSA_NAME_VAR.  */
 
@@ -144,7 +145,7 @@ account_time_size (hash_table<histogram_hash> *hashtable,
 
   if (!*val)
     {
-      *val = (histogram_entry *) pool_alloc (histogram_pool);
+      *val = histogram_pool.allocate ();
       **val = key;
       histogram.safe_push (*val);
     }
@@ -205,8 +206,6 @@ ipa_profile_generate_summary (void)
   basic_block bb;
 
   hash_table<histogram_hash> hashtable (10);
-  histogram_pool = create_alloc_pool ("IPA histogram", sizeof (struct histogram_entry),
-				      10);
   
   FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
     FOR_EACH_BB_FN (bb, DECL_STRUCT_FUNCTION (node->decl))
@@ -287,8 +286,6 @@ ipa_profile_read_summary (void)
   int j = 0;
 
   hash_table<histogram_hash> hashtable (10);
-  histogram_pool = create_alloc_pool ("IPA histogram", sizeof (struct histogram_entry),
-				      10);
 
   while ((file_data = file_data_vec[j++]))
     {
@@ -593,7 +590,7 @@ ipa_profile (void)
 	}
     }
   histogram.release ();
-  free_alloc_pool (histogram_pool);
+  histogram_pool.release ();
 
  /* Produce speculative calls: we saved common target from profiling into
     e->common_target_id.  Now, at link time, we can look up corresponding
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 26/35] Change use to type-based pool allocator in tree-ssa-strlen.c.
  2015-05-27 14:21 ` [PATCH 26/35] Change use to type-based pool allocator in tree-ssa-strlen.c mliska
  2015-05-27 18:17   ` Jeff Law
@ 2015-05-29 13:42   ` Martin Liška
  1 sibling, 0 replies; 108+ messages in thread
From: Martin Liška @ 2015-05-29 13:42 UTC (permalink / raw)
  To: gcc-patches

[-- Attachment #1: Type: text/plain, Size: 2074 bytes --]

On 05/27/2015 03:56 PM, mliska wrote:
> gcc/ChangeLog:
>
> 2015-04-30  Martin Liska  <mliska@suse.cz>
>
> 	* tree-ssa-strlen.c (new_strinfo): Use new type-based pool allocator.
> 	(free_strinfo): Likewise.
> 	(pass_strlen::execute): Likewise.
> ---
>   gcc/tree-ssa-strlen.c | 10 ++++------
>   1 file changed, 4 insertions(+), 6 deletions(-)
>
> diff --git a/gcc/tree-ssa-strlen.c b/gcc/tree-ssa-strlen.c
> index 34776a3..2664189 100644
> --- a/gcc/tree-ssa-strlen.c
> +++ b/gcc/tree-ssa-strlen.c
> @@ -142,7 +142,7 @@ typedef struct strinfo_struct
>   } *strinfo;
>
>   /* Pool for allocating strinfo_struct entries.  */
> -static alloc_pool strinfo_pool;
> +static pool_allocator<strinfo_struct> strinfo_pool ("strinfo_struct pool", 64);
>
>   /* Vector mapping positive string indexes to strinfo, for the
>      current basic block.  The first pointer in the vector is special,
> @@ -431,7 +431,7 @@ new_addr_stridx (tree exp)
>   static strinfo
>   new_strinfo (tree ptr, int idx, tree length)
>   {
> -  strinfo si = (strinfo) pool_alloc (strinfo_pool);
> +  strinfo si = strinfo_pool.allocate ();
>     si->length = length;
>     si->ptr = ptr;
>     si->stmt = NULL;
> @@ -452,7 +452,7 @@ static inline void
>   free_strinfo (strinfo si)
>   {
>     if (si && --si->refcount == 0)
> -    pool_free (strinfo_pool, si);
> +    strinfo_pool.remove (si);
>   }
>
>   /* Set strinfo in the vector entry IDX to SI.  */
> @@ -2400,8 +2400,6 @@ pass_strlen::execute (function *fun)
>   {
>     ssa_ver_to_stridx.safe_grow_cleared (num_ssa_names);
>     max_stridx = 1;
> -  strinfo_pool = create_alloc_pool ("strinfo_struct pool",
> -				    sizeof (struct strinfo_struct), 64);
>
>     calculate_dominance_info (CDI_DOMINATORS);
>
> @@ -2410,7 +2408,7 @@ pass_strlen::execute (function *fun)
>     strlen_dom_walker (CDI_DOMINATORS).walk (fun->cfg->x_entry_block_ptr);
>
>     ssa_ver_to_stridx.release ();
> -  free_alloc_pool (strinfo_pool);
> +  strinfo_pool.release ();
>     if (decl_to_stridxlist_htab)
>       {
>         obstack_free (&stridx_obstack, NULL);
>

v2

[-- Attachment #2: 0025-Change-use-to-type-based-pool-allocator-in-tree-ssa-.patch --]
[-- Type: text/x-patch, Size: 2136 bytes --]

From d3b5cce7467dea3cc06489e087ca5d2f15a0eb32 Mon Sep 17 00:00:00 2001
From: mliska <mliska@suse.cz>
Date: Wed, 27 May 2015 15:56:53 +0200
Subject: [PATCH 25/32] Change use to type-based pool allocator in
 tree-ssa-strlen.c.

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* tree-ssa-strlen.c (new_strinfo): Use new type-based pool allocator.
	(free_strinfo): Likewise.
	(pass_strlen::execute): Likewise.
---
 gcc/tree-ssa-strlen.c | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/gcc/tree-ssa-strlen.c b/gcc/tree-ssa-strlen.c
index 34776a3..2664189 100644
--- a/gcc/tree-ssa-strlen.c
+++ b/gcc/tree-ssa-strlen.c
@@ -142,7 +142,7 @@ typedef struct strinfo_struct
 } *strinfo;
 
 /* Pool for allocating strinfo_struct entries.  */
-static alloc_pool strinfo_pool;
+static pool_allocator<strinfo_struct> strinfo_pool ("strinfo_struct pool", 64);
 
 /* Vector mapping positive string indexes to strinfo, for the
    current basic block.  The first pointer in the vector is special,
@@ -431,7 +431,7 @@ new_addr_stridx (tree exp)
 static strinfo
 new_strinfo (tree ptr, int idx, tree length)
 {
-  strinfo si = (strinfo) pool_alloc (strinfo_pool);
+  strinfo si = strinfo_pool.allocate ();
   si->length = length;
   si->ptr = ptr;
   si->stmt = NULL;
@@ -452,7 +452,7 @@ static inline void
 free_strinfo (strinfo si)
 {
   if (si && --si->refcount == 0)
-    pool_free (strinfo_pool, si);
+    strinfo_pool.remove (si);
 }
 
 /* Set strinfo in the vector entry IDX to SI.  */
@@ -2400,8 +2400,6 @@ pass_strlen::execute (function *fun)
 {
   ssa_ver_to_stridx.safe_grow_cleared (num_ssa_names);
   max_stridx = 1;
-  strinfo_pool = create_alloc_pool ("strinfo_struct pool",
-				    sizeof (struct strinfo_struct), 64);
 
   calculate_dominance_info (CDI_DOMINATORS);
 
@@ -2410,7 +2408,7 @@ pass_strlen::execute (function *fun)
   strlen_dom_walker (CDI_DOMINATORS).walk (fun->cfg->x_entry_block_ptr);
 
   ssa_ver_to_stridx.release ();
-  free_alloc_pool (strinfo_pool);
+  strinfo_pool.release ();
   if (decl_to_stridxlist_htab)
     {
       obstack_free (&stridx_obstack, NULL);
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 32/35] Change use to type-based pool allocator in ira-build.c.
  2015-05-27 19:34   ` Jeff Law
@ 2015-05-29 13:44     ` Martin Liška
  0 siblings, 0 replies; 108+ messages in thread
From: Martin Liška @ 2015-05-29 13:44 UTC (permalink / raw)
  To: gcc-patches

[-- Attachment #1: Type: text/plain, Size: 382 bytes --]

On 05/27/2015 08:21 PM, Jeff Law wrote:
> On 05/27/2015 07:56 AM, mliska wrote:
>> gcc/ChangeLog:
>>
>> 2015-04-30  Martin Liska  <mliska@suse.cz>
>>
>>     * ira-build.c (finish_allocnos): Use new type-based pool allocator.
>>     (finish_prefs): Likewise.
>>     (finish_copies): Likewise.
> Is this a partial duplicate of patch #34?  Something seems amiss here.
>
>
> jeff
>

v2

[-- Attachment #2: 0031-Change-use-to-type-based-pool-allocator-in-ira-build.patch --]
[-- Type: text/x-patch, Size: 6608 bytes --]

From b5e1163dc80daf303431589eb71611afd60c08ef Mon Sep 17 00:00:00 2001
From: mliska <mliska@suse.cz>
Date: Wed, 27 May 2015 15:56:56 +0200
Subject: [PATCH 31/32] Change use to type-based pool allocator in ira-build.c.

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* ira-build.c (initiate_allocnos): Use new type-based pool allocator.
	(ira_create_object): Likewise.
	(ira_create_allocno): Likewise.
	(ira_create_live_range): Likewise.
	(copy_live_range): Likewise.
	(ira_finish_live_range): Likewise.
	(ira_free_allocno_costs): Likewise.
	(finish_allocno): Likewise.
	(finish_allocnos): Likewise.
	(initiate_prefs): Likewise.
	(ira_create_pref): Likewise.
	(finish_pref): Likewise.
	(finish_prefs): Likewise.
	(initiate_copies): Likewise.
	(ira_create_copy): Likewise.
	(finish_copy): Likewise.
	(finish_copies): Likewise.
	(finish_prefs): Likewise.
---
 gcc/ira-build.c | 51 +++++++++++++++++++++------------------------------
 1 file changed, 21 insertions(+), 30 deletions(-)

diff --git a/gcc/ira-build.c b/gcc/ira-build.c
index 2de7d34..534d0bc 100644
--- a/gcc/ira-build.c
+++ b/gcc/ira-build.c
@@ -428,7 +428,9 @@ rebuild_regno_allocno_maps (void)
 \f
 
 /* Pools for allocnos, allocno live ranges and objects.  */
-static alloc_pool allocno_pool, live_range_pool, object_pool;
+static pool_allocator<live_range> live_range_pool ("live ranges", 100);
+static pool_allocator<ira_allocno> allocno_pool ("allocnos", 100);
+static pool_allocator<ira_object> object_pool ("objects", 100);
 
 /* Vec containing references to all created allocnos.  It is a
    container of array allocnos.  */
@@ -442,13 +444,6 @@ static vec<ira_object_t> ira_object_id_map_vec;
 static void
 initiate_allocnos (void)
 {
-  live_range_pool
-    = create_alloc_pool ("live ranges",
-			 sizeof (struct live_range), 100);
-  allocno_pool
-    = create_alloc_pool ("allocnos", sizeof (struct ira_allocno), 100);
-  object_pool
-    = create_alloc_pool ("objects", sizeof (struct ira_object), 100);
   allocno_vec.create (max_reg_num () * 2);
   ira_allocnos = NULL;
   ira_allocnos_num = 0;
@@ -466,7 +461,7 @@ static ira_object_t
 ira_create_object (ira_allocno_t a, int subword)
 {
   enum reg_class aclass = ALLOCNO_CLASS (a);
-  ira_object_t obj = (ira_object_t) pool_alloc (object_pool);
+  ira_object_t obj = object_pool.allocate ();
 
   OBJECT_ALLOCNO (obj) = a;
   OBJECT_SUBWORD (obj) = subword;
@@ -501,7 +496,7 @@ ira_create_allocno (int regno, bool cap_p,
 {
   ira_allocno_t a;
 
-  a = (ira_allocno_t) pool_alloc (allocno_pool);
+  a = allocno_pool.allocate ();
   ALLOCNO_REGNO (a) = regno;
   ALLOCNO_LOOP_TREE_NODE (a) = loop_tree_node;
   if (! cap_p)
@@ -943,7 +938,7 @@ ira_create_live_range (ira_object_t obj, int start, int finish,
 {
   live_range_t p;
 
-  p = (live_range_t) pool_alloc (live_range_pool);
+  p = live_range_pool.allocate ();
   p->object = obj;
   p->start = start;
   p->finish = finish;
@@ -968,7 +963,7 @@ copy_live_range (live_range_t r)
 {
   live_range_t p;
 
-  p = (live_range_t) pool_alloc (live_range_pool);
+  p = live_range_pool.allocate ();
   *p = *r;
   return p;
 }
@@ -1089,7 +1084,7 @@ ira_live_ranges_intersect_p (live_range_t r1, live_range_t r2)
 void
 ira_finish_live_range (live_range_t r)
 {
-  pool_free (live_range_pool, r);
+  live_range_pool.remove (r);
 }
 
 /* Free list of allocno live ranges starting with R.  */
@@ -1136,7 +1131,7 @@ ira_free_allocno_costs (ira_allocno_t a)
       ira_object_id_map[OBJECT_CONFLICT_ID (obj)] = NULL;
       if (OBJECT_CONFLICT_ARRAY (obj) != NULL)
 	ira_free (OBJECT_CONFLICT_ARRAY (obj));
-      pool_free (object_pool, obj);
+      object_pool.remove (obj);
     }
 
   ira_allocnos[ALLOCNO_NUM (a)] = NULL;
@@ -1160,7 +1155,7 @@ static void
 finish_allocno (ira_allocno_t a)
 {
   ira_free_allocno_costs (a);
-  pool_free (allocno_pool, a);
+  allocno_pool.remove (a);
 }
 
 /* Free the memory allocated for all allocnos.  */
@@ -1175,15 +1170,15 @@ finish_allocnos (void)
   ira_free (ira_regno_allocno_map);
   ira_object_id_map_vec.release ();
   allocno_vec.release ();
-  free_alloc_pool (allocno_pool);
-  free_alloc_pool (object_pool);
-  free_alloc_pool (live_range_pool);
+  allocno_pool.release ();
+  object_pool.release ();
+  live_range_pool.release ();
 }
 
 \f
 
 /* Pools for allocno preferences.  */
-static alloc_pool pref_pool;
+static pool_allocator <ira_allocno_pref> pref_pool ("prefs", 100);
 
 /* Vec containing references to all created preferences.  It is a
    container of array ira_prefs.  */
@@ -1193,8 +1188,6 @@ static vec<ira_pref_t> pref_vec;
 static void
 initiate_prefs (void)
 {
-  pref_pool
-    = create_alloc_pool ("prefs", sizeof (struct ira_allocno_pref), 100);
   pref_vec.create (get_max_uid ());
   ira_prefs = NULL;
   ira_prefs_num = 0;
@@ -1218,7 +1211,7 @@ ira_create_pref (ira_allocno_t a, int hard_regno, int freq)
 {
   ira_pref_t pref;
 
-  pref = (ira_pref_t) pool_alloc (pref_pool);
+  pref = pref_pool.allocate ();
   pref->num = ira_prefs_num;
   pref->allocno = a;
   pref->hard_regno = hard_regno;
@@ -1316,7 +1309,7 @@ static void
 finish_pref (ira_pref_t pref)
 {
   ira_prefs[pref->num] = NULL;
-  pool_free (pref_pool, pref);
+  pref_pool.remove (pref);
 }
 
 /* Remove PREF from the list of allocno prefs and free memory for
@@ -1366,13 +1359,13 @@ finish_prefs (void)
   FOR_EACH_PREF (pref, pi)
     finish_pref (pref);
   pref_vec.release ();
-  free_alloc_pool (pref_pool);
+  pref_pool.release ();
 }
 
 \f
 
 /* Pools for copies.  */
-static alloc_pool copy_pool;
+static pool_allocator<ira_allocno_copy> copy_pool ("copies", 100);
 
 /* Vec containing references to all created copies.  It is a
    container of array ira_copies.  */
@@ -1382,8 +1375,6 @@ static vec<ira_copy_t> copy_vec;
 static void
 initiate_copies (void)
 {
-  copy_pool
-    = create_alloc_pool ("copies", sizeof (struct ira_allocno_copy), 100);
   copy_vec.create (get_max_uid ());
   ira_copies = NULL;
   ira_copies_num = 0;
@@ -1428,7 +1419,7 @@ ira_create_copy (ira_allocno_t first, ira_allocno_t second, int freq,
 {
   ira_copy_t cp;
 
-  cp = (ira_copy_t) pool_alloc (copy_pool);
+  cp = copy_pool.allocate ();
   cp->num = ira_copies_num;
   cp->first = first;
   cp->second = second;
@@ -1613,7 +1604,7 @@ ira_debug_allocno_copies (ira_allocno_t a)
 static void
 finish_copy (ira_copy_t cp)
 {
-  pool_free (copy_pool, cp);
+  copy_pool.remove (cp);
 }
 
 
@@ -1627,7 +1618,7 @@ finish_copies (void)
   FOR_EACH_COPY (cp, ci)
     finish_copy (cp);
   copy_vec.release ();
-  free_alloc_pool (copy_pool);
+  copy_pool.release ();
 }
 
 \f
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 30/35] Change use to type-based pool allocator in ipa-inline-analysis.c.
  2015-05-27 14:21 ` [PATCH 30/35] Change use to type-based pool allocator in ipa-inline-analysis.c mliska
@ 2015-05-29 14:06   ` Martin Liška
  0 siblings, 0 replies; 108+ messages in thread
From: Martin Liška @ 2015-05-29 14:06 UTC (permalink / raw)
  To: gcc-patches

[-- Attachment #1: Type: text/plain, Size: 6481 bytes --]

On 05/27/2015 03:56 PM, mliska wrote:
> gcc/ChangeLog:
>
> 2015-04-30  Martin Liska  <mliska@suse.cz>
>
> 	* ipa-inline-analysis.c (edge_set_predicate): Use new type-based pool allocator.
> 	(set_hint_predicate): Likewise.
> 	(inline_summary_alloc): Likewise.
> 	(reset_inline_edge_summary): Likewise.
> 	(reset_inline_summary): Likewise.
> 	(set_cond_stmt_execution_predicate): Likewise.
> 	(set_switch_stmt_execution_predicate): Likewise.
> 	(compute_bb_predicates): Likewise.
> 	(estimate_function_body_sizes): Likewise.
> 	(inline_free_summary): Likewise.
> ---
>   gcc/ipa-inline-analysis.c | 39 +++++++++++++++++----------------------
>   1 file changed, 17 insertions(+), 22 deletions(-)
>
> diff --git a/gcc/ipa-inline-analysis.c b/gcc/ipa-inline-analysis.c
> index 5d99887..7d8edee 100644
> --- a/gcc/ipa-inline-analysis.c
> +++ b/gcc/ipa-inline-analysis.c
> @@ -170,7 +170,7 @@ vec<inline_edge_summary_t> inline_edge_summary_vec;
>   vec<edge_growth_cache_entry> edge_growth_cache;
>
>   /* Edge predicates goes here.  */
> -static alloc_pool edge_predicate_pool;
> +static pool_allocator<predicate> edge_predicate_pool ("edge predicates", 10);
>
>   /* Return true predicate (tautology).
>      We represent it by empty list of clauses.  */
> @@ -804,13 +804,13 @@ edge_set_predicate (struct cgraph_edge *e, struct predicate *predicate)
>     if (predicate && !true_predicate_p (predicate))
>       {
>         if (!es->predicate)
> -	es->predicate = (struct predicate *) pool_alloc (edge_predicate_pool);
> +	es->predicate = edge_predicate_pool.allocate ();
>         *es->predicate = *predicate;
>       }
>     else
>       {
>         if (es->predicate)
> -	pool_free (edge_predicate_pool, es->predicate);
> +	edge_predicate_pool.remove (es->predicate);
>         es->predicate = NULL;
>       }
>   }
> @@ -823,13 +823,13 @@ set_hint_predicate (struct predicate **p, struct predicate new_predicate)
>     if (false_predicate_p (&new_predicate) || true_predicate_p (&new_predicate))
>       {
>         if (*p)
> -	pool_free (edge_predicate_pool, *p);
> +	edge_predicate_pool.remove (*p);
>         *p = NULL;
>       }
>     else
>       {
>         if (!*p)
> -	*p = (struct predicate *) pool_alloc (edge_predicate_pool);
> +	*p = edge_predicate_pool.allocate ();
>         **p = new_predicate;
>       }
>   }
> @@ -1044,9 +1044,6 @@ inline_summary_alloc (void)
>
>     if (inline_edge_summary_vec.length () <= (unsigned) symtab->edges_max_uid)
>       inline_edge_summary_vec.safe_grow_cleared (symtab->edges_max_uid + 1);
> -  if (!edge_predicate_pool)
> -    edge_predicate_pool = create_alloc_pool ("edge predicates",
> -					     sizeof (struct predicate), 10);
>   }
>
>   /* We are called multiple time for given function; clear
> @@ -1061,7 +1058,7 @@ reset_inline_edge_summary (struct cgraph_edge *e)
>
>         es->call_stmt_size = es->call_stmt_time = 0;
>         if (es->predicate)
> -	pool_free (edge_predicate_pool, es->predicate);
> +	edge_predicate_pool.remove (es->predicate);
>         es->predicate = NULL;
>         es->param.release ();
>       }
> @@ -1086,17 +1083,17 @@ reset_inline_summary (struct cgraph_node *node,
>     info->scc_no = 0;
>     if (info->loop_iterations)
>       {
> -      pool_free (edge_predicate_pool, info->loop_iterations);
> +      edge_predicate_pool.remove (info->loop_iterations);
>         info->loop_iterations = NULL;
>       }
>     if (info->loop_stride)
>       {
> -      pool_free (edge_predicate_pool, info->loop_stride);
> +      edge_predicate_pool.remove (info->loop_stride);
>         info->loop_stride = NULL;
>       }
>     if (info->array_index)
>       {
> -      pool_free (edge_predicate_pool, info->array_index);
> +      edge_predicate_pool.remove (info->array_index);
>         info->array_index = NULL;
>       }
>     vec_free (info->conds);
> @@ -1812,7 +1809,7 @@ set_cond_stmt_execution_predicate (struct ipa_node_params *info,
>   	      struct predicate p = add_condition (summary, index, &aggpos,
>   						  this_code,
>   						  gimple_cond_rhs (last));
> -	      e->aux = pool_alloc (edge_predicate_pool);
> +	      e->aux = edge_predicate_pool.allocate ();
>   	      *(struct predicate *) e->aux = p;
>   	    }
>   	}
> @@ -1845,7 +1842,7 @@ set_cond_stmt_execution_predicate (struct ipa_node_params *info,
>       {
>         struct predicate p = add_condition (summary, index, &aggpos,
>   					  IS_NOT_CONSTANT, NULL_TREE);
> -      e->aux = pool_alloc (edge_predicate_pool);
> +      e->aux = edge_predicate_pool.allocate ();
>         *(struct predicate *) e->aux = p;
>       }
>   }
> @@ -1878,7 +1875,7 @@ set_switch_stmt_execution_predicate (struct ipa_node_params *info,
>
>     FOR_EACH_EDGE (e, ei, bb->succs)
>       {
> -      e->aux = pool_alloc (edge_predicate_pool);
> +      e->aux = edge_predicate_pool.allocate ();
>         *(struct predicate *) e->aux = false_predicate ();
>       }
>     n = gimple_switch_num_labels (last);
> @@ -1932,7 +1929,7 @@ compute_bb_predicates (struct cgraph_node *node,
>
>     /* Entry block is always executable.  */
>     ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux
> -    = pool_alloc (edge_predicate_pool);
> +    = edge_predicate_pool.allocate ();
>     *(struct predicate *) ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux
>       = true_predicate ();
>
> @@ -1968,7 +1965,7 @@ compute_bb_predicates (struct cgraph_node *node,
>   	      if (!bb->aux)
>   		{
>   		  done = false;
> -		  bb->aux = pool_alloc (edge_predicate_pool);
> +		  bb->aux = edge_predicate_pool.allocate ();
>   		  *((struct predicate *) bb->aux) = p;
>   		}
>   	      else if (!predicates_equal_p (&p, (struct predicate *) bb->aux))
> @@ -2864,12 +2861,12 @@ estimate_function_body_sizes (struct cgraph_node *node, bool early)
>         edge_iterator ei;
>
>         if (bb->aux)
> -	pool_free (edge_predicate_pool, bb->aux);
> +	edge_predicate_pool.remove ((predicate *)bb->aux);
>         bb->aux = NULL;
>         FOR_EACH_EDGE (e, ei, bb->succs)
>   	{
>   	  if (e->aux)
> -	    pool_free (edge_predicate_pool, e->aux);
> +	    edge_predicate_pool.remove ((predicate *) e->aux);
>   	  e->aux = NULL;
>   	}
>       }
> @@ -4460,7 +4457,5 @@ inline_free_summary (void)
>     inline_summaries->release ();
>     inline_summaries = NULL;
>     inline_edge_summary_vec.release ();
> -  if (edge_predicate_pool)
> -    free_alloc_pool (edge_predicate_pool);
> -  edge_predicate_pool = 0;
> +  edge_predicate_pool.release ();
>   }
>

v2

[-- Attachment #2: 0029-Change-use-to-type-based-pool-allocator-in-ipa-inlin.patch --]
[-- Type: text/x-patch, Size: 6264 bytes --]

From c91771a81d63b037456c33f22372976d5bd38907 Mon Sep 17 00:00:00 2001
From: mliska <mliska@suse.cz>
Date: Wed, 27 May 2015 15:56:54 +0200
Subject: [PATCH 29/32] Change use to type-based pool allocator in
 ipa-inline-analysis.c.

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* ipa-inline-analysis.c (edge_set_predicate): Use new type-based pool allocator.
	(set_hint_predicate): Likewise.
	(inline_summary_alloc): Likewise.
	(reset_inline_edge_summary): Likewise.
	(reset_inline_summary): Likewise.
	(set_cond_stmt_execution_predicate): Likewise.
	(set_switch_stmt_execution_predicate): Likewise.
	(compute_bb_predicates): Likewise.
	(estimate_function_body_sizes): Likewise.
	(inline_free_summary): Likewise.
---
 gcc/ipa-inline-analysis.c | 39 +++++++++++++++++----------------------
 1 file changed, 17 insertions(+), 22 deletions(-)

diff --git a/gcc/ipa-inline-analysis.c b/gcc/ipa-inline-analysis.c
index 5d99887..7d8edee 100644
--- a/gcc/ipa-inline-analysis.c
+++ b/gcc/ipa-inline-analysis.c
@@ -170,7 +170,7 @@ vec<inline_edge_summary_t> inline_edge_summary_vec;
 vec<edge_growth_cache_entry> edge_growth_cache;
 
 /* Edge predicates goes here.  */
-static alloc_pool edge_predicate_pool;
+static pool_allocator<predicate> edge_predicate_pool ("edge predicates", 10);
 
 /* Return true predicate (tautology).
    We represent it by empty list of clauses.  */
@@ -804,13 +804,13 @@ edge_set_predicate (struct cgraph_edge *e, struct predicate *predicate)
   if (predicate && !true_predicate_p (predicate))
     {
       if (!es->predicate)
-	es->predicate = (struct predicate *) pool_alloc (edge_predicate_pool);
+	es->predicate = edge_predicate_pool.allocate ();
       *es->predicate = *predicate;
     }
   else
     {
       if (es->predicate)
-	pool_free (edge_predicate_pool, es->predicate);
+	edge_predicate_pool.remove (es->predicate);
       es->predicate = NULL;
     }
 }
@@ -823,13 +823,13 @@ set_hint_predicate (struct predicate **p, struct predicate new_predicate)
   if (false_predicate_p (&new_predicate) || true_predicate_p (&new_predicate))
     {
       if (*p)
-	pool_free (edge_predicate_pool, *p);
+	edge_predicate_pool.remove (*p);
       *p = NULL;
     }
   else
     {
       if (!*p)
-	*p = (struct predicate *) pool_alloc (edge_predicate_pool);
+	*p = edge_predicate_pool.allocate ();
       **p = new_predicate;
     }
 }
@@ -1044,9 +1044,6 @@ inline_summary_alloc (void)
 
   if (inline_edge_summary_vec.length () <= (unsigned) symtab->edges_max_uid)
     inline_edge_summary_vec.safe_grow_cleared (symtab->edges_max_uid + 1);
-  if (!edge_predicate_pool)
-    edge_predicate_pool = create_alloc_pool ("edge predicates",
-					     sizeof (struct predicate), 10);
 }
 
 /* We are called multiple time for given function; clear
@@ -1061,7 +1058,7 @@ reset_inline_edge_summary (struct cgraph_edge *e)
 
       es->call_stmt_size = es->call_stmt_time = 0;
       if (es->predicate)
-	pool_free (edge_predicate_pool, es->predicate);
+	edge_predicate_pool.remove (es->predicate);
       es->predicate = NULL;
       es->param.release ();
     }
@@ -1086,17 +1083,17 @@ reset_inline_summary (struct cgraph_node *node,
   info->scc_no = 0;
   if (info->loop_iterations)
     {
-      pool_free (edge_predicate_pool, info->loop_iterations);
+      edge_predicate_pool.remove (info->loop_iterations);
       info->loop_iterations = NULL;
     }
   if (info->loop_stride)
     {
-      pool_free (edge_predicate_pool, info->loop_stride);
+      edge_predicate_pool.remove (info->loop_stride);
       info->loop_stride = NULL;
     }
   if (info->array_index)
     {
-      pool_free (edge_predicate_pool, info->array_index);
+      edge_predicate_pool.remove (info->array_index);
       info->array_index = NULL;
     }
   vec_free (info->conds);
@@ -1812,7 +1809,7 @@ set_cond_stmt_execution_predicate (struct ipa_node_params *info,
 	      struct predicate p = add_condition (summary, index, &aggpos,
 						  this_code,
 						  gimple_cond_rhs (last));
-	      e->aux = pool_alloc (edge_predicate_pool);
+	      e->aux = edge_predicate_pool.allocate ();
 	      *(struct predicate *) e->aux = p;
 	    }
 	}
@@ -1845,7 +1842,7 @@ set_cond_stmt_execution_predicate (struct ipa_node_params *info,
     {
       struct predicate p = add_condition (summary, index, &aggpos,
 					  IS_NOT_CONSTANT, NULL_TREE);
-      e->aux = pool_alloc (edge_predicate_pool);
+      e->aux = edge_predicate_pool.allocate ();
       *(struct predicate *) e->aux = p;
     }
 }
@@ -1878,7 +1875,7 @@ set_switch_stmt_execution_predicate (struct ipa_node_params *info,
 
   FOR_EACH_EDGE (e, ei, bb->succs)
     {
-      e->aux = pool_alloc (edge_predicate_pool);
+      e->aux = edge_predicate_pool.allocate ();
       *(struct predicate *) e->aux = false_predicate ();
     }
   n = gimple_switch_num_labels (last);
@@ -1932,7 +1929,7 @@ compute_bb_predicates (struct cgraph_node *node,
 
   /* Entry block is always executable.  */
   ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux
-    = pool_alloc (edge_predicate_pool);
+    = edge_predicate_pool.allocate ();
   *(struct predicate *) ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux
     = true_predicate ();
 
@@ -1968,7 +1965,7 @@ compute_bb_predicates (struct cgraph_node *node,
 	      if (!bb->aux)
 		{
 		  done = false;
-		  bb->aux = pool_alloc (edge_predicate_pool);
+		  bb->aux = edge_predicate_pool.allocate ();
 		  *((struct predicate *) bb->aux) = p;
 		}
 	      else if (!predicates_equal_p (&p, (struct predicate *) bb->aux))
@@ -2864,12 +2861,12 @@ estimate_function_body_sizes (struct cgraph_node *node, bool early)
       edge_iterator ei;
 
       if (bb->aux)
-	pool_free (edge_predicate_pool, bb->aux);
+	edge_predicate_pool.remove ((predicate *)bb->aux);
       bb->aux = NULL;
       FOR_EACH_EDGE (e, ei, bb->succs)
 	{
 	  if (e->aux)
-	    pool_free (edge_predicate_pool, e->aux);
+	    edge_predicate_pool.remove ((predicate *) e->aux);
 	  e->aux = NULL;
 	}
     }
@@ -4460,7 +4457,5 @@ inline_free_summary (void)
   inline_summaries->release ();
   inline_summaries = NULL;
   inline_edge_summary_vec.release ();
-  if (edge_predicate_pool)
-    free_alloc_pool (edge_predicate_pool);
-  edge_predicate_pool = 0;
+  edge_predicate_pool.release ();
 }
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 31/35] Change use to type-based pool allocator in ipa-prop.c and ipa-cp.c.
  2015-05-27 14:20 ` [PATCH 31/35] Change use to type-based pool allocator in ipa-prop.c and ipa-cp.c mliska
@ 2015-05-29 14:09   ` Martin Liška
  0 siblings, 0 replies; 108+ messages in thread
From: Martin Liška @ 2015-05-29 14:09 UTC (permalink / raw)
  To: gcc-patches

[-- Attachment #1: Type: text/plain, Size: 6655 bytes --]

On 05/27/2015 03:56 PM, mliska wrote:
> gcc/ChangeLog:
>
> 2015-04-30  Martin Liska  <mliska@suse.cz>
>
> 	* ipa-cp.c (ipcp_value::add_source): Use new type-based pool allocator.
> 	(allocate_and_init_ipcp_value): Likewise.
> 	(ipcp_lattice::add_value): Likewise.
> 	(merge_agg_lats_step): Likewise.
> 	(ipcp_driver): Likewise.
> 	* ipa-prop.c (ipa_free_all_structures_after_ipa_cp): Likewise.
> 	(ipa_free_all_structures_after_iinln): Likewise.
> 	* ipa-prop.h: Likewise.
> ---
>   gcc/ipa-cp.c   | 37 +++++++++++++++++--------------------
>   gcc/ipa-prop.c | 20 ++++++++------------
>   gcc/ipa-prop.h | 19 +++++++++++++++----
>   3 files changed, 40 insertions(+), 36 deletions(-)
>
> diff --git a/gcc/ipa-cp.c b/gcc/ipa-cp.c
> index 356f402..0c3f885 100644
> --- a/gcc/ipa-cp.c
> +++ b/gcc/ipa-cp.c
> @@ -291,10 +291,17 @@ public:
>
>   /* Allocation pools for values and their sources in ipa-cp.  */
>
> -alloc_pool ipcp_cst_values_pool;
> -alloc_pool ipcp_poly_ctx_values_pool;
> -alloc_pool ipcp_sources_pool;
> -alloc_pool ipcp_agg_lattice_pool;
> +pool_allocator<ipcp_value<tree> > ipcp_cst_values_pool ("IPA-CP constant values",
> +						       32);
> +
> +pool_allocator<ipcp_value<ipa_polymorphic_call_context> > ipcp_poly_ctx_values_pool
> +  ("IPA-CP polymorphic contexts", 32);
> +
> +pool_allocator<ipcp_value_source<tree> > ipcp_sources_pool
> +  ("IPA-CP value sources", 64);
> +
> +pool_allocator<ipcp_agg_lattice> ipcp_agg_lattice_pool
> +  ("IPA_CP aggregate lattices", 32);
>
>   /* Maximal count found in program.  */
>
> @@ -1147,7 +1154,7 @@ ipcp_value<valtype>::add_source (cgraph_edge *cs, ipcp_value *src_val,
>   {
>     ipcp_value_source<valtype> *src;
>
> -  src = new (pool_alloc (ipcp_sources_pool)) ipcp_value_source<valtype>;
> +  src = new (ipcp_sources_pool.allocate ()) ipcp_value_source<valtype>;
>     src->offset = offset;
>     src->cs = cs;
>     src->val = src_val;
> @@ -1165,7 +1172,7 @@ allocate_and_init_ipcp_value (tree source)
>   {
>     ipcp_value<tree> *val;
>
> -  val = new (pool_alloc (ipcp_cst_values_pool)) ipcp_value<tree>;
> +  val = ipcp_cst_values_pool.allocate ();
>     memset (val, 0, sizeof (*val));
>     val->value = source;
>     return val;
> @@ -1179,8 +1186,8 @@ allocate_and_init_ipcp_value (ipa_polymorphic_call_context source)
>   {
>     ipcp_value<ipa_polymorphic_call_context> *val;
>
> -  val = new (pool_alloc (ipcp_poly_ctx_values_pool))
> -    ipcp_value<ipa_polymorphic_call_context>;
> +  // TODO
> +  val = ipcp_poly_ctx_values_pool.allocate ();
>     memset (val, 0, sizeof (*val));
>     val->value = source;
>     return val;
> @@ -1229,7 +1236,7 @@ ipcp_lattice<valtype>::add_value (valtype newval, cgraph_edge *cs,
>   	    {
>   	      ipcp_value_source<valtype> *src = val->sources;
>   	      val->sources = src->next;
> -	      pool_free (ipcp_sources_pool, src);
> +	      ipcp_sources_pool.remove ((ipcp_value_source<tree>*)src);
>   	    }
>   	}
>
> @@ -1599,7 +1606,7 @@ merge_agg_lats_step (struct ipcp_param_lattices *dest_plats,
>         if (dest_plats->aggs_count == PARAM_VALUE (PARAM_IPA_MAX_AGG_ITEMS))
>   	return false;
>         dest_plats->aggs_count++;
> -      new_al = (struct ipcp_agg_lattice *) pool_alloc (ipcp_agg_lattice_pool);
> +      new_al = ipcp_agg_lattice_pool.allocate ();
>         memset (new_al, 0, sizeof (*new_al));
>
>         new_al->offset = offset;
> @@ -4463,16 +4470,6 @@ ipcp_driver (void)
>     edge_removal_hook_holder =
>       symtab->add_edge_removal_hook (&ipcp_edge_removal_hook, NULL);
>
> -  ipcp_cst_values_pool = create_alloc_pool ("IPA-CP constant values",
> -					    sizeof (ipcp_value<tree>), 32);
> -  ipcp_poly_ctx_values_pool = create_alloc_pool
> -    ("IPA-CP polymorphic contexts",
> -     sizeof (ipcp_value<ipa_polymorphic_call_context>), 32);
> -  ipcp_sources_pool = create_alloc_pool ("IPA-CP value sources",
> -					 sizeof (ipcp_value_source<tree>), 64);
> -  ipcp_agg_lattice_pool = create_alloc_pool ("IPA_CP aggregate lattices",
> -					     sizeof (struct ipcp_agg_lattice),
> -					     32);
>     if (dump_file)
>       {
>         fprintf (dump_file, "\nIPA structures before propagation:\n");
> diff --git a/gcc/ipa-prop.c b/gcc/ipa-prop.c
> index 80ce6b8..e90502b 100644
> --- a/gcc/ipa-prop.c
> +++ b/gcc/ipa-prop.c
> @@ -3669,10 +3669,10 @@ ipa_free_all_structures_after_ipa_cp (void)
>       {
>         ipa_free_all_edge_args ();
>         ipa_free_all_node_params ();
> -      free_alloc_pool (ipcp_sources_pool);
> -      free_alloc_pool (ipcp_cst_values_pool);
> -      free_alloc_pool (ipcp_poly_ctx_values_pool);
> -      free_alloc_pool (ipcp_agg_lattice_pool);
> +      ipcp_sources_pool.release ();
> +      ipcp_cst_values_pool.release ();
> +      ipcp_poly_ctx_values_pool.release ();
> +      ipcp_agg_lattice_pool.release ();
>         ipa_unregister_cgraph_hooks ();
>         ipa_refdesc_pool.release ();
>       }
> @@ -3687,14 +3687,10 @@ ipa_free_all_structures_after_iinln (void)
>     ipa_free_all_edge_args ();
>     ipa_free_all_node_params ();
>     ipa_unregister_cgraph_hooks ();
> -  if (ipcp_sources_pool)
> -    free_alloc_pool (ipcp_sources_pool);
> -  if (ipcp_cst_values_pool)
> -    free_alloc_pool (ipcp_cst_values_pool);
> -  if (ipcp_poly_ctx_values_pool)
> -    free_alloc_pool (ipcp_poly_ctx_values_pool);
> -  if (ipcp_agg_lattice_pool)
> -    free_alloc_pool (ipcp_agg_lattice_pool);
> +  ipcp_sources_pool.release ();
> +  ipcp_cst_values_pool.release ();
> +  ipcp_poly_ctx_values_pool.release ();
> +  ipcp_agg_lattice_pool.release ();
>     ipa_refdesc_pool.release ();
>   }
>
> diff --git a/gcc/ipa-prop.h b/gcc/ipa-prop.h
> index 0488254..e6725aa 100644
> --- a/gcc/ipa-prop.h
> +++ b/gcc/ipa-prop.h
> @@ -595,10 +595,21 @@ void ipa_print_node_jump_functions (FILE *f, struct cgraph_node *node);
>   void ipa_print_all_jump_functions (FILE * f);
>   void ipcp_verify_propagated_values (void);
>
> -extern alloc_pool ipcp_cst_values_pool;
> -extern alloc_pool ipcp_poly_ctx_values_pool;
> -extern alloc_pool ipcp_sources_pool;
> -extern alloc_pool ipcp_agg_lattice_pool;
> +template <typename value>
> +class ipcp_value;
> +
> +extern pool_allocator<ipcp_value<tree> > ipcp_cst_values_pool;
> +extern pool_allocator<ipcp_value<ipa_polymorphic_call_context> >
> +  ipcp_poly_ctx_values_pool;
> +
> +template <typename valtype>
> +class ipcp_value_source;
> +
> +extern pool_allocator<ipcp_value_source<tree> > ipcp_sources_pool;
> +
> +class ipcp_agg_lattice;
> +
> +extern pool_allocator<ipcp_agg_lattice> ipcp_agg_lattice_pool;
>
>   /* Operation to be performed for the parameter in ipa_parm_adjustment
>      below.  */
>

v2

[-- Attachment #2: 0030-Change-use-to-type-based-pool-allocator-in-ipa-prop..patch --]
[-- Type: text/x-patch, Size: 6469 bytes --]

From b2108a6639ba1c725bcf1775479d05ddb20fd6b3 Mon Sep 17 00:00:00 2001
From: mliska <mliska@suse.cz>
Date: Wed, 27 May 2015 15:56:55 +0200
Subject: [PATCH 30/32] Change use to type-based pool allocator in ipa-prop.c
 and ipa-cp.c.

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* ipa-cp.c (ipcp_value::add_source): Use new type-based pool allocator.
	(allocate_and_init_ipcp_value): Likewise.
	(ipcp_lattice::add_value): Likewise.
	(merge_agg_lats_step): Likewise.
	(ipcp_driver): Likewise.
	* ipa-prop.c (ipa_free_all_structures_after_ipa_cp): Likewise.
	(ipa_free_all_structures_after_iinln): Likewise.
	* ipa-prop.h: Likewise.
---
 gcc/ipa-cp.c   | 37 +++++++++++++++++--------------------
 gcc/ipa-prop.c | 20 ++++++++------------
 gcc/ipa-prop.h | 19 +++++++++++++++----
 3 files changed, 40 insertions(+), 36 deletions(-)

diff --git a/gcc/ipa-cp.c b/gcc/ipa-cp.c
index 356f402..9f812fa 100644
--- a/gcc/ipa-cp.c
+++ b/gcc/ipa-cp.c
@@ -291,10 +291,17 @@ public:
 
 /* Allocation pools for values and their sources in ipa-cp.  */
 
-alloc_pool ipcp_cst_values_pool;
-alloc_pool ipcp_poly_ctx_values_pool;
-alloc_pool ipcp_sources_pool;
-alloc_pool ipcp_agg_lattice_pool;
+pool_allocator<ipcp_value<tree> > ipcp_cst_values_pool
+  ("IPA-CP constant values", 32);
+
+pool_allocator<ipcp_value<ipa_polymorphic_call_context> >
+  ipcp_poly_ctx_values_pool ("IPA-CP polymorphic contexts", 32);
+
+pool_allocator<ipcp_value_source<tree> > ipcp_sources_pool
+  ("IPA-CP value sources", 64);
+
+pool_allocator<ipcp_agg_lattice> ipcp_agg_lattice_pool
+  ("IPA_CP aggregate lattices", 32);
 
 /* Maximal count found in program.  */
 
@@ -1147,7 +1154,7 @@ ipcp_value<valtype>::add_source (cgraph_edge *cs, ipcp_value *src_val,
 {
   ipcp_value_source<valtype> *src;
 
-  src = new (pool_alloc (ipcp_sources_pool)) ipcp_value_source<valtype>;
+  src = new (ipcp_sources_pool.allocate ()) ipcp_value_source<valtype>;
   src->offset = offset;
   src->cs = cs;
   src->val = src_val;
@@ -1165,7 +1172,7 @@ allocate_and_init_ipcp_value (tree source)
 {
   ipcp_value<tree> *val;
 
-  val = new (pool_alloc (ipcp_cst_values_pool)) ipcp_value<tree>;
+  val = ipcp_cst_values_pool.allocate ();
   memset (val, 0, sizeof (*val));
   val->value = source;
   return val;
@@ -1179,8 +1186,8 @@ allocate_and_init_ipcp_value (ipa_polymorphic_call_context source)
 {
   ipcp_value<ipa_polymorphic_call_context> *val;
 
-  val = new (pool_alloc (ipcp_poly_ctx_values_pool))
-    ipcp_value<ipa_polymorphic_call_context>;
+  // TODO
+  val = ipcp_poly_ctx_values_pool.allocate ();
   memset (val, 0, sizeof (*val));
   val->value = source;
   return val;
@@ -1229,7 +1236,7 @@ ipcp_lattice<valtype>::add_value (valtype newval, cgraph_edge *cs,
 	    {
 	      ipcp_value_source<valtype> *src = val->sources;
 	      val->sources = src->next;
-	      pool_free (ipcp_sources_pool, src);
+	      ipcp_sources_pool.remove ((ipcp_value_source<tree>*)src);
 	    }
 	}
 
@@ -1599,7 +1606,7 @@ merge_agg_lats_step (struct ipcp_param_lattices *dest_plats,
       if (dest_plats->aggs_count == PARAM_VALUE (PARAM_IPA_MAX_AGG_ITEMS))
 	return false;
       dest_plats->aggs_count++;
-      new_al = (struct ipcp_agg_lattice *) pool_alloc (ipcp_agg_lattice_pool);
+      new_al = ipcp_agg_lattice_pool.allocate ();
       memset (new_al, 0, sizeof (*new_al));
 
       new_al->offset = offset;
@@ -4463,16 +4470,6 @@ ipcp_driver (void)
   edge_removal_hook_holder =
     symtab->add_edge_removal_hook (&ipcp_edge_removal_hook, NULL);
 
-  ipcp_cst_values_pool = create_alloc_pool ("IPA-CP constant values",
-					    sizeof (ipcp_value<tree>), 32);
-  ipcp_poly_ctx_values_pool = create_alloc_pool
-    ("IPA-CP polymorphic contexts",
-     sizeof (ipcp_value<ipa_polymorphic_call_context>), 32);
-  ipcp_sources_pool = create_alloc_pool ("IPA-CP value sources",
-					 sizeof (ipcp_value_source<tree>), 64);
-  ipcp_agg_lattice_pool = create_alloc_pool ("IPA_CP aggregate lattices",
-					     sizeof (struct ipcp_agg_lattice),
-					     32);
   if (dump_file)
     {
       fprintf (dump_file, "\nIPA structures before propagation:\n");
diff --git a/gcc/ipa-prop.c b/gcc/ipa-prop.c
index 80ce6b8..e90502b 100644
--- a/gcc/ipa-prop.c
+++ b/gcc/ipa-prop.c
@@ -3669,10 +3669,10 @@ ipa_free_all_structures_after_ipa_cp (void)
     {
       ipa_free_all_edge_args ();
       ipa_free_all_node_params ();
-      free_alloc_pool (ipcp_sources_pool);
-      free_alloc_pool (ipcp_cst_values_pool);
-      free_alloc_pool (ipcp_poly_ctx_values_pool);
-      free_alloc_pool (ipcp_agg_lattice_pool);
+      ipcp_sources_pool.release ();
+      ipcp_cst_values_pool.release ();
+      ipcp_poly_ctx_values_pool.release ();
+      ipcp_agg_lattice_pool.release ();
       ipa_unregister_cgraph_hooks ();
       ipa_refdesc_pool.release ();
     }
@@ -3687,14 +3687,10 @@ ipa_free_all_structures_after_iinln (void)
   ipa_free_all_edge_args ();
   ipa_free_all_node_params ();
   ipa_unregister_cgraph_hooks ();
-  if (ipcp_sources_pool)
-    free_alloc_pool (ipcp_sources_pool);
-  if (ipcp_cst_values_pool)
-    free_alloc_pool (ipcp_cst_values_pool);
-  if (ipcp_poly_ctx_values_pool)
-    free_alloc_pool (ipcp_poly_ctx_values_pool);
-  if (ipcp_agg_lattice_pool)
-    free_alloc_pool (ipcp_agg_lattice_pool);
+  ipcp_sources_pool.release ();
+  ipcp_cst_values_pool.release ();
+  ipcp_poly_ctx_values_pool.release ();
+  ipcp_agg_lattice_pool.release ();
   ipa_refdesc_pool.release ();
 }
 
diff --git a/gcc/ipa-prop.h b/gcc/ipa-prop.h
index 0488254..e6725aa 100644
--- a/gcc/ipa-prop.h
+++ b/gcc/ipa-prop.h
@@ -595,10 +595,21 @@ void ipa_print_node_jump_functions (FILE *f, struct cgraph_node *node);
 void ipa_print_all_jump_functions (FILE * f);
 void ipcp_verify_propagated_values (void);
 
-extern alloc_pool ipcp_cst_values_pool;
-extern alloc_pool ipcp_poly_ctx_values_pool;
-extern alloc_pool ipcp_sources_pool;
-extern alloc_pool ipcp_agg_lattice_pool;
+template <typename value>
+class ipcp_value;
+
+extern pool_allocator<ipcp_value<tree> > ipcp_cst_values_pool;
+extern pool_allocator<ipcp_value<ipa_polymorphic_call_context> >
+  ipcp_poly_ctx_values_pool;
+
+template <typename valtype>
+class ipcp_value_source;
+
+extern pool_allocator<ipcp_value_source<tree> > ipcp_sources_pool;
+
+class ipcp_agg_lattice;
+
+extern pool_allocator<ipcp_agg_lattice> ipcp_agg_lattice_pool;
 
 /* Operation to be performed for the parameter in ipa_parm_adjustment
    below.  */
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 35/35] Remove old pool allocator.
  2015-05-27 19:40   ` Jeff Law
@ 2015-05-29 14:11     ` Martin Liška
  0 siblings, 0 replies; 108+ messages in thread
From: Martin Liška @ 2015-05-29 14:11 UTC (permalink / raw)
  To: gcc-patches

[-- Attachment #1: Type: text/plain, Size: 696 bytes --]

On 05/27/2015 08:24 PM, Jeff Law wrote:
> On 05/27/2015 07:56 AM, mliska wrote:
>> gcc/ChangeLog:
>>
>> 2015-04-30  Martin Liska  <mliska@suse.cz>
>>
>>     * alloc-pool.c (create_alloc_pool): Remove.
>>     (empty_alloc_pool): Likewise.
>>     (free_alloc_pool): Likewise.
>>     (free_alloc_pool_if_empty): Likewise.
>>     (pool_alloc): Likewise.
>>     (pool_free): Likewise.
>>     * alloc-pool.h: Remove old declarations.
> So, the remaining patches to use the type based pool allocator are OK as long as they have the same overall structure as the patches that have already been OK.   You've got something goofy in #32/#34, which I'll assume you'll sort out sensibly.
>
> OK.
>
> jeff

v2

[-- Attachment #2: 0032-Remove-old-pool-allocator.patch --]
[-- Type: text/x-patch, Size: 11166 bytes --]

From 92bb6d5c6375ca1dc2f3d09807ff34c45c4e60fc Mon Sep 17 00:00:00 2001
From: mliska <mliska@suse.cz>
Date: Wed, 27 May 2015 15:56:56 +0200
Subject: [PATCH 32/32] Remove old pool allocator.

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* alloc-pool.c (create_alloc_pool): Remove.
	(empty_alloc_pool): Likewise.
	(free_alloc_pool): Likewise.
	(free_alloc_pool_if_empty): Likewise.
	(pool_alloc): Likewise.
	(pool_free): Likewise.
	* alloc-pool.h: Remove old declarations.
---
 gcc/alloc-pool.c | 274 -------------------------------------------------------
 gcc/alloc-pool.h |  51 -----------
 2 files changed, 325 deletions(-)

diff --git a/gcc/alloc-pool.c b/gcc/alloc-pool.c
index 0bea7a6..78bc305 100644
--- a/gcc/alloc-pool.c
+++ b/gcc/alloc-pool.c
@@ -27,39 +27,6 @@ along with GCC; see the file COPYING3.  If not see
 
 ALLOC_POOL_ID_TYPE last_id;
 
-#define align_eight(x) (((x+7) >> 3) << 3)
-
-/* The internal allocation object.  */
-typedef struct allocation_object_def
-{
-#ifdef ENABLE_CHECKING
-  /* The ID of alloc pool which the object was allocated from.  */
-  ALLOC_POOL_ID_TYPE id;
-#endif
-
-  union
-    {
-      /* The data of the object.  */
-      char data[1];
-
-      /* Because we want any type of data to be well aligned after the ID,
-	 the following elements are here.  They are never accessed so
-	 the allocated object may be even smaller than this structure.
-	 We do not care about alignment for floating-point types.  */
-      char *align_p;
-      int64_t align_i;
-    } u;
-} allocation_object;
-
-/* Convert a pointer to allocation_object from a pointer to user data.  */
-#define ALLOCATION_OBJECT_PTR_FROM_USER_PTR(X)				\
-   ((allocation_object *) (((char *) (X))				\
-			   - offsetof (allocation_object, u.data)))
-
-/* Convert a pointer to user data from a pointer to allocation_object.  */
-#define USER_PTR_FROM_ALLOCATION_OBJECT_PTR(X)				\
-   ((void *) (((allocation_object *) (X))->u.data))
-
 /* Hashtable mapping alloc_pool names to descriptors.  */
 hash_map<const char *, alloc_pool_descriptor> *alloc_pool_hash;
 
@@ -72,247 +39,6 @@ allocate_pool_descriptor (const char *name)
   return &alloc_pool_hash->get_or_insert (name);
 }
 
-
-/* Create a pool of things of size SIZE, with NUM in each block we
-   allocate.  */
-
-alloc_pool
-create_alloc_pool (const char *name, size_t size, size_t num)
-{
-  alloc_pool pool;
-  size_t header_size;
-
-  gcc_checking_assert (name);
-
-  /* Make size large enough to store the list header.  */
-  if (size < sizeof (alloc_pool_list))
-    size = sizeof (alloc_pool_list);
-
-  /* Now align the size to a multiple of 4.  */
-  size = align_eight (size);
-
-#ifdef ENABLE_CHECKING
-  /* Add the aligned size of ID.  */
-  size += offsetof (allocation_object, u.data);
-#endif
-
-  /* Um, we can't really allocate 0 elements per block.  */
-  gcc_checking_assert (num);
-
-  /* Allocate memory for the pool structure.  */
-  pool = XNEW (struct alloc_pool_def);
-
-  /* Now init the various pieces of our pool structure.  */
-  pool->name = /*xstrdup (name)*/name;
-  pool->elt_size = size;
-  pool->elts_per_block = num;
-
-  if (GATHER_STATISTICS)
-    {
-      struct alloc_pool_descriptor *desc = allocate_pool_descriptor (name);
-      desc->elt_size = size;
-      desc->created++;
-    }
-
-  /* List header size should be a multiple of 8.  */
-  header_size = align_eight (sizeof (struct alloc_pool_list_def));
-
-  pool->block_size = (size * num) + header_size;
-  pool->returned_free_list = NULL;
-  pool->virgin_free_list = NULL;
-  pool->virgin_elts_remaining = 0;
-  pool->elts_allocated = 0;
-  pool->elts_free = 0;
-  pool->blocks_allocated = 0;
-  pool->block_list = NULL;
-
-#ifdef ENABLE_CHECKING
-  /* Increase the last used ID and use it for this pool.
-     ID == 0 is used for free elements of pool so skip it.  */
-  last_id++;
-  if (last_id == 0)
-    last_id++;
-
-  pool->id = last_id;
-#endif
-
-  return (pool);
-}
-
-/* Free all memory allocated for the given memory pool.  */
-void
-empty_alloc_pool (alloc_pool pool)
-{
-  alloc_pool_list block, next_block;
-
-  gcc_checking_assert (pool);
-
-  /* Free each block allocated to the pool.  */
-  for (block = pool->block_list; block != NULL; block = next_block)
-    {
-      next_block = block->next;
-      free (block);
-    }
-
-  if (GATHER_STATISTICS)
-    {
-      struct alloc_pool_descriptor *desc = allocate_pool_descriptor (pool->name);
-      desc->current -= (pool->elts_allocated - pool->elts_free) * pool->elt_size;
-    }
-
-  pool->returned_free_list = NULL;
-  pool->virgin_free_list = NULL;
-  pool->virgin_elts_remaining = 0;
-  pool->elts_allocated = 0;
-  pool->elts_free = 0;
-  pool->blocks_allocated = 0;
-  pool->block_list = NULL;
-}
-
-/* Free all memory allocated for the given memory pool and the pool itself.  */
-void
-free_alloc_pool (alloc_pool pool)
-{
-  /* First empty the pool.  */
-  empty_alloc_pool (pool);
-#ifdef ENABLE_CHECKING
-  memset (pool, 0xaf, sizeof (*pool));
-#endif
-  /* Lastly, free the pool.  */
-  free (pool);
-}
-
-/* Frees the alloc_pool, if it is empty and zero *POOL in this case.  */
-void
-free_alloc_pool_if_empty (alloc_pool *pool)
-{
-  if ((*pool)->elts_free == (*pool)->elts_allocated)
-    {
-      free_alloc_pool (*pool);
-      *pool = NULL;
-    }
-}
-
-/* Allocates one element from the pool specified.  */
-void *
-pool_alloc (alloc_pool pool)
-{
-  alloc_pool_list header;
-#ifdef ENABLE_VALGRIND_ANNOTATIONS
-  int size;
-#endif
-
-  if (GATHER_STATISTICS)
-    {
-      struct alloc_pool_descriptor *desc = allocate_pool_descriptor (pool->name);
-
-      desc->allocated += pool->elt_size;
-      desc->current += pool->elt_size;
-      if (desc->peak < desc->current)
-	desc->peak = desc->current;
-    }
-
-  gcc_checking_assert (pool);
-#ifdef ENABLE_VALGRIND_ANNOTATIONS
-  size = pool->elt_size - offsetof (allocation_object, u.data);
-#endif
-
-  /* If there are no more free elements, make some more!.  */
-  if (!pool->returned_free_list)
-    {
-      char *block;
-      if (!pool->virgin_elts_remaining)
-	{
-	  alloc_pool_list block_header;
-
-	  /* Make the block.  */
-	  block = XNEWVEC (char, pool->block_size);
-	  block_header = (alloc_pool_list) block;
-	  block += align_eight (sizeof (struct alloc_pool_list_def));
-
-	  /* Throw it on the block list.  */
-	  block_header->next = pool->block_list;
-	  pool->block_list = block_header;
-
-	  /* Make the block available for allocation.  */
-	  pool->virgin_free_list = block;
-	  pool->virgin_elts_remaining = pool->elts_per_block;
-
-	  /* Also update the number of elements we have free/allocated, and
-	     increment the allocated block count.  */
-	  pool->elts_allocated += pool->elts_per_block;
-	  pool->elts_free += pool->elts_per_block;
-	  pool->blocks_allocated += 1;
-	}
-
-
-      /* We now know that we can take the first elt off the virgin list and
-	 put it on the returned list. */
-      block = pool->virgin_free_list;
-      header = (alloc_pool_list) USER_PTR_FROM_ALLOCATION_OBJECT_PTR (block);
-      header->next = NULL;
-#ifdef ENABLE_CHECKING
-      /* Mark the element to be free.  */
-      ((allocation_object *) block)->id = 0;
-#endif
-      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (header,size));
-      pool->returned_free_list = header;
-      pool->virgin_free_list += pool->elt_size;
-      pool->virgin_elts_remaining--;
-
-    }
-
-  /* Pull the first free element from the free list, and return it.  */
-  header = pool->returned_free_list;
-  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (header, sizeof (*header)));
-  pool->returned_free_list = header->next;
-  pool->elts_free--;
-
-#ifdef ENABLE_CHECKING
-  /* Set the ID for element.  */
-  ALLOCATION_OBJECT_PTR_FROM_USER_PTR (header)->id = pool->id;
-#endif
-  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (header, size));
-
-  return ((void *) header);
-}
-
-/* Puts PTR back on POOL's free list.  */
-void
-pool_free (alloc_pool pool, void *ptr)
-{
-  alloc_pool_list header;
-#if defined(ENABLE_VALGRIND_ANNOTATIONS) || defined(ENABLE_CHECKING)
-  int size;
-  size = pool->elt_size - offsetof (allocation_object, u.data);
-#endif
-
-#ifdef ENABLE_CHECKING
-  gcc_assert (ptr
-	      /* Check if we free more than we allocated, which is Bad (TM).  */
-	      && pool->elts_free < pool->elts_allocated
-	      /* Check whether the PTR was allocated from POOL.  */
-	      && pool->id == ALLOCATION_OBJECT_PTR_FROM_USER_PTR (ptr)->id);
-
-  memset (ptr, 0xaf, size);
-
-  /* Mark the element to be free.  */
-  ALLOCATION_OBJECT_PTR_FROM_USER_PTR (ptr)->id = 0;
-#endif
-
-  header = (alloc_pool_list) ptr;
-  header->next = pool->returned_free_list;
-  pool->returned_free_list = header;
-  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (ptr, size));
-  pool->elts_free++;
-
-  if (GATHER_STATISTICS)
-    {
-      struct alloc_pool_descriptor *desc = allocate_pool_descriptor (pool->name);
-      desc->current -= pool->elt_size;
-    }
-}
-
 /* Output per-alloc_pool statistics.  */
 
 /* Used to accumulate statistics about alloc_pool sizes.  */
diff --git a/gcc/alloc-pool.h b/gcc/alloc-pool.h
index ec671dc..6508726 100644
--- a/gcc/alloc-pool.h
+++ b/gcc/alloc-pool.h
@@ -22,46 +22,6 @@ along with GCC; see the file COPYING3.  If not see
 
 #include "hash-map.h"
 
-typedef unsigned long ALLOC_POOL_ID_TYPE;
-
-typedef struct alloc_pool_list_def
-{
-  struct alloc_pool_list_def *next;
-}
- *alloc_pool_list;
-
-typedef struct alloc_pool_def
-{
-  const char *name;
-  ALLOC_POOL_ID_TYPE id;
-  size_t elts_per_block;
-
-  /* These are the elements that have been allocated at least once and freed.  */
-  alloc_pool_list returned_free_list;
-
-  /* These are the elements that have not yet been allocated out of
-     the last block obtained from XNEWVEC.  */
-  char* virgin_free_list;
-
-  /* The number of elements in the virgin_free_list that can be
-     allocated before needing another block.  */
-  size_t virgin_elts_remaining;
-
-  size_t elts_allocated;
-  size_t elts_free;
-  size_t blocks_allocated;
-  alloc_pool_list block_list;
-  size_t block_size;
-  size_t elt_size;
-}
- *alloc_pool;
-
-extern alloc_pool create_alloc_pool (const char *, size_t, size_t);
-extern void free_alloc_pool (alloc_pool);
-extern void empty_alloc_pool (alloc_pool);
-extern void free_alloc_pool_if_empty (alloc_pool *);
-extern void *pool_alloc (alloc_pool) ATTRIBUTE_MALLOC;
-extern void pool_free (alloc_pool, void *);
 extern void dump_alloc_pool_statistics (void);
 
 typedef unsigned long ALLOC_POOL_ID_TYPE;
@@ -76,21 +36,10 @@ public:
      potentially IGNORE_TYPE_SIZE.  */
   pool_allocator (const char *name, size_t num, size_t extra_size = 0,
 		  bool ignore_type_size = false);
-
-  /* Default destuctor.  */
   ~pool_allocator ();
-
-  /* Release internal data structures.  */
   void release ();
-
-  /* Release internal data structures if the pool has not allocated
-     an object.  */
   void release_if_empty ();
-
-  /* Allocate a new object.  */
   T *allocate () ATTRIBUTE_MALLOC;
-
-  /* Release OBJECT that must come from the pool.  */
   void remove (T *object);
 
 private:
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 01/35] Introduce new type-based pool allocator.
  2015-05-29 13:33       ` Martin Liška
@ 2015-05-30  5:14         ` Jeff Law
  2015-06-02 10:10         ` Andreas Schwab
  1 sibling, 0 replies; 108+ messages in thread
From: Jeff Law @ 2015-05-30  5:14 UTC (permalink / raw)
  To: Martin Liška, gcc-patches

On 05/29/2015 07:31 AM, Martin Liška wrote:
> On 05/28/2015 07:15 PM, Jeff Law wrote:
>> On 05/28/2015 06:49 AM, Martin Liška wrote:
>> .
>>>
>>> This mechanism has been just adapted. I find it quite useful as we have
>>> examples in source code where we
>>> allocate same struct/class types from a various pool. For debugging
>>> purpose, it helps to identify if
>>> release operation is called for a correct pool.
>> I saw that you were following existing practice for the pools in the
>> removal patch. I still don't like it as it makes mixing and matching
>> objects harder when debugging gcc and if the structure is exposed for
>> plugins, then we've got an unnecessary ABI plugin breakage.
>>
>> I certainly understand how it's useful -- I'm not questioning that.
>> I'm questioning changing the size of structures on ENABLE_CHECKING.
>>
>> My first inclination would be to include all that stuff
>> unconditionally.  If that's too much overhead, then perhaps include
>> the structure member, but not bother with any of the bookkeeping
>> except for ENABLE_CHECKING.
>
> Hi.
>
> Updated version of patch removes ENABLE_CHECKING in the struct definition.
>
> News in the patchset I'm going to re-send:
> + Changelog entries are fixed for spaces
> + Each patch passes ./contrib/check_GNU_style.sh script
> + pool_allocator::pool_allocator is a trivial constructor and first
> allocation launches initialization
> + some patches are squashed as were mentioned multiple time
> (ira-color.c, ira-build.c)
>
> The patch set survives x86_64-linux-pc boostrap, I'm going to re-run
> regression tests.
Sounds perfect.   Once the regression testing is done, the whole set is 
fine for the trunk.

Thanks for tackling this.

jeff

^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 01/35] Introduce new type-based pool allocator.
  2015-05-29 13:33       ` Martin Liška
  2015-05-30  5:14         ` Jeff Law
@ 2015-06-02 10:10         ` Andreas Schwab
  2015-06-02 13:57           ` Martin Liška
  1 sibling, 1 reply; 108+ messages in thread
From: Andreas Schwab @ 2015-06-02 10:10 UTC (permalink / raw)
  To: Martin Liška; +Cc: Jeff Law, gcc-patches

In file included from ../../gcc/stmt.c:78:0:
../../gcc/alloc-pool.h: In function 'void expand_sjlj_dispatch_table(rtx, vec<t\
ree_node*>)':
../../gcc/alloc-pool.h:303:4: error: 'case_node_pool.pool_allocator<case_node>:\
:m_block_size' may be used uninitialized in this function [-Werror=maybe-uninit\
ialized]
    block = XNEWVEC (char, m_block_size);
    ^
../../gcc/stmt.c:1339:33: note: 'case_node_pool.pool_allocator<case_node>::m_bl\
ock_size' was declared here
       pool_allocator<case_node> case_node_pool ("struct sjlj_case pool",
                                 ^

Andreas.

-- 
Andreas Schwab, schwab@linux-m68k.org
GPG Key fingerprint = 58CA 54C7 6D53 942B 1756  01D3 44D5 214B 8276 4ED5
"And now for something completely different."

^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 01/35] Introduce new type-based pool allocator.
  2015-06-02 10:10         ` Andreas Schwab
@ 2015-06-02 13:57           ` Martin Liška
  2015-06-02 14:00             ` Richard Biener
  0 siblings, 1 reply; 108+ messages in thread
From: Martin Liška @ 2015-06-02 13:57 UTC (permalink / raw)
  To: gcc-patches

[-- Attachment #1: Type: text/plain, Size: 796 bytes --]

On 06/02/2015 11:48 AM, Andreas Schwab wrote:
> In file included from ../../gcc/stmt.c:78:0:
> ../../gcc/alloc-pool.h: In function 'void expand_sjlj_dispatch_table(rtx, vec<t\
> ree_node*>)':
> ../../gcc/alloc-pool.h:303:4: error: 'case_node_pool.pool_allocator<case_node>:\
> :m_block_size' may be used uninitialized in this function [-Werror=maybe-uninit\
> ialized]
>     block = XNEWVEC (char, m_block_size);
>     ^
> ../../gcc/stmt.c:1339:33: note: 'case_node_pool.pool_allocator<case_node>::m_bl\
> ock_size' was declared here
>        pool_allocator<case_node> case_node_pool ("struct sjlj_case pool",
>                                  ^
> 
> Andreas.
> 

Hi.

This patch for the issue which has been tested on x86_64-unknown-linux-pc and
can bootstrap.

Ready for trunk?
Thanks,
Martin

[-- Attachment #2: 0001-Pool-allocator-fallout-fix-uninitialized-class-members.patch --]
[-- Type: text/x-patch, Size: 1471 bytes --]

From 57355c1e271accc3e35dd5df9d5393ee783d765b Mon Sep 17 00:00:00 2001
From: mliska <mliska@suse.cz>
Date: Tue, 2 Jun 2015 13:26:05 +0200
Subject: [PATCH] Pool allocator fallout: fix uninitialized class members.

gcc/ChangeLog:

2015-06-02  Martin Liska  <mliska@suse.cz>

	* alloc-pool.h (pool_allocator::pool_allocator): Set implicit
	values to avoid -Wmaybe-uninitialized errors.
---
 gcc/alloc-pool.h | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/gcc/alloc-pool.h b/gcc/alloc-pool.h
index 96a1342..ddb2a91 100644
--- a/gcc/alloc-pool.h
+++ b/gcc/alloc-pool.h
@@ -159,11 +159,11 @@ template <typename T>
 inline
 pool_allocator<T>::pool_allocator (const char *name, size_t num,
 				   size_t extra_size, bool ignore_type_size):
-  m_name (name), m_elts_per_block (num), m_returned_free_list (NULL),
+  m_name (name), m_id (0), m_elts_per_block (num), m_returned_free_list (NULL),
   m_virgin_free_list (NULL), m_virgin_elts_remaining (0), m_elts_allocated (0),
   m_elts_free (0), m_blocks_allocated (0), m_block_list (NULL),
-  m_ignore_type_size (ignore_type_size), m_extra_size (extra_size),
-  m_initialized (false) {}
+  m_block_size (0), m_ignore_type_size (ignore_type_size),
+  m_extra_size (extra_size), m_initialized (false) {}
 
 /* Initialize a pool allocator.  */
 
@@ -215,7 +215,6 @@ pool_allocator<T>::initialize ()
 
   m_id = last_id;
 #endif
-
 }
 
 /* Free all memory allocated for the given memory pool.  */
-- 
2.1.4


^ permalink raw reply	[flat|nested] 108+ messages in thread

* Re: [PATCH 01/35] Introduce new type-based pool allocator.
  2015-06-02 13:57           ` Martin Liška
@ 2015-06-02 14:00             ` Richard Biener
  0 siblings, 0 replies; 108+ messages in thread
From: Richard Biener @ 2015-06-02 14:00 UTC (permalink / raw)
  To: Martin Liška; +Cc: GCC Patches

On Tue, Jun 2, 2015 at 3:52 PM, Martin Liška <mliska@suse.cz> wrote:
> On 06/02/2015 11:48 AM, Andreas Schwab wrote:
>> In file included from ../../gcc/stmt.c:78:0:
>> ../../gcc/alloc-pool.h: In function 'void expand_sjlj_dispatch_table(rtx, vec<t\
>> ree_node*>)':
>> ../../gcc/alloc-pool.h:303:4: error: 'case_node_pool.pool_allocator<case_node>:\
>> :m_block_size' may be used uninitialized in this function [-Werror=maybe-uninit\
>> ialized]
>>     block = XNEWVEC (char, m_block_size);
>>     ^
>> ../../gcc/stmt.c:1339:33: note: 'case_node_pool.pool_allocator<case_node>::m_bl\
>> ock_size' was declared here
>>        pool_allocator<case_node> case_node_pool ("struct sjlj_case pool",
>>                                  ^
>>
>> Andreas.
>>
>
> Hi.
>
> This patch for the issue which has been tested on x86_64-unknown-linux-pc and
> can bootstrap.
>
> Ready for trunk?

Ok.

Richard.

> Thanks,
> Martin

^ permalink raw reply	[flat|nested] 108+ messages in thread

end of thread, other threads:[~2015-06-02 14:00 UTC | newest]

Thread overview: 108+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2015-05-27 14:09 [PATCH 01/35] Introduce new type-based pool allocator mliska
2015-05-27 14:00 ` [PATCH 12/35] Change use to type-based pool allocator in cselib.c mliska
2015-05-29 13:38   ` Martin Liška
2015-05-27 14:00 ` [PATCH 10/35] Change use to type-based pool allocator in cfg.c mliska
2015-05-27 18:01   ` Jeff Law
2015-05-29 13:34     ` Martin Liška
2015-05-27 14:00 ` [PATCH 06/35] Change use to type-based pool allocator in ira-color.c mliska
2015-05-27 14:00 ` [PATCH 09/35] Change use to type-based pool allocator in c-format.c mliska
2015-05-27 14:16   ` Jakub Jelinek
2015-05-27 18:01   ` Jeff Law
2015-05-29 13:35     ` Martin Liška
2015-05-27 14:00 ` [PATCH 03/35] Change use to type-based pool allocator in lra-lives.c mliska
2015-05-27 17:53   ` Jeff Law
2015-05-29 13:34     ` Martin Liška
2015-05-28  0:48   ` Trevor Saunders
2015-05-27 14:00 ` [PATCH 04/35] Change use to type-based pool allocator in lra.c mliska
2015-05-27 17:55   ` Jeff Law
2015-05-29 13:34     ` Martin Liška
2015-05-27 14:07 ` [PATCH 19/35] Change use to type-based pool allocator in sel-sched-ir.c mliska
2015-05-27 18:12   ` Jeff Law
2015-05-29 13:40     ` Martin Liška
2015-05-27 14:09 ` [PATCH 02/35] Change use to type-based pool allocator in et-forest.c mliska
2015-05-27 17:50   ` Jeff Law
2015-05-29 13:33     ` Martin Liška
2015-05-27 14:15 ` [PATCH 05/35] Change use to type-based pool allocator in ira-color.c mliska
2015-05-27 17:59   ` Jeff Law
2015-05-29 13:34     ` Martin Liška
2015-05-27 14:17 ` [PATCH 35/35] Remove old pool allocator mliska
2015-05-27 19:40   ` Jeff Law
2015-05-29 14:11     ` Martin Liška
2015-05-27 14:17 ` [PATCH 32/35] Change use to type-based pool allocator in ira-build.c mliska
2015-05-27 19:34   ` Jeff Law
2015-05-29 13:44     ` Martin Liška
2015-05-27 14:17 ` [PATCH 34/35] " mliska
2015-05-27 14:17 ` [PATCH 21/35] Change use to type-based pool allocator in regcprop.c mliska
2015-05-27 18:14   ` Jeff Law
2015-05-29 13:40     ` Martin Liška
2015-05-27 14:17 ` [PATCH 23/35] Change use to type-based pool allocator in tree-ssa-pre.c mliska
2015-05-27 18:59   ` Jeff Law
2015-05-29 13:41     ` Martin Liška
2015-05-27 14:17 ` [PATCH 28/35] Change use to type-based pool allocator in ipa-profile.c mliska
2015-05-27 18:18   ` Jeff Law
2015-05-29 13:42     ` Martin Liška
2015-05-27 14:18 ` [PATCH 27/35] Change use to type-based pool allocator in tree-ssa-structalias.c mliska
2015-05-27 18:20   ` Jeff Law
2015-05-29 13:42     ` Martin Liška
2015-05-27 14:19 ` [PATCH 08/35] Change use to type-based pool allocator in asan.c mliska
2015-05-27 18:01   ` Jeff Law
2015-05-27 14:19 ` [PATCH 25/35] Change use to type-based pool allocator in tree-ssa-sccvn.c mliska
2015-05-27 18:16   ` Jeff Law
2015-05-29 13:41     ` Martin Liška
2015-05-27 14:19 ` [PATCH 11/35] Change use to type-based pool allocator in sh.c mliska
2015-05-27 18:03   ` Jeff Law
2015-05-29 13:37     ` Martin Liška
2015-05-27 14:19 ` [PATCH 14/35] Change use to type-based pool allocator in df-scan.c mliska
2015-05-29 13:38   ` Martin Liška
2015-05-27 14:20 ` [PATCH 31/35] Change use to type-based pool allocator in ipa-prop.c and ipa-cp.c mliska
2015-05-29 14:09   ` Martin Liška
2015-05-27 14:20 ` [PATCH 29/35] Change use to type-based pool allocator in ipa-prop.c mliska
2015-05-27 18:22   ` Jeff Law
2015-05-29 13:42     ` Martin Liška
2015-05-27 14:20 ` [PATCH 07/35] Change use to type-based pool allocator in var-tracking.c mliska
2015-05-29 13:34   ` Martin Liška
2015-05-27 14:21 ` [PATCH 20/35] Change use to type-based pool allocator in ira-build.c mliska
2015-05-27 18:15   ` Jeff Law
2015-05-29 13:39     ` Martin Liška
2015-05-27 14:21 ` [PATCH 30/35] Change use to type-based pool allocator in ipa-inline-analysis.c mliska
2015-05-29 14:06   ` Martin Liška
2015-05-27 14:21 ` [PATCH 24/35] Change use to type-based pool allocator in tree-ssa-reassoc.c mliska
2015-05-27 18:15   ` Jeff Law
2015-05-29 13:41     ` Martin Liška
2015-05-27 14:21 ` [PATCH 16/35] Change use to type-based pool allocator in tree-sra.c mliska
2015-05-27 18:11   ` Jeff Law
2015-05-29 13:39     ` Martin Liška
2015-05-27 14:21 ` [PATCH 15/35] Change use to type-based pool allocator in dse.c mliska
2015-05-29 13:38   ` Martin Liška
2015-05-27 14:21 ` [PATCH 26/35] Change use to type-based pool allocator in tree-ssa-strlen.c mliska
2015-05-27 18:17   ` Jeff Law
2015-05-29 13:42   ` Martin Liška
2015-05-27 14:21 ` [PATCH 18/35] Change use to type-based pool allocator in stmt.c mliska
2015-05-27 18:13   ` Jeff Law
2015-05-29 13:39     ` Martin Liška
2015-05-27 14:42 ` [PATCH 22/35] Change use to type-based pool allocator in sched-deps.c mliska
2015-05-27 18:16   ` Jeff Law
2015-05-29 13:40     ` Martin Liška
2015-05-27 14:55 ` [PATCH 17/35] Change use to type-based pool allocator in tree-ssa-math-opts.c mliska
2015-05-27 18:12   ` Jeff Law
2015-05-29 13:39     ` Martin Liška
2015-05-27 14:58 ` [PATCH 33/35] Change use to type-based pool allocator in ira-color.c mliska
2015-05-27 18:24   ` Jeff Law
2015-05-28 11:23   ` Statically-allocated objects with non-trivial ctors (was Re: [PATCH 33/35] Change use to type-based pool allocator in ira-color.c.) David Malcolm
2015-05-28 17:38     ` Jeff Law
2015-05-28 18:30       ` Richard Biener
2015-05-28 18:34         ` Jakub Jelinek
2015-05-28 19:25           ` Martin Liška
2015-05-28 20:42             ` Trevor Saunders
2015-05-29  5:16     ` Trevor Saunders
2015-05-27 15:04 ` [PATCH 13/35] Change use to type-based pool allocator in df-problems.c mliska
2015-05-27 18:05   ` Jeff Law
2015-05-29 13:37     ` Martin Liška
2015-05-27 17:50 ` [PATCH 01/35] Introduce new type-based pool allocator Jeff Law
2015-05-28 13:27   ` Martin Liška
2015-05-28 18:04     ` Jeff Law
2015-05-29 13:33       ` Martin Liška
2015-05-30  5:14         ` Jeff Law
2015-06-02 10:10         ` Andreas Schwab
2015-06-02 13:57           ` Martin Liška
2015-06-02 14:00             ` Richard Biener

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).