public inbox for gdb-patches@sourceware.org
 help / color / mirror / Atom feed
* [PATCH v3 07/12] btrace: Remove struct btrace_thread_info::{begin,end}.
  2017-05-09  7:01 [PATCH v3 00/12] btrace: Turn linked list of function call segments into vector Tim Wiederhake
                   ` (5 preceding siblings ...)
  2017-05-09  7:01 ` [PATCH v3 04/12] btrace: Use function segment index in call iterator Tim Wiederhake
@ 2017-05-09  7:01 ` Tim Wiederhake
  2017-05-10  3:06   ` Simon Marchi
  2017-05-09  7:01 ` [PATCH v3 12/12] btrace: Store function segments as objects Tim Wiederhake
                   ` (4 subsequent siblings)
  11 siblings, 1 reply; 36+ messages in thread
From: Tim Wiederhake @ 2017-05-09  7:01 UTC (permalink / raw)
  To: gdb-patches; +Cc: markus.t.metzger

These are no longer needed and might hold invalid addresses once we change the
vector of function segment pointers into a vector of function segment objects
where a reallocation of the vector changes the address of its elements.

2017-05-09  Tim Wiederhake  <tim.wiederhake@intel.com>

gdb/ChangeLog:

	* btrace.c (ftrace_new_function, ftrace_new_call, ftrace_new_tailcall,
	ftrace_new_return, ftrace_new_switch, ftrace_new_gap,
	ftrace_update_function, ftrace_compute_global_level_offset,
	btrace_compute_ftrace_bts, ftrace_add_pt, btrace_compute_ftrace_pt,
	btrace_stitch_bts, btrace_fetch, btrace_clear, btrace_insn_number,
	btrace_insn_end, btrace_is_empty): Remove references to
	btrace_thread_info::begin and btrace_thread_info::end.
	* btrace.h (struct btrace_thread_info): Remove BEGIN and END.
	* record-btrace.c (record_btrace_start_replaying): Remove reference to
	btrace_thread_info::begin.

---
 gdb/btrace.c        | 194 +++++++++++++++++++++++++---------------------------
 gdb/btrace.h        |   9 ---
 gdb/record-btrace.c |   2 +-
 3 files changed, 94 insertions(+), 111 deletions(-)

diff --git a/gdb/btrace.c b/gdb/btrace.c
index 1bd11f0..1e45c09 100644
--- a/gdb/btrace.c
+++ b/gdb/btrace.c
@@ -212,16 +212,14 @@ ftrace_new_function (struct btrace_thread_info *btinfo,
 		     struct minimal_symbol *mfun,
 		     struct symbol *fun)
 {
-  struct btrace_function *bfun, *prev;
+  struct btrace_function *bfun;
 
-  prev = btinfo->end;
   bfun = XCNEW (struct btrace_function);
 
   bfun->msym = mfun;
   bfun->sym = fun;
-  bfun->flow.prev = prev;
 
-  if (prev == NULL)
+  if (btinfo->functions.empty ())
     {
       /* Start counting at one.  */
       bfun->number = 1;
@@ -229,8 +227,11 @@ ftrace_new_function (struct btrace_thread_info *btinfo,
     }
   else
     {
+      struct btrace_function *prev = btinfo->functions.back ();
+
       gdb_assert (prev->flow.next == NULL);
       prev->flow.next = bfun;
+      bfun->flow.prev = prev;
 
       bfun->number = prev->number + 1;
       bfun->insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
@@ -238,7 +239,7 @@ ftrace_new_function (struct btrace_thread_info *btinfo,
     }
 
   btinfo->functions.push_back (bfun);
-  return btinfo->end = bfun;
+  return bfun;
 }
 
 /* Update the UP field of a function segment.  */
@@ -286,10 +287,11 @@ ftrace_new_call (struct btrace_thread_info *btinfo,
 		 struct minimal_symbol *mfun,
 		 struct symbol *fun)
 {
-  struct btrace_function *caller = btinfo->end;
+  const unsigned int length = btinfo->functions.size ();
   struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);
 
-  bfun->up = caller;
+  if (length != 0)
+    bfun->up = btinfo->functions[length - 1];
   bfun->level += 1;
 
   ftrace_debug (bfun, "new call");
@@ -306,10 +308,11 @@ ftrace_new_tailcall (struct btrace_thread_info *btinfo,
 		     struct minimal_symbol *mfun,
 		     struct symbol *fun)
 {
-  struct btrace_function *caller = btinfo->end;
+  const unsigned int length = btinfo->functions.size ();
   struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);
 
-  bfun->up = caller;
+  if (length != 0)
+    bfun->up = btinfo->functions[length - 1];
   bfun->level += 1;
   bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
 
@@ -385,7 +388,7 @@ ftrace_new_return (struct btrace_thread_info *btinfo,
 		   struct minimal_symbol *mfun,
 		   struct symbol *fun)
 {
-  struct btrace_function *prev = btinfo->end;
+  struct btrace_function *prev = btinfo->functions.back ();
   struct btrace_function *bfun, *caller;
 
   bfun = ftrace_new_function (btinfo, mfun, fun);
@@ -465,7 +468,7 @@ ftrace_new_switch (struct btrace_thread_info *btinfo,
 		   struct minimal_symbol *mfun,
 		   struct symbol *fun)
 {
-  struct btrace_function *prev = btinfo->end;
+  struct btrace_function *prev = btinfo->functions.back ();
   struct btrace_function *bfun;
 
   /* This is an unexplained function switch.  We can't really be sure about the
@@ -487,15 +490,17 @@ ftrace_new_switch (struct btrace_thread_info *btinfo,
 static struct btrace_function *
 ftrace_new_gap (struct btrace_thread_info *btinfo, int errcode)
 {
-  struct btrace_function *prev = btinfo->end;
   struct btrace_function *bfun;
 
-  /* We hijack prev if it was empty.  */
-  if (prev != NULL && prev->errcode == 0
-      && VEC_empty (btrace_insn_s, prev->insn))
-    bfun = prev;
-  else
+  if (btinfo->functions.empty ())
     bfun = ftrace_new_function (btinfo, NULL, NULL);
+  else
+    {
+      /* We hijack the previous function segment if it was empty.  */
+      bfun = btinfo->functions.back ();
+      if (bfun->errcode != 0 || !VEC_empty (btrace_insn_s, bfun->insn))
+	bfun = ftrace_new_function (btinfo, NULL, NULL);
+    }
 
   bfun->errcode = errcode;
 
@@ -516,7 +521,7 @@ ftrace_update_function (struct btrace_thread_info *btinfo, CORE_ADDR pc)
   struct minimal_symbol *mfun;
   struct symbol *fun;
   struct btrace_insn *last;
-  struct btrace_function *bfun = btinfo->end;
+  struct btrace_function *bfun;
 
   /* Try to determine the function we're in.  We use both types of symbols
      to avoid surprises when we sometimes get a full symbol and sometimes
@@ -528,8 +533,13 @@ ftrace_update_function (struct btrace_thread_info *btinfo, CORE_ADDR pc)
   if (fun == NULL && mfun == NULL)
     DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));
 
-  /* If we didn't have a function or if we had a gap before, we create one.  */
-  if (bfun == NULL || bfun->errcode != 0)
+  /* If we didn't have a function, we create one.  */
+  if (btinfo->functions.empty ())
+    return ftrace_new_function (btinfo, mfun, fun);
+
+  /* If we had a gap before, we create a function.  */
+  bfun = btinfo->functions.back ();
+  if (bfun->errcode != 0)
     return ftrace_new_function (btinfo, mfun, fun);
 
   /* Check the last instruction, if we have one.
@@ -685,26 +695,27 @@ ftrace_fixup_level (struct btrace_function *bfun, int adjustment)
 static void
 ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
 {
-  struct btrace_function *bfun, *end;
+  unsigned int i, length;
   int level;
 
   if (btinfo == NULL)
     return;
 
-  bfun = btinfo->begin;
-  if (bfun == NULL)
+  if (btinfo->functions.empty ())
     return;
 
-  /* The last function segment contains the current instruction, which is not
-     really part of the trace.  If it contains just this one instruction, we
-     stop when we reach it; otherwise, we let the below loop run to the end.  */
-  end = btinfo->end;
-  if (VEC_length (btrace_insn_s, end->insn) > 1)
-    end = NULL;
-
   level = INT_MAX;
-  for (; bfun != end; bfun = bfun->flow.next)
-    level = std::min (level, bfun->level);
+  length = btinfo->functions.size ();
+  for (const auto &bfun: btinfo->functions)
+    {
+      /* The last function segment contains the current instruction, which is
+	 not really part of the trace.  If it contains just this one
+	 instruction, we ignore the segment.  */
+      if (bfun->number == length && VEC_length (btrace_insn_s, bfun->insn) == 1)
+	continue;
+
+      level = std::min (level, bfun->level);
+    }
 
   DEBUG_FTRACE ("setting global level offset: %d", -level);
   btinfo->level = -level;
@@ -986,18 +997,19 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
 			   VEC (bfun_s) **gaps)
 {
   struct btrace_thread_info *btinfo;
-  struct btrace_function *begin, *end;
   struct gdbarch *gdbarch;
   unsigned int blk;
   int level;
 
   gdbarch = target_gdbarch ();
   btinfo = &tp->btrace;
-  begin = btinfo->begin;
-  end = btinfo->end;
-  level = begin != NULL ? -btinfo->level : INT_MAX;
   blk = VEC_length (btrace_block_s, btrace->blocks);
 
+  if (btinfo->functions.empty ())
+    level = INT_MAX;
+  else
+    level = -btinfo->level;
+
   while (blk != 0)
     {
       btrace_block_s *block;
@@ -1010,6 +1022,7 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
 
       for (;;)
 	{
+	  struct btrace_function *bfun;
 	  struct btrace_insn insn;
 	  int size;
 
@@ -1017,27 +1030,23 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
 	  if (block->end < pc)
 	    {
 	      /* Indicate the gap in the trace.  */
-	      end = ftrace_new_gap (btinfo, BDE_BTS_OVERFLOW);
-	      if (begin == NULL)
-		begin = end;
+	      bfun = ftrace_new_gap (btinfo, BDE_BTS_OVERFLOW);
 
-	      VEC_safe_push (bfun_s, *gaps, end);
+	      VEC_safe_push (bfun_s, *gaps, bfun);
 
 	      warning (_("Recorded trace may be corrupted at instruction "
-			 "%u (pc = %s)."), end->insn_offset - 1,
+			 "%u (pc = %s)."), bfun->insn_offset - 1,
 		       core_addr_to_string_nz (pc));
 
 	      break;
 	    }
 
-	  end = ftrace_update_function (btinfo, pc);
-	  if (begin == NULL)
-	    begin = end;
+	  bfun = ftrace_update_function (btinfo, pc);
 
 	  /* Maintain the function level offset.
 	     For all but the last block, we do it here.  */
 	  if (blk != 0)
-	    level = std::min (level, end->level);
+	    level = std::min (level, bfun->level);
 
 	  size = 0;
 	  TRY
@@ -1054,7 +1063,7 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
 	  insn.iclass = ftrace_classify_insn (gdbarch, pc);
 	  insn.flags = 0;
 
-	  ftrace_update_insns (end, &insn);
+	  ftrace_update_insns (bfun, &insn);
 
 	  /* We're done once we pushed the instruction at the end.  */
 	  if (block->end == pc)
@@ -1065,12 +1074,12 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
 	    {
 	      /* Indicate the gap in the trace.  We just added INSN so we're
 		 not at the beginning.  */
-	      end = ftrace_new_gap (btinfo, BDE_BTS_INSN_SIZE);
+	      bfun = ftrace_new_gap (btinfo, BDE_BTS_INSN_SIZE);
 
-	      VEC_safe_push (bfun_s, *gaps, end);
+	      VEC_safe_push (bfun_s, *gaps, bfun);
 
 	      warning (_("Recorded trace may be incomplete at instruction %u "
-			 "(pc = %s)."), end->insn_offset - 1,
+			 "(pc = %s)."), bfun->insn_offset - 1,
 		       core_addr_to_string_nz (pc));
 
 	      break;
@@ -1085,13 +1094,10 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
 	     and is not really part of the execution history, it shouldn't
 	     affect the level.  */
 	  if (blk == 0)
-	    level = std::min (level, end->level);
+	    level = std::min (level, bfun->level);
 	}
     }
 
-  btinfo->begin = begin;
-  btinfo->end = end;
-
   /* LEVEL is the minimal function level of all btrace function segments.
      Define the global level offset to -LEVEL so all function levels are
      normalized to start at zero.  */
@@ -1148,16 +1154,13 @@ pt_btrace_insn (const struct pt_insn &insn)
 static void
 ftrace_add_pt (struct btrace_thread_info *btinfo,
 	       struct pt_insn_decoder *decoder,
-	       struct btrace_function **pbegin,
-	       struct btrace_function **pend, int *plevel,
+	       int *plevel,
 	       VEC (bfun_s) **gaps)
 {
-  struct btrace_function *begin, *end, *upd;
+  struct btrace_function *bfun;
   uint64_t offset;
   int errcode;
 
-  begin = *pbegin;
-  end = *pend;
   for (;;)
     {
       struct pt_insn insn;
@@ -1178,7 +1181,7 @@ ftrace_add_pt (struct btrace_thread_info *btinfo,
 	    break;
 
 	  /* Look for gaps in the trace - unless we're at the beginning.  */
-	  if (begin != NULL)
+	  if (!btinfo->functions.empty ())
 	    {
 	      /* Tracing is disabled and re-enabled each time we enter the
 		 kernel.  Most times, we continue from the same instruction we
@@ -1187,64 +1190,53 @@ ftrace_add_pt (struct btrace_thread_info *btinfo,
 		 from some other instruction.  Indicate this as a trace gap.  */
 	      if (insn.enabled)
 		{
-		  *pend = end = ftrace_new_gap (btinfo, BDE_PT_DISABLED);
+		  bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED);
 
-		  VEC_safe_push (bfun_s, *gaps, end);
+		  VEC_safe_push (bfun_s, *gaps, bfun);
 
 		  pt_insn_get_offset (decoder, &offset);
 
 		  warning (_("Non-contiguous trace at instruction %u (offset "
 			     "= 0x%" PRIx64 ", pc = 0x%" PRIx64 ")."),
-			   end->insn_offset - 1, offset, insn.ip);
+			   bfun->insn_offset - 1, offset, insn.ip);
 		}
 	    }
 
 	  /* Indicate trace overflows.  */
 	  if (insn.resynced)
 	    {
-	      *pend = end = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW);
-	      if (begin == NULL)
-		*pbegin = begin = end;
+	      bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW);
 
-	      VEC_safe_push (bfun_s, *gaps, end);
+	      VEC_safe_push (bfun_s, *gaps, bfun);
 
 	      pt_insn_get_offset (decoder, &offset);
 
 	      warning (_("Overflow at instruction %u (offset = 0x%" PRIx64
-			 ", pc = 0x%" PRIx64 ")."), end->insn_offset - 1,
+			 ", pc = 0x%" PRIx64 ")."), bfun->insn_offset - 1,
 		       offset, insn.ip);
 	    }
 
-	  upd = ftrace_update_function (btinfo, insn.ip);
-	  if (upd != end)
-	    {
-	      *pend = end = upd;
-
-	      if (begin == NULL)
-		*pbegin = begin = upd;
-	    }
+	  bfun = ftrace_update_function (btinfo, insn.ip);
 
 	  /* Maintain the function level offset.  */
-	  *plevel = std::min (*plevel, end->level);
+	  *plevel = std::min (*plevel, bfun->level);
 
 	  btrace_insn btinsn = pt_btrace_insn (insn);
-	  ftrace_update_insns (end, &btinsn);
+	  ftrace_update_insns (bfun, &btinsn);
 	}
 
       if (errcode == -pte_eos)
 	break;
 
       /* Indicate the gap in the trace.  */
-      *pend = end = ftrace_new_gap (btinfo, errcode);
-      if (begin == NULL)
-	*pbegin = begin = end;
+      bfun = ftrace_new_gap (btinfo, errcode);
 
-      VEC_safe_push (bfun_s, *gaps, end);
+      VEC_safe_push (bfun_s, *gaps, bfun);
 
       pt_insn_get_offset (decoder, &offset);
 
       warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
-		 ", pc = 0x%" PRIx64 "): %s."), errcode, end->insn_offset - 1,
+		 ", pc = 0x%" PRIx64 "): %s."), errcode, bfun->insn_offset - 1,
 	       offset, insn.ip, pt_errstr (pt_errcode (errcode)));
     }
 }
@@ -1326,7 +1318,10 @@ btrace_compute_ftrace_pt (struct thread_info *tp,
     return;
 
   btinfo = &tp->btrace;
-  level = btinfo->begin != NULL ? -btinfo->level : INT_MAX;
+  if (btinfo->functions.empty ())
+    level = INT_MAX;
+  else
+    level = -btinfo->level;
 
   pt_config_init(&config);
   config.begin = btrace->data;
@@ -1359,17 +1354,18 @@ btrace_compute_ftrace_pt (struct thread_info *tp,
 	error (_("Failed to configure the Intel Processor Trace decoder: "
 		 "%s."), pt_errstr (pt_errcode (errcode)));
 
-      ftrace_add_pt (btinfo, decoder, &btinfo->begin, &btinfo->end, &level,
-		     gaps);
+      ftrace_add_pt (btinfo, decoder, &level, gaps);
     }
   CATCH (error, RETURN_MASK_ALL)
     {
       /* Indicate a gap in the trace if we quit trace processing.  */
-      if (error.reason == RETURN_QUIT && btinfo->end != NULL)
+      if (error.reason == RETURN_QUIT && !btinfo->functions.empty ())
 	{
-	  btinfo->end = ftrace_new_gap (btinfo, BDE_PT_USER_QUIT);
+	  struct btrace_function *bfun;
+
+	  bfun = ftrace_new_gap (btinfo, BDE_PT_USER_QUIT);
 
-	  VEC_safe_push (bfun_s, *gaps, btinfo->end);
+	  VEC_safe_push (bfun_s, *gaps, bfun);
 	}
 
       btrace_finalize_ftrace_pt (decoder, tp, level);
@@ -1596,10 +1592,11 @@ btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
   btrace_block_s *first_new_block;
 
   btinfo = &tp->btrace;
-  last_bfun = btinfo->end;
-  gdb_assert (last_bfun != NULL);
+  gdb_assert (!btinfo->functions.empty ());
   gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));
 
+  last_bfun = btinfo->functions.back ();
+
   /* If the existing trace ends with a gap, we just glue the traces
      together.  We need to drop the last (i.e. chronologically first) block
      of the new trace,  though, since we can't fill in the start address.*/
@@ -1664,7 +1661,7 @@ btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
      of just that one instruction.  If we remove it, we might turn the now
      empty btrace function segment into a gap.  But we don't want gaps at
      the beginning.  To avoid this, we remove the entire old trace.  */
-  if (last_bfun == btinfo->begin && VEC_empty (btrace_insn_s, last_bfun->insn))
+  if (last_bfun->number == 1 && VEC_empty (btrace_insn_s, last_bfun->insn))
     btrace_clear (tp);
 
   return 0;
@@ -1827,7 +1824,7 @@ btrace_fetch (struct thread_info *tp)
   make_cleanup_btrace_data (&btrace);
 
   /* Let's first try to extend the trace we already have.  */
-  if (btinfo->end != NULL)
+  if (!btinfo->functions.empty ())
     {
       errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
       if (errcode == 0)
@@ -1896,8 +1893,6 @@ btrace_clear (struct thread_info *tp)
     }
 
   btinfo->functions.clear ();
-  btinfo->begin = NULL;
-  btinfo->end = NULL;
   btinfo->ngaps = 0;
 
   /* Must clear the maint data before - it depends on BTINFO->DATA.  */
@@ -2286,10 +2281,7 @@ void
 btrace_insn_begin (struct btrace_insn_iterator *it,
 		   const struct btrace_thread_info *btinfo)
 {
-  const struct btrace_function *bfun;
-
-  bfun = btinfo->begin;
-  if (bfun == NULL)
+  if (btinfo->functions.empty ())
     error (_("No trace."));
 
   it->btinfo = btinfo;
@@ -2306,10 +2298,10 @@ btrace_insn_end (struct btrace_insn_iterator *it,
   const struct btrace_function *bfun;
   unsigned int length;
 
-  bfun = btinfo->end;
-  if (bfun == NULL)
+  if (btinfo->functions.empty ())
     error (_("No trace."));
 
+  bfun = btinfo->functions.back ();
   length = VEC_length (btrace_insn_s, bfun->insn);
 
   /* The last function may either be a gap or it contains the current
@@ -2752,7 +2744,7 @@ btrace_is_empty (struct thread_info *tp)
 
   btinfo = &tp->btrace;
 
-  if (btinfo->begin == NULL)
+  if (btinfo->functions.empty ())
     return 1;
 
   btrace_insn_begin (&begin, btinfo);
diff --git a/gdb/btrace.h b/gdb/btrace.h
index ca79667..b3dbe82 100644
--- a/gdb/btrace.h
+++ b/gdb/btrace.h
@@ -331,15 +331,6 @@ struct btrace_thread_info
   /* The raw branch trace data for the below branch trace.  */
   struct btrace_data data;
 
-  /* The current branch trace for this thread (both inclusive).
-
-     The last instruction of END is the current instruction, which is not
-     part of the execution history.
-     Both will be NULL if there is no branch trace available.  If there is
-     branch trace available, both will be non-NULL.  */
-  struct btrace_function *begin;
-  struct btrace_function *end;
-
   /* Vector of pointer to decoded function segments.  These are in execution
      order with the first element == BEGIN and the last element == END.  */
   std::vector<btrace_function *> functions;
diff --git a/gdb/record-btrace.c b/gdb/record-btrace.c
index ec940f6..5c230e7 100644
--- a/gdb/record-btrace.c
+++ b/gdb/record-btrace.c
@@ -1908,7 +1908,7 @@ record_btrace_start_replaying (struct thread_info *tp)
   replay = NULL;
 
   /* We can't start replaying without trace.  */
-  if (btinfo->begin == NULL)
+  if (btinfo->functions.empty ())
     return NULL;
 
   /* GDB stores the current frame_id when stepping in order to detects steps
-- 
2.7.4

^ permalink raw reply	[flat|nested] 36+ messages in thread

* [PATCH v3 06/12] btrace: Remove constant arguments.
  2017-05-09  7:01 [PATCH v3 00/12] btrace: Turn linked list of function call segments into vector Tim Wiederhake
                   ` (10 preceding siblings ...)
  2017-05-09  7:01 ` [PATCH v3 09/12] btrace: Remove struct btrace_thread_info::flow Tim Wiederhake
@ 2017-05-09  7:01 ` Tim Wiederhake
  2017-05-10  2:45   ` Simon Marchi
  11 siblings, 1 reply; 36+ messages in thread
From: Tim Wiederhake @ 2017-05-09  7:01 UTC (permalink / raw)
  To: gdb-patches; +Cc: markus.t.metzger

2017-05-09  Tim Wiederhake  <tim.wiederhake@intel.com>

gdb/ChangeLog:

	* btrace.c (ftrace_new_function, ftrace_new_call, ftrace_new_tailcall,
	ftrace_new_return, ftrace_new_switch, ftrace_new_gap,
	ftrace_update_function): Remove arguments that implicitly were always
	BTINFO->END.
	(btrace_compute_ftrace_bts, ftrace_add_pt, btrace_compute_ftrace_pt):
	Don't pass BTINFO->END.

---
 gdb/btrace.c | 85 ++++++++++++++++++++++++++++--------------------------------
 1 file changed, 40 insertions(+), 45 deletions(-)

diff --git a/gdb/btrace.c b/gdb/btrace.c
index cb30dcf..1bd11f0 100644
--- a/gdb/btrace.c
+++ b/gdb/btrace.c
@@ -202,19 +202,19 @@ ftrace_function_switched (const struct btrace_function *bfun,
   return 0;
 }
 
-/* Allocate and initialize a new branch trace function segment.
+/* Allocate and initialize a new branch trace function segment at the end of
+   the trace.
    BTINFO is the branch trace information for the current thread.
-   PREV is the chronologically preceding function segment.
    MFUN and FUN are the symbol information we have for this function.  */
 
 static struct btrace_function *
 ftrace_new_function (struct btrace_thread_info *btinfo,
-		     struct btrace_function *prev,
 		     struct minimal_symbol *mfun,
 		     struct symbol *fun)
 {
-  struct btrace_function *bfun;
+  struct btrace_function *bfun, *prev;
 
+  prev = btinfo->end;
   bfun = XCNEW (struct btrace_function);
 
   bfun->msym = mfun;
@@ -238,7 +238,7 @@ ftrace_new_function (struct btrace_thread_info *btinfo,
     }
 
   btinfo->functions.push_back (bfun);
-  return bfun;
+  return btinfo->end = bfun;
 }
 
 /* Update the UP field of a function segment.  */
@@ -277,20 +277,18 @@ ftrace_fixup_caller (struct btrace_function *bfun,
     ftrace_update_caller (next, caller, flags);
 }
 
-/* Add a new function segment for a call.
+/* Add a new function segment for a call at the end of the trace.
    BTINFO is the branch trace information for the current thread.
-   CALLER is the chronologically preceding function segment.
    MFUN and FUN are the symbol information we have for this function.  */
 
 static struct btrace_function *
 ftrace_new_call (struct btrace_thread_info *btinfo,
-		 struct btrace_function *caller,
 		 struct minimal_symbol *mfun,
 		 struct symbol *fun)
 {
-  struct btrace_function *bfun;
+  struct btrace_function *caller = btinfo->end;
+  struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);
 
-  bfun = ftrace_new_function (btinfo, caller, mfun, fun);
   bfun->up = caller;
   bfun->level += 1;
 
@@ -299,20 +297,18 @@ ftrace_new_call (struct btrace_thread_info *btinfo,
   return bfun;
 }
 
-/* Add a new function segment for a tail call.
+/* Add a new function segment for a tail call at the end of the trace.
    BTINFO is the branch trace information for the current thread.
-   CALLER is the chronologically preceding function segment.
    MFUN and FUN are the symbol information we have for this function.  */
 
 static struct btrace_function *
 ftrace_new_tailcall (struct btrace_thread_info *btinfo,
-		     struct btrace_function *caller,
 		     struct minimal_symbol *mfun,
 		     struct symbol *fun)
 {
-  struct btrace_function *bfun;
+  struct btrace_function *caller = btinfo->end;
+  struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);
 
-  bfun = ftrace_new_function (btinfo, caller, mfun, fun);
   bfun->up = caller;
   bfun->level += 1;
   bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
@@ -379,20 +375,20 @@ ftrace_find_call (struct btrace_function *bfun)
   return bfun;
 }
 
-/* Add a continuation segment for a function into which we return.
+/* Add a continuation segment for a function into which we return at the end of
+   the trace.
    BTINFO is the branch trace information for the current thread.
-   PREV is the chronologically preceding function segment.
    MFUN and FUN are the symbol information we have for this function.  */
 
 static struct btrace_function *
 ftrace_new_return (struct btrace_thread_info *btinfo,
-		   struct btrace_function *prev,
 		   struct minimal_symbol *mfun,
 		   struct symbol *fun)
 {
+  struct btrace_function *prev = btinfo->end;
   struct btrace_function *bfun, *caller;
 
-  bfun = ftrace_new_function (btinfo, prev, mfun, fun);
+  bfun = ftrace_new_function (btinfo, mfun, fun);
 
   /* It is important to start at PREV's caller.  Otherwise, we might find
      PREV itself, if PREV is a recursive function.  */
@@ -460,22 +456,21 @@ ftrace_new_return (struct btrace_thread_info *btinfo,
   return bfun;
 }
 
-/* Add a new function segment for a function switch.
+/* Add a new function segment for a function switch at the end of the trace.
    BTINFO is the branch trace information for the current thread.
-   PREV is the chronologically preceding function segment.
    MFUN and FUN are the symbol information we have for this function.  */
 
 static struct btrace_function *
 ftrace_new_switch (struct btrace_thread_info *btinfo,
-		   struct btrace_function *prev,
 		   struct minimal_symbol *mfun,
 		   struct symbol *fun)
 {
+  struct btrace_function *prev = btinfo->end;
   struct btrace_function *bfun;
 
   /* This is an unexplained function switch.  We can't really be sure about the
      call stack, yet the best I can think of right now is to preserve it.  */
-  bfun = ftrace_new_function (btinfo, prev, mfun, fun);
+  bfun = ftrace_new_function (btinfo, mfun, fun);
   bfun->up = prev->up;
   bfun->flags = prev->flags;
 
@@ -484,15 +479,15 @@ ftrace_new_switch (struct btrace_thread_info *btinfo,
   return bfun;
 }
 
-/* Add a new function segment for a gap in the trace due to a decode error.
+/* Add a new function segment for a gap in the trace due to a decode error at
+   the end of the trace.
    BTINFO is the branch trace information for the current thread.
-   PREV is the chronologically preceding function segment.
    ERRCODE is the format-specific error code.  */
 
 static struct btrace_function *
-ftrace_new_gap (struct btrace_thread_info *btinfo,
-		struct btrace_function *prev, int errcode)
+ftrace_new_gap (struct btrace_thread_info *btinfo, int errcode)
 {
+  struct btrace_function *prev = btinfo->end;
   struct btrace_function *bfun;
 
   /* We hijack prev if it was empty.  */
@@ -500,7 +495,7 @@ ftrace_new_gap (struct btrace_thread_info *btinfo,
       && VEC_empty (btrace_insn_s, prev->insn))
     bfun = prev;
   else
-    bfun = ftrace_new_function (btinfo, prev, NULL, NULL);
+    bfun = ftrace_new_function (btinfo, NULL, NULL);
 
   bfun->errcode = errcode;
 
@@ -515,13 +510,13 @@ ftrace_new_gap (struct btrace_thread_info *btinfo,
    Return the chronologically latest function segment, never NULL.  */
 
 static struct btrace_function *
-ftrace_update_function (struct btrace_thread_info *btinfo,
-			struct btrace_function *bfun, CORE_ADDR pc)
+ftrace_update_function (struct btrace_thread_info *btinfo, CORE_ADDR pc)
 {
   struct bound_minimal_symbol bmfun;
   struct minimal_symbol *mfun;
   struct symbol *fun;
   struct btrace_insn *last;
+  struct btrace_function *bfun = btinfo->end;
 
   /* Try to determine the function we're in.  We use both types of symbols
      to avoid surprises when we sometimes get a full symbol and sometimes
@@ -535,7 +530,7 @@ ftrace_update_function (struct btrace_thread_info *btinfo,
 
   /* If we didn't have a function or if we had a gap before, we create one.  */
   if (bfun == NULL || bfun->errcode != 0)
-    return ftrace_new_function (btinfo, bfun, mfun, fun);
+    return ftrace_new_function (btinfo, mfun, fun);
 
   /* Check the last instruction, if we have one.
      We do this check first, since it allows us to fill in the call stack
@@ -563,9 +558,9 @@ ftrace_update_function (struct btrace_thread_info *btinfo,
 	       different frame id's.  This will confuse stepping.  */
 	    fname = ftrace_print_function_name (bfun);
 	    if (strcmp (fname, "_dl_runtime_resolve") == 0)
-	      return ftrace_new_tailcall (btinfo, bfun, mfun, fun);
+	      return ftrace_new_tailcall (btinfo, mfun, fun);
 
-	    return ftrace_new_return (btinfo, bfun, mfun, fun);
+	    return ftrace_new_return (btinfo, mfun, fun);
 	  }
 
 	case BTRACE_INSN_CALL:
@@ -573,7 +568,7 @@ ftrace_update_function (struct btrace_thread_info *btinfo,
 	  if (last->pc + last->size == pc)
 	    break;
 
-	  return ftrace_new_call (btinfo, bfun, mfun, fun);
+	  return ftrace_new_call (btinfo, mfun, fun);
 
 	case BTRACE_INSN_JUMP:
 	  {
@@ -583,13 +578,13 @@ ftrace_update_function (struct btrace_thread_info *btinfo,
 
 	    /* A jump to the start of a function is (typically) a tail call.  */
 	    if (start == pc)
-	      return ftrace_new_tailcall (btinfo, bfun, mfun, fun);
+	      return ftrace_new_tailcall (btinfo, mfun, fun);
 
 	    /* If we can't determine the function for PC, we treat a jump at
 	       the end of the block as tail call if we're switching functions
 	       and as an intra-function branch if we don't.  */
 	    if (start == 0 && ftrace_function_switched (bfun, mfun, fun))
-	      return ftrace_new_tailcall (btinfo, bfun, mfun, fun);
+	      return ftrace_new_tailcall (btinfo, mfun, fun);
 
 	    break;
 	  }
@@ -604,7 +599,7 @@ ftrace_update_function (struct btrace_thread_info *btinfo,
 		    ftrace_print_function_name (bfun),
 		    ftrace_print_filename (bfun));
 
-      return ftrace_new_switch (btinfo, bfun, mfun, fun);
+      return ftrace_new_switch (btinfo, mfun, fun);
     }
 
   return bfun;
@@ -1022,7 +1017,7 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
 	  if (block->end < pc)
 	    {
 	      /* Indicate the gap in the trace.  */
-	      end = ftrace_new_gap (btinfo, end, BDE_BTS_OVERFLOW);
+	      end = ftrace_new_gap (btinfo, BDE_BTS_OVERFLOW);
 	      if (begin == NULL)
 		begin = end;
 
@@ -1035,7 +1030,7 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
 	      break;
 	    }
 
-	  end = ftrace_update_function (btinfo, end, pc);
+	  end = ftrace_update_function (btinfo, pc);
 	  if (begin == NULL)
 	    begin = end;
 
@@ -1070,7 +1065,7 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
 	    {
 	      /* Indicate the gap in the trace.  We just added INSN so we're
 		 not at the beginning.  */
-	      end = ftrace_new_gap (btinfo, end, BDE_BTS_INSN_SIZE);
+	      end = ftrace_new_gap (btinfo, BDE_BTS_INSN_SIZE);
 
 	      VEC_safe_push (bfun_s, *gaps, end);
 
@@ -1192,7 +1187,7 @@ ftrace_add_pt (struct btrace_thread_info *btinfo,
 		 from some other instruction.  Indicate this as a trace gap.  */
 	      if (insn.enabled)
 		{
-		  *pend = end = ftrace_new_gap (btinfo, end, BDE_PT_DISABLED);
+		  *pend = end = ftrace_new_gap (btinfo, BDE_PT_DISABLED);
 
 		  VEC_safe_push (bfun_s, *gaps, end);
 
@@ -1207,7 +1202,7 @@ ftrace_add_pt (struct btrace_thread_info *btinfo,
 	  /* Indicate trace overflows.  */
 	  if (insn.resynced)
 	    {
-	      *pend = end = ftrace_new_gap (btinfo, end, BDE_PT_OVERFLOW);
+	      *pend = end = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW);
 	      if (begin == NULL)
 		*pbegin = begin = end;
 
@@ -1220,7 +1215,7 @@ ftrace_add_pt (struct btrace_thread_info *btinfo,
 		       offset, insn.ip);
 	    }
 
-	  upd = ftrace_update_function (btinfo, end, insn.ip);
+	  upd = ftrace_update_function (btinfo, insn.ip);
 	  if (upd != end)
 	    {
 	      *pend = end = upd;
@@ -1240,7 +1235,7 @@ ftrace_add_pt (struct btrace_thread_info *btinfo,
 	break;
 
       /* Indicate the gap in the trace.  */
-      *pend = end = ftrace_new_gap (btinfo, end, errcode);
+      *pend = end = ftrace_new_gap (btinfo, errcode);
       if (begin == NULL)
 	*pbegin = begin = end;
 
@@ -1372,7 +1367,7 @@ btrace_compute_ftrace_pt (struct thread_info *tp,
       /* Indicate a gap in the trace if we quit trace processing.  */
       if (error.reason == RETURN_QUIT && btinfo->end != NULL)
 	{
-	  btinfo->end = ftrace_new_gap (btinfo, btinfo->end, BDE_PT_USER_QUIT);
+	  btinfo->end = ftrace_new_gap (btinfo, BDE_PT_USER_QUIT);
 
 	  VEC_safe_push (bfun_s, *gaps, btinfo->end);
 	}
-- 
2.7.4

^ permalink raw reply	[flat|nested] 36+ messages in thread

* [PATCH v3 08/12] btrace: Replace struct btrace_thread_info::up.
  2017-05-09  7:01 [PATCH v3 00/12] btrace: Turn linked list of function call segments into vector Tim Wiederhake
  2017-05-09  7:01 ` [PATCH v3 03/12] btrace: Add btinfo to instruction interator Tim Wiederhake
  2017-05-09  7:01 ` [PATCH v3 10/12] btrace: Replace struct btrace_thread_info::segment Tim Wiederhake
@ 2017-05-09  7:01 ` Tim Wiederhake
  2017-05-10  3:26   ` Simon Marchi
  2017-05-09  7:01 ` [PATCH v3 11/12] btrace: Remove bfun_s vector Tim Wiederhake
                   ` (8 subsequent siblings)
  11 siblings, 1 reply; 36+ messages in thread
From: Tim Wiederhake @ 2017-05-09  7:01 UTC (permalink / raw)
  To: gdb-patches; +Cc: markus.t.metzger

This used to hold a function segment pointer.  Change it to hold an index into
the vector of function segments instead.

2017-05-09  Tim Wiederhake  <tim.wiederhake@intel.com>

gdb/ChangeLog:

	* btrace.c (ftrace_find_call_by_number): New function.
	(ftrace_update_caller, ftrace_new_call, ftrace_new_tailcall,
	ftrace_get_caller, ftrace_find_call, ftrace_new_return,
	ftrace_match_backtrace, ftrace_connect_bfun, ftrace_connect_backtrace,
	ftrace_bridge_gap, btrace_bridge_gaps): Use btrace_function::up as an
	index.
	* btrace.h (struct btrace_function): Turn UP into an index.
	* python/py-record-btrace.c (btpy_call_up): Use btrace_function::up
	as an index.
	* record-btrace.c (record_btrace_frame_unwind_stop_reason,
	record_btrace_frame_prev_register, record_btrace_frame_sniffer,
	record_btrace_tailcall_frame_sniffer): Same.

---
 gdb/btrace.c                  | 141 ++++++++++++++++++++++++++----------------
 gdb/btrace.h                  |   6 +-
 gdb/python/py-record-btrace.c |   4 +-
 gdb/record-btrace.c           |  18 +++---
 4 files changed, 106 insertions(+), 63 deletions(-)

diff --git a/gdb/btrace.c b/gdb/btrace.c
index 1e45c09..31837aa 100644
--- a/gdb/btrace.c
+++ b/gdb/btrace.c
@@ -156,6 +156,19 @@ ftrace_call_num_insn (const struct btrace_function* bfun)
   return VEC_length (btrace_insn_s, bfun->insn);
 }
 
+/* Return the function segment with the given NUMBER or NULL if no such segment
+   exists.  BTINFO is the branch trace information for the current thread.  */
+
+static struct btrace_function *
+ftrace_find_call_by_number (const struct btrace_thread_info *btinfo,
+			    unsigned int number)
+{
+  if (number == 0 || number > btinfo->functions.size ())
+    return NULL;
+
+  return btinfo->functions[number - 1];
+}
+
 /* Return non-zero if BFUN does not match MFUN and FUN,
    return zero otherwise.  */
 
@@ -249,10 +262,10 @@ ftrace_update_caller (struct btrace_function *bfun,
 		      struct btrace_function *caller,
 		      enum btrace_function_flag flags)
 {
-  if (bfun->up != NULL)
+  if (bfun->up != 0)
     ftrace_debug (bfun, "updating caller");
 
-  bfun->up = caller;
+  bfun->up = caller->number;
   bfun->flags = flags;
 
   ftrace_debug (bfun, "set caller");
@@ -290,8 +303,7 @@ ftrace_new_call (struct btrace_thread_info *btinfo,
   const unsigned int length = btinfo->functions.size ();
   struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);
 
-  if (length != 0)
-    bfun->up = btinfo->functions[length - 1];
+  bfun->up = length;
   bfun->level += 1;
 
   ftrace_debug (bfun, "new call");
@@ -311,8 +323,7 @@ ftrace_new_tailcall (struct btrace_thread_info *btinfo,
   const unsigned int length = btinfo->functions.size ();
   struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);
 
-  if (length != 0)
-    bfun->up = btinfo->functions[length - 1];
+  bfun->up = length;
   bfun->level += 1;
   bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
 
@@ -322,26 +333,30 @@ ftrace_new_tailcall (struct btrace_thread_info *btinfo,
 }
 
 /* Return the caller of BFUN or NULL if there is none.  This function skips
-   tail calls in the call chain.  */
+   tail calls in the call chain.  BTINFO is the branch trace information for
+   the current thread.  */
 static struct btrace_function *
-ftrace_get_caller (struct btrace_function *bfun)
+ftrace_get_caller (struct btrace_thread_info *btinfo,
+		   struct btrace_function *bfun)
 {
-  for (; bfun != NULL; bfun = bfun->up)
+  for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
     if ((bfun->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
-      return bfun->up;
+      return ftrace_find_call_by_number (btinfo, bfun->up);
 
   return NULL;
 }
 
 /* Find the innermost caller in the back trace of BFUN with MFUN/FUN
-   symbol information.  */
+   symbol information.  BTINFO is the branch trace information for the current
+   thread.  */
 
 static struct btrace_function *
-ftrace_find_caller (struct btrace_function *bfun,
+ftrace_find_caller (struct btrace_thread_info *btinfo,
+		    struct btrace_function *bfun,
 		    struct minimal_symbol *mfun,
 		    struct symbol *fun)
 {
-  for (; bfun != NULL; bfun = bfun->up)
+  for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
     {
       /* Skip functions with incompatible symbol information.  */
       if (ftrace_function_switched (bfun, mfun, fun))
@@ -356,12 +371,14 @@ ftrace_find_caller (struct btrace_function *bfun,
 
 /* Find the innermost caller in the back trace of BFUN, skipping all
    function segments that do not end with a call instruction (e.g.
-   tail calls ending with a jump).  */
+   tail calls ending with a jump).  BTINFO is the branch trace information for
+   the current thread.  */
 
 static struct btrace_function *
-ftrace_find_call (struct btrace_function *bfun)
+ftrace_find_call (struct btrace_thread_info *btinfo,
+		  struct btrace_function *bfun)
 {
-  for (; bfun != NULL; bfun = bfun->up)
+  for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
     {
       struct btrace_insn *last;
 
@@ -395,7 +412,8 @@ ftrace_new_return (struct btrace_thread_info *btinfo,
 
   /* It is important to start at PREV's caller.  Otherwise, we might find
      PREV itself, if PREV is a recursive function.  */
-  caller = ftrace_find_caller (prev->up, mfun, fun);
+  caller = ftrace_find_call_by_number (btinfo, prev->up);
+  caller = ftrace_find_caller (btinfo, caller, mfun, fun);
   if (caller != NULL)
     {
       /* The caller of PREV is the preceding btrace function segment in this
@@ -420,7 +438,8 @@ ftrace_new_return (struct btrace_thread_info *btinfo,
 	 wrong or that the call is simply not included in the trace.  */
 
       /* Let's search for some actual call.  */
-      caller = ftrace_find_call (prev->up);
+      caller = ftrace_find_call_by_number (btinfo, prev->up);
+      caller = ftrace_find_call (btinfo, caller);
       if (caller == NULL)
 	{
 	  /* There is no call in PREV's back trace.  We assume that the
@@ -428,8 +447,8 @@ ftrace_new_return (struct btrace_thread_info *btinfo,
 
 	  /* Let's find the topmost function and add a new caller for it.
 	     This should handle a series of initial tail calls.  */
-	  while (prev->up != NULL)
-	    prev = prev->up;
+	  while (prev->up != 0)
+	    prev = ftrace_find_call_by_number (btinfo, prev->up);
 
 	  bfun->level = prev->level - 1;
 
@@ -449,7 +468,7 @@ ftrace_new_return (struct btrace_thread_info *btinfo,
 	     on the same level as they are.
 	     This should handle things like schedule () correctly where we're
 	     switching contexts.  */
-	  prev->up = bfun;
+	  prev->up = bfun->number;
 	  prev->flags = BFUN_UP_LINKS_TO_RET;
 
 	  ftrace_debug (bfun, "new return - unknown caller");
@@ -654,10 +673,11 @@ ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
 
 /* Try to match the back trace at LHS to the back trace at RHS.  Returns the
    number of matching function segments or zero if the back traces do not
-   match.  */
+   match.  BTINFO is the branch trace information for the current thread.  */
 
 static int
-ftrace_match_backtrace (struct btrace_function *lhs,
+ftrace_match_backtrace (struct btrace_thread_info *btinfo,
+			struct btrace_function *lhs,
 			struct btrace_function *rhs)
 {
   int matches;
@@ -667,8 +687,8 @@ ftrace_match_backtrace (struct btrace_function *lhs,
       if (ftrace_function_switched (lhs, rhs->msym, rhs->sym))
 	return 0;
 
-      lhs = ftrace_get_caller (lhs);
-      rhs = ftrace_get_caller (rhs);
+      lhs = ftrace_get_caller (btinfo, lhs);
+      rhs = ftrace_get_caller (btinfo, rhs);
     }
 
   return matches;
@@ -722,10 +742,12 @@ ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
 }
 
 /* Connect the function segments PREV and NEXT in a bottom-to-top walk as in
-   ftrace_connect_backtrace.  */
+   ftrace_connect_backtrace.  BTINFO is the branch trace information for the
+   current thread.  */
 
 static void
-ftrace_connect_bfun (struct btrace_function *prev,
+ftrace_connect_bfun (struct btrace_thread_info *btinfo,
+		     struct btrace_function *prev,
 		     struct btrace_function *next)
 {
   DEBUG_FTRACE ("connecting...");
@@ -743,20 +765,26 @@ ftrace_connect_bfun (struct btrace_function *prev,
   ftrace_fixup_level (next, prev->level - next->level);
 
   /* If we run out of back trace for one, let's use the other's.  */
-  if (prev->up == NULL)
+  if (prev->up == 0)
     {
-      if (next->up != NULL)
+      const btrace_function_flags flags = next->flags;
+
+      next = ftrace_find_call_by_number (btinfo, next->up);
+      if (next != NULL)
 	{
 	  DEBUG_FTRACE ("using next's callers");
-	  ftrace_fixup_caller (prev, next->up, next->flags);
+	  ftrace_fixup_caller (prev, next, flags);
 	}
     }
-  else if (next->up == NULL)
+  else if (next->up == 0)
     {
-      if (prev->up != NULL)
+      const btrace_function_flags flags = prev->flags;
+
+      prev = ftrace_find_call_by_number (btinfo, prev->up);
+      if (prev != NULL)
 	{
 	  DEBUG_FTRACE ("using prev's callers");
-	  ftrace_fixup_caller (next, prev->up, prev->flags);
+	  ftrace_fixup_caller (next, prev, flags);
 	}
     }
   else
@@ -774,26 +802,29 @@ ftrace_connect_bfun (struct btrace_function *prev,
       if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
 	{
 	  struct btrace_function *caller;
-	  btrace_function_flags flags;
+	  btrace_function_flags next_flags, prev_flags;
 
 	  /* We checked NEXT->UP above so CALLER can't be NULL.  */
-	  caller = next->up;
-	  flags = next->flags;
+	  caller = ftrace_find_call_by_number (btinfo, next->up);
+	  next_flags = next->flags;
+	  prev_flags = prev->flags;
 
 	  DEBUG_FTRACE ("adding prev's tail calls to next");
 
-	  ftrace_fixup_caller (next, prev->up, prev->flags);
+	  prev = ftrace_find_call_by_number (btinfo, prev->up);
+	  ftrace_fixup_caller (next, prev, prev_flags);
 
-	  for (prev = prev->up; prev != NULL; prev = prev->up)
+	  for (; prev != NULL; prev = ftrace_find_call_by_number (btinfo,
+								  prev->up))
 	    {
 	      /* At the end of PREV's back trace, continue with CALLER.  */
-	      if (prev->up == NULL)
+	      if (prev->up == 0)
 		{
 		  DEBUG_FTRACE ("fixing up link for tailcall chain");
 		  ftrace_debug (prev, "..top");
 		  ftrace_debug (caller, "..up");
 
-		  ftrace_fixup_caller (prev, caller, flags);
+		  ftrace_fixup_caller (prev, caller, next_flags);
 
 		  /* If we skipped any tail calls, this may move CALLER to a
 		     different function level.
@@ -821,10 +852,12 @@ ftrace_connect_bfun (struct btrace_function *prev,
 
 /* Connect function segments on the same level in the back trace at LHS and RHS.
    The back traces at LHS and RHS are expected to match according to
-   ftrace_match_backtrace.  */
+   ftrace_match_backtrace.  BTINFO is the branch trace information for the
+   current thread.  */
 
 static void
-ftrace_connect_backtrace (struct btrace_function *lhs,
+ftrace_connect_backtrace (struct btrace_thread_info *btinfo,
+			  struct btrace_function *lhs,
 			  struct btrace_function *rhs)
 {
   while (lhs != NULL && rhs != NULL)
@@ -837,20 +870,22 @@ ftrace_connect_backtrace (struct btrace_function *lhs,
       prev = lhs;
       next = rhs;
 
-      lhs = ftrace_get_caller (lhs);
-      rhs = ftrace_get_caller (rhs);
+      lhs = ftrace_get_caller (btinfo, lhs);
+      rhs = ftrace_get_caller (btinfo, rhs);
 
-      ftrace_connect_bfun (prev, next);
+      ftrace_connect_bfun (btinfo, prev, next);
     }
 }
 
 /* Bridge the gap between two function segments left and right of a gap if their
-   respective back traces match in at least MIN_MATCHES functions.
+   respective back traces match in at least MIN_MATCHES functions.  BTINFO is
+   the branch trace information for the current thread.
 
    Returns non-zero if the gap could be bridged, zero otherwise.  */
 
 static int
-ftrace_bridge_gap (struct btrace_function *lhs, struct btrace_function *rhs,
+ftrace_bridge_gap (struct btrace_thread_info *btinfo,
+		   struct btrace_function *lhs, struct btrace_function *rhs,
 		   int min_matches)
 {
   struct btrace_function *best_l, *best_r, *cand_l, *cand_r;
@@ -866,12 +901,14 @@ ftrace_bridge_gap (struct btrace_function *lhs, struct btrace_function *rhs,
   /* We search the back traces of LHS and RHS for valid connections and connect
      the two functon segments that give the longest combined back trace.  */
 
-  for (cand_l = lhs; cand_l != NULL; cand_l = ftrace_get_caller (cand_l))
-    for (cand_r = rhs; cand_r != NULL; cand_r = ftrace_get_caller (cand_r))
+  for (cand_l = lhs; cand_l != NULL;
+       cand_l = ftrace_get_caller (btinfo, cand_l))
+    for (cand_r = rhs; cand_r != NULL;
+	 cand_r = ftrace_get_caller (btinfo, cand_r))
       {
 	int matches;
 
-	matches = ftrace_match_backtrace (cand_l, cand_r);
+	matches = ftrace_match_backtrace (btinfo, cand_l, cand_r);
 	if (best_matches < matches)
 	  {
 	    best_matches = matches;
@@ -898,7 +935,7 @@ ftrace_bridge_gap (struct btrace_function *lhs, struct btrace_function *rhs,
      BEST_L to BEST_R as they will already be on the same level.  */
   ftrace_fixup_level (rhs, best_l->level - best_r->level);
 
-  ftrace_connect_backtrace (best_l, best_r);
+  ftrace_connect_backtrace (btinfo, best_l, best_r);
 
   return best_matches;
 }
@@ -956,7 +993,7 @@ btrace_bridge_gaps (struct thread_info *tp, VEC (bfun_s) **gaps)
 	      if (rhs == NULL)
 		continue;
 
-	      bridged = ftrace_bridge_gap (lhs, rhs, min_matches);
+	      bridged = ftrace_bridge_gap (&tp->btrace, lhs, rhs, min_matches);
 
 	      /* Keep track of gaps we were not able to bridge and try again.
 		 If we just pushed them to the end of GAPS we would risk an
diff --git a/gdb/btrace.h b/gdb/btrace.h
index b3dbe82..4096aaa 100644
--- a/gdb/btrace.h
+++ b/gdb/btrace.h
@@ -154,8 +154,10 @@ struct btrace_function
   /* The previous and next function in control flow order.  */
   struct btrace_func_link flow;
 
-  /* The directly preceding function segment in a (fake) call stack.  */
-  struct btrace_function *up;
+  /* The function segment number of the directly preceding function segment in
+     a (fake) call stack.  Will be zero if there is no such function segment in
+     the record.  */
+  unsigned int up;
 
   /* The instructions in this function segment.
      The instruction vector will be empty if the function segment
diff --git a/gdb/python/py-record-btrace.c b/gdb/python/py-record-btrace.c
index d684561..9dd2199 100644
--- a/gdb/python/py-record-btrace.c
+++ b/gdb/python/py-record-btrace.c
@@ -398,11 +398,11 @@ recpy_bt_func_up (PyObject *self, void *closure)
   if (func == NULL)
     return NULL;
 
-  if (func->up == NULL)
+  if (func->up == 0)
     Py_RETURN_NONE;
 
   return recpy_func_new (((recpy_element_object *) self)->ptid,
-			 RECORD_METHOD_BTRACE, func->up->number);
+			 RECORD_METHOD_BTRACE, func->up);
 }
 
 /* Implementation of RecordFunctionSegment.prev [RecordFunctionSegment] for
diff --git a/gdb/record-btrace.c b/gdb/record-btrace.c
index 5c230e7..a27521c 100644
--- a/gdb/record-btrace.c
+++ b/gdb/record-btrace.c
@@ -1570,7 +1570,7 @@ record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
   bfun = cache->bfun;
   gdb_assert (bfun != NULL);
 
-  if (bfun->up == NULL)
+  if (bfun->up == 0)
     return UNWIND_UNAVAILABLE;
 
   return UNWIND_NO_REASON;
@@ -1629,11 +1629,12 @@ record_btrace_frame_prev_register (struct frame_info *this_frame,
   bfun = cache->bfun;
   gdb_assert (bfun != NULL);
 
-  caller = bfun->up;
-  if (caller == NULL)
+  if (bfun->up == 0)
     throw_error (NOT_AVAILABLE_ERROR,
 		 _("No caller in btrace record history"));
 
+  caller = cache->tp->btrace.functions[bfun->up - 1];
+
   if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
     {
       insn = VEC_index (btrace_insn_s, caller->insn, 0);
@@ -1686,7 +1687,7 @@ record_btrace_frame_sniffer (const struct frame_unwind *self,
 
       callee = btrace_get_frame_function (next);
       if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
-	bfun = callee->up;
+	bfun = tp->btrace.functions[callee->up - 1];
     }
 
   if (bfun == NULL)
@@ -1713,6 +1714,7 @@ record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
 {
   const struct btrace_function *bfun, *callee;
   struct btrace_frame_cache *cache;
+  struct thread_info *tinfo;
   struct frame_info *next;
 
   next = get_next_frame (this_frame);
@@ -1726,16 +1728,18 @@ record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
   if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
     return 0;
 
-  bfun = callee->up;
-  if (bfun == NULL)
+  if (callee->up == 0)
     return 0;
 
+  tinfo = find_thread_ptid (inferior_ptid);
+  bfun = tinfo->btrace.functions[callee->up - 1];
+
   DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
 	 btrace_get_bfun_name (bfun), bfun->level);
 
   /* This is our frame.  Initialize the frame cache.  */
   cache = bfcache_new (this_frame);
-  cache->tp = find_thread_ptid (inferior_ptid);
+  cache->tp = tinfo;
   cache->bfun = bfun;
 
   *this_cache = cache;
-- 
2.7.4

^ permalink raw reply	[flat|nested] 36+ messages in thread

* [PATCH v3 12/12] btrace: Store function segments as objects.
  2017-05-09  7:01 [PATCH v3 00/12] btrace: Turn linked list of function call segments into vector Tim Wiederhake
                   ` (6 preceding siblings ...)
  2017-05-09  7:01 ` [PATCH v3 07/12] btrace: Remove struct btrace_thread_info::{begin,end} Tim Wiederhake
@ 2017-05-09  7:01 ` Tim Wiederhake
  2017-05-10  5:10   ` Simon Marchi
  2017-05-09  7:01 ` [PATCH v3 01/12] btrace: Use std::vector in struct btrace_thread_information Tim Wiederhake
                   ` (3 subsequent siblings)
  11 siblings, 1 reply; 36+ messages in thread
From: Tim Wiederhake @ 2017-05-09  7:01 UTC (permalink / raw)
  To: gdb-patches; +Cc: markus.t.metzger

2017-05-09  Tim Wiederhake  <tim.wiederhake@intel.com>

gdb/ChangeLog:
	* btrace.c (ftrace_find_call_by_number, ftrace_new_function,
	ftrace_new_return, ftrace_new_switch, ftrace_new_gap,
	ftrace_update_function, ftrace_compute_global_level_offset,
	btrace_stitch_bts, btrace_clear, btrace_insn_get,
	btrace_insn_get_error, btrace_insn_number, btrace_insn_end,
	btrace_insn_next, btrace_insn_prev, btrace_find_insn_by_number,
	btrace_ends_with_single_insn, btrace_call_get): Adjust to store
	function segments as objects instead of pointers.
	* btrace.h (struct btrace_thread_info): Turn FUNCTIONS into a
	vector of function segment objects.
	* record-btrace.c (record_btrace_frame_this_id,
	record_btrace_frame_prev_register, record_btrace_frame_sniffer,
	record_btrace_tailcall_frame_sniffer): Adjust.

---
 gdb/btrace.c        | 94 ++++++++++++++++++++++++++---------------------------
 gdb/btrace.h        |  7 ++--
 gdb/record-btrace.c | 10 +++---
 3 files changed, 56 insertions(+), 55 deletions(-)

diff --git a/gdb/btrace.c b/gdb/btrace.c
index 7b82000..4c7020d 100644
--- a/gdb/btrace.c
+++ b/gdb/btrace.c
@@ -155,14 +155,27 @@ ftrace_call_num_insn (const struct btrace_function* bfun)
 /* Return the function segment with the given NUMBER or NULL if no such segment
    exists.  BTINFO is the branch trace information for the current thread.  */
 
-static struct btrace_function *
+const static struct btrace_function *
 ftrace_find_call_by_number (const struct btrace_thread_info *btinfo,
 			    unsigned int number)
 {
   if (number == 0 || number > btinfo->functions.size ())
     return NULL;
 
-  return btinfo->functions[number - 1];
+  return &btinfo->functions[number - 1];
+}
+
+/* Return the function segment with the given NUMBER or NULL if no such segment
+   exists.  BTINFO is the branch trace information for the current thread.  */
+
+static struct btrace_function *
+ftrace_find_call_by_number (struct btrace_thread_info *btinfo,
+			    unsigned int number)
+{
+  if (number == 0 || number > btinfo->functions.size ())
+    return NULL;
+
+  return &btinfo->functions[number - 1];
 }
 
 /* Return non-zero if BFUN does not match MFUN and FUN,
@@ -214,37 +227,33 @@ ftrace_function_switched (const struct btrace_function *bfun,
 /* Allocate and initialize a new branch trace function segment at the end of
    the trace.
    BTINFO is the branch trace information for the current thread.
-   MFUN and FUN are the symbol information we have for this function.  */
+   MFUN and FUN are the symbol information we have for this function.
+   This invalidates all struct btrace_function pointer currently held.  */
 
 static struct btrace_function *
 ftrace_new_function (struct btrace_thread_info *btinfo,
 		     struct minimal_symbol *mfun,
 		     struct symbol *fun)
 {
-  struct btrace_function *bfun;
-
-  bfun = XCNEW (struct btrace_function);
-
-  bfun->msym = mfun;
-  bfun->sym = fun;
+  struct btrace_function bfun {mfun, fun, 0, 0, 0, NULL, 0, 0, 0, 0, 0};
 
   if (btinfo->functions.empty ())
     {
       /* Start counting at one.  */
-      bfun->number = 1;
-      bfun->insn_offset = 1;
+      bfun.number = 1;
+      bfun.insn_offset = 1;
     }
   else
     {
-      struct btrace_function *prev = btinfo->functions.back ();
+      struct btrace_function *prev = &btinfo->functions.back ();
 
-      bfun->number = prev->number + 1;
-      bfun->insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
-      bfun->level = prev->level;
+      bfun.number = prev->number + 1;
+      bfun.insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
+      bfun.level = prev->level;
     }
 
-  btinfo->functions.push_back (bfun);
-  return bfun;
+  btinfo->functions.push_back (std::move (bfun));
+  return &btinfo->functions.back ();
 }
 
 /* Update the UP field of a function segment.  */
@@ -406,10 +415,10 @@ ftrace_new_return (struct btrace_thread_info *btinfo,
 		   struct minimal_symbol *mfun,
 		   struct symbol *fun)
 {
-  struct btrace_function *prev = btinfo->functions.back ();
-  struct btrace_function *bfun, *caller;
+  struct btrace_function *prev, *bfun, *caller;
 
   bfun = ftrace_new_function (btinfo, mfun, fun);
+  prev = ftrace_find_call_by_number (btinfo, bfun->number - 1);
 
   /* It is important to start at PREV's caller.  Otherwise, we might find
      PREV itself, if PREV is a recursive function.  */
@@ -488,12 +497,12 @@ ftrace_new_switch (struct btrace_thread_info *btinfo,
 		   struct minimal_symbol *mfun,
 		   struct symbol *fun)
 {
-  struct btrace_function *prev = btinfo->functions.back ();
-  struct btrace_function *bfun;
+  struct btrace_function *prev, *bfun;
 
   /* This is an unexplained function switch.  We can't really be sure about the
      call stack, yet the best I can think of right now is to preserve it.  */
   bfun = ftrace_new_function (btinfo, mfun, fun);
+  prev = ftrace_find_call_by_number (btinfo, bfun->number - 1);
   bfun->up = prev->up;
   bfun->flags = prev->flags;
 
@@ -518,7 +527,7 @@ ftrace_new_gap (struct btrace_thread_info *btinfo, int errcode,
   else
     {
       /* We hijack the previous function segment if it was empty.  */
-      bfun = btinfo->functions.back ();
+      bfun = &btinfo->functions.back ();
       if (bfun->errcode != 0 || !VEC_empty (btrace_insn_s, bfun->insn))
 	bfun = ftrace_new_function (btinfo, NULL, NULL);
     }
@@ -559,7 +568,7 @@ ftrace_update_function (struct btrace_thread_info *btinfo, CORE_ADDR pc)
     return ftrace_new_function (btinfo, mfun, fun);
 
   /* If we had a gap before, we create a function.  */
-  bfun = btinfo->functions.back ();
+  bfun = &btinfo->functions.back ();
   if (bfun->errcode != 0)
     return ftrace_new_function (btinfo, mfun, fun);
 
@@ -738,10 +747,10 @@ ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
       /* The last function segment contains the current instruction, which is
 	 not really part of the trace.  If it contains just this one
 	 instruction, we ignore the segment.  */
-      if (bfun->number == length && VEC_length (btrace_insn_s, bfun->insn) == 1)
+      if (bfun.number == length && VEC_length (btrace_insn_s, bfun.insn) == 1)
 	continue;
 
-      level = std::min (level, bfun->level);
+      level = std::min (level, bfun.level);
     }
 
   DEBUG_FTRACE ("setting global level offset: %d", -level);
@@ -1610,7 +1619,7 @@ btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
   gdb_assert (!btinfo->functions.empty ());
   gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));
 
-  last_bfun = btinfo->functions.back ();
+  last_bfun = &btinfo->functions.back ();
 
   /* If the existing trace ends with a gap, we just glue the traces
      together.  We need to drop the last (i.e. chronologically first) block
@@ -1902,10 +1911,7 @@ btrace_clear (struct thread_info *tp)
 
   btinfo = &tp->btrace;
   for (auto &bfun : btinfo->functions)
-    {
-      VEC_free (btrace_insn_s, bfun->insn);
-      xfree (bfun);
-    }
+    VEC_free (btrace_insn_s, bfun.insn);
 
   btinfo->functions.clear ();
   btinfo->ngaps = 0;
@@ -2254,7 +2260,7 @@ btrace_insn_get (const struct btrace_insn_iterator *it)
   unsigned int index, end;
 
   index = it->insn_index;
-  bfun = it->btinfo->functions[it->call_index];
+  bfun = &it->btinfo->functions[it->call_index];
 
   /* Check if the iterator points to a gap in the trace.  */
   if (bfun->errcode != 0)
@@ -2273,10 +2279,7 @@ btrace_insn_get (const struct btrace_insn_iterator *it)
 int
 btrace_insn_get_error (const struct btrace_insn_iterator *it)
 {
-  const struct btrace_function *bfun;
-
-  bfun = it->btinfo->functions[it->call_index];
-  return bfun->errcode;
+  return it->btinfo->functions[it->call_index].errcode;
 }
 
 /* See btrace.h.  */
@@ -2284,10 +2287,7 @@ btrace_insn_get_error (const struct btrace_insn_iterator *it)
 unsigned int
 btrace_insn_number (const struct btrace_insn_iterator *it)
 {
-  const struct btrace_function *bfun;
-
-  bfun = it->btinfo->functions[it->call_index];
-  return bfun->insn_offset + it->insn_index;
+  return it->btinfo->functions[it->call_index].insn_offset + it->insn_index;
 }
 
 /* See btrace.h.  */
@@ -2316,7 +2316,7 @@ btrace_insn_end (struct btrace_insn_iterator *it,
   if (btinfo->functions.empty ())
     error (_("No trace."));
 
-  bfun = btinfo->functions.back ();
+  bfun = &btinfo->functions.back ();
   length = VEC_length (btrace_insn_s, bfun->insn);
 
   /* The last function may either be a gap or it contains the current
@@ -2338,7 +2338,7 @@ btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
   const struct btrace_function *bfun;
   unsigned int index, steps;
 
-  bfun = it->btinfo->functions[it->call_index];
+  bfun = &it->btinfo->functions[it->call_index];
   steps = 0;
   index = it->insn_index;
 
@@ -2420,7 +2420,7 @@ btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
   const struct btrace_function *bfun;
   unsigned int index, steps;
 
-  bfun = it->btinfo->functions[it->call_index];
+  bfun = &it->btinfo->functions[it->call_index];
   steps = 0;
   index = it->insn_index;
 
@@ -2507,12 +2507,12 @@ btrace_find_insn_by_number (struct btrace_insn_iterator *it,
       return 0;
 
   lower = 0;
-  bfun = btinfo->functions[lower];
+  bfun = &btinfo->functions[lower];
   if (number < bfun->insn_offset)
     return 0;
 
   upper = btinfo->functions.size () - 1;
-  bfun = btinfo->functions[upper];
+  bfun = &btinfo->functions[upper];
   if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
     return 0;
 
@@ -2521,7 +2521,7 @@ btrace_find_insn_by_number (struct btrace_insn_iterator *it,
     {
       const unsigned int average = lower + (upper - lower) / 2;
 
-      bfun = btinfo->functions[average];
+      bfun = &btinfo->functions[average];
 
       if (number < bfun->insn_offset)
 	{
@@ -2555,7 +2555,7 @@ btrace_ends_with_single_insn (const struct btrace_thread_info *btinfo)
   if (btinfo->functions.empty ())
     return 0;
 
-  bfun = btinfo->functions.back ();
+  bfun = &btinfo->functions.back ();
   if (bfun->errcode != 0)
     return 0;
 
@@ -2570,7 +2570,7 @@ btrace_call_get (const struct btrace_call_iterator *it)
   if (it->index >= it->btinfo->functions.size ())
     return NULL;
 
-  return it->btinfo->functions[it->index];
+  return &it->btinfo->functions[it->index];
 }
 
 /* See btrace.h.  */
diff --git a/gdb/btrace.h b/gdb/btrace.h
index 0acd2fb..d400a3c 100644
--- a/gdb/btrace.h
+++ b/gdb/btrace.h
@@ -325,9 +325,10 @@ struct btrace_thread_info
   /* The raw branch trace data for the below branch trace.  */
   struct btrace_data data;
 
-  /* Vector of pointer to decoded function segments.  These are in execution
-     order with the first element == BEGIN and the last element == END.  */
-  std::vector<btrace_function *> functions;
+  /* Vector of decoded function segments in execution flow order.  Note that
+     the numbering for btrace function segments starts with 1, so function
+     segment i will be at index (i - 1).  */
+  std::vector<btrace_function> functions;
 
   /* The function level offset.  When added to each function's LEVEL,
      this normalizes the function levels such that the smallest level
diff --git a/gdb/record-btrace.c b/gdb/record-btrace.c
index a66d32a..d00ffce 100644
--- a/gdb/record-btrace.c
+++ b/gdb/record-btrace.c
@@ -1592,7 +1592,7 @@ record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
   gdb_assert (bfun != NULL);
 
   while (bfun->prev != 0)
-    bfun = cache->tp->btrace.functions[bfun->prev - 1];
+    bfun = &cache->tp->btrace.functions[bfun->prev - 1];
 
   code = get_frame_func (this_frame);
   special = bfun->number;
@@ -1633,7 +1633,7 @@ record_btrace_frame_prev_register (struct frame_info *this_frame,
     throw_error (NOT_AVAILABLE_ERROR,
 		 _("No caller in btrace record history"));
 
-  caller = cache->tp->btrace.functions[bfun->up - 1];
+  caller = &cache->tp->btrace.functions[bfun->up - 1];
 
   if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
     {
@@ -1679,7 +1679,7 @@ record_btrace_frame_sniffer (const struct frame_unwind *self,
 
       replay = tp->btrace.replay;
       if (replay != NULL)
-	bfun = replay->btinfo->functions[replay->call_index];
+	bfun = &replay->btinfo->functions[replay->call_index];
     }
   else
     {
@@ -1687,7 +1687,7 @@ record_btrace_frame_sniffer (const struct frame_unwind *self,
 
       callee = btrace_get_frame_function (next);
       if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
-	bfun = tp->btrace.functions[callee->up - 1];
+	bfun = &tp->btrace.functions[callee->up - 1];
     }
 
   if (bfun == NULL)
@@ -1732,7 +1732,7 @@ record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
     return 0;
 
   tinfo = find_thread_ptid (inferior_ptid);
-  bfun = tinfo->btrace.functions[callee->up - 1];
+  bfun = &tinfo->btrace.functions[callee->up - 1];
 
   DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
 	 btrace_get_bfun_name (bfun), bfun->level);
-- 
2.7.4

^ permalink raw reply	[flat|nested] 36+ messages in thread

* [PATCH v3 05/12] btrace: Use function segment index in insn iterator.
  2017-05-09  7:01 [PATCH v3 00/12] btrace: Turn linked list of function call segments into vector Tim Wiederhake
                   ` (3 preceding siblings ...)
  2017-05-09  7:01 ` [PATCH v3 11/12] btrace: Remove bfun_s vector Tim Wiederhake
@ 2017-05-09  7:01 ` Tim Wiederhake
  2017-05-10  2:20   ` Simon Marchi
  2017-05-09  7:01 ` [PATCH v3 04/12] btrace: Use function segment index in call iterator Tim Wiederhake
                   ` (6 subsequent siblings)
  11 siblings, 1 reply; 36+ messages in thread
From: Tim Wiederhake @ 2017-05-09  7:01 UTC (permalink / raw)
  To: gdb-patches; +Cc: markus.t.metzger

Remove FUNCTION pointer in struct btrace_insn_iterator and use an index into
the list of function segments instead.

2017-05-09  Tim Wiederhake  <tim.wiederhake@intel.com>

gdb/ChangeLog:

	* btrace.c (btrace_insn_get, btrace_insn_get_error, btrace_insn_number,
	btrace_insn_begin, btrace_insn_end, btrace_insn_next, btrace_insn_prev,
	btrace_find_insn_by_number): Replace function segment pointer with
	index.
	(btrace_insn_cmp): Simplify.
	* btrace.h (struct btrace_insn_iterator): Rename index to
	insn_index.  Replace function segment pointer with index into function
	segment vector.
	* record-btrace.c (record_btrace_call_history): Replace function
	segment pointer use with index.
	(record_btrace_frame_sniffer): Retrieve function call segment through
	vector.
	(record_btrace_set_replay): Remove defunct safety check.

---
 gdb/btrace.c        | 59 +++++++++++++++++++++++++++++++++--------------------
 gdb/btrace.h        |  7 +++----
 gdb/record-btrace.c |  6 +++---
 3 files changed, 43 insertions(+), 29 deletions(-)

diff --git a/gdb/btrace.c b/gdb/btrace.c
index 5615aad..cb30dcf 100644
--- a/gdb/btrace.c
+++ b/gdb/btrace.c
@@ -2248,8 +2248,8 @@ btrace_insn_get (const struct btrace_insn_iterator *it)
   const struct btrace_function *bfun;
   unsigned int index, end;
 
-  index = it->index;
-  bfun = it->function;
+  index = it->insn_index;
+  bfun = it->btinfo->functions[it->call_index];
 
   /* Check if the iterator points to a gap in the trace.  */
   if (bfun->errcode != 0)
@@ -2268,7 +2268,10 @@ btrace_insn_get (const struct btrace_insn_iterator *it)
 int
 btrace_insn_get_error (const struct btrace_insn_iterator *it)
 {
-  return it->function->errcode;
+  const struct btrace_function *bfun;
+
+  bfun = it->btinfo->functions[it->call_index];
+  return bfun->errcode;
 }
 
 /* See btrace.h.  */
@@ -2276,7 +2279,10 @@ btrace_insn_get_error (const struct btrace_insn_iterator *it)
 unsigned int
 btrace_insn_number (const struct btrace_insn_iterator *it)
 {
-  return it->function->insn_offset + it->index;
+  const struct btrace_function *bfun;
+
+  bfun = it->btinfo->functions[it->call_index];
+  return bfun->insn_offset + it->insn_index;
 }
 
 /* See btrace.h.  */
@@ -2292,8 +2298,8 @@ btrace_insn_begin (struct btrace_insn_iterator *it,
     error (_("No trace."));
 
   it->btinfo = btinfo;
-  it->function = bfun;
-  it->index = 0;
+  it->call_index = 0;
+  it->insn_index = 0;
 }
 
 /* See btrace.h.  */
@@ -2318,8 +2324,8 @@ btrace_insn_end (struct btrace_insn_iterator *it,
     length -= 1;
 
   it->btinfo = btinfo;
-  it->function = bfun;
-  it->index = length;
+  it->call_index = bfun->number - 1;
+  it->insn_index = length;
 }
 
 /* See btrace.h.  */
@@ -2330,9 +2336,9 @@ btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
   const struct btrace_function *bfun;
   unsigned int index, steps;
 
-  bfun = it->function;
+  bfun = it->btinfo->functions[it->call_index];
   steps = 0;
-  index = it->index;
+  index = it->insn_index;
 
   while (stride != 0)
     {
@@ -2398,8 +2404,8 @@ btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
     }
 
   /* Update the iterator.  */
-  it->function = bfun;
-  it->index = index;
+  it->call_index = bfun->number - 1;
+  it->insn_index = index;
 
   return steps;
 }
@@ -2412,9 +2418,9 @@ btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
   const struct btrace_function *bfun;
   unsigned int index, steps;
 
-  bfun = it->function;
+  bfun = it->btinfo->functions[it->call_index];
   steps = 0;
-  index = it->index;
+  index = it->insn_index;
 
   while (stride != 0)
     {
@@ -2456,8 +2462,8 @@ btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
     }
 
   /* Update the iterator.  */
-  it->function = bfun;
-  it->index = index;
+  it->call_index = bfun->number - 1;
+  it->insn_index = index;
 
   return steps;
 }
@@ -2468,12 +2474,21 @@ int
 btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
 		 const struct btrace_insn_iterator *rhs)
 {
-  unsigned int lnum, rnum;
+  gdb_assert (lhs->btinfo == rhs->btinfo);
 
-  lnum = btrace_insn_number (lhs);
-  rnum = btrace_insn_number (rhs);
+  if (lhs->call_index > rhs->call_index)
+    return 1;
+
+  if (lhs->call_index < rhs->call_index)
+    return -1;
+
+  if (lhs->insn_index > rhs->insn_index)
+    return 1;
+
+  if (lhs->insn_index < rhs->insn_index)
+    return -1;
 
-  return (int) (lnum - rnum);
+  return 0;
 }
 
 /* See btrace.h.  */
@@ -2522,8 +2537,8 @@ btrace_find_insn_by_number (struct btrace_insn_iterator *it,
     }
 
   it->btinfo = btinfo;
-  it->function = bfun;
-  it->index = number - bfun->insn_offset;
+  it->call_index = bfun->number - 1;
+  it->insn_index = number - bfun->insn_offset;
   return 1;
 }
 
diff --git a/gdb/btrace.h b/gdb/btrace.h
index ef2c781..ca79667 100644
--- a/gdb/btrace.h
+++ b/gdb/btrace.h
@@ -195,12 +195,11 @@ struct btrace_insn_iterator
   /* The branch trace information for this thread.  Will never be NULL.  */
   const struct btrace_thread_info *btinfo;
 
-  /* The branch trace function segment containing the instruction.
-     Will never be NULL.  */
-  const struct btrace_function *function;
+  /* The index of the function segment in BTINFO->FUNCTIONS.  */
+  unsigned int call_index;
 
   /* The index into the function segment's instruction vector.  */
-  unsigned int index;
+  unsigned int insn_index;
 };
 
 /* A branch trace function call iterator.  */
diff --git a/gdb/record-btrace.c b/gdb/record-btrace.c
index 86a4b1e..ec940f6 100644
--- a/gdb/record-btrace.c
+++ b/gdb/record-btrace.c
@@ -1102,7 +1102,7 @@ record_btrace_call_history (struct target_ops *self, int size, int int_flags)
       if (replay != NULL)
 	{
 	  begin.btinfo = btinfo;
-	  begin.index = replay->function->number - 1;
+	  begin.index = replay->call_index;
 	}
       else
 	btrace_call_end (&begin, btinfo);
@@ -1678,7 +1678,7 @@ record_btrace_frame_sniffer (const struct frame_unwind *self,
 
       replay = tp->btrace.replay;
       if (replay != NULL)
-	bfun = replay->function;
+	bfun = replay->btinfo->functions[replay->call_index];
     }
   else
     {
@@ -2691,7 +2691,7 @@ record_btrace_set_replay (struct thread_info *tp,
 
   btinfo = &tp->btrace;
 
-  if (it == NULL || it->function == NULL)
+  if (it == NULL)
     record_btrace_stop_replaying (tp);
   else
     {
-- 
2.7.4

^ permalink raw reply	[flat|nested] 36+ messages in thread

* [PATCH v3 09/12] btrace: Remove struct btrace_function::flow.
  2017-05-09  7:01 [PATCH v3 00/12] btrace: Turn linked list of function call segments into vector Tim Wiederhake
                   ` (9 preceding siblings ...)
  2017-05-09  7:01 ` [PATCH v3 02/12] btrace: Transfer ownership of pointers Tim Wiederhake
@ 2017-05-09  7:01 ` Tim Wiederhake
  2017-05-10  3:46   ` Simon Marchi
  2017-05-09  7:01 ` [PATCH v3 06/12] btrace: Remove constant arguments Tim Wiederhake
  11 siblings, 1 reply; 36+ messages in thread
From: Tim Wiederhake @ 2017-05-09  7:01 UTC (permalink / raw)
  To: gdb-patches; +Cc: markus.t.metzger

This used to hold a pair of pointers to the previous and next function segment
in execution flow order.  It is no longer necessary as the previous and next
function segments now are simply the previous and next elements in the vector
of function segments.

2017-05-09  Tim Wiederhake  <tim.wiederhake@intel.com>

gdb/ChangeLog:

	* btrace.c (ftrace_new_function, ftrace_fixup_level,
	ftrace_connect_bfun, ftrace_bridge_gap, btrace_bridge_gaps,
	btrace_insn_next, btrace_insn_prev): Remove references to
	btrace_function::flow.
	* btrace.h (struct btrace_function): Remove FLOW.

---
 gdb/btrace.c | 44 ++++++++++++++++++++++++--------------------
 gdb/btrace.h |  3 ---
 2 files changed, 24 insertions(+), 23 deletions(-)

diff --git a/gdb/btrace.c b/gdb/btrace.c
index 31837aa..f57bbf9 100644
--- a/gdb/btrace.c
+++ b/gdb/btrace.c
@@ -242,10 +242,6 @@ ftrace_new_function (struct btrace_thread_info *btinfo,
     {
       struct btrace_function *prev = btinfo->functions.back ();
 
-      gdb_assert (prev->flow.next == NULL);
-      prev->flow.next = bfun;
-      bfun->flow.prev = prev;
-
       bfun->number = prev->number + 1;
       bfun->insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
       bfun->level = prev->level;
@@ -694,10 +690,12 @@ ftrace_match_backtrace (struct btrace_thread_info *btinfo,
   return matches;
 }
 
-/* Add ADJUSTMENT to the level of BFUN and succeeding function segments.  */
+/* Add ADJUSTMENT to the level of BFUN and succeeding function segments.
+   BTINFO is the branch trace information for the current thread.  */
 
 static void
-ftrace_fixup_level (struct btrace_function *bfun, int adjustment)
+ftrace_fixup_level (struct btrace_thread_info *btinfo,
+		    struct btrace_function *bfun, int adjustment)
 {
   if (adjustment == 0)
     return;
@@ -705,8 +703,11 @@ ftrace_fixup_level (struct btrace_function *bfun, int adjustment)
   DEBUG_FTRACE ("fixup level (%+d)", adjustment);
   ftrace_debug (bfun, "..bfun");
 
-  for (; bfun != NULL; bfun = bfun->flow.next)
-    bfun->level += adjustment;
+  while (bfun != NULL)
+    {
+      bfun->level += adjustment;
+      bfun = ftrace_find_call_by_number (btinfo, bfun->number + 1);
+    }
 }
 
 /* Recompute the global level offset.  Traverse the function trace and compute
@@ -762,7 +763,7 @@ ftrace_connect_bfun (struct btrace_thread_info *btinfo,
   next->segment.prev = prev;
 
   /* We may have moved NEXT to a different function level.  */
-  ftrace_fixup_level (next, prev->level - next->level);
+  ftrace_fixup_level (btinfo, next, prev->level - next->level);
 
   /* If we run out of back trace for one, let's use the other's.  */
   if (prev->up == 0)
@@ -835,7 +836,8 @@ ftrace_connect_bfun (struct btrace_thread_info *btinfo,
 
 		     Otherwise we will fix up CALLER's level when we connect it
 		     to PREV's caller in the next iteration.  */
-		  ftrace_fixup_level (caller, prev->level - caller->level - 1);
+		  ftrace_fixup_level (btinfo, caller,
+				      prev->level - caller->level - 1);
 		  break;
 		}
 
@@ -933,7 +935,7 @@ ftrace_bridge_gap (struct btrace_thread_info *btinfo,
      To catch this, we already fix up the level here where we can start at RHS
      instead of at BEST_R.  We will ignore the level fixup when connecting
      BEST_L to BEST_R as they will already be on the same level.  */
-  ftrace_fixup_level (rhs, best_l->level - best_r->level);
+  ftrace_fixup_level (btinfo, rhs, best_l->level - best_r->level);
 
   ftrace_connect_backtrace (btinfo, best_l, best_r);
 
@@ -946,12 +948,14 @@ ftrace_bridge_gap (struct btrace_thread_info *btinfo,
 static void
 btrace_bridge_gaps (struct thread_info *tp, VEC (bfun_s) **gaps)
 {
+  struct btrace_thread_info *btinfo;
   VEC (bfun_s) *remaining;
   struct cleanup *old_chain;
   int min_matches;
 
   DEBUG ("bridge gaps");
 
+  btinfo = &tp->btrace;
   remaining = NULL;
   old_chain = make_cleanup (VEC_cleanup (bfun_s), &remaining);
 
@@ -980,20 +984,20 @@ btrace_bridge_gaps (struct thread_info *tp, VEC (bfun_s) **gaps)
 		 all but the leftmost gap in such a sequence.
 
 		 Also ignore gaps at the beginning of the trace.  */
-	      lhs = gap->flow.prev;
+	      lhs = ftrace_find_call_by_number (btinfo, gap->number - 1);
 	      if (lhs == NULL || lhs->errcode != 0)
 		continue;
 
 	      /* Skip gaps to the right.  */
-	      for (rhs = gap->flow.next; rhs != NULL; rhs = rhs->flow.next)
-		if (rhs->errcode == 0)
-		  break;
+	      rhs = ftrace_find_call_by_number (btinfo, gap->number + 1);
+	      while (rhs != NULL && rhs->errcode != 0)
+		rhs = ftrace_find_call_by_number (btinfo, rhs->number + 1);
 
 	      /* Ignore gaps at the end of the trace.  */
 	      if (rhs == NULL)
 		continue;
 
-	      bridged = ftrace_bridge_gap (&tp->btrace, lhs, rhs, min_matches);
+	      bridged = ftrace_bridge_gap (btinfo, lhs, rhs, min_matches);
 
 	      /* Keep track of gaps we were not able to bridge and try again.
 		 If we just pushed them to the end of GAPS we would risk an
@@ -1023,7 +1027,7 @@ btrace_bridge_gaps (struct thread_info *tp, VEC (bfun_s) **gaps)
 
   /* We may omit this in some cases.  Not sure it is worth the extra
      complication, though.  */
-  ftrace_compute_global_level_offset (&tp->btrace);
+  ftrace_compute_global_level_offset (btinfo);
 }
 
 /* Compute the function branch trace from BTS trace.  */
@@ -2376,7 +2380,7 @@ btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
 	{
 	  const struct btrace_function *next;
 
-	  next = bfun->flow.next;
+	  next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1);
 	  if (next == NULL)
 	    break;
 
@@ -2406,7 +2410,7 @@ btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
 	{
 	  const struct btrace_function *next;
 
-	  next = bfun->flow.next;
+	  next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1);
 	  if (next == NULL)
 	    {
 	      /* We stepped past the last function.
@@ -2455,7 +2459,7 @@ btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
 	{
 	  const struct btrace_function *prev;
 
-	  prev = bfun->flow.prev;
+	  prev = ftrace_find_call_by_number (it->btinfo, bfun->number - 1);
 	  if (prev == NULL)
 	    break;
 
diff --git a/gdb/btrace.h b/gdb/btrace.h
index 4096aaa..b1940bb 100644
--- a/gdb/btrace.h
+++ b/gdb/btrace.h
@@ -151,9 +151,6 @@ struct btrace_function
      two segments: one before the call and another after the return.  */
   struct btrace_func_link segment;
 
-  /* The previous and next function in control flow order.  */
-  struct btrace_func_link flow;
-
   /* The function segment number of the directly preceding function segment in
      a (fake) call stack.  Will be zero if there is no such function segment in
      the record.  */
-- 
2.7.4

^ permalink raw reply	[flat|nested] 36+ messages in thread

* [PATCH v3 02/12] btrace: Transfer ownership of pointers.
  2017-05-09  7:01 [PATCH v3 00/12] btrace: Turn linked list of function call segments into vector Tim Wiederhake
                   ` (8 preceding siblings ...)
  2017-05-09  7:01 ` [PATCH v3 01/12] btrace: Use std::vector in struct btrace_thread_information Tim Wiederhake
@ 2017-05-09  7:01 ` Tim Wiederhake
  2017-05-09 12:21   ` Simon Marchi
  2017-05-09  7:01 ` [PATCH v3 09/12] btrace: Remove struct btrace_thread_info::flow Tim Wiederhake
  2017-05-09  7:01 ` [PATCH v3 06/12] btrace: Remove constant arguments Tim Wiederhake
  11 siblings, 1 reply; 36+ messages in thread
From: Tim Wiederhake @ 2017-05-09  7:01 UTC (permalink / raw)
  To: gdb-patches; +Cc: markus.t.metzger

Directly insert new btrace_function pointers into the vector and have the
vector own these pointers.  This allows us to later retrieve these objects by
their number directly after creation whereas at the moment we have to wait
until the vector is fully populated.

This requires pulling btrace_thread_info through different functions but
cleans up the code for freeing the trace.

2017-05-09  Tim Wiederhake  <tim.wiederhake@intel.com>

gdb/ChangeLog:

	* btrace.c (ftrace_new_function): Add btrace_thread_info to arguments
	and save pointers directly.
	(ftrace_new_call, ftrace_new_tailcall, ftrace_new_return,
	ftrace_new_switch, ftrace_new_gap, ftrace_update_function,
	ftrace_add_pt): Add btrace_thread_info to arguments.  Adjust for
	changed signature of functions.
	(btrace_compute_ftrace_pt): Adjust for changed signature of functions.
	(btrace_fetch): Remove code that adds btrace_function pointers to
	vector of btrace_functions.
	(btrace_clear): Simplify freeing vector of btrace_functions.

---
 gdb/btrace.c | 101 +++++++++++++++++++++++++++++++----------------------------
 1 file changed, 53 insertions(+), 48 deletions(-)

diff --git a/gdb/btrace.c b/gdb/btrace.c
index 46a4d8d..57788ac 100644
--- a/gdb/btrace.c
+++ b/gdb/btrace.c
@@ -203,11 +203,13 @@ ftrace_function_switched (const struct btrace_function *bfun,
 }
 
 /* Allocate and initialize a new branch trace function segment.
+   BTINFO is the branch trace information for the current thread.
    PREV is the chronologically preceding function segment.
    MFUN and FUN are the symbol information we have for this function.  */
 
 static struct btrace_function *
-ftrace_new_function (struct btrace_function *prev,
+ftrace_new_function (struct btrace_thread_info *btinfo,
+		     struct btrace_function *prev,
 		     struct minimal_symbol *mfun,
 		     struct symbol *fun)
 {
@@ -235,6 +237,7 @@ ftrace_new_function (struct btrace_function *prev,
       bfun->level = prev->level;
     }
 
+  btinfo->functions.push_back (bfun);
   return bfun;
 }
 
@@ -275,17 +278,19 @@ ftrace_fixup_caller (struct btrace_function *bfun,
 }
 
 /* Add a new function segment for a call.
+   BTINFO is the branch trace information for the current thread.
    CALLER is the chronologically preceding function segment.
    MFUN and FUN are the symbol information we have for this function.  */
 
 static struct btrace_function *
-ftrace_new_call (struct btrace_function *caller,
+ftrace_new_call (struct btrace_thread_info *btinfo,
+		 struct btrace_function *caller,
 		 struct minimal_symbol *mfun,
 		 struct symbol *fun)
 {
   struct btrace_function *bfun;
 
-  bfun = ftrace_new_function (caller, mfun, fun);
+  bfun = ftrace_new_function (btinfo, caller, mfun, fun);
   bfun->up = caller;
   bfun->level += 1;
 
@@ -295,17 +300,19 @@ ftrace_new_call (struct btrace_function *caller,
 }
 
 /* Add a new function segment for a tail call.
+   BTINFO is the branch trace information for the current thread.
    CALLER is the chronologically preceding function segment.
    MFUN and FUN are the symbol information we have for this function.  */
 
 static struct btrace_function *
-ftrace_new_tailcall (struct btrace_function *caller,
+ftrace_new_tailcall (struct btrace_thread_info *btinfo,
+		     struct btrace_function *caller,
 		     struct minimal_symbol *mfun,
 		     struct symbol *fun)
 {
   struct btrace_function *bfun;
 
-  bfun = ftrace_new_function (caller, mfun, fun);
+  bfun = ftrace_new_function (btinfo, caller, mfun, fun);
   bfun->up = caller;
   bfun->level += 1;
   bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
@@ -373,17 +380,19 @@ ftrace_find_call (struct btrace_function *bfun)
 }
 
 /* Add a continuation segment for a function into which we return.
+   BTINFO is the branch trace information for the current thread.
    PREV is the chronologically preceding function segment.
    MFUN and FUN are the symbol information we have for this function.  */
 
 static struct btrace_function *
-ftrace_new_return (struct btrace_function *prev,
+ftrace_new_return (struct btrace_thread_info *btinfo,
+		   struct btrace_function *prev,
 		   struct minimal_symbol *mfun,
 		   struct symbol *fun)
 {
   struct btrace_function *bfun, *caller;
 
-  bfun = ftrace_new_function (prev, mfun, fun);
+  bfun = ftrace_new_function (btinfo, prev, mfun, fun);
 
   /* It is important to start at PREV's caller.  Otherwise, we might find
      PREV itself, if PREV is a recursive function.  */
@@ -452,11 +461,13 @@ ftrace_new_return (struct btrace_function *prev,
 }
 
 /* Add a new function segment for a function switch.
+   BTINFO is the branch trace information for the current thread.
    PREV is the chronologically preceding function segment.
    MFUN and FUN are the symbol information we have for this function.  */
 
 static struct btrace_function *
-ftrace_new_switch (struct btrace_function *prev,
+ftrace_new_switch (struct btrace_thread_info *btinfo,
+		   struct btrace_function *prev,
 		   struct minimal_symbol *mfun,
 		   struct symbol *fun)
 {
@@ -464,7 +475,7 @@ ftrace_new_switch (struct btrace_function *prev,
 
   /* This is an unexplained function switch.  We can't really be sure about the
      call stack, yet the best I can think of right now is to preserve it.  */
-  bfun = ftrace_new_function (prev, mfun, fun);
+  bfun = ftrace_new_function (btinfo, prev, mfun, fun);
   bfun->up = prev->up;
   bfun->flags = prev->flags;
 
@@ -474,11 +485,13 @@ ftrace_new_switch (struct btrace_function *prev,
 }
 
 /* Add a new function segment for a gap in the trace due to a decode error.
+   BTINFO is the branch trace information for the current thread.
    PREV is the chronologically preceding function segment.
    ERRCODE is the format-specific error code.  */
 
 static struct btrace_function *
-ftrace_new_gap (struct btrace_function *prev, int errcode)
+ftrace_new_gap (struct btrace_thread_info *btinfo,
+		struct btrace_function *prev, int errcode)
 {
   struct btrace_function *bfun;
 
@@ -487,7 +500,7 @@ ftrace_new_gap (struct btrace_function *prev, int errcode)
       && VEC_empty (btrace_insn_s, prev->insn))
     bfun = prev;
   else
-    bfun = ftrace_new_function (prev, NULL, NULL);
+    bfun = ftrace_new_function (btinfo, prev, NULL, NULL);
 
   bfun->errcode = errcode;
 
@@ -496,12 +509,14 @@ ftrace_new_gap (struct btrace_function *prev, int errcode)
   return bfun;
 }
 
-/* Update BFUN with respect to the instruction at PC.  This may create new
-   function segments.
+/* Update BFUN with respect to the instruction at PC.  BTINFO is the branch
+   trace information for the current thread.  This may create new function
+   segments.
    Return the chronologically latest function segment, never NULL.  */
 
 static struct btrace_function *
-ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
+ftrace_update_function (struct btrace_thread_info *btinfo,
+			struct btrace_function *bfun, CORE_ADDR pc)
 {
   struct bound_minimal_symbol bmfun;
   struct minimal_symbol *mfun;
@@ -520,7 +535,7 @@ ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
 
   /* If we didn't have a function or if we had a gap before, we create one.  */
   if (bfun == NULL || bfun->errcode != 0)
-    return ftrace_new_function (bfun, mfun, fun);
+    return ftrace_new_function (btinfo, bfun, mfun, fun);
 
   /* Check the last instruction, if we have one.
      We do this check first, since it allows us to fill in the call stack
@@ -548,9 +563,9 @@ ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
 	       different frame id's.  This will confuse stepping.  */
 	    fname = ftrace_print_function_name (bfun);
 	    if (strcmp (fname, "_dl_runtime_resolve") == 0)
-	      return ftrace_new_tailcall (bfun, mfun, fun);
+	      return ftrace_new_tailcall (btinfo, bfun, mfun, fun);
 
-	    return ftrace_new_return (bfun, mfun, fun);
+	    return ftrace_new_return (btinfo, bfun, mfun, fun);
 	  }
 
 	case BTRACE_INSN_CALL:
@@ -558,7 +573,7 @@ ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
 	  if (last->pc + last->size == pc)
 	    break;
 
-	  return ftrace_new_call (bfun, mfun, fun);
+	  return ftrace_new_call (btinfo, bfun, mfun, fun);
 
 	case BTRACE_INSN_JUMP:
 	  {
@@ -568,13 +583,13 @@ ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
 
 	    /* A jump to the start of a function is (typically) a tail call.  */
 	    if (start == pc)
-	      return ftrace_new_tailcall (bfun, mfun, fun);
+	      return ftrace_new_tailcall (btinfo, bfun, mfun, fun);
 
 	    /* If we can't determine the function for PC, we treat a jump at
 	       the end of the block as tail call if we're switching functions
 	       and as an intra-function branch if we don't.  */
 	    if (start == 0 && ftrace_function_switched (bfun, mfun, fun))
-	      return ftrace_new_tailcall (bfun, mfun, fun);
+	      return ftrace_new_tailcall (btinfo, bfun, mfun, fun);
 
 	    break;
 	  }
@@ -589,7 +604,7 @@ ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
 		    ftrace_print_function_name (bfun),
 		    ftrace_print_filename (bfun));
 
-      return ftrace_new_switch (bfun, mfun, fun);
+      return ftrace_new_switch (btinfo, bfun, mfun, fun);
     }
 
   return bfun;
@@ -1007,7 +1022,7 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
 	  if (block->end < pc)
 	    {
 	      /* Indicate the gap in the trace.  */
-	      end = ftrace_new_gap (end, BDE_BTS_OVERFLOW);
+	      end = ftrace_new_gap (btinfo, end, BDE_BTS_OVERFLOW);
 	      if (begin == NULL)
 		begin = end;
 
@@ -1020,7 +1035,7 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
 	      break;
 	    }
 
-	  end = ftrace_update_function (end, pc);
+	  end = ftrace_update_function (btinfo, end, pc);
 	  if (begin == NULL)
 	    begin = end;
 
@@ -1055,7 +1070,7 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
 	    {
 	      /* Indicate the gap in the trace.  We just added INSN so we're
 		 not at the beginning.  */
-	      end = ftrace_new_gap (end, BDE_BTS_INSN_SIZE);
+	      end = ftrace_new_gap (btinfo, end, BDE_BTS_INSN_SIZE);
 
 	      VEC_safe_push (bfun_s, *gaps, end);
 
@@ -1133,10 +1148,11 @@ pt_btrace_insn (const struct pt_insn &insn)
 }
 
 
-/* Add function branch trace using DECODER.  */
+/* Add function branch trace to BTINFO using DECODER.  */
 
 static void
-ftrace_add_pt (struct pt_insn_decoder *decoder,
+ftrace_add_pt (struct btrace_thread_info *btinfo,
+	       struct pt_insn_decoder *decoder,
 	       struct btrace_function **pbegin,
 	       struct btrace_function **pend, int *plevel,
 	       VEC (bfun_s) **gaps)
@@ -1176,7 +1192,7 @@ ftrace_add_pt (struct pt_insn_decoder *decoder,
 		 from some other instruction.  Indicate this as a trace gap.  */
 	      if (insn.enabled)
 		{
-		  *pend = end = ftrace_new_gap (end, BDE_PT_DISABLED);
+		  *pend = end = ftrace_new_gap (btinfo, end, BDE_PT_DISABLED);
 
 		  VEC_safe_push (bfun_s, *gaps, end);
 
@@ -1191,7 +1207,7 @@ ftrace_add_pt (struct pt_insn_decoder *decoder,
 	  /* Indicate trace overflows.  */
 	  if (insn.resynced)
 	    {
-	      *pend = end = ftrace_new_gap (end, BDE_PT_OVERFLOW);
+	      *pend = end = ftrace_new_gap (btinfo, end, BDE_PT_OVERFLOW);
 	      if (begin == NULL)
 		*pbegin = begin = end;
 
@@ -1204,7 +1220,7 @@ ftrace_add_pt (struct pt_insn_decoder *decoder,
 		       offset, insn.ip);
 	    }
 
-	  upd = ftrace_update_function (end, insn.ip);
+	  upd = ftrace_update_function (btinfo, end, insn.ip);
 	  if (upd != end)
 	    {
 	      *pend = end = upd;
@@ -1224,7 +1240,7 @@ ftrace_add_pt (struct pt_insn_decoder *decoder,
 	break;
 
       /* Indicate the gap in the trace.  */
-      *pend = end = ftrace_new_gap (end, errcode);
+      *pend = end = ftrace_new_gap (btinfo, end, errcode);
       if (begin == NULL)
 	*pbegin = begin = end;
 
@@ -1348,14 +1364,15 @@ btrace_compute_ftrace_pt (struct thread_info *tp,
 	error (_("Failed to configure the Intel Processor Trace decoder: "
 		 "%s."), pt_errstr (pt_errcode (errcode)));
 
-      ftrace_add_pt (decoder, &btinfo->begin, &btinfo->end, &level, gaps);
+      ftrace_add_pt (btinfo, decoder, &btinfo->begin, &btinfo->end, &level,
+		     gaps);
     }
   CATCH (error, RETURN_MASK_ALL)
     {
       /* Indicate a gap in the trace if we quit trace processing.  */
       if (error.reason == RETURN_QUIT && btinfo->end != NULL)
 	{
-	  btinfo->end = ftrace_new_gap (btinfo->end, BDE_PT_USER_QUIT);
+	  btinfo->end = ftrace_new_gap (btinfo, btinfo->end, BDE_PT_USER_QUIT);
 
 	  VEC_safe_push (bfun_s, *gaps, btinfo->end);
 	}
@@ -1850,19 +1867,13 @@ btrace_fetch (struct thread_info *tp)
   /* Compute the trace, provided we have any.  */
   if (!btrace_data_empty (&btrace))
     {
-      struct btrace_function *bfun;
-
       /* Store the raw trace data.  The stored data will be cleared in
 	 btrace_clear, so we always append the new trace.  */
       btrace_data_append (&btinfo->data, &btrace);
       btrace_maint_clear (btinfo);
 
-      btinfo->functions.clear ();
       btrace_clear_history (btinfo);
       btrace_compute_ftrace (tp, &btrace);
-
-      for (bfun = btinfo->begin; bfun != NULL; bfun = bfun->flow.next)
-	btinfo->functions.push_back (bfun);
     }
 
   do_cleanups (cleanup);
@@ -1874,7 +1885,6 @@ void
 btrace_clear (struct thread_info *tp)
 {
   struct btrace_thread_info *btinfo;
-  struct btrace_function *it, *trash;
 
   DEBUG ("clear thread %s (%s)", print_thread_id (tp),
 	 target_pid_to_str (tp->ptid));
@@ -1884,18 +1894,13 @@ btrace_clear (struct thread_info *tp)
   reinit_frame_cache ();
 
   btinfo = &tp->btrace;
-  btinfo->functions.clear ();
-
-  it = btinfo->begin;
-  while (it != NULL)
+  for (auto &bfun : btinfo->functions)
     {
-      trash = it;
-      it = it->flow.next;
-
-      VEC_free (btrace_insn_s, trash->insn);
-      xfree (trash);
+      VEC_free (btrace_insn_s, bfun->insn);
+      xfree (bfun);
     }
 
+  btinfo->functions.clear ();
   btinfo->begin = NULL;
   btinfo->end = NULL;
   btinfo->ngaps = 0;
-- 
2.7.4

^ permalink raw reply	[flat|nested] 36+ messages in thread

* [PATCH v3 10/12] btrace: Replace struct btrace_function::segment.
  2017-05-09  7:01 [PATCH v3 00/12] btrace: Turn linked list of function call segments into vector Tim Wiederhake
  2017-05-09  7:01 ` [PATCH v3 03/12] btrace: Add btinfo to instruction iterator Tim Wiederhake
@ 2017-05-09  7:01 ` Tim Wiederhake
  2017-05-10  4:14   ` Simon Marchi
  2017-05-09  7:01 ` [PATCH v3 08/12] btrace: Replace struct btrace_thread_info::up Tim Wiederhake
                   ` (9 subsequent siblings)
  11 siblings, 1 reply; 36+ messages in thread
From: Tim Wiederhake @ 2017-05-09  7:01 UTC (permalink / raw)
  To: gdb-patches; +Cc: markus.t.metzger

This used to hold a pair of pointers to the previous and next function segment
that belong to this function call.  Replace with a pair of indices into the
vector of function segments.

2017-05-09  Tim Wiederhake  <tim.wiederhake@intel.com>

gdb/ChangeLog:

	* btrace.c (ftrace_fixup_caller, ftrace_new_return, ftrace_connect_bfun,
	ftrace_bridge_gap): Replace references to btrace_function::segment
	with btrace_function::next and btrace_function::prev.
	* btrace.h: Remove struct btrace_func_link.
	(struct btrace_function): Replace pair of function segment pointers
	with pair of indices.
	* python/py-record-btrace.c (btpy_call_prev_sibling,
	btpy_call_next_sibling): Replace references to
	btrace_function::segment with btrace_function::next and
	btrace_function::prev.
	* record-btrace.c (record_btrace_frame_this_id): Same.

---
 gdb/btrace.c                  | 47 ++++++++++++++++++++++++++-----------------
 gdb/btrace.h                  | 17 ++++++----------
 gdb/python/py-record-btrace.c |  8 ++++----
 gdb/record-btrace.c           |  4 ++--
 4 files changed, 40 insertions(+), 36 deletions(-)

diff --git a/gdb/btrace.c b/gdb/btrace.c
index f57bbf9..921cb64 100644
--- a/gdb/btrace.c
+++ b/gdb/btrace.c
@@ -271,20 +271,29 @@ ftrace_update_caller (struct btrace_function *bfun,
 /* Fix up the caller for all segments of a function.  */
 
 static void
-ftrace_fixup_caller (struct btrace_function *bfun,
+ftrace_fixup_caller (struct btrace_thread_info *btinfo,
+		     struct btrace_function *bfun,
 		     struct btrace_function *caller,
 		     enum btrace_function_flag flags)
 {
-  struct btrace_function *prev, *next;
+  unsigned int prev, next;
 
+  prev = bfun->prev;
+  next = bfun->next;
   ftrace_update_caller (bfun, caller, flags);
 
   /* Update all function segments belonging to the same function.  */
-  for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
-    ftrace_update_caller (prev, caller, flags);
+  for (; prev != 0; prev = bfun->prev)
+    {
+      bfun = ftrace_find_call_by_number (btinfo, prev);
+      ftrace_update_caller (bfun, caller, flags);
+    }
 
-  for (next = bfun->segment.next; next != NULL; next = next->segment.next)
-    ftrace_update_caller (next, caller, flags);
+  for (; next != 0; next = bfun->next)
+    {
+      bfun = ftrace_find_call_by_number (btinfo, next);
+      ftrace_update_caller (bfun, caller, flags);
+    }
 }
 
 /* Add a new function segment for a call at the end of the trace.
@@ -414,10 +423,10 @@ ftrace_new_return (struct btrace_thread_info *btinfo,
     {
       /* The caller of PREV is the preceding btrace function segment in this
 	 function instance.  */
-      gdb_assert (caller->segment.next == NULL);
+      gdb_assert (caller->next == 0);
 
-      caller->segment.next = bfun;
-      bfun->segment.prev = caller;
+      caller->next = bfun->number;
+      bfun->prev = caller->number;
 
       /* Maintain the function level.  */
       bfun->level = caller->level;
@@ -449,7 +458,7 @@ ftrace_new_return (struct btrace_thread_info *btinfo,
 	  bfun->level = prev->level - 1;
 
 	  /* Fix up the call stack for PREV.  */
-	  ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);
+	  ftrace_fixup_caller (btinfo, prev, bfun, BFUN_UP_LINKS_TO_RET);
 
 	  ftrace_debug (bfun, "new return - no caller");
 	}
@@ -756,11 +765,11 @@ ftrace_connect_bfun (struct btrace_thread_info *btinfo,
   ftrace_debug (next, "..next");
 
   /* The function segments are not yet connected.  */
-  gdb_assert (prev->segment.next == NULL);
-  gdb_assert (next->segment.prev == NULL);
+  gdb_assert (prev->next == 0);
+  gdb_assert (next->prev == 0);
 
-  prev->segment.next = next;
-  next->segment.prev = prev;
+  prev->next = next->number;
+  next->prev = prev->number;
 
   /* We may have moved NEXT to a different function level.  */
   ftrace_fixup_level (btinfo, next, prev->level - next->level);
@@ -774,7 +783,7 @@ ftrace_connect_bfun (struct btrace_thread_info *btinfo,
       if (next != NULL)
 	{
 	  DEBUG_FTRACE ("using next's callers");
-	  ftrace_fixup_caller (prev, next, flags);
+	  ftrace_fixup_caller (btinfo, prev, next, flags);
 	}
     }
   else if (next->up == 0)
@@ -785,7 +794,7 @@ ftrace_connect_bfun (struct btrace_thread_info *btinfo,
       if (prev != NULL)
 	{
 	  DEBUG_FTRACE ("using prev's callers");
-	  ftrace_fixup_caller (next, prev, flags);
+	  ftrace_fixup_caller (btinfo, next, prev, flags);
 	}
     }
   else
@@ -813,7 +822,7 @@ ftrace_connect_bfun (struct btrace_thread_info *btinfo,
 	  DEBUG_FTRACE ("adding prev's tail calls to next");
 
 	  prev = ftrace_find_call_by_number (btinfo, prev->up);
-	  ftrace_fixup_caller (next, prev, prev_flags);
+	  ftrace_fixup_caller (btinfo, next, prev, prev_flags);
 
 	  for (; prev != NULL; prev = ftrace_find_call_by_number (btinfo,
 								  prev->up))
@@ -825,7 +834,7 @@ ftrace_connect_bfun (struct btrace_thread_info *btinfo,
 		  ftrace_debug (prev, "..top");
 		  ftrace_debug (caller, "..up");
 
-		  ftrace_fixup_caller (prev, caller, next_flags);
+		  ftrace_fixup_caller (btinfo, prev, caller, next_flags);
 
 		  /* If we skipped any tail calls, this may move CALLER to a
 		     different function level.
@@ -948,7 +957,7 @@ ftrace_bridge_gap (struct btrace_thread_info *btinfo,
 static void
 btrace_bridge_gaps (struct thread_info *tp, VEC (bfun_s) **gaps)
 {
-  struct btrace_thread_info *btinfo;
+  struct btrace_thread_info *btinfo = &tp->btrace;
   VEC (bfun_s) *remaining;
   struct cleanup *old_chain;
   int min_matches;
diff --git a/gdb/btrace.h b/gdb/btrace.h
index b1940bb..0acd2fb 100644
--- a/gdb/btrace.h
+++ b/gdb/btrace.h
@@ -85,13 +85,6 @@ struct btrace_insn
 typedef struct btrace_insn btrace_insn_s;
 DEF_VEC_O (btrace_insn_s);
 
-/* A doubly-linked list of branch trace function segments.  */
-struct btrace_func_link
-{
-  struct btrace_function *prev;
-  struct btrace_function *next;
-};
-
 /* Flags for btrace function segments.  */
 enum btrace_function_flag
 {
@@ -146,10 +139,12 @@ struct btrace_function
   struct minimal_symbol *msym;
   struct symbol *sym;
 
-  /* The previous and next segment belonging to the same function.
-     If a function calls another function, the former will have at least
-     two segments: one before the call and another after the return.  */
-  struct btrace_func_link segment;
+  /* The function segment numbers of the previous and next segment belonging to
+     the same function.  If a function calls another function, the former will
+     have at least two segments: one before the call and another after the
+     return.  Will be zero if there is no such function segment.  */
+  unsigned int prev;
+  unsigned int next;
 
   /* The function segment number of the directly preceding function segment in
      a (fake) call stack.  Will be zero if there is no such function segment in
diff --git a/gdb/python/py-record-btrace.c b/gdb/python/py-record-btrace.c
index 9dd2199..cd2be9f 100644
--- a/gdb/python/py-record-btrace.c
+++ b/gdb/python/py-record-btrace.c
@@ -416,11 +416,11 @@ recpy_bt_func_prev (PyObject *self, void *closure)
   if (func == NULL)
     return NULL;
 
-  if (func->segment.prev == NULL)
+  if (func->prev == 0)
     Py_RETURN_NONE;
 
   return recpy_func_new (((recpy_element_object *) self)->ptid,
-			 RECORD_METHOD_BTRACE, func->segment.prev->number);
+			 RECORD_METHOD_BTRACE, func->prev);
 }
 
 /* Implementation of RecordFunctionSegment.next [RecordFunctionSegment] for
@@ -434,11 +434,11 @@ recpy_bt_func_next (PyObject *self, void *closure)
   if (func == NULL)
     return NULL;
 
-  if (func->segment.next == NULL)
+  if (func->next == 0)
     Py_RETURN_NONE;
 
   return recpy_func_new (((recpy_element_object *) self)->ptid,
-			 RECORD_METHOD_BTRACE, func->segment.next->number);
+			 RECORD_METHOD_BTRACE, func->next);
 }
 
 /* Implementation of BtraceList.__len__ (self) -> int.  */
diff --git a/gdb/record-btrace.c b/gdb/record-btrace.c
index a27521c..a66d32a 100644
--- a/gdb/record-btrace.c
+++ b/gdb/record-btrace.c
@@ -1591,8 +1591,8 @@ record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
   bfun = cache->bfun;
   gdb_assert (bfun != NULL);
 
-  while (bfun->segment.prev != NULL)
-    bfun = bfun->segment.prev;
+  while (bfun->prev != 0)
+    bfun = cache->tp->btrace.functions[bfun->prev - 1];
 
   code = get_frame_func (this_frame);
   special = bfun->number;
-- 
2.7.4

^ permalink raw reply	[flat|nested] 36+ messages in thread

* [PATCH v3 03/12] btrace: Add btinfo to instruction iterator.
  2017-05-09  7:01 [PATCH v3 00/12] btrace: Turn linked list of function call segments into vector Tim Wiederhake
@ 2017-05-09  7:01 ` Tim Wiederhake
  2017-05-09  7:01 ` [PATCH v3 10/12] btrace: Replace struct btrace_thread_info::segment Tim Wiederhake
                   ` (10 subsequent siblings)
  11 siblings, 0 replies; 36+ messages in thread
From: Tim Wiederhake @ 2017-05-09  7:01 UTC (permalink / raw)
  To: gdb-patches; +Cc: markus.t.metzger

This will serve as the access path to the vector of function segments once
the FUNCTION pointer in struct btrace_insn_iterator is removed.

2017-05-09  Tim Wiederhake  <tim.wiederhake@intel.com>

gdb/ChangeLog:

	* btrace.c (btrace_insn_begin, btrace_insn_end,
	btrace_find_insn_by_number): Add btinfo to iterator.
	* btrace.h (struct btrace_insn_iterator): Add btinfo.

---
 gdb/btrace.c | 3 +++
 gdb/btrace.h | 3 +++
 2 files changed, 6 insertions(+)

diff --git a/gdb/btrace.c b/gdb/btrace.c
index 57788ac..c2d3730 100644
--- a/gdb/btrace.c
+++ b/gdb/btrace.c
@@ -2291,6 +2291,7 @@ btrace_insn_begin (struct btrace_insn_iterator *it,
   if (bfun == NULL)
     error (_("No trace."));
 
+  it->btinfo = btinfo;
   it->function = bfun;
   it->index = 0;
 }
@@ -2316,6 +2317,7 @@ btrace_insn_end (struct btrace_insn_iterator *it,
   if (length > 0)
     length -= 1;
 
+  it->btinfo = btinfo;
   it->function = bfun;
   it->index = length;
 }
@@ -2519,6 +2521,7 @@ btrace_find_insn_by_number (struct btrace_insn_iterator *it,
       break;
     }
 
+  it->btinfo = btinfo;
   it->function = bfun;
   it->index = number - bfun->insn_offset;
   return 1;
diff --git a/gdb/btrace.h b/gdb/btrace.h
index ab739ec..e567ef7 100644
--- a/gdb/btrace.h
+++ b/gdb/btrace.h
@@ -192,6 +192,9 @@ struct btrace_function
 /* A branch trace instruction iterator.  */
 struct btrace_insn_iterator
 {
+  /* The branch trace information for this thread.  Will never be NULL.  */
+  const struct btrace_thread_info *btinfo;
+
   /* The branch trace function segment containing the instruction.
      Will never be NULL.  */
   const struct btrace_function *function;
-- 
2.7.4

^ permalink raw reply	[flat|nested] 36+ messages in thread

* [PATCH v3 01/12] btrace: Use std::vector in struct btrace_thread_information.
  2017-05-09  7:01 [PATCH v3 00/12] btrace: Turn linked list of function call segments into vector Tim Wiederhake
                   ` (7 preceding siblings ...)
  2017-05-09  7:01 ` [PATCH v3 12/12] btrace: Store function segments as objects Tim Wiederhake
@ 2017-05-09  7:01 ` Tim Wiederhake
  2017-05-09 12:10   ` Simon Marchi
  2017-05-09  7:01 ` [PATCH v3 02/12] btrace: Transfer ownership of pointers Tim Wiederhake
                   ` (2 subsequent siblings)
  11 siblings, 1 reply; 36+ messages in thread
From: Tim Wiederhake @ 2017-05-09  7:01 UTC (permalink / raw)
  To: gdb-patches; +Cc: markus.t.metzger

2017-05-09  Tim Wiederhake  <tim.wiederhake@intel.com>

gdb/ChangeLog:
	* btrace.c (btrace_fetch, btrace_clear, btrace_find_insn_by_number):
	Replace VEC_* with std::vector functions.
	* btrace.h: Add include: vector. Remove typedef for DEF_VEC_P.
	(struct btrace_thread_info)<functions>: Change type to std::vector.

---
 gdb/btrace.c | 17 ++++++++---------
 gdb/btrace.h |  7 +++----
 2 files changed, 11 insertions(+), 13 deletions(-)

diff --git a/gdb/btrace.c b/gdb/btrace.c
index 20c977a..46a4d8d 100644
--- a/gdb/btrace.c
+++ b/gdb/btrace.c
@@ -1857,12 +1857,12 @@ btrace_fetch (struct thread_info *tp)
       btrace_data_append (&btinfo->data, &btrace);
       btrace_maint_clear (btinfo);
 
-      VEC_truncate (btrace_fun_p, btinfo->functions, 0);
+      btinfo->functions.clear ();
       btrace_clear_history (btinfo);
       btrace_compute_ftrace (tp, &btrace);
 
       for (bfun = btinfo->begin; bfun != NULL; bfun = bfun->flow.next)
-	VEC_safe_push (btrace_fun_p, btinfo->functions, bfun);
+	btinfo->functions.push_back (bfun);
     }
 
   do_cleanups (cleanup);
@@ -1884,8 +1884,7 @@ btrace_clear (struct thread_info *tp)
   reinit_frame_cache ();
 
   btinfo = &tp->btrace;
-
-  VEC_free (btrace_fun_p, btinfo->functions);
+  btinfo->functions.clear ();
 
   it = btinfo->begin;
   while (it != NULL)
@@ -2480,16 +2479,16 @@ btrace_find_insn_by_number (struct btrace_insn_iterator *it,
   const struct btrace_function *bfun;
   unsigned int upper, lower;
 
-  if (VEC_empty (btrace_fun_p, btinfo->functions))
+  if (btinfo->functions.empty ())
       return 0;
 
   lower = 0;
-  bfun = VEC_index (btrace_fun_p, btinfo->functions, lower);
+  bfun = btinfo->functions[lower];
   if (number < bfun->insn_offset)
     return 0;
 
-  upper = VEC_length (btrace_fun_p, btinfo->functions) - 1;
-  bfun = VEC_index (btrace_fun_p, btinfo->functions, upper);
+  upper = btinfo->functions.size () - 1;
+  bfun = btinfo->functions[upper];
   if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
     return 0;
 
@@ -2498,7 +2497,7 @@ btrace_find_insn_by_number (struct btrace_insn_iterator *it,
     {
       const unsigned int average = lower + (upper - lower) / 2;
 
-      bfun = VEC_index (btrace_fun_p, btinfo->functions, average);
+      bfun = btinfo->functions[average];
 
       if (number < bfun->insn_offset)
 	{
diff --git a/gdb/btrace.h b/gdb/btrace.h
index 07ed10c..ab739ec 100644
--- a/gdb/btrace.h
+++ b/gdb/btrace.h
@@ -34,6 +34,8 @@
 #  include <intel-pt.h>
 #endif
 
+#include <vector>
+
 struct thread_info;
 struct btrace_function;
 
@@ -187,9 +189,6 @@ struct btrace_function
   btrace_function_flags flags;
 };
 
-typedef struct btrace_function *btrace_fun_p;
-DEF_VEC_P (btrace_fun_p);
-
 /* A branch trace instruction iterator.  */
 struct btrace_insn_iterator
 {
@@ -342,7 +341,7 @@ struct btrace_thread_info
 
   /* Vector of pointer to decoded function segments.  These are in execution
      order with the first element == BEGIN and the last element == END.  */
-  VEC (btrace_fun_p) *functions;
+  std::vector<btrace_function *> functions;
 
   /* The function level offset.  When added to each function's LEVEL,
      this normalizes the function levels such that the smallest level
-- 
2.7.4

^ permalink raw reply	[flat|nested] 36+ messages in thread

* [PATCH v3 04/12] btrace: Use function segment index in call iterator.
  2017-05-09  7:01 [PATCH v3 00/12] btrace: Turn linked list of function call segments into vector Tim Wiederhake
                   ` (4 preceding siblings ...)
  2017-05-09  7:01 ` [PATCH v3 05/12] btrace: Use function segment index in insn iterator Tim Wiederhake
@ 2017-05-09  7:01 ` Tim Wiederhake
  2017-05-09 12:50   ` Simon Marchi
  2017-05-09  7:01 ` [PATCH v3 07/12] btrace: Remove struct btrace_thread_info::{begin,end} Tim Wiederhake
                   ` (5 subsequent siblings)
  11 siblings, 1 reply; 36+ messages in thread
From: Tim Wiederhake @ 2017-05-09  7:01 UTC (permalink / raw)
  To: gdb-patches; +Cc: markus.t.metzger

Remove FUNCTION pointer in struct btrace_call_iterator and use an index into
the list of function segments instead.

2017-05-09  Tim Wiederhake  <tim.wiederhake@intel.com>

gdb/ChangeLog:

	* btrace.c (btrace_ends_with_single_insn): New function.
	(btrace_call_get, btrace_call_number, btrace_call_begin,
	btrace_call_end, btrace_call_next, btrace_call_prev,
	btrace_find_call_by_number): Use index into call segment vector
	instead of pointer.
	(btrace_call_cmp): Simplify.
	* btrace.h (struct btrace_call_iterator): Replace function call segment
	pointer with index into vector.
	* record-btrace.c (record_btrace_call_history): Use index instead of
	pointer.

---
 gdb/btrace.c        | 198 ++++++++++++++++++++++------------------------------
 gdb/btrace.h        |   5 +-
 gdb/record-btrace.c |   2 +-
 3 files changed, 87 insertions(+), 118 deletions(-)

diff --git a/gdb/btrace.c b/gdb/btrace.c
index c2d3730..5615aad 100644
--- a/gdb/btrace.c
+++ b/gdb/btrace.c
@@ -2527,12 +2527,33 @@ btrace_find_insn_by_number (struct btrace_insn_iterator *it,
   return 1;
 }
 
+/* Returns a non-zero value if the recording ends with a function segment that
+   contains only a single (i.e. the current) instruction.  */
+
+static int
+btrace_ends_with_single_insn (const struct btrace_thread_info *btinfo)
+{
+  const btrace_function *bfun;
+
+  if (btinfo->functions.empty ())
+    return 0;
+
+  bfun = btinfo->functions.back ();
+  if (bfun->errcode != 0)
+    return 0;
+
+  return ftrace_call_num_insn (bfun) == 1;
+}
+
 /* See btrace.h.  */
 
 const struct btrace_function *
 btrace_call_get (const struct btrace_call_iterator *it)
 {
-  return it->function;
+  if (it->index >= it->btinfo->functions.size ())
+    return NULL;
+
+  return it->btinfo->functions[it->index];
 }
 
 /* See btrace.h.  */
@@ -2540,28 +2561,14 @@ btrace_call_get (const struct btrace_call_iterator *it)
 unsigned int
 btrace_call_number (const struct btrace_call_iterator *it)
 {
-  const struct btrace_thread_info *btinfo;
-  const struct btrace_function *bfun;
-  unsigned int insns;
+  const unsigned int length = it->btinfo->functions.size ();
 
-  btinfo = it->btinfo;
-  bfun = it->function;
-  if (bfun != NULL)
-    return bfun->number;
-
-  /* For the end iterator, i.e. bfun == NULL, we return one more than the
-     number of the last function.  */
-  bfun = btinfo->end;
-  insns = VEC_length (btrace_insn_s, bfun->insn);
+  /* If the last function segment contains only a single instruction (i.e. the
+     current instruction), skip it.  */
+  if ((it->index == length) && btrace_ends_with_single_insn (it->btinfo))
+    return length;
 
-  /* If the function contains only a single instruction (i.e. the current
-     instruction), it will be skipped and its number is already the number
-     we seek.  */
-  if (insns == 1)
-    return bfun->number;
-
-  /* Otherwise, return one more than the number of the last function.  */
-  return bfun->number + 1;
+  return it->index + 1;
 }
 
 /* See btrace.h.  */
@@ -2570,14 +2577,11 @@ void
 btrace_call_begin (struct btrace_call_iterator *it,
 		   const struct btrace_thread_info *btinfo)
 {
-  const struct btrace_function *bfun;
-
-  bfun = btinfo->begin;
-  if (bfun == NULL)
+  if (btinfo->functions.empty ())
     error (_("No trace."));
 
   it->btinfo = btinfo;
-  it->function = bfun;
+  it->index = 0;
 }
 
 /* See btrace.h.  */
@@ -2586,14 +2590,11 @@ void
 btrace_call_end (struct btrace_call_iterator *it,
 		 const struct btrace_thread_info *btinfo)
 {
-  const struct btrace_function *bfun;
-
-  bfun = btinfo->end;
-  if (bfun == NULL)
+  if (btinfo->functions.empty ())
     error (_("No trace."));
 
   it->btinfo = btinfo;
-  it->function = NULL;
+  it->index = btinfo->functions.size ();
 }
 
 /* See btrace.h.  */
@@ -2601,35 +2602,35 @@ btrace_call_end (struct btrace_call_iterator *it,
 unsigned int
 btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
 {
-  const struct btrace_function *bfun;
-  unsigned int steps;
+  const unsigned int length = it->btinfo->functions.size ();
 
-  bfun = it->function;
-  steps = 0;
-  while (bfun != NULL)
+  if (it->index + stride < length - 1)
+    /* Default case: Simply advance the iterator.  */
+    it->index += stride;
+  else if (it->index + stride == length - 1)
     {
-      const struct btrace_function *next;
-      unsigned int insns;
-
-      next = bfun->flow.next;
-      if (next == NULL)
-	{
-	  /* Ignore the last function if it only contains a single
-	     (i.e. the current) instruction.  */
-	  insns = VEC_length (btrace_insn_s, bfun->insn);
-	  if (insns == 1)
-	    steps -= 1;
-	}
-
-      if (stride == steps)
-	break;
+      /* We land exactly at the last function segment.  If it contains only one
+	 instruction (i.e. the current instruction) it is not actually part of
+	 the trace.  */
+      if (btrace_ends_with_single_insn (it->btinfo))
+	it->index = length;
+      else
+	it->index = length - 1;
+    }
+  else
+    {
+      /* We land past the last function segment and have to adjust the stride.
+	 If the last function segment contains only one instruction (i.e. the
+	 current instruction) it is not actually part of the trace.  */
+      if (btrace_ends_with_single_insn (it->btinfo))
+	stride = length - it->index - 1;
+      else
+	stride = length - it->index;
 
-      bfun = next;
-      steps += 1;
+      it->index = length;
     }
 
-  it->function = bfun;
-  return steps;
+  return stride;
 }
 
 /* See btrace.h.  */
@@ -2637,48 +2638,33 @@ btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
 unsigned int
 btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
 {
-  const struct btrace_thread_info *btinfo;
-  const struct btrace_function *bfun;
-  unsigned int steps;
-
-  bfun = it->function;
-  steps = 0;
+  const unsigned int length = it->btinfo->functions.size ();
+  int steps = 0;
 
-  if (bfun == NULL)
-    {
-      unsigned int insns;
-
-      btinfo = it->btinfo;
-      bfun = btinfo->end;
-      if (bfun == NULL)
-	return 0;
-
-      /* Ignore the last function if it only contains a single
-	 (i.e. the current) instruction.  */
-      insns = VEC_length (btrace_insn_s, bfun->insn);
-      if (insns == 1)
-	bfun = bfun->flow.prev;
+  gdb_assert (it->index <= length);
 
-      if (bfun == NULL)
-	return 0;
-
-      steps += 1;
-    }
+  if (stride == 0 || it->index == 0)
+    return 0;
 
-  while (steps < stride)
+  /* If we are at the end, the first step is a special case.  If the last
+     function segment contains only one instruction (i.e. the current
+     instruction) it is not actually part of the trace.  To be able to step
+     over this instruction, we need at least one more function segment.  */
+  if ((it->index == length)  && (length > 1))
     {
-      const struct btrace_function *prev;
-
-      prev = bfun->flow.prev;
-      if (prev == NULL)
-	break;
+      if (btrace_ends_with_single_insn (it->btinfo))
+	it->index = length - 2;
+      else
+	it->index = length - 1;
 
-      bfun = prev;
-      steps += 1;
+      steps = 1;
+      stride -= 1;
     }
 
-  it->function = bfun;
-  return steps;
+  stride = std::min (stride, it->index);
+
+  it->index -= stride;
+  return steps + stride;
 }
 
 /* See btrace.h.  */
@@ -2687,12 +2673,8 @@ int
 btrace_call_cmp (const struct btrace_call_iterator *lhs,
 		 const struct btrace_call_iterator *rhs)
 {
-  unsigned int lnum, rnum;
-
-  lnum = btrace_call_number (lhs);
-  rnum = btrace_call_number (rhs);
-
-  return (int) (lnum - rnum);
+  gdb_assert (lhs->btinfo == rhs->btinfo);
+  return (int) (lhs->index - rhs->index);
 }
 
 /* See btrace.h.  */
@@ -2702,26 +2684,14 @@ btrace_find_call_by_number (struct btrace_call_iterator *it,
 			    const struct btrace_thread_info *btinfo,
 			    unsigned int number)
 {
-  const struct btrace_function *bfun;
+  const unsigned int length = btinfo->functions.size ();
 
-  for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
-    {
-      unsigned int bnum;
-
-      bnum = bfun->number;
-      if (number == bnum)
-	{
-	  it->btinfo = btinfo;
-	  it->function = bfun;
-	  return 1;
-	}
-
-      /* Functions are ordered and numbered consecutively.  We could bail out
-	 earlier.  On the other hand, it is very unlikely that we search for
-	 a nonexistent function.  */
-  }
+  if ((number == 0) || (number > length))
+    return 0;
 
-  return 0;
+  it->btinfo = btinfo;
+  it->index = number - 1;
+  return 1;
 }
 
 /* See btrace.h.  */
diff --git a/gdb/btrace.h b/gdb/btrace.h
index e567ef7..ef2c781 100644
--- a/gdb/btrace.h
+++ b/gdb/btrace.h
@@ -209,9 +209,8 @@ struct btrace_call_iterator
   /* The branch trace information for this thread.  Will never be NULL.  */
   const struct btrace_thread_info *btinfo;
 
-  /* The branch trace function segment.
-     This will be NULL for the iterator pointing to the end of the trace.  */
-  const struct btrace_function *function;
+  /* The index of the function segment in BTINFO->FUNCTIONS.  */
+  unsigned int index;
 };
 
 /* Branch trace iteration state for "record instruction-history".  */
diff --git a/gdb/record-btrace.c b/gdb/record-btrace.c
index d4f1bcf..86a4b1e 100644
--- a/gdb/record-btrace.c
+++ b/gdb/record-btrace.c
@@ -1101,8 +1101,8 @@ record_btrace_call_history (struct target_ops *self, int size, int int_flags)
       replay = btinfo->replay;
       if (replay != NULL)
 	{
-	  begin.function = replay->function;
 	  begin.btinfo = btinfo;
+	  begin.index = replay->function->number - 1;
 	}
       else
 	btrace_call_end (&begin, btinfo);
-- 
2.7.4

^ permalink raw reply	[flat|nested] 36+ messages in thread

* [PATCH v3 00/12] btrace: Turn linked list of function call segments into vector
@ 2017-05-09  7:01 Tim Wiederhake
  2017-05-09  7:01 ` [PATCH v3 03/12] btrace: Add btinfo to instruction iterator Tim Wiederhake
                   ` (11 more replies)
  0 siblings, 12 replies; 36+ messages in thread
From: Tim Wiederhake @ 2017-05-09  7:01 UTC (permalink / raw)
  To: gdb-patches; +Cc: markus.t.metzger

Hi all,

this series removes the extra list of btrace function call segments in struct
btrace_thread_info.  To achieve this, the doubly linked list of function call
segments in struct btrace_thread_info is replaced by a std::vector.

V1 of this series can be found here:
https://sourceware.org/ml/gdb-patches/2017-02/msg00482.html

V2 of this series can be found here:
https://sourceware.org/ml/gdb-patches/2017-05/msg00168.html

Changes since V2:
V2 used a GDB vector to hold the function call segments.

Regards,
Tim


Tim Wiederhake (12):
  btrace: Use std::vector in struct btrace_thread_information.
  btrace: Transfer ownership of pointers.
  btrace: Add btinfo to instruction iterator.
  btrace: Use function segment index in call iterator.
  btrace: Use function segment index in insn iterator.
  btrace: Remove constant arguments.
  btrace: Remove struct btrace_thread_info::{begin,end}.
  btrace: Replace struct btrace_thread_info::up.
  btrace: Remove struct btrace_thread_info::flow.
  btrace: Replace struct btrace_thread_info::segment.
  btrace: Remove bfun_s vector.
  btrace: Store function segments as objects.

 gdb/btrace.c                  | 860 +++++++++++++++++++++---------------------
 gdb/btrace.h                  |  62 ++-
 gdb/python/py-record-btrace.c |  12 +-
 gdb/record-btrace.c           |  30 +-
 4 files changed, 474 insertions(+), 490 deletions(-)

-- 
2.7.4

^ permalink raw reply	[flat|nested] 36+ messages in thread

* [PATCH v3 11/12] btrace: Remove bfun_s vector.
  2017-05-09  7:01 [PATCH v3 00/12] btrace: Turn linked list of function call segments into vector Tim Wiederhake
                   ` (2 preceding siblings ...)
  2017-05-09  7:01 ` [PATCH v3 08/12] btrace: Replace struct btrace_thread_info::up Tim Wiederhake
@ 2017-05-09  7:01 ` Tim Wiederhake
  2017-05-10  4:27   ` Simon Marchi
  2017-05-09  7:01 ` [PATCH v3 05/12] btrace: Use function segment index in insn iterator Tim Wiederhake
                   ` (7 subsequent siblings)
  11 siblings, 1 reply; 36+ messages in thread
From: Tim Wiederhake @ 2017-05-09  7:01 UTC (permalink / raw)
  To: gdb-patches; +Cc: markus.t.metzger

2017-05-09  Tim Wiederhake  <tim.wiederhake@intel.com>

gdb/ChangeLog:

	* btrace.c: Remove typedef bfun_s.
	(ftrace_new_gap): Directly add gaps to the list of gaps.
	(btrace_bridge_gaps, btrace_compute_ftrace_bts, pt_btrace_insn_flags,
	ftrace_add_pt, btrace_compute_ftrace_pt, btrace_compute_ftrace_1,
	btrace_finalize_ftrace, btrace_compute_ftrace): Use std::vector
	instead of gdb VEC.

---
 gdb/btrace.c | 107 ++++++++++++++++++++---------------------------------------
 1 file changed, 36 insertions(+), 71 deletions(-)

diff --git a/gdb/btrace.c b/gdb/btrace.c
index 921cb64..7b82000 100644
--- a/gdb/btrace.c
+++ b/gdb/btrace.c
@@ -49,10 +49,6 @@ static struct cmd_list_element *maint_btrace_pt_show_cmdlist;
 /* Control whether to skip PAD packets when computing the packet history.  */
 static int maint_btrace_pt_skip_pad = 1;
 
-/* A vector of function segments.  */
-typedef struct btrace_function * bfun_s;
-DEF_VEC_P (bfun_s);
-
 static void btrace_add_pc (struct thread_info *tp);
 
 /* Print a record debug message.  Use do ... while (0) to avoid ambiguities
@@ -512,7 +508,8 @@ ftrace_new_switch (struct btrace_thread_info *btinfo,
    ERRCODE is the format-specific error code.  */
 
 static struct btrace_function *
-ftrace_new_gap (struct btrace_thread_info *btinfo, int errcode)
+ftrace_new_gap (struct btrace_thread_info *btinfo, int errcode,
+		std::vector<unsigned int> &gaps)
 {
   struct btrace_function *bfun;
 
@@ -527,15 +524,15 @@ ftrace_new_gap (struct btrace_thread_info *btinfo, int errcode)
     }
 
   bfun->errcode = errcode;
+  gaps.push_back (bfun->number);
 
   ftrace_debug (bfun, "new gap");
 
   return bfun;
 }
 
-/* Update BFUN with respect to the instruction at PC.  BTINFO is the branch
-   trace information for the current thread.  This may create new function
-   segments.
+/* Update the current function segment at the end of the trace in BTINFO with
+   respect to the instruction at PC.  This may create new function segments.
    Return the chronologically latest function segment, never NULL.  */
 
 static struct btrace_function *
@@ -955,19 +952,14 @@ ftrace_bridge_gap (struct btrace_thread_info *btinfo,
    function segments that are separated by the gap.  */
 
 static void
-btrace_bridge_gaps (struct thread_info *tp, VEC (bfun_s) **gaps)
+btrace_bridge_gaps (struct thread_info *tp, std::vector<unsigned int> &gaps)
 {
   struct btrace_thread_info *btinfo = &tp->btrace;
-  VEC (bfun_s) *remaining;
-  struct cleanup *old_chain;
+  std::vector<unsigned int> remaining;
   int min_matches;
 
   DEBUG ("bridge gaps");
 
-  btinfo = &tp->btrace;
-  remaining = NULL;
-  old_chain = make_cleanup (VEC_cleanup (bfun_s), &remaining);
-
   /* We require a minimum amount of matches for bridging a gap.  The number of
      required matches will be lowered with each iteration.
 
@@ -978,16 +970,15 @@ btrace_bridge_gaps (struct thread_info *tp, VEC (bfun_s) **gaps)
     {
       /* Let's try to bridge as many gaps as we can.  In some cases, we need to
 	 skip a gap and revisit it again after we closed later gaps.  */
-      while (!VEC_empty (bfun_s, *gaps))
+      while (!gaps.empty ())
 	{
-	  struct btrace_function *gap;
-	  unsigned int idx;
-
-	  for (idx = 0; VEC_iterate (bfun_s, *gaps, idx, gap); ++idx)
+	  for (const auto& number : gaps)
 	    {
-	      struct btrace_function *lhs, *rhs;
+	      struct btrace_function *gap, *lhs, *rhs;
 	      int bridged;
 
+	      gap = ftrace_find_call_by_number (btinfo, number);
+
 	      /* We may have a sequence of gaps if we run from one error into
 		 the next as we try to re-sync onto the trace stream.  Ignore
 		 all but the leftmost gap in such a sequence.
@@ -1012,28 +1003,24 @@ btrace_bridge_gaps (struct thread_info *tp, VEC (bfun_s) **gaps)
 		 If we just pushed them to the end of GAPS we would risk an
 		 infinite loop in case we simply cannot bridge a gap.  */
 	      if (bridged == 0)
-		VEC_safe_push (bfun_s, remaining, gap);
+		remaining.push_back (number);
 	    }
 
 	  /* Let's see if we made any progress.  */
-	  if (VEC_length (bfun_s, remaining) == VEC_length (bfun_s, *gaps))
+	  if (remaining.size () == gaps.size ())
 	    break;
 
-	  VEC_free (bfun_s, *gaps);
-
-	  *gaps = remaining;
-	  remaining = NULL;
+	  gaps.clear ();
+	  gaps.swap (remaining);
 	}
 
       /* We get here if either GAPS is empty or if GAPS equals REMAINING.  */
-      if (VEC_empty (bfun_s, *gaps))
+      if (gaps.empty ())
 	break;
 
-      VEC_free (bfun_s, remaining);
+      remaining.clear ();
     }
 
-  do_cleanups (old_chain);
-
   /* We may omit this in some cases.  Not sure it is worth the extra
      complication, though.  */
   ftrace_compute_global_level_offset (btinfo);
@@ -1044,7 +1031,7 @@ btrace_bridge_gaps (struct thread_info *tp, VEC (bfun_s) **gaps)
 static void
 btrace_compute_ftrace_bts (struct thread_info *tp,
 			   const struct btrace_data_bts *btrace,
-			   VEC (bfun_s) **gaps)
+			   std::vector<unsigned int> &gaps)
 {
   struct btrace_thread_info *btinfo;
   struct gdbarch *gdbarch;
@@ -1080,9 +1067,7 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
 	  if (block->end < pc)
 	    {
 	      /* Indicate the gap in the trace.  */
-	      bfun = ftrace_new_gap (btinfo, BDE_BTS_OVERFLOW);
-
-	      VEC_safe_push (bfun_s, *gaps, bfun);
+	      bfun = ftrace_new_gap (btinfo, BDE_BTS_OVERFLOW, gaps);
 
 	      warning (_("Recorded trace may be corrupted at instruction "
 			 "%u (pc = %s)."), bfun->insn_offset - 1,
@@ -1124,9 +1109,7 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
 	    {
 	      /* Indicate the gap in the trace.  We just added INSN so we're
 		 not at the beginning.  */
-	      bfun = ftrace_new_gap (btinfo, BDE_BTS_INSN_SIZE);
-
-	      VEC_safe_push (bfun_s, *gaps, bfun);
+	      bfun = ftrace_new_gap (btinfo, BDE_BTS_INSN_SIZE, gaps);
 
 	      warning (_("Recorded trace may be incomplete at instruction %u "
 			 "(pc = %s)."), bfun->insn_offset - 1,
@@ -1205,7 +1188,7 @@ static void
 ftrace_add_pt (struct btrace_thread_info *btinfo,
 	       struct pt_insn_decoder *decoder,
 	       int *plevel,
-	       VEC (bfun_s) **gaps)
+	       std::vector<unsigned int> &gaps)
 {
   struct btrace_function *bfun;
   uint64_t offset;
@@ -1240,9 +1223,7 @@ ftrace_add_pt (struct btrace_thread_info *btinfo,
 		 from some other instruction.  Indicate this as a trace gap.  */
 	      if (insn.enabled)
 		{
-		  bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED);
-
-		  VEC_safe_push (bfun_s, *gaps, bfun);
+		  bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED, gaps);
 
 		  pt_insn_get_offset (decoder, &offset);
 
@@ -1255,9 +1236,7 @@ ftrace_add_pt (struct btrace_thread_info *btinfo,
 	  /* Indicate trace overflows.  */
 	  if (insn.resynced)
 	    {
-	      bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW);
-
-	      VEC_safe_push (bfun_s, *gaps, bfun);
+	      bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW, gaps);
 
 	      pt_insn_get_offset (decoder, &offset);
 
@@ -1279,9 +1258,7 @@ ftrace_add_pt (struct btrace_thread_info *btinfo,
 	break;
 
       /* Indicate the gap in the trace.  */
-      bfun = ftrace_new_gap (btinfo, errcode);
-
-      VEC_safe_push (bfun_s, *gaps, bfun);
+      bfun = ftrace_new_gap (btinfo, errcode, gaps);
 
       pt_insn_get_offset (decoder, &offset);
 
@@ -1357,7 +1334,7 @@ static void btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
 static void
 btrace_compute_ftrace_pt (struct thread_info *tp,
 			  const struct btrace_data_pt *btrace,
-			  VEC (bfun_s) **gaps)
+			  std::vector<unsigned int> &gaps)
 {
   struct btrace_thread_info *btinfo;
   struct pt_insn_decoder *decoder;
@@ -1410,13 +1387,7 @@ btrace_compute_ftrace_pt (struct thread_info *tp,
     {
       /* Indicate a gap in the trace if we quit trace processing.  */
       if (error.reason == RETURN_QUIT && !btinfo->functions.empty ())
-	{
-	  struct btrace_function *bfun;
-
-	  bfun = ftrace_new_gap (btinfo, BDE_PT_USER_QUIT);
-
-	  VEC_safe_push (bfun_s, *gaps, bfun);
-	}
+	ftrace_new_gap (btinfo, BDE_PT_USER_QUIT, gaps);
 
       btrace_finalize_ftrace_pt (decoder, tp, level);
 
@@ -1432,7 +1403,7 @@ btrace_compute_ftrace_pt (struct thread_info *tp,
 static void
 btrace_compute_ftrace_pt (struct thread_info *tp,
 			  const struct btrace_data_pt *btrace,
-			  VEC (bfun_s) **gaps)
+			  std::vector<unsigned int> &gaps)
 {
   internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
 }
@@ -1444,7 +1415,7 @@ btrace_compute_ftrace_pt (struct thread_info *tp,
 
 static void
 btrace_compute_ftrace_1 (struct thread_info *tp, struct btrace_data *btrace,
-			 VEC (bfun_s) **gaps)
+			 std::vector<unsigned int> &gaps)
 {
   DEBUG ("compute ftrace");
 
@@ -1466,11 +1437,11 @@ btrace_compute_ftrace_1 (struct thread_info *tp, struct btrace_data *btrace,
 }
 
 static void
-btrace_finalize_ftrace (struct thread_info *tp, VEC (bfun_s) **gaps)
+btrace_finalize_ftrace (struct thread_info *tp, std::vector<unsigned int> &gaps)
 {
-  if (!VEC_empty (bfun_s, *gaps))
+  if (!gaps.empty ())
     {
-      tp->btrace.ngaps += VEC_length (bfun_s, *gaps);
+      tp->btrace.ngaps += gaps.size ();
       btrace_bridge_gaps (tp, gaps);
     }
 }
@@ -1478,27 +1449,21 @@ btrace_finalize_ftrace (struct thread_info *tp, VEC (bfun_s) **gaps)
 static void
 btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
 {
-  VEC (bfun_s) *gaps;
-  struct cleanup *old_chain;
-
-  gaps = NULL;
-  old_chain = make_cleanup (VEC_cleanup (bfun_s), &gaps);
+  std::vector<unsigned int> gaps;
 
   TRY
     {
-      btrace_compute_ftrace_1 (tp, btrace, &gaps);
+      btrace_compute_ftrace_1 (tp, btrace, gaps);
     }
   CATCH (error, RETURN_MASK_ALL)
     {
-      btrace_finalize_ftrace (tp, &gaps);
+      btrace_finalize_ftrace (tp, gaps);
 
       throw_exception (error);
     }
   END_CATCH
 
-  btrace_finalize_ftrace (tp, &gaps);
-
-  do_cleanups (old_chain);
+  btrace_finalize_ftrace (tp, gaps);
 }
 
 /* Add an entry for the current PC.  */
-- 
2.7.4

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH v3 01/12] btrace: Use std::vector in struct  btrace_thread_information.
  2017-05-09  7:01 ` [PATCH v3 01/12] btrace: Use std::vector in struct btrace_thread_information Tim Wiederhake
@ 2017-05-09 12:10   ` Simon Marchi
  0 siblings, 0 replies; 36+ messages in thread
From: Simon Marchi @ 2017-05-09 12:10 UTC (permalink / raw)
  To: Tim Wiederhake; +Cc: gdb-patches, markus.t.metzger

On 2017-05-09 02:55, Tim Wiederhake wrote:
> 2017-05-09  Tim Wiederhake  <tim.wiederhake@intel.com>
> 
> gdb/ChangeLog:
> 	* btrace.c (btrace_fetch, btrace_clear, btrace_find_insn_by_number):
> 	Replace VEC_* with std::vector functions.
> 	* btrace.h: Add include: vector. Remove typedef for DEF_VEC_P.
> 	(struct btrace_thread_info)<functions>: Change type to std::vector.

LGTM.

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH v3 02/12] btrace: Transfer ownership of pointers.
  2017-05-09  7:01 ` [PATCH v3 02/12] btrace: Transfer ownership of pointers Tim Wiederhake
@ 2017-05-09 12:21   ` Simon Marchi
  0 siblings, 0 replies; 36+ messages in thread
From: Simon Marchi @ 2017-05-09 12:21 UTC (permalink / raw)
  To: Tim Wiederhake; +Cc: gdb-patches, markus.t.metzger

On 2017-05-09 02:55, Tim Wiederhake wrote:
> Directly insert new btrace_function pointers into the vector and have 
> the
> vector own these pointers.  This allows us to later retrieve these 
> objects by
> their number directly after creation whereas at the moment we have to 
> wait
> until the vector is fully populated.
> 
> This requires to pull btrace_thread_info through different functions 
> but
> cleans up the code for freeing the trace.

LGTM.

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH v3 04/12] btrace: Use function segment index in call  iterator.
  2017-05-09  7:01 ` [PATCH v3 04/12] btrace: Use function segment index in call iterator Tim Wiederhake
@ 2017-05-09 12:50   ` Simon Marchi
  2017-05-09 13:14     ` Wiederhake, Tim
  0 siblings, 1 reply; 36+ messages in thread
From: Simon Marchi @ 2017-05-09 12:50 UTC (permalink / raw)
  To: Tim Wiederhake; +Cc: gdb-patches, markus.t.metzger

On 2017-05-09 02:55, Tim Wiederhake wrote:
> Remove FUNCTION pointer in struct btrace_call_iterator and use an index 
> into
> the list of function segments instead.
> 
> 2017-05-09  Tim Wiederhake  <tim.wiederhake@intel.com>
> 
> gdb/ChangeLog:
> 
> 	* btrace.c (btrace_ends_with_single_insn): New function.
> 	(btrace_call_get, btrace_call_number, btrace_call_begin,
> 	btrace_call_end, btrace_call_next, btrace_call_prev,
> 	btrace_find_call_by_number): Use index into call segment vector
> 	instead of pointer.
> 	(btrace_call_cmp): Simplify.
> 	* btrace.h (struct btrace_call_iterator): Replace function call 
> segment
> 	pointer with index into vector.
> 	* record-btrace.c (record_btrace_call_history): Use index instead of
> 	pointer.
> 
> ---
>  gdb/btrace.c        | 198 
> ++++++++++++++++++++++------------------------------
>  gdb/btrace.h        |   5 +-
>  gdb/record-btrace.c |   2 +-
>  3 files changed, 87 insertions(+), 118 deletions(-)
> 
> diff --git a/gdb/btrace.c b/gdb/btrace.c
> index c2d3730..5615aad 100644
> --- a/gdb/btrace.c
> +++ b/gdb/btrace.c
> @@ -2527,12 +2527,33 @@ btrace_find_insn_by_number (struct
> btrace_insn_iterator *it,
>    return 1;
>  }
> 
> +/* Returns a non-zero value if the recording ends with a function 
> segment that
> +   contains only a single (i.e. the current) instruction.  */
> +
> +static int

bool?

Otherwise, it LGTM.  I'm not familiar with the btrace specific 
computations, but it made sense to me.  I trust that you guys know what 
you are doing and have tested it enough :).

If I understand correctly, btrace_function::number is 1-based?  If so, 
it would be good to mention it somewhere (if it's not already, but I 
couldn't find it).

Thanks,

Simon

^ permalink raw reply	[flat|nested] 36+ messages in thread

* RE: [PATCH v3 04/12] btrace: Use function segment index in call  iterator.
  2017-05-09 12:50   ` Simon Marchi
@ 2017-05-09 13:14     ` Wiederhake, Tim
  2017-05-09 14:29       ` Simon Marchi
  0 siblings, 1 reply; 36+ messages in thread
From: Wiederhake, Tim @ 2017-05-09 13:14 UTC (permalink / raw)
  To: Simon Marchi; +Cc: gdb-patches, Metzger, Markus T

Hi Simon!

Thanks for the review.

> -----Original Message-----
> From: gdb-patches-owner@sourceware.org [mailto:gdb-patches-
> owner@sourceware.org] On Behalf Of Simon Marchi
> Sent: Tuesday, May 9, 2017 2:51 PM
> To: Wiederhake, Tim <tim.wiederhake@intel.com>
> Cc: gdb-patches@sourceware.org; Metzger, Markus T
> <markus.t.metzger@intel.com>
> Subject: Re: [PATCH v3 04/12] btrace: Use function segment index in call
> iterator.
> 
> On 2017-05-09 02:55, Tim Wiederhake wrote:
> > Remove FUNCTION pointer in struct btrace_call_iterator and use an index
> > into
> > the list of function segments instead.
> >
> > 2017-05-09  Tim Wiederhake  <tim.wiederhake@intel.com>
> >
> > gdb/ChangeLog:
> >
> > 	* btrace.c (btrace_ends_with_single_insn): New function.
> > 	(btrace_call_get, btrace_call_number, btrace_call_begin,
> > 	btrace_call_end, btrace_call_next, btrace_call_prev,
> > 	btrace_find_call_by_number): Use index into call segment vector
> > 	instead of pointer.
> > 	(btrace_call_cmp): Simplify.
> > 	* btrace.h (struct btrace_call_iterator): Replace function call
> > segment
> > 	pointer with index into vector.
> > 	* record-btrace.c (record_btrace_call_history): Use index instead of
> > 	pointer.
> >
> > ---
> >  gdb/btrace.c        | 198
> > ++++++++++++++++++++++------------------------------
> >  gdb/btrace.h        |   5 +-
> >  gdb/record-btrace.c |   2 +-
> >  3 files changed, 87 insertions(+), 118 deletions(-)
> >
> > diff --git a/gdb/btrace.c b/gdb/btrace.c
> > index c2d3730..5615aad 100644
> > --- a/gdb/btrace.c
> > +++ b/gdb/btrace.c
> > @@ -2527,12 +2527,33 @@ btrace_find_insn_by_number (struct
> > btrace_insn_iterator *it,
> >    return 1;
> >  }
> >
> > +/* Returns a non-zero value if the recording ends with a function
> > segment that
> > +   contains only a single (i.e. the current) instruction.  */
> > +
> > +static int
> 
> bool?

Right, changed locally.

> Otherwise, it LGTM.  I'm not familiar with the btrace specific
> computations, but it made sense to me.  I trust that you guys know what
> you are doing and have tested it enough :).

I tested the patches on i686 and x86_64 -- maybe I should have mentioned that.

> If I understand correctly, btrace_function::number is 1-based?  If so,
> it would be good to mention it somewhere (if it's not already, but I
> couldn't find it).

See the comment on btrace_thread_info::functions :).

> 
> Thanks,
> 
> Simon

Regards,
Tim
Intel Deutschland GmbH
Registered Address: Am Campeon 10-12, 85579 Neubiberg, Germany
Tel: +49 89 99 8853-0, www.intel.de
Managing Directors: Christin Eisenschmid, Christian Lamprechter
Chairperson of the Supervisory Board: Nicole Lau
Registered Office: Munich
Commercial Register: Amtsgericht Muenchen HRB 186928

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH v3 04/12] btrace: Use function segment index in call  iterator.
  2017-05-09 13:14     ` Wiederhake, Tim
@ 2017-05-09 14:29       ` Simon Marchi
  0 siblings, 0 replies; 36+ messages in thread
From: Simon Marchi @ 2017-05-09 14:29 UTC (permalink / raw)
  To: Wiederhake, Tim; +Cc: gdb-patches, Metzger, Markus T

On 2017-05-09 09:14, Wiederhake, Tim wrote:
>> Otherwise, it LGTM.  I'm not familiar with the btrace specific
>> computations, but it made sense to me.  I trust that you guys know 
>> what
>> you are doing and have tested it enough :).
> 
> I tested the patches on i686 and x86_64 -- maybe I should have 
> mentioned that.

Great, thanks.

>> If I understand correctly, btrace_function::number is 1-based?  If so,
>> it would be good to mention it somewhere (if it's not already, but I
>> couldn't find it).
> 
> See the comment on btrace_thread_info::functions :).

Ah ok, it comes later in the series.  I didn't have time to go that far 
yet.  I suggest throwing a "1-based" in the doc of 
btrace_function::number, because that's where I would instinctively look 
for the meaning of that field:

   /* The 1-based index of the function segment in BTINFO->FUNCTIONS.  */

Thanks,

Simon

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH v3 05/12] btrace: Use function segment index in insn  iterator.
  2017-05-09  7:01 ` [PATCH v3 05/12] btrace: Use function segment index in insn iterator Tim Wiederhake
@ 2017-05-10  2:20   ` Simon Marchi
  2017-05-10 11:46     ` Wiederhake, Tim
  0 siblings, 1 reply; 36+ messages in thread
From: Simon Marchi @ 2017-05-10  2:20 UTC (permalink / raw)
  To: Tim Wiederhake; +Cc: gdb-patches, markus.t.metzger

On 2017-05-09 02:55, Tim Wiederhake wrote:
> Remove FUNCTION pointer in struct btrace_insn_iterator and use an index 
> into
> the list of function segments instead.
> 
> 2017-05-09  Tim Wiederhake  <tim.wiederhake@intel.com>
> 
> gdb/ChangeLog:
> 
> 	* btrace.c: (btrace_insn_get, btrace_insn_get_error, 
> btrace_insn_number,
> 	btrace_insn_begin, btrace_insn_end, btrace_insn_next, 
> btrace_insn_prev,
> 	btrace_find_insn_by_number): Replace function segment pointer with
> 	index.
> 	(btrace_insn_cmp): Simplify.
> 	* btrace.h: (struct btrace_insn_iterator) Rename index to
> 	insn_index.  Replace function segment pointer with index into function
> 	segment vector.
> 	* record-btrace.c (record_btrace_call_history): Replace function
> 	segment pointer use with index.
> 	(record_btrace_frame_sniffer): Retrieve function call segment through
> 	vector.
> 	(record_btrace_set_replay): Remove defunct safety check.

Looks good, just a few comments below.

> @@ -2468,12 +2474,21 @@ int
>  btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
>  		 const struct btrace_insn_iterator *rhs)
>  {
> -  unsigned int lnum, rnum;
> +  gdb_assert (lhs->btinfo == rhs->btinfo);
> 
> -  lnum = btrace_insn_number (lhs);
> -  rnum = btrace_insn_number (rhs);
> +  if (lhs->call_index > rhs->call_index)
> +    return 1;
> +
> +  if (lhs->call_index < rhs->call_index)
> +    return -1;
> +
> +  if (lhs->insn_index > rhs->insn_index)
> +    return 1;
> +
> +  if (lhs->insn_index < rhs->insn_index)
> +    return -1;
> 
> -  return (int) (lnum - rnum);
> +  return 0;
>  }

I think the number of comparisons could be reduced by doing:

   int
   btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
                    const struct btrace_insn_iterator *rhs)
   {
     gdb_assert (lhs->btinfo == rhs->btinfo);

     if (lhs->call_index != rhs->call_index)
       return lhs->call_index - rhs->call_index;

     return lhs->insn_index - rhs->insn_index;
   }

> 
>  /* See btrace.h.  */
> @@ -2522,8 +2537,8 @@ btrace_find_insn_by_number (struct
> btrace_insn_iterator *it,
>      }
> 
>    it->btinfo = btinfo;
> -  it->function = bfun;
> -  it->index = number - bfun->insn_offset;
> +  it->call_index = bfun->number - 1;
> +  it->insn_index = number - bfun->insn_offset;
>    return 1;
>  }
> 
> diff --git a/gdb/btrace.h b/gdb/btrace.h
> index ef2c781..ca79667 100644
> --- a/gdb/btrace.h
> +++ b/gdb/btrace.h
> @@ -195,12 +195,11 @@ struct btrace_insn_iterator
>    /* The branch trace information for this thread.  Will never be 
> NULL.  */
>    const struct btrace_thread_info *btinfo;
> 
> -  /* The branch trace function segment containing the instruction.
> -     Will never be NULL.  */
> -  const struct btrace_function *function;

Just an idea, you could factor out those

   it->btinfo->functions[it->call_index]

in a small helper method in btrace_insn_iterator:

btrace_function *function ()
{
   return this->btinfo->functions[this->call_index];
}

> @@ -2691,7 +2691,7 @@ record_btrace_set_replay (struct thread_info *tp,
> 
>    btinfo = &tp->btrace;
> 
> -  if (it == NULL || it->function == NULL)
> +  if (it == NULL)

Not sure, but wouldn't the equivalent check be that call_index < 
btinfo->functions.size () ?

Thanks,

Simon

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH v3 06/12] btrace: Remove constant arguments.
  2017-05-09  7:01 ` [PATCH v3 06/12] btrace: Remove constant arguments Tim Wiederhake
@ 2017-05-10  2:45   ` Simon Marchi
  2017-05-10 11:46     ` Wiederhake, Tim
  0 siblings, 1 reply; 36+ messages in thread
From: Simon Marchi @ 2017-05-10  2:45 UTC (permalink / raw)
  To: Tim Wiederhake; +Cc: gdb-patches, markus.t.metzger

On 2017-05-09 02:55, Tim Wiederhake wrote:
> 2017-05-09  Tim Wiederhake  <tim.wiederhake@intel.com>
> 
> gdb/ChangeLog:
> 
> 	* btrace.c (ftrace_new_function, ftrace_new_call, ftrace_new_tailcall,
> 	ftrace_new_return, ftrace_new_switch, ftrace_new_gap,
> 	ftrace_update_function): Remove arguments that implicitly were always
> 	BTINFO->END.
> 	(btrace_compute_ftrace_bts, ftrace_add_pt, btrace_compute_ftrace_pt):
> 	Don't pass BTINFO->END.

Looks good, just a few comments below.

> diff --git a/gdb/btrace.c b/gdb/btrace.c
> index cb30dcf..1bd11f0 100644
> --- a/gdb/btrace.c
> +++ b/gdb/btrace.c
> @@ -202,19 +202,19 @@ ftrace_function_switched (const struct
> btrace_function *bfun,
>    return 0;
>  }
> 
> -/* Allocate and initialize a new branch trace function segment.
> +/* Allocate and initialize a new branch trace function segment at the 
> end of
> +   the trace.
>     BTINFO is the branch trace information for the current thread.
> -   PREV is the chronologically preceding function segment.
>     MFUN and FUN are the symbol information we have for this function.  
> */
> 
>  static struct btrace_function *
>  ftrace_new_function (struct btrace_thread_info *btinfo,
> -		     struct btrace_function *prev,
>  		     struct minimal_symbol *mfun,
>  		     struct symbol *fun)
>  {
> -  struct btrace_function *bfun;
> +  struct btrace_function *bfun, *prev;
> 
> +  prev = btinfo->end;

Note that we can now declare variables at the point we use them, and drop 
the struct keyword, like:

   btrace_function *prev = btinfo->end;

It's up to you, you can continue with your current style if you wish.

>    bfun = XCNEW (struct btrace_function);
> 
>    bfun->msym = mfun;
> @@ -238,7 +238,7 @@ ftrace_new_function (struct btrace_thread_info 
> *btinfo,
>      }
> 
>    btinfo->functions.push_back (bfun);
> -  return bfun;
> +  return btinfo->end = bfun;

Err I'm really not a fan of assignment as a side effect.

> @@ -515,13 +510,13 @@ ftrace_new_gap (struct btrace_thread_info 
> *btinfo,
>     Return the chronologically latest function segment, never NULL.  */
> 
>  static struct btrace_function *
> -ftrace_update_function (struct btrace_thread_info *btinfo,
> -			struct btrace_function *bfun, CORE_ADDR pc)
> +ftrace_update_function (struct btrace_thread_info *btinfo, CORE_ADDR 
> pc)

The comment of this function would need to be updated as well.

Thanks,

Simon

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH v3 07/12] btrace: Remove struct  btrace_thread_info::{begin,end}.
  2017-05-09  7:01 ` [PATCH v3 07/12] btrace: Remove struct btrace_thread_info::{begin,end} Tim Wiederhake
@ 2017-05-10  3:06   ` Simon Marchi
  0 siblings, 0 replies; 36+ messages in thread
From: Simon Marchi @ 2017-05-10  3:06 UTC (permalink / raw)
  To: Tim Wiederhake; +Cc: gdb-patches, markus.t.metzger

On 2017-05-09 02:55, Tim Wiederhake wrote:
> These are no longer needed and might hold invalid addresses once we 
> change the
> vector of function segment pointers into a vector of function segment 
> objects
> where a reallocation of the vector changes the address of its elements.
> 
> 2017-05-09  Tim Wiederhake  <tim.wiederhake@intel.com>
> 
> gdb/ChangeLog:
> 
> 	* btrace.c (ftrace_new_function, ftrace_new_call, ftrace_new_tailcall,
> 	ftrace_new_return, ftrace_new_switch, ftrace_new_gap,
> 	ftrace_update_function, ftrace_compute_global_level_offset,
> 	btrace_compute_ftrace_bts, ftrace_add_pt, btrace_compute_ftrace_pt,
> 	btrace_stitch_bts, btrace_fetch, btrace_clear, btrace_insn_number,
> 	btrace_insn_end, btrace_is_empty): Remove references to
> 	btrace_thread_info::begin and btrace_thread_info::end.
> 	* btrace.h (struct btrace_thread_info): Remove BEGIN and END.
> 	* record-btrace.c (record_btrace_start_replaying): Remove reference to
> 	btrace_thread_info::begin.

LGTM.

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH v3 08/12] btrace: Replace struct btrace_thread_info::up.
  2017-05-09  7:01 ` [PATCH v3 08/12] btrace: Replace struct btrace_thread_info::up Tim Wiederhake
@ 2017-05-10  3:26   ` Simon Marchi
  2017-05-10 11:46     ` Wiederhake, Tim
  0 siblings, 1 reply; 36+ messages in thread
From: Simon Marchi @ 2017-05-10  3:26 UTC (permalink / raw)
  To: Tim Wiederhake; +Cc: gdb-patches, markus.t.metzger

On 2017-05-09 02:55, Tim Wiederhake wrote:
> This used to hold a function segment pointer.  Change it to hold an 
> index into
> the vector of function segments instead.
> 
> 2017-05-09  Tim Wiederhake  <tim.wiederhake@intel.com>
> 
> gdb/ChangeLog:
> 
> 	* btrace.c (ftrace_find_call_by_number): New function.
> 	(ftrace_update_caller, ftrace_new_call, ftrace_new_tailcall,
> 	ftrace_get_caller, ftrace_find_call, ftrace_new_return,
> 	ftrace_match_backtrace, ftrace_connect_bfun, ftrace_connect_backtrace,
> 	ftrace_bridge_gap, btrace_bridge_gaps): Use btrace_function::up as an
> 	index.
> 	* btrace.h (struct btrace_function): Turn UP into an index.
> 	* python/py-record-btrace.c (btpy_call_up): Use btrace_function::up
> 	as an index.
> 	* record-btrace.c (record_btrace_frame_unwind_stop_reason,
> 	record_btrace_frame_prev_register, record_btrace_frame_sniffer,
> 	record_btrace_tailcall_frame_sniffer): Same.

LGTM, just a question below.

> @@ -1629,11 +1629,12 @@ record_btrace_frame_prev_register (struct
> frame_info *this_frame,
>    bfun = cache->bfun;
>    gdb_assert (bfun != NULL);
> 
> -  caller = bfun->up;
> -  if (caller == NULL)
> +  if (bfun->up == 0)
>      throw_error (NOT_AVAILABLE_ERROR,
>  		 _("No caller in btrace record history"));
> 
> +  caller = cache->tp->btrace.functions[bfun->up - 1];
> +

Would it be good to export ftrace_find_call_by_number so it can be used 
at a few places in this file?

Thanks,

Simon

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH v3 09/12] btrace: Remove struct btrace_thread_info::flow.
  2017-05-09  7:01 ` [PATCH v3 09/12] btrace: Remove struct btrace_thread_info::flow Tim Wiederhake
@ 2017-05-10  3:46   ` Simon Marchi
  2017-05-10 11:46     ` Wiederhake, Tim
  0 siblings, 1 reply; 36+ messages in thread
From: Simon Marchi @ 2017-05-10  3:46 UTC (permalink / raw)
  To: Tim Wiederhake; +Cc: gdb-patches, markus.t.metzger

The title should mention btrace_function::flow instead of 
btrace_thread_info::flow.  I just realized the previous patch has the 
same issue.

On 2017-05-09 02:55, Tim Wiederhake wrote:
> This used to hold a pair of pointers to the previous and next function 
> segment
> in execution flow order.  It is no longer necessary as the previous and 
> next
> function segments now are simply the previous and next elements in the 
> vector
> of function segments.
> 
> 2017-05-09  Tim Wiederhake  <tim.wiederhake@intel.com>
> 
> gdb/ChangeLog:
> 
> 	* btrace.c (ftrace_new_function, ftrace_fixup_level,
> 	ftrace_connect_bfun, ftrace_bridge_gap, btrace_bridge_gaps,
> 	btrace_insn_next, btrace_insn_prev): Remove references to
> 	btrace_thread_info::flow.

btrace_function::flow.

> 	* btrace.h (struct btrace_function): Remove FLOW.
> 

The patch LGTM, but I have one question.  Did you consider adding a 
backlink in btrace_function to its btrace_thread_info owner?  It would 
possible to implement gap->next () and gap->prev (), which could be used 
in many places, and would probably be more readable than

   ftrace_find_call_by_number (btinfo, gap->number - 1)
   ftrace_find_call_by_number (btinfo, gap->number + 1)

And it could possibly bring more simplifications; I didn't look in 
detail.

Thanks,

Simon

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH v3 10/12] btrace: Replace struct  btrace_thread_info::segment.
  2017-05-09  7:01 ` [PATCH v3 10/12] btrace: Replace struct btrace_thread_info::segment Tim Wiederhake
@ 2017-05-10  4:14   ` Simon Marchi
  2017-05-10 11:46     ` Wiederhake, Tim
  0 siblings, 1 reply; 36+ messages in thread
From: Simon Marchi @ 2017-05-10  4:14 UTC (permalink / raw)
  To: Tim Wiederhake; +Cc: gdb-patches, markus.t.metzger

This title too should say btrace_function.

On 2017-05-09 02:55, Tim Wiederhake wrote:
> This used to hold a pair of pointers to the previous and next function 
> segment
> that belong to this function call.  Replace with a pair of indices into 
> the
> vector of function segments.
> 
> 2017-05-09  Tim Wiederhake  <tim.wiederhake@intel.com>
> 
> gdb/ChangeLog:
> 
> 	* btrace.c (ftrace_fixup_caller, ftrace_new_return, 
> ftrace_connect_bfun,
> 	ftrace_bridge_gap): Replace references to btrace_thread_info::segment
> 	with btrace_thread_info::next_segment and
> 	btrace_thread_info::prev_segment.
> 	* btrace.h: Remove struct btrace_func_link.
> 	(struct btrace_function): Replace pair of function segment pointers
> 	with pair of indices.
> 	* python/py-record-btrace.c (btpy_call_prev_sibling,
> 	btpy_call_next_sibling): Replace references to
> 	btrace_thread_info::segment with btrace_thread_info::next_segment and
> 	btrace_thread_info::prev_segment.
> 	* record-btrace.c (record_btrace_frame_this_id): Same.
> 
> ---
>  gdb/btrace.c                  | 47 
> ++++++++++++++++++++++++++-----------------
>  gdb/btrace.h                  | 17 ++++++----------
>  gdb/python/py-record-btrace.c |  8 ++++----
>  gdb/record-btrace.c           |  4 ++--
>  4 files changed, 40 insertions(+), 36 deletions(-)
> 
> diff --git a/gdb/btrace.c b/gdb/btrace.c
> index f57bbf9..921cb64 100644
> --- a/gdb/btrace.c
> +++ b/gdb/btrace.c
> @@ -271,20 +271,29 @@ ftrace_update_caller (struct btrace_function 
> *bfun,
>  /* Fix up the caller for all segments of a function.  */
> 
>  static void
> -ftrace_fixup_caller (struct btrace_function *bfun,
> +ftrace_fixup_caller (struct btrace_thread_info *btinfo,
> +		     struct btrace_function *bfun,
>  		     struct btrace_function *caller,
>  		     enum btrace_function_flag flags)
>  {
> -  struct btrace_function *prev, *next;
> +  unsigned int prev, next;
> 
> +  prev = bfun->prev;
> +  next = bfun->next;
>    ftrace_update_caller (bfun, caller, flags);
> 
>    /* Update all function segments belonging to the same function.  */
> -  for (prev = bfun->segment.prev; prev != NULL; prev = 
> prev->segment.prev)
> -    ftrace_update_caller (prev, caller, flags);
> +  for (; prev != 0; prev = bfun->prev)
> +    {
> +      bfun = ftrace_find_call_by_number (btinfo, prev);
> +      ftrace_update_caller (bfun, caller, flags);
> +    }
> 
> -  for (next = bfun->segment.next; next != NULL; next = 
> next->segment.next)
> -    ftrace_update_caller (next, caller, flags);
> +  for (; next != 0; next = bfun->next)
> +    {
> +      bfun = ftrace_find_call_by_number (btinfo, next);
> +      ftrace_update_caller (bfun, caller, flags);
> +    }

Could you define the loop variables in the for like this?

   for (unsigned int prev = bfun->prev; prev != 0; prev = bfun->prev)
     {
       bfun = ftrace_find_call_by_number (btinfo, prev);
       ftrace_update_caller (bfun, caller, flags);
     }

Unless is it important to capture the value of bfun->prev/next before 
calling ftrace_update_caller?  This way their scope is limited to where 
they are used.

Btw, this is another thing that could be rewritten nicely if 
btrace_function had a backlink to btrace_thread_info, something like:

   for (btrace_function *it = bfun; it != NULL; it = it->next_segment ())
     ftrace_update_caller (it, caller, flags);

Btw #2, I think this function could be more efficient (or maybe I don't 
understand as well as I think).  If bfun at function entry is in the 
middle of a long list of segments, it will start from there and iterate 
backwards until it hits the first segment.  But because the same bfun 
variable is reused, it will iterate forward from the start until the 
end, whereas it only needed to iterate from the original bfun.  Using a 
temporary loop variable to avoid modifying bfun would correct that.

> @@ -948,7 +957,7 @@ ftrace_bridge_gap (struct btrace_thread_info 
> *btinfo,
>  static void
>  btrace_bridge_gaps (struct thread_info *tp, VEC (bfun_s) **gaps)
>  {
> -  struct btrace_thread_info *btinfo;
> +  struct btrace_thread_info *btinfo = &tp->btrace;

btinfo is now assigned twice in this function.

Thanks,

Simon

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH v3 11/12] btrace: Remove bfun_s vector.
  2017-05-09  7:01 ` [PATCH v3 11/12] btrace: Remove bfun_s vector Tim Wiederhake
@ 2017-05-10  4:27   ` Simon Marchi
  2017-05-10 11:46     ` Wiederhake, Tim
  0 siblings, 1 reply; 36+ messages in thread
From: Simon Marchi @ 2017-05-10  4:27 UTC (permalink / raw)
  To: Tim Wiederhake; +Cc: gdb-patches, markus.t.metzger

On 2017-05-09 02:55, Tim Wiederhake wrote:
> 2017-05-09  Tim Wiederhake  <tim.wiederhake@intel.com>
> 
> gdb/ChangeLog:
> 
> 	* btrace.c: Remove typedef bfun_s.
> 	(ftrace_new_gap): Directly add gaps to the list of gaps.
> 	(btrace_bridge_gaps, btrace_compute_ftrace_bts, pt_btrace_insn_flags,
> 	ftrace_add_pt, btrace_compute_ftrace_pt, btrace_compute_ftrace_1,
> 	btrace_finalize_ftrace, btrace_compute_ftrace): Use std::vector
> 	instead of gdb VEC.

Looks good, just two nits.

> @@ -527,15 +524,15 @@ ftrace_new_gap (struct btrace_thread_info
> *btinfo, int errcode)
>      }
> 
>    bfun->errcode = errcode;
> +  gaps.push_back (bfun->number);
> 
>    ftrace_debug (bfun, "new gap");
> 
>    return bfun;
>  }
> 
> -/* Update BFUN with respect to the instruction at PC.  BTINFO is the 
> branch
> -   trace information for the current thread.  This may create new 
> function
> -   segments.
> +/* Update the current function segment at the end of the trace in 
> BTINFO with
> +   respect to the instruction at PC.  This may create new function 
> segments.
>     Return the chronologically latest function segment, never NULL.  */

Ah here it is!  Please move it to the appropriate patch (#6 I believe).

> @@ -978,16 +970,15 @@ btrace_bridge_gaps (struct thread_info *tp, VEC
> (bfun_s) **gaps)
>      {
>        /* Let's try to bridge as many gaps as we can.  In some cases, 
> we need to
>  	 skip a gap and revisit it again after we closed later gaps.  */
> -      while (!VEC_empty (bfun_s, *gaps))
> +      while (!gaps.empty ())
>  	{
> -	  struct btrace_function *gap;
> -	  unsigned int idx;
> -
> -	  for (idx = 0; VEC_iterate (bfun_s, *gaps, idx, gap); ++idx)
> +	  for (const auto& number : gaps)

If you don't intend to modify number (the value in the vector), I think 
it would be actually more efficient to not use a reference to iterate on 
ints.

Also, I agree with using auto when the type it replaces is visually 
scary, but in this case I think it would be as readable and more 
informative to use the actual type:

   for (unsigned int number : gaps)

Thanks,

Simon

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH v3 12/12] btrace: Store function segments as objects.
  2017-05-09  7:01 ` [PATCH v3 12/12] btrace: Store function segments as objects Tim Wiederhake
@ 2017-05-10  5:10   ` Simon Marchi
  2017-05-10 11:46     ` Wiederhake, Tim
  0 siblings, 1 reply; 36+ messages in thread
From: Simon Marchi @ 2017-05-10  5:10 UTC (permalink / raw)
  To: Tim Wiederhake; +Cc: gdb-patches, markus.t.metzger

On 2017-05-09 02:55, Tim Wiederhake wrote:
> 2017-05-09  Tim Wiederhake  <tim.wiederhake@intel.com>
> 
> gdb/ChangeLog:
> 	* btrace.c:
> 	* btrace.h:
> 	* record-btrace.c:

IWBN if you could be more explicit :).

> 
> ---
>  gdb/btrace.c        | 94 
> ++++++++++++++++++++++++++---------------------------
>  gdb/btrace.h        |  7 ++--
>  gdb/record-btrace.c | 10 +++---
>  3 files changed, 56 insertions(+), 55 deletions(-)
> 
> diff --git a/gdb/btrace.c b/gdb/btrace.c
> index 7b82000..4c7020d 100644
> --- a/gdb/btrace.c
> +++ b/gdb/btrace.c
> @@ -155,14 +155,27 @@ ftrace_call_num_insn (const struct 
> btrace_function* bfun)
>  /* Return the function segment with the given NUMBER or NULL if no 
> such segment
>     exists.  BTINFO is the branch trace information for the current 
> thread.  */
> 
> -static struct btrace_function *
> +const static struct btrace_function *

It would make more sense to put the "static" first.

>  ftrace_find_call_by_number (const struct btrace_thread_info *btinfo,
>  			    unsigned int number)
>  {
>    if (number == 0 || number > btinfo->functions.size ())
>      return NULL;
> 
> -  return btinfo->functions[number - 1];
> +  return &btinfo->functions[number - 1];
> +}
> +
> +/* Return the function segment with the given NUMBER or NULL if no 
> such segment
> +   exists.  BTINFO is the branch trace information for the current 
> thread.  */

It took me a surprisingly high amount of seconds to understand that this 
was a const version of the function below.  To avoid repeating the 
comment and to make it clear it's the same thing, you can replace the 
comment of the const version to something like:

   /* A const version of the function above.  */

> +
> +static struct btrace_function *
> +ftrace_find_call_by_number (struct btrace_thread_info *btinfo,
> +			    unsigned int number)
> +{
> +  if (number == 0 || number > btinfo->functions.size ())
> +    return NULL;
> +
> +  return &btinfo->functions[number - 1];
>  }
> 
>  /* Return non-zero if BFUN does not match MFUN and FUN,
> @@ -214,37 +227,33 @@ ftrace_function_switched (const struct
> btrace_function *bfun,
>  /* Allocate and initialize a new branch trace function segment at the 
> end of
>     the trace.
>     BTINFO is the branch trace information for the current thread.
> -   MFUN and FUN are the symbol information we have for this function.  
> */
> +   MFUN and FUN are the symbol information we have for this function.
> +   This invalidates all struct btrace_function pointer currently held. 
>  */
> 
>  static struct btrace_function *
>  ftrace_new_function (struct btrace_thread_info *btinfo,
>  		     struct minimal_symbol *mfun,
>  		     struct symbol *fun)
>  {
> -  struct btrace_function *bfun;
> -
> -  bfun = XCNEW (struct btrace_function);
> -
> -  bfun->msym = mfun;
> -  bfun->sym = fun;
> +  struct btrace_function bfun {mfun, fun, 0, 0, 0, NULL, 0, 0, 0, 0, 
> 0};

I think it would be much better to add a simple constructor to 
btrace_function.  For the fields that should simply be zero'ed, you can 
initialize fields directly, like we do in many other places (e.g. class 
inferior).

> 
>    if (btinfo->functions.empty ())
>      {
>        /* Start counting at one.  */
> -      bfun->number = 1;
> -      bfun->insn_offset = 1;
> +      bfun.number = 1;
> +      bfun.insn_offset = 1;
>      }
>    else
>      {
> -      struct btrace_function *prev = btinfo->functions.back ();
> +      struct btrace_function *prev = &btinfo->functions.back ();
> 
> -      bfun->number = prev->number + 1;
> -      bfun->insn_offset = prev->insn_offset + ftrace_call_num_insn 
> (prev);
> -      bfun->level = prev->level;
> +      bfun.number = prev->number + 1;
> +      bfun.insn_offset = prev->insn_offset + ftrace_call_num_insn 
> (prev);
> +      bfun.level = prev->level;
>      }
> 
> -  btinfo->functions.push_back (bfun);
> -  return bfun;
> +  btinfo->functions.push_back (std::move (bfun));
> +  return &btinfo->functions.back ();

I could be mistaken, but I don't think the move is very useful here, 
since all fields of btrace_function are trivial (?).  You could use 
emplace_back instead:

   btinfo->functions.emplace_back (mfun, fun);
   btrace_function &bfun = btinfo->functions.back ();

   ...

   return &bfun;

or

   unsigned int number, insn_offset;
   unsigned int insn_offset = prev->insn_offset + ftrace_call_num_insn 
(prev);
   int level = prev->level;

   if (btinfo->functions.empty ())
     {
       /* Start counting at one.  */
       number = 1;
       insn_offset = 1;

       level = 0;
     }
   else
     {
       struct btrace_function *prev = &btinfo->functions.back ();

       number = prev->number + 1;
       insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
       level = prev->level;
     }

   btinfo->functions.emplace_back (mfun, fun, number, insn_offset, 
level);

   return &btinfo->functions.back ();

Thanks,

Simon

^ permalink raw reply	[flat|nested] 36+ messages in thread

* RE: [PATCH v3 08/12] btrace: Replace struct btrace_thread_info::up.
  2017-05-10  3:26   ` Simon Marchi
@ 2017-05-10 11:46     ` Wiederhake, Tim
  0 siblings, 0 replies; 36+ messages in thread
From: Wiederhake, Tim @ 2017-05-10 11:46 UTC (permalink / raw)
  To: Simon Marchi; +Cc: gdb-patches, Metzger, Markus T

Hi Simon,

Thanks for reviewing!

> -----Original Message-----
> From: Simon Marchi [mailto:simon.marchi@polymtl.ca]
> Sent: Wednesday, May 10, 2017 5:27 AM
> To: Wiederhake, Tim <tim.wiederhake@intel.com>
> Cc: gdb-patches@sourceware.org; Metzger, Markus T
> <markus.t.metzger@intel.com>
> Subject: Re: [PATCH v3 08/12] btrace: Replace struct
> btrace_thread_info::up.
> 
> On 2017-05-09 02:55, Tim Wiederhake wrote:
> > This used to hold a function segment pointer.  Change it to hold an
> > index into
> > the vector of function segments instead.
> >
> > 2017-05-09  Tim Wiederhake  <tim.wiederhake@intel.com>
> >
> > gdb/ChangeLog:
> >
> > 	* btrace.c (ftrace_find_call_by_number): New function.
> > 	(ftrace_update_caller, ftrace_new_call, ftrace_new_tailcall,
> > 	ftrace_get_caller, ftrace_find_call, ftrace_new_return,
> > 	ftrace_match_backtrace, ftrace_connect_bfun,
> ftrace_connect_backtrace,
> > 	ftrace_bridge_gap, btrace_bridge_gaps): Use btrace_function::up as
> an
> > 	index.
> > 	* btrace.h (struct btrace_function): Turn UP into an index.
> > 	* python/py-record-btrace.c (btpy_call_up): Use btrace_function::up
> > 	as an index.
> > 	* record-btrace.c (record_btrace_frame_unwind_stop_reason,
> > 	record_btrace_frame_prev_register, record_btrace_frame_sniffer,
> > 	record_btrace_tailcall_frame_sniffe): Same.
> 
> LGTM, just a question below.
> 
> > @@ -1629,11 +1629,12 @@ record_btrace_frame_prev_register (struct
> > frame_info *this_frame,
> >    bfun = cache->bfun;
> >    gdb_assert (bfun != NULL);
> >
> > -  caller = bfun->up;
> > -  if (caller == NULL)
> > +  if (bfun->up == 0)
> >      throw_error (NOT_AVAILABLE_ERROR,
> >  		 _("No caller in btrace record history"));
> >
> > +  caller = cache->tp->btrace.functions[bfun->up - 1];
> > +
> 
> Would it be good to export ftrace_find_call_by_number so it can be used
> at a few places in this file?

We should tackle this in a future patch set.

> Thanks,
> 
> Simon

Regards,
Tim
Intel Deutschland GmbH
Registered Address: Am Campeon 10-12, 85579 Neubiberg, Germany
Tel: +49 89 99 8853-0, www.intel.de
Managing Directors: Christin Eisenschmid, Christian Lamprechter
Chairperson of the Supervisory Board: Nicole Lau
Registered Office: Munich
Commercial Register: Amtsgericht Muenchen HRB 186928

^ permalink raw reply	[flat|nested] 36+ messages in thread

* RE: [PATCH v3 11/12] btrace: Remove bfun_s vector.
  2017-05-10  4:27   ` Simon Marchi
@ 2017-05-10 11:46     ` Wiederhake, Tim
  0 siblings, 0 replies; 36+ messages in thread
From: Wiederhake, Tim @ 2017-05-10 11:46 UTC (permalink / raw)
  To: Simon Marchi; +Cc: gdb-patches, Metzger, Markus T

Hi Simon,

thanks for reviewing!

> -----Original Message-----
> From: Simon Marchi [mailto:simon.marchi@polymtl.ca]
> Sent: Wednesday, May 10, 2017 6:27 AM
> To: Wiederhake, Tim <tim.wiederhake@intel.com>
> Cc: gdb-patches@sourceware.org; Metzger, Markus T
> <markus.t.metzger@intel.com>
> Subject: Re: [PATCH v3 11/12] btrace: Remove bfun_s vector.
> 
> On 2017-05-09 02:55, Tim Wiederhake wrote:
> > 2017-05-09  Tim Wiederhake  <tim.wiederhake@intel.com>
> >
> > gdb/ChangeLog:
> >
> > 	* btrace.c: Remove typedef bfun_s.
> > 	(ftrace_new_gap): Directly add gaps to the list of gaps.
> > 	(btrace_bridge_gaps, btrace_compute_ftrace_bts,
> pt_btrace_insn_flags,
> > 	ftrace_add_pt, btrace_compute_ftrace_pt, btrace_compute_ftrace_1,
> > 	btrace_finalize_ftrace, btrace_compute_ftrace): Use std::vector
> > 	instead of gdb VEC.
> 
> Looks good, just two nits.
> 
> > @@ -527,15 +524,15 @@ ftrace_new_gap (struct btrace_thread_info
> > *btinfo, int errcode)
> >      }
> >
> >    bfun->errcode = errcode;
> > +  gaps.push_back (bfun->number);
> >
> >    ftrace_debug (bfun, "new gap");
> >
> >    return bfun;
> >  }
> >
> > -/* Update BFUN with respect to the instruction at PC.  BTINFO is the
> > branch
> > -   trace information for the current thread.  This may create new
> > function
> > -   segments.
> > +/* Update the current function segment at the end of the trace in
> > BTINFO with
> > +   respect to the instruction at PC.  This may create new function
> > segments.
> >     Return the chronologically latest function segment, never NULL.  */
> 
> Ah here it is!  Please move it to the appropriate patch (#6 I believe).

Moved.

> > @@ -978,16 +970,15 @@ btrace_bridge_gaps (struct thread_info *tp, VEC
> > (bfun_s) **gaps)
> >      {
> >        /* Let's try to bridge as many gaps as we can.  In some cases,
> > we need to
> >  	 skip a gap and revisit it again after we closed later gaps.  */
> > -      while (!VEC_empty (bfun_s, *gaps))
> > +      while (!gaps.empty ())
> >  	{
> > -	  struct btrace_function *gap;
> > -	  unsigned int idx;
> > -
> > -	  for (idx = 0; VEC_iterate (bfun_s, *gaps, idx, gap); ++idx)
> > +	  for (const auto& number : gaps)
> 
> If you don't intend to modify number (the value in the vector), I think
> it would be actually more efficient to not use a reference to iterate on
> ints.
> 
> Also, I agree with using auto when the type it replaces is visually
> scary, but in this case I think it would be as readable and more
> informative to use the actual type:
> 
>    for (unsigned int number : gaps)

Done.

> 
> Thanks,
> 
> Simon

Regards,
Tim
Intel Deutschland GmbH
Registered Address: Am Campeon 10-12, 85579 Neubiberg, Germany
Tel: +49 89 99 8853-0, www.intel.de
Managing Directors: Christin Eisenschmid, Christian Lamprechter
Chairperson of the Supervisory Board: Nicole Lau
Registered Office: Munich
Commercial Register: Amtsgericht Muenchen HRB 186928

^ permalink raw reply	[flat|nested] 36+ messages in thread

* RE: [PATCH v3 10/12] btrace: Replace struct  btrace_thread_info::segment.
  2017-05-10  4:14   ` Simon Marchi
@ 2017-05-10 11:46     ` Wiederhake, Tim
  2017-05-10 14:13       ` Simon Marchi
  0 siblings, 1 reply; 36+ messages in thread
From: Wiederhake, Tim @ 2017-05-10 11:46 UTC (permalink / raw)
  To: Simon Marchi; +Cc: gdb-patches, Metzger, Markus T

Hi Simon,

thanks for reviewing!

> -----Original Message-----
> From: Simon Marchi [mailto:simon.marchi@polymtl.ca]
> Sent: Wednesday, May 10, 2017 6:15 AM
> To: Wiederhake, Tim <tim.wiederhake@intel.com>
> Cc: gdb-patches@sourceware.org; Metzger, Markus T
> <markus.t.metzger@intel.com>
> Subject: Re: [PATCH v3 10/12] btrace: Replace struct
> btrace_thread_info::segment.
> 
> This title too should say btrace_function.

Fixed.

> On 2017-05-09 02:55, Tim Wiederhake wrote:
> > This used to hold a pair of pointers to the previous and next function
> > segment
> > that belong to this function call.  Replace with a pair of indices into
> > the
> > vector of function segments.
> >
> > 2017-05-09  Tim Wiederhake  <tim.wiederhake@intel.com>
> >
> > gdb/ChangeLog:
> >
> > 	* btrace.c (ftrace_fixup_caller, ftrace_new_return,
> > ftrace_connect_bfun,
> > 	ftrace_bridge_gap): Replace references to
> btrace_thread_info::segment
> > 	with btrace_thread_info::next_segment and
> > 	btrace_thread_info::prev_segment.
> > 	* btrace.h: Remove struct btrace_func_link.
> > 	(struct btrace_function): Replace pair of function segment pointers
> > 	with pair of indices.
> > 	* python/py-record-btrace.c (btpy_call_prev_sibling,
> > 	btpy_call_next_sibling): Replace references to
> > 	btrace_thread_info::segment with btrace_thread_info::next_segment
> and
> > 	btrace_thread_info::prev_segment.
> > 	* record-btrace.c (record_btrace_frame_this_id): Same.
> >
> > ---
> >  gdb/btrace.c                  | 47
> > ++++++++++++++++++++++++++-----------------
> >  gdb/btrace.h                  | 17 ++++++----------
> >  gdb/python/py-record-btrace.c |  8 ++++----
> >  gdb/record-btrace.c           |  4 ++--
> >  4 files changed, 40 insertions(+), 36 deletions(-)
> >
> > diff --git a/gdb/btrace.c b/gdb/btrace.c
> > index f57bbf9..921cb64 100644
> > --- a/gdb/btrace.c
> > +++ b/gdb/btrace.c
> > @@ -271,20 +271,29 @@ ftrace_update_caller (struct btrace_function
> > *bfun,
> >  /* Fix up the caller for all segments of a function.  */
> >
> >  static void
> > -ftrace_fixup_caller (struct btrace_function *bfun,
> > +ftrace_fixup_caller (struct btrace_thread_info *btinfo,
> > +		     struct btrace_function *bfun,
> >  		     struct btrace_function *caller,
> >  		     enum btrace_function_flag flags)
> >  {
> > -  struct btrace_function *prev, *next;
> > +  unsigned int prev, next;
> >
> > +  prev = bfun->prev;
> > +  next = bfun->next;
> >    ftrace_update_caller (bfun, caller, flags);
> >
> >    /* Update all function segments belonging to the same function.  */
> > -  for (prev = bfun->segment.prev; prev != NULL; prev =
> > prev->segment.prev)
> > -    ftrace_update_caller (prev, caller, flags);
> > +  for (; prev != 0; prev = bfun->prev)
> > +    {
> > +      bfun = ftrace_find_call_by_number (btinfo, prev);
> > +      ftrace_update_caller (bfun, caller, flags);
> > +    }
> >
> > -  for (next = bfun->segment.next; next != NULL; next =
> > next->segment.next)
> > -    ftrace_update_caller (next, caller, flags);
> > +  for (; next != 0; next = bfun->next)
> > +    {
> > +      bfun = ftrace_find_call_by_number (btinfo, next);
> > +      ftrace_update_caller (bfun, caller, flags);
> > +    }
> 
> Could you define the loop variables in the for like this?
> 
>    for (unsigned int prev = bfun->prev; prev != 0; prev = bfun->prev)
>      {
>        bfun = ftrace_find_call_by_number (btinfo, prev);
>        ftrace_update_caller (bfun, caller, flags);
>      }
> 
> Unless is it important to capture the value of bfun->prev/next before
> calling ftrace_update_caller?  This way their scope is limited to where
> they are used.

This is what the function would look like:
<snip>
  ftrace_update_caller (bfun, caller, flags);

  for (unsigned int i = bfun->prev; i != 0;)
    {
      struct btrace_function *tmp = ftrace_find_call_by_number (btinfo, i);
      ftrace_update_caller (tmp, caller, flags);
      i = tmp->prev;
    }
  
  for (unsigned int i = bfun->next; i != 0;)
    {
      struct btrace_function *tmp = ftrace_find_call_by_number (btinfo, i);
      ftrace_update_caller (tmp, caller, flags);
      i = tmp->next;
    }
</snip>

IMO, this isn't any better. If I pull out the struct btrace_function* tmp
declaration, it would look like this:
<snip>
  struct btrace_function *tmp;
  
  ftrace_update_caller (bfun, caller, flags);

  for (unsigned int i = bfun->prev; i != 0; i = tmp->prev)
    {
      tmp = ftrace_find_call_by_number (btinfo, i);
      ftrace_update_caller (tmp, caller, flags);
    }
  
  for (unsigned int i = bfun->next; i != 0; i = tmp->next)
    {
      tmp = ftrace_find_call_by_number (btinfo, i);
      ftrace_update_caller (tmp, caller, flags);
    }
</snip>

I'd leave it as is for now and see how this code changes in a
*drum roll* future C++-ification patch series.

> Btw, this is another thing that could be rewritten nicely if
> btrace_function had a backlink to btrace_thread_info, something like:
> 
>    for (btrace_function *it = bfun; it != NULL; it = it->next_segment ())
>      ftrace_update_caller (it, caller, flags);
> 
> Btw #2, I think this function could be more efficient (or maybe I don't
> understand as well as I think).  If bfun at function entry is in the
> middle of a long list of segments, it will start from there and iterate
> backwards until it hits the first segment.

Correct so far.

> But because the same bfun
> variable is reused, it will iterate forward from the start

We saved PREV and NEXT beforehand and use BFUN as a temporary variable
afterwards. The second "for" loop starts at NEXT, which is one past the
original "middle of the long list of segments".

> until the
> end, whereas it only needed to iterate from the original bfun.  Using a
> temporary loop variable to avoid modifying bfun would correct that.
> > @@ -948,7 +957,7 @@ ftrace_bridge_gap (struct btrace_thread_info
> > *btinfo,
> >  static void
> >  btrace_bridge_gaps (struct thread_info *tp, VEC (bfun_s) **gaps)
> >  {
> > -  struct btrace_thread_info *btinfo;
> > +  struct btrace_thread_info *btinfo = &tp->btrace;
> 
> btinfo is now assigned twice in this function.

Good catch, thank you!

> 
> Thanks,
> 
> Simon

Regards,
Tim
Intel Deutschland GmbH
Registered Address: Am Campeon 10-12, 85579 Neubiberg, Germany
Tel: +49 89 99 8853-0, www.intel.de
Managing Directors: Christin Eisenschmid, Christian Lamprechter
Chairperson of the Supervisory Board: Nicole Lau
Registered Office: Munich
Commercial Register: Amtsgericht Muenchen HRB 186928

^ permalink raw reply	[flat|nested] 36+ messages in thread

* RE: [PATCH v3 06/12] btrace: Remove constant arguments.
  2017-05-10  2:45   ` Simon Marchi
@ 2017-05-10 11:46     ` Wiederhake, Tim
  0 siblings, 0 replies; 36+ messages in thread
From: Wiederhake, Tim @ 2017-05-10 11:46 UTC (permalink / raw)
  To: Simon Marchi; +Cc: gdb-patches, Metzger, Markus T

Hi Simon,

Thanks for reviewing!

> -----Original Message-----
> From: Simon Marchi [mailto:simon.marchi@polymtl.ca]
> Sent: Wednesday, May 10, 2017 4:46 AM
> To: Wiederhake, Tim <tim.wiederhake@intel.com>
> Cc: gdb-patches@sourceware.org; Metzger, Markus T
> <markus.t.metzger@intel.com>
> Subject: Re: [PATCH v3 06/12] btrace: Remove constant arguments.
> 
> On 2017-05-09 02:55, Tim Wiederhake wrote:
> > 2017-05-09  Tim Wiederhake  <tim.wiederhake@intel.com>
> >
> > gdb/ChangeLog:
> >
> > 	* btrace.c (ftrace_new_function, ftrace_new_call,
> ftrace_new_tailcall,
> > 	ftrace_new_return, ftrace_new_switch, ftrace_new_gap,
> > 	ftrace_update_function): Remove arguments that implicitly were
> always
> > 	BTINFO->END.
> > 	(btrace_compute_ftrace_bts, ftrace_add_pt,
> btrace_compute_ftrace_pt):
> > 	Don't pass BTINFO->END.
> 
> Looks good, just a few comments below.
> 
> > diff --git a/gdb/btrace.c b/gdb/btrace.c
> > index cb30dcf..1bd11f0 100644
> > --- a/gdb/btrace.c
> > +++ b/gdb/btrace.c
> > @@ -202,19 +202,19 @@ ftrace_function_switched (const struct
> > btrace_function *bfun,
> >    return 0;
> >  }
> >
> > -/* Allocate and initialize a new branch trace function segment.
> > +/* Allocate and initialize a new branch trace function segment at the
> > end of
> > +   the trace.
> >     BTINFO is the branch trace information for the current thread.
> > -   PREV is the chronologically preceding function segment.
> >     MFUN and FUN are the symbol information we have for this function.
> > */
> >
> >  static struct btrace_function *
> >  ftrace_new_function (struct btrace_thread_info *btinfo,
> > -		     struct btrace_function *prev,
> >  		     struct minimal_symbol *mfun,
> >  		     struct symbol *fun)
> >  {
> > -  struct btrace_function *bfun;
> > +  struct btrace_function *bfun, *prev;
> >
> > +  prev = btinfo->end;
> 
> Note that we can now declare variables at the point we use it, and drop
> the struct keyword, like:
> 
>    btrace_function *prev = btinfo->end;
> 
> It's up to you, you can continue with your current style if you wish.

I do prefer declaring (and if at all possible, initialising) variables at the point of first usage, too. For now I'll stick with the current style to leave this kind of change to a separate C++-ification patch set.

> >    bfun = XCNEW (struct btrace_function);
> >
> >    bfun->msym = mfun;
> > @@ -238,7 +238,7 @@ ftrace_new_function (struct btrace_thread_info
> > *btinfo,
> >      }
> >
> >    btinfo->functions.push_back (bfun);
> > -  return bfun;
> > +  return btinfo->end = bfun;
> 
> Err I'm really not a fan of assignment as a side effect.

This line is removed by the very next patch in the series. Anyway, I changed it.

> 
> > @@ -515,13 +510,13 @@ ftrace_new_gap (struct btrace_thread_info
> > *btinfo,
> >     Return the chronologically latest function segment, never NULL.  */
> >
> >  static struct btrace_function *
> > -ftrace_update_function (struct btrace_thread_info *btinfo,
> > -			struct btrace_function *bfun, CORE_ADDR pc)
> > +ftrace_update_function (struct btrace_thread_info *btinfo, CORE_ADDR
> > pc)
> 
> The comment of this function would need to be updated as well.

Fixed.

> Thanks,
> 
> Simon

Regards,
Tim
Intel Deutschland GmbH
Registered Address: Am Campeon 10-12, 85579 Neubiberg, Germany
Tel: +49 89 99 8853-0, www.intel.de
Managing Directors: Christin Eisenschmid, Christian Lamprechter
Chairperson of the Supervisory Board: Nicole Lau
Registered Office: Munich
Commercial Register: Amtsgericht Muenchen HRB 186928

^ permalink raw reply	[flat|nested] 36+ messages in thread

* RE: [PATCH v3 12/12] btrace: Store function segments as objects.
  2017-05-10  5:10   ` Simon Marchi
@ 2017-05-10 11:46     ` Wiederhake, Tim
  2017-05-10 14:16       ` Simon Marchi
  0 siblings, 1 reply; 36+ messages in thread
From: Wiederhake, Tim @ 2017-05-10 11:46 UTC (permalink / raw)
  To: Simon Marchi; +Cc: gdb-patches, Metzger, Markus T

Hi Simon,

Thanks for reviewing!

> -----Original Message-----
> From: Simon Marchi [mailto:simon.marchi@polymtl.ca]
> Sent: Wednesday, May 10, 2017 7:09 AM
> To: Wiederhake, Tim <tim.wiederhake@intel.com>
> Cc: gdb-patches@sourceware.org; Metzger, Markus T
> <markus.t.metzger@intel.com>
> Subject: Re: [PATCH v3 12/12] btrace: Store function segments as objects.
> 
> On 2017-05-09 02:55, Tim Wiederhake wrote:
> > 2017-05-09  Tim Wiederhake  <tim.wiederhake@intel.com>
> >
> > gdb/ChangeLog:
> > 	* btrace.c:
> > 	* btrace.h:
> > 	* record-btrace.c:
> 
> IWBN if you could be more explicit :).

Whoops.

> >
> > ---
> >  gdb/btrace.c        | 94
> > ++++++++++++++++++++++++++---------------------------
> >  gdb/btrace.h        |  7 ++--
> >  gdb/record-btrace.c | 10 +++---
> >  3 files changed, 56 insertions(+), 55 deletions(-)
> >
> > diff --git a/gdb/btrace.c b/gdb/btrace.c
> > index 7b82000..4c7020d 100644
> > --- a/gdb/btrace.c
> > +++ b/gdb/btrace.c
> > @@ -155,14 +155,27 @@ ftrace_call_num_insn (const struct
> > btrace_function* bfun)
> >  /* Return the function segment with the given NUMBER or NULL if no
> > such segment
> >     exists.  BTINFO is the branch trace information for the current
> > thread.  */
> >
> > -static struct btrace_function *
> > +const static struct btrace_function *
> 
> It would make more sense to put the "static" first.

Done.

> >  ftrace_find_call_by_number (const struct btrace_thread_info *btinfo,
> >  			    unsigned int number)
> >  {
> >    if (number == 0 || number > btinfo->functions.size ())
> >      return NULL;
> >
> > -  return btinfo->functions[number - 1];
> > +  return &btinfo->functions[number - 1];
> > +}
> > +
> > +/* Return the function segment with the given NUMBER or NULL if no
> > such segment
> > +   exists.  BTINFO is the branch trace information for the current
> > thread.  */
> 
> It took me a surprisingly high amount of seconds to understand that this
> was a const version of the function below.  To avoid repeating the
> comment and to make it clear it's the same thing, you can replace the
> comment of the const version to something like:
> 
>    /* A const version of the function above.  */

Done.

> > +
> > +static struct btrace_function *
> > +ftrace_find_call_by_number (struct btrace_thread_info *btinfo,
> > +			    unsigned int number)
> > +{
> > +  if (number == 0 || number > btinfo->functions.size ())
> > +    return NULL;
> > +
> > +  return &btinfo->functions[number - 1];
> >  }
> >
> >  /* Return non-zero if BFUN does not match MFUN and FUN,
> > @@ -214,37 +227,33 @@ ftrace_function_switched (const struct
> > btrace_function *bfun,
> >  /* Allocate and initialize a new branch trace function segment at the
> > end of
> >     the trace.
> >     BTINFO is the branch trace information for the current thread.
> > -   MFUN and FUN are the symbol information we have for this function.
> > */
> > +   MFUN and FUN are the symbol information we have for this function.
> > +   This invalidates all struct btrace_function pointer currently held.
> >  */
> >
> >  static struct btrace_function *
> >  ftrace_new_function (struct btrace_thread_info *btinfo,
> >  		     struct minimal_symbol *mfun,
> >  		     struct symbol *fun)
> >  {
> > -  struct btrace_function *bfun;
> > -
> > -  bfun = XCNEW (struct btrace_function);
> > -
> > -  bfun->msym = mfun;
> > -  bfun->sym = fun;
> > +  struct btrace_function bfun {mfun, fun, 0, 0, 0, NULL, 0, 0, 0, 0,
> > 0};
> 
> I think it would be much better to add a simple constructor to
> btrace_function.  For the fields that should simply be zero'ed, you can
> initialize fields directly, like we do in many other places (e.g. class
> inferior).

Having a proper constructor would definitely be beneficial here.
Nevertheless, I would do such a change in a separate patch set.

> >
> >    if (btinfo->functions.empty ())
> >      {
> >        /* Start counting at one.  */
> > -      bfun->number = 1;
> > -      bfun->insn_offset = 1;
> > +      bfun.number = 1;
> > +      bfun.insn_offset = 1;
> >      }
> >    else
> >      {
> > -      struct btrace_function *prev = btinfo->functions.back ();
> > +      struct btrace_function *prev = &btinfo->functions.back ();
> >
> > -      bfun->number = prev->number + 1;
> > -      bfun->insn_offset = prev->insn_offset + ftrace_call_num_insn
> > (prev);
> > -      bfun->level = prev->level;
> > +      bfun.number = prev->number + 1;
> > +      bfun.insn_offset = prev->insn_offset + ftrace_call_num_insn
> > (prev);
> > +      bfun.level = prev->level;
> >      }
> >
> > -  btinfo->functions.push_back (bfun);
> > -  return bfun;
> > +  btinfo->functions.push_back (std::move (bfun));
> > +  return &btinfo->functions.back ();
> 
> I could be mistaken, but I don't think the move is very useful here,
> since all fields of btrace_function are trivial (?).  You could use
> emplace_back instead:
> 
>    btinfo->functions.emplace_back (mfun, fun);
>    btrace_function &bfun = btinfo->functions.back ();
> 
>    ...
> 
>    return &bfun;
> 
> or
> 
>    unsigned int number, insn_offset;
>    unsigned int insn_offset = prev->insn_offset + ftrace_call_num_insn
> (prev);
>    int level = prev->level;
> 
>    if (btinfo->functions.empty ())
>      {
>        /* Start counting at one.  */
>        number = 1;
>        insn_offset = 1;
> 
>        level = 0;
>      }
>    else
>      {
>        struct btrace_function *prev = &btinfo->functions.back ();
> 
>        number = prev->number + 1;
>        insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
>        level = prev->level;
>      }
> 
>    btinfo->functions.emplace_back (mfun, fun, number, insn_offset,
> level);
> 
>    return &btinfo->functions.back ();

You are right, the std::move is quite pointless. Sadly, we can't use
emplace_back() yet until btrace_function gets a constructor. Removed
the move.

> Thanks,
> 
> Simon

Regards,
Tim
Intel Deutschland GmbH
Registered Address: Am Campeon 10-12, 85579 Neubiberg, Germany
Tel: +49 89 99 8853-0, www.intel.de
Managing Directors: Christin Eisenschmid, Christian Lamprechter
Chairperson of the Supervisory Board: Nicole Lau
Registered Office: Munich
Commercial Register: Amtsgericht Muenchen HRB 186928

^ permalink raw reply	[flat|nested] 36+ messages in thread

* RE: [PATCH v3 05/12] btrace: Use function segment index in insn  iterator.
  2017-05-10  2:20   ` Simon Marchi
@ 2017-05-10 11:46     ` Wiederhake, Tim
  0 siblings, 0 replies; 36+ messages in thread
From: Wiederhake, Tim @ 2017-05-10 11:46 UTC (permalink / raw)
  To: Simon Marchi; +Cc: gdb-patches, Metzger, Markus T

Hi Simon,

Thanks for reviewing!

> -----Original Message-----
> From: Simon Marchi [mailto:simon.marchi@polymtl.ca]
> Sent: Wednesday, May 10, 2017 4:20 AM
> To: Wiederhake, Tim <tim.wiederhake@intel.com>
> Cc: gdb-patches@sourceware.org; Metzger, Markus T
> <markus.t.metzger@intel.com>
> Subject: Re: [PATCH v3 05/12] btrace: Use function segment index in insn
> iterator.
> 
> On 2017-05-09 02:55, Tim Wiederhake wrote:
> > Remove FUNCTION pointer in struct btrace_insn_iterator and use an index
> > into
> > the list of function segments instead.
> >
> > 2017-05-09  Tim Wiederhake  <tim.wiederhake@intel.com>
> >
> > gdb/ChangeLog:
> >
> > 	* btrace.c: (btrace_insn_get, btrace_insn_get_error,
> > btrace_insn_number,
> > 	btrace_insn_begin, btrace_insn_end, btrace_insn_next,
> > btrace_insn_prev,
> > 	btrace_find_insn_by_number): Replace function segment pointer with
> > 	index.
> > 	(btrace_insn_cmp): Simplify.
> > 	* btrace.h: (struct btrace_insn_iterator) Rename index to
> > 	insn_index.  Replace function segment pointer with index into
> function
> > 	segment vector.
> > 	* record-btrace.c (record_btrace_call_history): Replace function
> > 	segment pointer use with index.
> > 	(record_btrace_frame_sniffer): Retrieve function call segment
> through
> > 	vector.
> 	(record_btrace_set_replay): Remove defunct safety check.
> 
> Looks good, just a few comments below.
> 
> > @@ -2468,12 +2474,21 @@ int
> >  btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
> >  		 const struct btrace_insn_iterator *rhs)
> >  {
> > -  unsigned int lnum, rnum;
> > +  gdb_assert (lhs->btinfo == rhs->btinfo);
> >
> > -  lnum = btrace_insn_number (lhs);
> > -  rnum = btrace_insn_number (rhs);
> > +  if (lhs->call_index > rhs->call_index)
> > +    return 1;
> > +
> > +  if (lhs->call_index < rhs->call_index)
> > +    return -1;
> > +
> > +  if (lhs->insn_index > rhs->insn_index)
> > +    return 1;
> > +
> > +  if (lhs->insn_index < rhs->insn_index)
> > +    return -1;
> >
> > -  return (int) (lnum - rnum);
> > +  return 0;
> >  }
> 
> I the number of comparisons could be reduced by doing:
> 
>    int
>    btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
>                     const struct btrace_insn_iterator *rhs)
>    {
>      gdb_assert (lhs->btinfo == rhs->btinfo);
> 
>      if (lhs->call_index != rhs->call_index)
>        return lhs->call_index - rhs->call_index;
> 
>      return lhs->insn_index - rhs->insn_index;
>    }

You're right. Changed locally, thanks!

> 
> >
> >  /* See btrace.h.  */
> > @@ -2522,8 +2537,8 @@ btrace_find_insn_by_number (struct
> > btrace_insn_iterator *it,
> >      }
> >
> >    it->btinfo = btinfo;
> > -  it->function = bfun;
> > -  it->index = number - bfun->insn_offset;
> > +  it->call_index = bfun->number - 1;
> > +  it->insn_index = number - bfun->insn_offset;
> >    return 1;
> >  }
> >
> > diff --git a/gdb/btrace.h b/gdb/btrace.h
> > index ef2c781..ca79667 100644
> > --- a/gdb/btrace.h
> > +++ b/gdb/btrace.h
> > @@ -195,12 +195,11 @@ struct btrace_insn_iterator
> >    /* The branch trace information for this thread.  Will never be
> > NULL.  */
> >    const struct btrace_thread_info *btinfo;
> >
> > -  /* The branch trace function segment containing the instruction.
> > -     Will never be NULL.  */
> > -  const struct btrace_function *function;
> 
> Just an idea, you could factor out those
> 
>    it->btinfo->functions[it->call_index]
> 
> in a small helper method in btrace_insn_iterator:
> 
> btrace_function *function ()
> {
>    return this->btinfo->functions[this->call_index];
> }

I'd like to postpone all further C++-ifications to a separate patch set.

> > @@ -2691,7 +2691,7 @@ record_btrace_set_replay (struct thread_info *tp,
> >
> >    btinfo = &tp->btrace;
> >
> > -  if (it == NULL || it->function == NULL)
> > +  if (it == NULL)
> 
> Not sure, but wouldn't the equivalent check be that call_index <
> btinfo->functions.size () ?

The comment on btrace_insn_iterator::function used to read: "The branch trace function segment containing the instruction. Will never be NULL". The check for it->function == NULL was a defensive measure but not necessary in terms of program behavior.

> Thanks,
> 
> Simon

Regards,
Tim
Intel Deutschland GmbH
Registered Address: Am Campeon 10-12, 85579 Neubiberg, Germany
Tel: +49 89 99 8853-0, www.intel.de
Managing Directors: Christin Eisenschmid, Christian Lamprechter
Chairperson of the Supervisory Board: Nicole Lau
Registered Office: Munich
Commercial Register: Amtsgericht Muenchen HRB 186928

^ permalink raw reply	[flat|nested] 36+ messages in thread

* RE: [PATCH v3 09/12] btrace: Remove struct btrace_thread_info::flow.
  2017-05-10  3:46   ` Simon Marchi
@ 2017-05-10 11:46     ` Wiederhake, Tim
  2017-05-10 13:59       ` Simon Marchi
  0 siblings, 1 reply; 36+ messages in thread
From: Wiederhake, Tim @ 2017-05-10 11:46 UTC (permalink / raw)
  To: Simon Marchi; +Cc: gdb-patches, Metzger, Markus T

Hi Simon,

thanks for reviewing!

> -----Original Message-----
> From: Simon Marchi [mailto:simon.marchi@polymtl.ca]
> Sent: Wednesday, May 10, 2017 5:45 AM
> To: Wiederhake, Tim <tim.wiederhake@intel.com>
> Cc: gdb-patches@sourceware.org; Metzger, Markus T
> <markus.t.metzger@intel.com>
> Subject: Re: [PATCH v3 09/12] btrace: Remove struct
> btrace_thread_info::flow.
> 
> The title should mention btrace_function::flow instead of
> btrace_thread_info::flow.  I just realized the previous patch has the
> same issue.

I have no idea how this escaped me for so long. Fixed.

> On 2017-05-09 02:55, Tim Wiederhake wrote:
> > This used to hold a pair of pointers to the previous and next function
> > segment
> > in execution flow order.  It is no longer necessary as the previous and
> > next
> > function segments now are simply the previous and next elements in the
> > vector
> > of function segments.
> >
> > 2017-05-09  Tim Wiederhake  <tim.wiederhake@intel.com>
> >
> > gdb/ChangeLog:
> >
> > 	* btrace.c (ftrace_new_function, ftrace_fixup_level,
> > 	ftrace_connect_bfun, ftrace_bridge_gap, btrace_bridge_gaps,
> > 	btrace_insn_next, btrace_insn_prev): Remove references to
> > 	btrace_thread_info::flow.
> 
> btrace_function::flow.
> 
> > 	* btrace.h (struct btrace_function): Remove FLOW.
> >
> 
> The patch LGTM, but I have one question.  Did you consider adding a
> backlink in btrace_function to its btrace_thread_info owner?  It would
> possible to implement gap->next () and gap->prev (), which could be used
> in many places, and would probably be more readable than
> 
>    ftrace_find_call_by_number (btinfo, gap->number - 1)
>    ftrace_find_call_by_number (btinfo, gap->number + 1)
> 
> And it could possibly bring more simplifications, I didn't look in
> details.

I believe a C++-ification patch series could get rid of call_iterator and insn_iterator altogether. Sorry if I sound like a broken record :).

> 
> Thanks,
> 
> Simon

Regards,
Tim
Intel Deutschland GmbH
Registered Address: Am Campeon 10-12, 85579 Neubiberg, Germany
Tel: +49 89 99 8853-0, www.intel.de
Managing Directors: Christin Eisenschmid, Christian Lamprechter
Chairperson of the Supervisory Board: Nicole Lau
Registered Office: Munich
Commercial Register: Amtsgericht Muenchen HRB 186928

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH v3 09/12] btrace: Remove struct btrace_thread_info::flow.
  2017-05-10 11:46     ` Wiederhake, Tim
@ 2017-05-10 13:59       ` Simon Marchi
  0 siblings, 0 replies; 36+ messages in thread
From: Simon Marchi @ 2017-05-10 13:59 UTC (permalink / raw)
  To: Wiederhake, Tim; +Cc: gdb-patches, Metzger, Markus T

On 2017-05-10 07:46, Wiederhake, Tim wrote:
>> The patch LGTM, but I have one question.  Did you consider adding a
>> backlink in btrace_function to its btrace_thread_info owner?  It would
>> be possible to implement gap->next (), which could be 
>> used
>> in many places, and would probably be more readable than
>> 
>>    ftrace_find_call_by_number (btinfo, gap->number - 1)
>>    ftrace_find_call_by_number (btinfo, gap->number + 1)
>> 
>> And it could possibly bring more simplifications, I didn't look in
>> details.
> 
> I believe a C++-ification patch series could get rid of
> call_iterator and insn_iterator altogether. Sorry if I sound like a
> broken record :).

No worries, I see no problem in addressing that in a later series.

Simon

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH v3 10/12] btrace: Replace struct  btrace_thread_info::segment.
  2017-05-10 11:46     ` Wiederhake, Tim
@ 2017-05-10 14:13       ` Simon Marchi
  0 siblings, 0 replies; 36+ messages in thread
From: Simon Marchi @ 2017-05-10 14:13 UTC (permalink / raw)
  To: Wiederhake, Tim; +Cc: gdb-patches, Metzger, Markus T

On 2017-05-10 07:46, Wiederhake, Tim wrote:
>> Btw #2, I think this function could be more efficient (or maybe I 
>> don't
>> understand as well as I think).  If bfun at function entry is in the
>> middle of a long list of segments, it will start from there and 
>> iterate
>> backwards until it hits the first segment.
> 
> Correct so far.
> 
>> But because the same bfun
>> variable is reused, it will iterate forward from the start
> 
> We saved PREV and NEXT beforehand and use BFUN as a temporary variable
> afterwards. The second "for" loop starts at NEXT, which is one past the
> original "middle of the long list of segments".

Ah, hence the need to save next before the first loop.  For some reason 
I thought the second loop was initialized from bfun.  Sorry about that.

Simon

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH v3 12/12] btrace: Store function segments as objects.
  2017-05-10 11:46     ` Wiederhake, Tim
@ 2017-05-10 14:16       ` Simon Marchi
  0 siblings, 0 replies; 36+ messages in thread
From: Simon Marchi @ 2017-05-10 14:16 UTC (permalink / raw)
  To: Wiederhake, Tim; +Cc: gdb-patches, Metzger, Markus T

On 2017-05-10 07:46, Wiederhake, Tim wrote:
>> > +  struct btrace_function bfun {mfun, fun, 0, 0, 0, NULL, 0, 0, 0, 0,
>> > 0};
>> 
>> I think it would be much better to add a simple constructor to
>> btrace_function.  For the fields that should simply be zero'ed, you 
>> can
>> initialize fields directly, like we do in many other places (e.g. 
>> class
>> inferior).
> 
> Having a proper constructor would definitely be beneficial here.
> Nevertheless, I would do such a change in a separate patch set.

Ok, but in this case I'd still prefer if you initialized the fields 
directly in the class right away.  In the (unlikely) event that we add a 
field in btrace_function, we'll have to think to update this.

Thanks,

Simon

^ permalink raw reply	[flat|nested] 36+ messages in thread

end of thread, other threads:[~2017-05-10 14:16 UTC | newest]

Thread overview: 36+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-05-09  7:01 [PATCH v3 00/12] btrace: Turn linked list of function call segments into vector Tim Wiederhake
2017-05-09  7:01 ` [PATCH v3 03/12] btrace: Add btinfo to instruction iterator Tim Wiederhake
2017-05-09  7:01 ` [PATCH v3 10/12] btrace: Replace struct btrace_thread_info::segment Tim Wiederhake
2017-05-10  4:14   ` Simon Marchi
2017-05-10 11:46     ` Wiederhake, Tim
2017-05-10 14:13       ` Simon Marchi
2017-05-09  7:01 ` [PATCH v3 08/12] btrace: Replace struct btrace_thread_info::up Tim Wiederhake
2017-05-10  3:26   ` Simon Marchi
2017-05-10 11:46     ` Wiederhake, Tim
2017-05-09  7:01 ` [PATCH v3 11/12] btrace: Remove bfun_s vector Tim Wiederhake
2017-05-10  4:27   ` Simon Marchi
2017-05-10 11:46     ` Wiederhake, Tim
2017-05-09  7:01 ` [PATCH v3 05/12] btrace: Use function segment index in insn iterator Tim Wiederhake
2017-05-10  2:20   ` Simon Marchi
2017-05-10 11:46     ` Wiederhake, Tim
2017-05-09  7:01 ` [PATCH v3 04/12] btrace: Use function segment index in call iterator Tim Wiederhake
2017-05-09 12:50   ` Simon Marchi
2017-05-09 13:14     ` Wiederhake, Tim
2017-05-09 14:29       ` Simon Marchi
2017-05-09  7:01 ` [PATCH v3 07/12] btrace: Remove struct btrace_thread_info::{begin,end} Tim Wiederhake
2017-05-10  3:06   ` Simon Marchi
2017-05-09  7:01 ` [PATCH v3 12/12] btrace: Store function segments as objects Tim Wiederhake
2017-05-10  5:10   ` Simon Marchi
2017-05-10 11:46     ` Wiederhake, Tim
2017-05-10 14:16       ` Simon Marchi
2017-05-09  7:01 ` [PATCH v3 01/12] btrace: Use std::vector in struct btrace_thread_information Tim Wiederhake
2017-05-09 12:10   ` Simon Marchi
2017-05-09  7:01 ` [PATCH v3 02/12] btrace: Transfer ownership of pointers Tim Wiederhake
2017-05-09 12:21   ` Simon Marchi
2017-05-09  7:01 ` [PATCH v3 09/12] btrace: Remove struct btrace_thread_info::flow Tim Wiederhake
2017-05-10  3:46   ` Simon Marchi
2017-05-10 11:46     ` Wiederhake, Tim
2017-05-10 13:59       ` Simon Marchi
2017-05-09  7:01 ` [PATCH v3 06/12] btrace: Remove constant arguments Tim Wiederhake
2017-05-10  2:45   ` Simon Marchi
2017-05-10 11:46     ` Wiederhake, Tim

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).