public inbox for gdb-patches@sourceware.org
 help / color / mirror / Atom feed
* [PATCH 08/11] [SQUASH] btrace: Adjust struct btrace_function::{flow,segment}.
  2017-02-17 13:27 [PATCH 00/11] btrace: Turn linked list of function call segments into vector Tim Wiederhake
                   ` (6 preceding siblings ...)
  2017-02-17 13:27 ` [PATCH 01/11] btrace: Use struct btrace_thread_info fields directly Tim Wiederhake
@ 2017-02-17 13:27 ` Tim Wiederhake
  2017-02-24  9:33   ` Metzger, Markus T
  2017-02-17 13:27 ` [PATCH 06/11] [SQUASH] btrace: Save function calls in a vector Tim Wiederhake
                   ` (3 subsequent siblings)
  11 siblings, 1 reply; 20+ messages in thread
From: Tim Wiederhake @ 2017-02-17 13:27 UTC (permalink / raw)
  To: gdb-patches; +Cc: markus.t.metzger

This patch stands alone for easier review and is meant to be squashed together
for committing.  ChangeLog will be added to the squashed commit.


2017-02-17  Tim Wiederhake  <tim.wiederhake@intel.com>
---
 gdb/btrace.c                  | 80 +++++++++++++++++++++++++------------------
 gdb/btrace.h                  | 20 ++++-------
 gdb/python/py-record-btrace.c |  8 ++---
 gdb/record-btrace.c           |  5 +--
 4 files changed, 60 insertions(+), 53 deletions(-)

diff --git a/gdb/btrace.c b/gdb/btrace.c
index 880a703..701daa3 100644
--- a/gdb/btrace.c
+++ b/gdb/btrace.c
@@ -231,7 +231,6 @@ ftrace_new_function (struct btrace_thread_info *btinfo,
 
   bfun->msym = mfun;
   bfun->sym = fun;
-  bfun->flow.prev = prev;
 
   if (prev == NULL)
     {
@@ -241,9 +240,6 @@ ftrace_new_function (struct btrace_thread_info *btinfo,
     }
   else
     {
-      gdb_assert (prev->flow.next == NULL);
-      prev->flow.next = bfun;
-
       bfun->number = prev->number + 1;
       bfun->insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
       bfun->level = prev->level;
@@ -277,16 +273,24 @@ ftrace_fixup_caller (struct btrace_thread_info *btinfo,
 		     struct btrace_function *caller,
 		     enum btrace_function_flag flags)
 {
-  struct btrace_function *prev, *next;
+  unsigned int prev, next;
 
+  prev = bfun->prev_segment;
+  next = bfun->next_segment;
   ftrace_update_caller (bfun, caller, flags);
 
   /* Update all function segments belonging to the same function.  */
-  for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
-    ftrace_update_caller (prev, caller, flags);
+  for (; prev != 0; prev = bfun->prev_segment)
+    {
+      bfun = ftrace_find_call_by_number (btinfo, prev);
+      ftrace_update_caller (bfun, caller, flags);
+    }
 
-  for (next = bfun->segment.next; next != NULL; next = next->segment.next)
-    ftrace_update_caller (next, caller, flags);
+  for (; next != 0; next = bfun->next_segment)
+    {
+      bfun = ftrace_find_call_by_number (btinfo, next);
+      ftrace_update_caller (bfun, caller, flags);
+    }
 }
 
 /* Add a new function segment for a call.
@@ -408,10 +412,10 @@ ftrace_new_return (struct btrace_thread_info *btinfo,
     {
       /* The caller of PREV is the preceding btrace function segment in this
 	 function instance.  */
-      gdb_assert (caller->segment.next == NULL);
+      gdb_assert (caller->next_segment == 0);
 
-      caller->segment.next = bfun;
-      bfun->segment.prev = caller;
+      caller->next_segment = bfun->number;
+      bfun->prev_segment = caller->number;
 
       /* Maintain the function level.  */
       bfun->level = caller->level;
@@ -682,7 +686,8 @@ ftrace_match_backtrace (struct btrace_thread_info *btinfo,
 /* Add ADJUSTMENT to the level of BFUN and succeeding function segments.  */
 
 static void
-ftrace_fixup_level (struct btrace_function *bfun, int adjustment)
+ftrace_fixup_level (struct btrace_thread_info *btinfo,
+		    struct btrace_function *bfun, int adjustment)
 {
   if (adjustment == 0)
     return;
@@ -690,8 +695,11 @@ ftrace_fixup_level (struct btrace_function *bfun, int adjustment)
   DEBUG_FTRACE ("fixup level (%+d)", adjustment);
   ftrace_debug (bfun, "..bfun");
 
-  for (; bfun != NULL; bfun = bfun->flow.next)
-    bfun->level += adjustment;
+  while (bfun != NULL)
+    {
+      bfun->level += adjustment;
+      bfun = ftrace_find_call_by_number (btinfo, bfun->number + 1);
+    }
 }
 
 /* Recompute the global level offset.  Traverse the function trace and compute
@@ -718,8 +726,11 @@ ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
     end = NULL;
 
   level = INT_MAX;
-  for (; bfun != end; bfun = bfun->flow.next)
-    level = std::min (level, bfun->level);
+  while (bfun != end)
+    {
+      level = std::min (level, bfun->level);
+      bfun = ftrace_find_call_by_number (btinfo, bfun->number + 1);
+    }
 
   DEBUG_FTRACE ("setting global level offset: %d", -level);
   btinfo->level = -level;
@@ -738,14 +749,14 @@ ftrace_connect_bfun (struct btrace_thread_info *btinfo,
   ftrace_debug (next, "..next");
 
   /* The function segments are not yet connected.  */
-  gdb_assert (prev->segment.next == NULL);
-  gdb_assert (next->segment.prev == NULL);
+  gdb_assert (prev->next_segment == 0);
+  gdb_assert (next->prev_segment == 0);
 
-  prev->segment.next = next;
-  next->segment.prev = prev;
+  prev->next_segment = next->number;
+  next->prev_segment = prev->number;
 
   /* We may have moved NEXT to a different function level.  */
-  ftrace_fixup_level (next, prev->level - next->level);
+  ftrace_fixup_level (btinfo, next, prev->level - next->level);
 
   /* If we run out of back trace for one, let's use the other's.  */
   if (prev->up == 0)
@@ -816,7 +827,8 @@ ftrace_connect_bfun (struct btrace_thread_info *btinfo,
 
 		     Otherwise we will fix up CALLER's level when we connect it
 		     to PREV's caller in the next iteration.  */
-		  ftrace_fixup_level (caller, prev->level - caller->level - 1);
+		  ftrace_fixup_level (btinfo, caller,
+				      prev->level - caller->level - 1);
 		  break;
 		}
 
@@ -912,7 +924,7 @@ ftrace_bridge_gap (struct btrace_thread_info *btinfo,
      To catch this, we already fix up the level here where we can start at RHS
      instead of at BEST_R.  We will ignore the level fixup when connecting
      BEST_L to BEST_R as they will already be on the same level.  */
-  ftrace_fixup_level (rhs, best_l->level - best_r->level);
+  ftrace_fixup_level (btinfo, rhs, best_l->level - best_r->level);
 
   ftrace_connect_backtrace (btinfo, best_l, best_r);
 
@@ -925,12 +937,14 @@ ftrace_bridge_gap (struct btrace_thread_info *btinfo,
 static void
 btrace_bridge_gaps (struct thread_info *tp, VEC (bfun_s) **gaps)
 {
+  struct btrace_thread_info *btinfo;
   VEC (bfun_s) *remaining;
   struct cleanup *old_chain;
   int min_matches;
 
   DEBUG ("bridge gaps");
 
+  btinfo = &tp->btrace;
   remaining = NULL;
   old_chain = make_cleanup (VEC_cleanup (bfun_s), &remaining);
 
@@ -959,20 +973,20 @@ btrace_bridge_gaps (struct thread_info *tp, VEC (bfun_s) **gaps)
 		 all but the leftmost gap in such a sequence.
 
 		 Also ignore gaps at the beginning of the trace.  */
-	      lhs = gap->flow.prev;
+	      lhs = ftrace_find_call_by_number (btinfo, gap->number - 1);
 	      if (lhs == NULL || lhs->errcode != 0)
 		continue;
 
 	      /* Skip gaps to the right.  */
-	      for (rhs = gap->flow.next; rhs != NULL; rhs = rhs->flow.next)
-		if (rhs->errcode == 0)
-		  break;
+	      rhs = ftrace_find_call_by_number (btinfo, gap->number + 1);
+	      while (rhs != NULL && rhs->errcode != 0)
+		rhs = ftrace_find_call_by_number (btinfo, rhs->number + 1);
 
 	      /* Ignore gaps at the end of the trace.  */
 	      if (rhs == NULL)
 		continue;
 
-	      bridged = ftrace_bridge_gap (&tp->btrace, lhs, rhs, min_matches);
+	      bridged = ftrace_bridge_gap (btinfo, lhs, rhs, min_matches);
 
 	      /* Keep track of gaps we were not able to bridge and try again.
 		 If we just pushed them to the end of GAPS we would risk an
@@ -1002,7 +1016,7 @@ btrace_bridge_gaps (struct thread_info *tp, VEC (bfun_s) **gaps)
 
   /* We may omit this in some cases.  Not sure it is worth the extra
      complication, though.  */
-  ftrace_compute_global_level_offset (&tp->btrace);
+  ftrace_compute_global_level_offset (btinfo);
 }
 
 /* Compute the function branch trace from BTS trace.  */
@@ -2371,7 +2385,7 @@ btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
 	{
 	  const struct btrace_function *next;
 
-	  next = bfun->flow.next;
+	  next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1);
 	  if (next == NULL)
 	    break;
 
@@ -2401,7 +2415,7 @@ btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
 	{
 	  const struct btrace_function *next;
 
-	  next = bfun->flow.next;
+	  next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1);
 	  if (next == NULL)
 	    {
 	      /* We stepped past the last function.
@@ -2450,7 +2464,7 @@ btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
 	{
 	  const struct btrace_function *prev;
 
-	  prev = bfun->flow.prev;
+	  prev = ftrace_find_call_by_number (it->btinfo, bfun->number - 1);
 	  if (prev == NULL)
 	    break;
 
diff --git a/gdb/btrace.h b/gdb/btrace.h
index 2b28ff8..8f8a7fa 100644
--- a/gdb/btrace.h
+++ b/gdb/btrace.h
@@ -83,13 +83,6 @@ struct btrace_insn
 typedef struct btrace_insn btrace_insn_s;
 DEF_VEC_O (btrace_insn_s);
 
-/* A doubly-linked list of branch trace function segments.  */
-struct btrace_func_link
-{
-  struct btrace_function *prev;
-  struct btrace_function *next;
-};
-
 /* Flags for btrace function segments.  */
 enum btrace_function_flag
 {
@@ -144,13 +137,12 @@ struct btrace_function
   struct minimal_symbol *msym;
   struct symbol *sym;
 
-  /* The previous and next segment belonging to the same function.
-     If a function calls another function, the former will have at least
-     two segments: one before the call and another after the return.  */
-  struct btrace_func_link segment;
-
-  /* The previous and next function in control flow order.  */
-  struct btrace_func_link flow;
+  /* The function segment numbers of the previous and next segment belonging to
+     the same function.  If a function calls another function, the former will
+     have at least two segments: one before the call and another after the
+     return.  Will be zero if there is no such function segment.  */
+  unsigned int prev_segment;
+  unsigned int next_segment;
 
   /* The function segment number of the directly preceding function segment in
      a (fake) call stack.  Will be zero if there is no such function segment in
diff --git a/gdb/python/py-record-btrace.c b/gdb/python/py-record-btrace.c
index 14ad5b7..2c8132f 100644
--- a/gdb/python/py-record-btrace.c
+++ b/gdb/python/py-record-btrace.c
@@ -478,10 +478,10 @@ btpy_call_prev_sibling (PyObject *self, void *closure)
   if (func == NULL)
     Py_RETURN_NONE;
 
-  if (func->segment.prev == NULL)
+  if (func->prev_segment == 0)
     Py_RETURN_NONE;
 
-  return btpy_call_new (obj->ptid, func->segment.prev->number);
+  return btpy_call_new (obj->ptid, func->prev_segment);
 }
 
 /* Implementation of BtraceFunctionCall.next_sibling [BtraceFunctionCall].
@@ -500,10 +500,10 @@ btpy_call_next_sibling (PyObject *self, void *closure)
   if (func == NULL)
     Py_RETURN_NONE;
 
-  if (func->segment.next == NULL)
+  if (func->next_segment == 0)
     Py_RETURN_NONE;
 
-  return btpy_call_new (obj->ptid, func->segment.next->number);
+  return btpy_call_new (obj->ptid, func->next_segment);
 }
 
 /* Python rich compare function to allow for equality and inequality checks
diff --git a/gdb/record-btrace.c b/gdb/record-btrace.c
index 791963c..7ba3844 100644
--- a/gdb/record-btrace.c
+++ b/gdb/record-btrace.c
@@ -1605,8 +1605,9 @@ record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
   bfun = cache->bfun;
   gdb_assert (bfun != NULL);
 
-  while (bfun->segment.prev != NULL)
-    bfun = bfun->segment.prev;
+  while (bfun->prev_segment != 0)
+    bfun = VEC_index (btrace_fun_s, cache->tp->btrace.functions,
+		      bfun->prev_segment - 1);
 
   code = get_frame_func (this_frame);
   special = bfun->number;
-- 
2.7.4

^ permalink raw reply	[flat|nested] 20+ messages in thread

* [PATCH 01/11] btrace: Use struct btrace_thread_info fields directly.
  2017-02-17 13:27 [PATCH 00/11] btrace: Turn linked list of function call segments into vector Tim Wiederhake
                   ` (5 preceding siblings ...)
  2017-02-17 13:27 ` [PATCH 05/11] btrace: Use function segment index in insn iterator Tim Wiederhake
@ 2017-02-17 13:27 ` Tim Wiederhake
  2017-02-17 13:27 ` [PATCH 08/11] [SQUASH] btrace: Adjust struct btrace_function::{flow,segment} Tim Wiederhake
                   ` (4 subsequent siblings)
  11 siblings, 0 replies; 20+ messages in thread
From: Tim Wiederhake @ 2017-02-17 13:27 UTC (permalink / raw)
  To: gdb-patches; +Cc: markus.t.metzger

This will later allow us to remove the BEGIN and END fields.

2017-02-17  Tim Wiederhake  <tim.wiederhake@intel.com>

gdb/ChangeLog:

	* btrace.c (btrace_compute_ftrace_bts, ftrace_add_pt): Use struct
	btrace_thread_info fields directly.
	(btrace_compute_ftrace_pt): Adjusted for change in ftrace_add_pt.


---
 gdb/btrace.c | 94 +++++++++++++++++++++++++++---------------------------------
 1 file changed, 43 insertions(+), 51 deletions(-)

diff --git a/gdb/btrace.c b/gdb/btrace.c
index 95dc7ab..14a16a2 100644
--- a/gdb/btrace.c
+++ b/gdb/btrace.c
@@ -976,16 +976,13 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
 			   VEC (bfun_s) **gaps)
 {
   struct btrace_thread_info *btinfo;
-  struct btrace_function *begin, *end;
   struct gdbarch *gdbarch;
   unsigned int blk;
   int level;
 
   gdbarch = target_gdbarch ();
   btinfo = &tp->btrace;
-  begin = btinfo->begin;
-  end = btinfo->end;
-  level = begin != NULL ? -btinfo->level : INT_MAX;
+  level = btinfo->begin != NULL ? -btinfo->level : INT_MAX;
   blk = VEC_length (btrace_block_s, btrace->blocks);
 
   while (blk != 0)
@@ -1007,27 +1004,27 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
 	  if (block->end < pc)
 	    {
 	      /* Indicate the gap in the trace.  */
-	      end = ftrace_new_gap (end, BDE_BTS_OVERFLOW);
-	      if (begin == NULL)
-		begin = end;
+	      btinfo->end = ftrace_new_gap (btinfo->end, BDE_BTS_OVERFLOW);
+	      if (btinfo->begin == NULL)
+		btinfo->begin = btinfo->end;
 
-	      VEC_safe_push (bfun_s, *gaps, end);
+	      VEC_safe_push (bfun_s, *gaps, btinfo->end);
 
 	      warning (_("Recorded trace may be corrupted at instruction "
-			 "%u (pc = %s)."), end->insn_offset - 1,
+			 "%u (pc = %s)."), btinfo->end->insn_offset - 1,
 		       core_addr_to_string_nz (pc));
 
 	      break;
 	    }
 
-	  end = ftrace_update_function (end, pc);
-	  if (begin == NULL)
-	    begin = end;
+	  btinfo->end = ftrace_update_function (btinfo->end, pc);
+	  if (btinfo->begin == NULL)
+	    btinfo->begin = btinfo->end;
 
 	  /* Maintain the function level offset.
 	     For all but the last block, we do it here.  */
 	  if (blk != 0)
-	    level = std::min (level, end->level);
+	    level = std::min (level, btinfo->end->level);
 
 	  size = 0;
 	  TRY
@@ -1044,7 +1041,7 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
 	  insn.iclass = ftrace_classify_insn (gdbarch, pc);
 	  insn.flags = 0;
 
-	  ftrace_update_insns (end, &insn);
+	  ftrace_update_insns (btinfo->end, &insn);
 
 	  /* We're done once we pushed the instruction at the end.  */
 	  if (block->end == pc)
@@ -1055,12 +1052,12 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
 	    {
 	      /* Indicate the gap in the trace.  We just added INSN so we're
 		 not at the beginning.  */
-	      end = ftrace_new_gap (end, BDE_BTS_INSN_SIZE);
+	      btinfo->end = ftrace_new_gap (btinfo->end, BDE_BTS_INSN_SIZE);
 
-	      VEC_safe_push (bfun_s, *gaps, end);
+	      VEC_safe_push (bfun_s, *gaps, btinfo->end);
 
 	      warning (_("Recorded trace may be incomplete at instruction %u "
-			 "(pc = %s)."), end->insn_offset - 1,
+			 "(pc = %s)."), btinfo->end->insn_offset - 1,
 		       core_addr_to_string_nz (pc));
 
 	      break;
@@ -1075,13 +1072,10 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
 	     and is not really part of the execution history, it shouldn't
 	     affect the level.  */
 	  if (blk == 0)
-	    level = std::min (level, end->level);
+	    level = std::min (level, btinfo->end->level);
 	}
     }
 
-  btinfo->begin = begin;
-  btinfo->end = end;
-
   /* LEVEL is the minimal function level of all btrace function segments.
      Define the global level offset to -LEVEL so all function levels are
      normalized to start at zero.  */
@@ -1126,16 +1120,13 @@ pt_btrace_insn_flags (const struct pt_insn *insn)
 
 static void
 ftrace_add_pt (struct pt_insn_decoder *decoder,
-	       struct btrace_function **pbegin,
-	       struct btrace_function **pend, int *plevel,
-	       VEC (bfun_s) **gaps)
+	       struct btrace_thread_info *btinfo,
+	       int *plevel, VEC (bfun_s) **gaps)
 {
-  struct btrace_function *begin, *end, *upd;
+  struct btrace_function *upd;
   uint64_t offset;
   int errcode;
 
-  begin = *pbegin;
-  end = *pend;
   for (;;)
     {
       struct btrace_insn btinsn;
@@ -1158,7 +1149,7 @@ ftrace_add_pt (struct pt_insn_decoder *decoder,
 	    break;
 
 	  /* Look for gaps in the trace - unless we're at the beginning.  */
-	  if (begin != NULL)
+	  if (btinfo->begin != NULL)
 	    {
 	      /* Tracing is disabled and re-enabled each time we enter the
 		 kernel.  Most times, we continue from the same instruction we
@@ -1167,69 +1158,70 @@ ftrace_add_pt (struct pt_insn_decoder *decoder,
 		 from some other instruction.  Indicate this as a trace gap.  */
 	      if (insn.enabled)
 		{
-		  *pend = end = ftrace_new_gap (end, BDE_PT_DISABLED);
+		  btinfo->end = ftrace_new_gap (btinfo->end, BDE_PT_DISABLED);
 
-		  VEC_safe_push (bfun_s, *gaps, end);
+		  VEC_safe_push (bfun_s, *gaps, btinfo->end);
 
 		  pt_insn_get_offset (decoder, &offset);
 
 		  warning (_("Non-contiguous trace at instruction %u (offset "
 			     "= 0x%" PRIx64 ", pc = 0x%" PRIx64 ")."),
-			   end->insn_offset - 1, offset, insn.ip);
+			   btinfo->end->insn_offset - 1, offset, insn.ip);
 		}
 	    }
 
 	  /* Indicate trace overflows.  */
 	  if (insn.resynced)
 	    {
-	      *pend = end = ftrace_new_gap (end, BDE_PT_OVERFLOW);
-	      if (begin == NULL)
-		*pbegin = begin = end;
+	      btinfo->end = ftrace_new_gap (btinfo->end, BDE_PT_OVERFLOW);
+	      if (btinfo->begin == NULL)
+		btinfo->begin = btinfo->end;
 
-	      VEC_safe_push (bfun_s, *gaps, end);
+	      VEC_safe_push (bfun_s, *gaps, btinfo->end);
 
 	      pt_insn_get_offset (decoder, &offset);
 
 	      warning (_("Overflow at instruction %u (offset = 0x%" PRIx64
-			 ", pc = 0x%" PRIx64 ")."), end->insn_offset - 1,
-		       offset, insn.ip);
+			 ", pc = 0x%" PRIx64 ")."),
+		       btinfo->end->insn_offset - 1, offset, insn.ip);
 	    }
 
-	  upd = ftrace_update_function (end, insn.ip);
-	  if (upd != end)
+	  upd = ftrace_update_function (btinfo->end, insn.ip);
+	  if (upd != btinfo->end)
 	    {
-	      *pend = end = upd;
+	      btinfo->end = upd;
 
-	      if (begin == NULL)
-		*pbegin = begin = upd;
+	      if (btinfo->begin == NULL)
+		btinfo->begin = upd;
 	    }
 
 	  /* Maintain the function level offset.  */
-	  *plevel = std::min (*plevel, end->level);
+	  *plevel = std::min (*plevel, btinfo->end->level);
 
 	  btinsn.pc = (CORE_ADDR) insn.ip;
 	  btinsn.size = (gdb_byte) insn.size;
 	  btinsn.iclass = pt_reclassify_insn (insn.iclass);
 	  btinsn.flags = pt_btrace_insn_flags (&insn);
 
-	  ftrace_update_insns (end, &btinsn);
+	  ftrace_update_insns (btinfo->end, &btinsn);
 	}
 
       if (errcode == -pte_eos)
 	break;
 
       /* Indicate the gap in the trace.  */
-      *pend = end = ftrace_new_gap (end, errcode);
-      if (begin == NULL)
-	*pbegin = begin = end;
+      btinfo->end = ftrace_new_gap (btinfo->end, errcode);
+      if (btinfo->begin == NULL)
+	btinfo->begin = btinfo->end;
 
-      VEC_safe_push (bfun_s, *gaps, end);
+      VEC_safe_push (bfun_s, *gaps, btinfo->end);
 
       pt_insn_get_offset (decoder, &offset);
 
       warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
-		 ", pc = 0x%" PRIx64 "): %s."), errcode, end->insn_offset - 1,
-	       offset, insn.ip, pt_errstr (pt_errcode (errcode)));
+		 ", pc = 0x%" PRIx64 "): %s."), errcode,
+	       btinfo->end->insn_offset - 1, offset, insn.ip,
+	       pt_errstr (pt_errcode (errcode)));
     }
 }
 
@@ -1343,7 +1335,7 @@ btrace_compute_ftrace_pt (struct thread_info *tp,
 	error (_("Failed to configure the Intel Processor Trace decoder: "
 		 "%s."), pt_errstr (pt_errcode (errcode)));
 
-      ftrace_add_pt (decoder, &btinfo->begin, &btinfo->end, &level, gaps);
+      ftrace_add_pt (decoder, btinfo, &level, gaps);
     }
   CATCH (error, RETURN_MASK_ALL)
     {
-- 
2.7.4

^ permalink raw reply	[flat|nested] 20+ messages in thread

* [PATCH 10/11] [SQUASH] btrace: Remove bfun_s vector.
  2017-02-17 13:27 [PATCH 00/11] btrace: Turn linked list of function call segments into vector Tim Wiederhake
  2017-02-17 13:27 ` [PATCH 07/11] [SQUASH] btrace: Adjust struct btrace_function::up Tim Wiederhake
@ 2017-02-17 13:27 ` Tim Wiederhake
  2017-02-17 13:27 ` [PATCH 04/11] btrace: Use function segment index in call iterator Tim Wiederhake
                   ` (9 subsequent siblings)
  11 siblings, 0 replies; 20+ messages in thread
From: Tim Wiederhake @ 2017-02-17 13:27 UTC (permalink / raw)
  To: gdb-patches; +Cc: markus.t.metzger

This patch stands alone for easier review and is meant to be squashed together
for committing.  ChangeLog will be added to the squashed commit.


2017-02-17  Tim Wiederhake  <tim.wiederhake@intel.com>
---
 gdb/btrace.c | 101 +++++++++++++++++++++--------------------------------------
 1 file changed, 35 insertions(+), 66 deletions(-)

diff --git a/gdb/btrace.c b/gdb/btrace.c
index cd2475d..859c87f 100644
--- a/gdb/btrace.c
+++ b/gdb/btrace.c
@@ -38,6 +38,7 @@
 #include <inttypes.h>
 #include <ctype.h>
 #include <algorithm>
+#include <vector>
 
 /* Command lists for btrace maintenance commands.  */
 static struct cmd_list_element *maint_btrace_cmdlist;
@@ -49,10 +50,6 @@ static struct cmd_list_element *maint_btrace_pt_show_cmdlist;
 /* Control whether to skip PAD packets when computing the packet history.  */
 static int maint_btrace_pt_skip_pad = 1;
 
-/* A vector of function segments.  */
-typedef struct btrace_function * bfun_s;
-DEF_VEC_P (bfun_s);
-
 static void btrace_add_pc (struct thread_info *tp);
 
 /* Print a record debug message.  Use do ... while (0) to avoid ambiguities
@@ -498,7 +495,8 @@ ftrace_new_switch (struct btrace_thread_info *btinfo,
    ERRCODE is the format-specific error code.  */
 
 static struct btrace_function *
-ftrace_new_gap (struct btrace_thread_info *btinfo, int errcode)
+ftrace_new_gap (struct btrace_thread_info *btinfo, int errcode,
+		std::vector<unsigned int> &gaps)
 {
   struct btrace_function *bfun;
 
@@ -513,6 +511,7 @@ ftrace_new_gap (struct btrace_thread_info *btinfo, int errcode)
     }
 
   bfun->errcode = errcode;
+  gaps.push_back (bfun->number);
 
   ftrace_debug (bfun, "new gap");
 
@@ -937,18 +936,15 @@ ftrace_bridge_gap (struct btrace_thread_info *btinfo,
    function segments that are separated by the gap.  */
 
 static void
-btrace_bridge_gaps (struct thread_info *tp, VEC (bfun_s) **gaps)
+btrace_bridge_gaps (struct thread_info *tp, std::vector<unsigned int> &gaps)
 {
   struct btrace_thread_info *btinfo;
-  VEC (bfun_s) *remaining;
-  struct cleanup *old_chain;
+  std::vector<unsigned int> remaining;
   int min_matches;
 
   DEBUG ("bridge gaps");
 
   btinfo = &tp->btrace;
-  remaining = NULL;
-  old_chain = make_cleanup (VEC_cleanup (bfun_s), &remaining);
 
   /* We require a minimum amount of matches for bridging a gap.  The number of
      required matches will be lowered with each iteration.
@@ -960,16 +956,15 @@ btrace_bridge_gaps (struct thread_info *tp, VEC (bfun_s) **gaps)
     {
       /* Let's try to bridge as many gaps as we can.  In some cases, we need to
 	 skip a gap and revisit it again after we closed later gaps.  */
-      while (!VEC_empty (bfun_s, *gaps))
+      while (!gaps.empty ())
 	{
-	  struct btrace_function *gap;
-	  unsigned int idx;
-
-	  for (idx = 0; VEC_iterate (bfun_s, *gaps, idx, gap); ++idx)
+	  for (auto& number : gaps)
 	    {
-	      struct btrace_function *lhs, *rhs;
+	      struct btrace_function *gap, *lhs, *rhs;
 	      int bridged;
 
+	      gap = ftrace_find_call_by_number (btinfo, number);
+
 	      /* We may have a sequence of gaps if we run from one error into
 		 the next as we try to re-sync onto the trace stream.  Ignore
 		 all but the leftmost gap in such a sequence.
@@ -994,28 +989,24 @@ btrace_bridge_gaps (struct thread_info *tp, VEC (bfun_s) **gaps)
 		 If we just pushed them to the end of GAPS we would risk an
 		 infinite loop in case we simply cannot bridge a gap.  */
 	      if (bridged == 0)
-		VEC_safe_push (bfun_s, remaining, gap);
+		remaining.push_back (number);
 	    }
 
 	  /* Let's see if we made any progress.  */
-	  if (VEC_length (bfun_s, remaining) == VEC_length (bfun_s, *gaps))
+	  if (remaining.size () == gaps.size ())
 	    break;
 
-	  VEC_free (bfun_s, *gaps);
-
-	  *gaps = remaining;
-	  remaining = NULL;
+	  gaps.clear ();
+	  gaps.swap (remaining);
 	}
 
       /* We get here if either GAPS is empty or if GAPS equals REMAINING.  */
-      if (VEC_empty (bfun_s, *gaps))
+      if (gaps.empty ())
 	break;
 
-      VEC_free (bfun_s, remaining);
+      remaining.clear ();
     }
 
-  do_cleanups (old_chain);
-
   /* We may omit this in some cases.  Not sure it is worth the extra
      complication, though.  */
   ftrace_compute_global_level_offset (btinfo);
@@ -1026,7 +1017,7 @@ btrace_bridge_gaps (struct thread_info *tp, VEC (bfun_s) **gaps)
 static void
 btrace_compute_ftrace_bts (struct thread_info *tp,
 			   const struct btrace_data_bts *btrace,
-			   VEC (bfun_s) **gaps)
+			   std::vector<unsigned int> &gaps)
 {
   struct btrace_thread_info *btinfo;
   struct gdbarch *gdbarch;
@@ -1062,9 +1053,7 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
 	  if (block->end < pc)
 	    {
 	      /* Indicate the gap in the trace.  */
-	      bfun = ftrace_new_gap (btinfo, BDE_BTS_OVERFLOW);
-
-	      VEC_safe_push (bfun_s, *gaps, bfun);
+	      bfun = ftrace_new_gap (btinfo, BDE_BTS_OVERFLOW, gaps);
 
 	      warning (_("Recorded trace may be corrupted at instruction "
 			 "%u (pc = %s)."), bfun->insn_offset - 1,
@@ -1106,9 +1095,7 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
 	    {
 	      /* Indicate the gap in the trace.  We just added INSN so we're
 		 not at the beginning.  */
-	      bfun = ftrace_new_gap (btinfo, BDE_BTS_INSN_SIZE);
-
-	      VEC_safe_push (bfun_s, *gaps, bfun);
+	      bfun = ftrace_new_gap (btinfo, BDE_BTS_INSN_SIZE, gaps);
 
 	      warning (_("Recorded trace may be incomplete at instruction %u "
 			 "(pc = %s)."), bfun->insn_offset - 1,
@@ -1175,7 +1162,7 @@ pt_btrace_insn_flags (const struct pt_insn *insn)
 static void
 ftrace_add_pt (struct pt_insn_decoder *decoder,
 	       struct btrace_thread_info *btinfo,
-	       int *plevel, VEC (bfun_s) **gaps)
+	       int *plevel, std::vector<unsigned int> &gaps)
 {
   struct btrace_function *bfun, *upd;
   uint64_t offset;
@@ -1212,9 +1199,7 @@ ftrace_add_pt (struct pt_insn_decoder *decoder,
 		 from some other instruction.  Indicate this as a trace gap.  */
 	      if (insn.enabled)
 		{
-		  bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED);
-
-		  VEC_safe_push (bfun_s, *gaps, bfun);
+		  bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED, gaps);
 
 		  pt_insn_get_offset (decoder, &offset);
 
@@ -1227,9 +1212,7 @@ ftrace_add_pt (struct pt_insn_decoder *decoder,
 	  /* Indicate trace overflows.  */
 	  if (insn.resynced)
 	    {
-	      bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW);
-
-	      VEC_safe_push (bfun_s, *gaps, bfun);
+	      bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW, gaps);
 
 	      pt_insn_get_offset (decoder, &offset);
 
@@ -1255,9 +1238,7 @@ ftrace_add_pt (struct pt_insn_decoder *decoder,
 	break;
 
       /* Indicate the gap in the trace.  */
-      bfun = ftrace_new_gap (btinfo, errcode);
-
-      VEC_safe_push (bfun_s, *gaps, bfun);
+      bfun = ftrace_new_gap (btinfo, errcode, gaps);
 
       pt_insn_get_offset (decoder, &offset);
 
@@ -1334,7 +1315,7 @@ static void btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
 static void
 btrace_compute_ftrace_pt (struct thread_info *tp,
 			  const struct btrace_data_pt *btrace,
-			  VEC (bfun_s) **gaps)
+			  std::vector<unsigned int> &gaps)
 {
   struct btrace_thread_info *btinfo;
   struct pt_insn_decoder *decoder;
@@ -1388,13 +1369,7 @@ btrace_compute_ftrace_pt (struct thread_info *tp,
       /* Indicate a gap in the trace if we quit trace processing.  */
       if (error.reason == RETURN_QUIT && !VEC_empty (btrace_fun_s,
 						     btinfo->functions))
-	{
-	  struct btrace_function *bfun;
-
-	  bfun = ftrace_new_gap (btinfo, BDE_PT_USER_QUIT);
-
-	  VEC_safe_push (bfun_s, *gaps, bfun);
-	}
+	ftrace_new_gap (btinfo, BDE_PT_USER_QUIT, gaps);
 
       btrace_finalize_ftrace_pt (decoder, tp, level);
 
@@ -1410,7 +1385,7 @@ btrace_compute_ftrace_pt (struct thread_info *tp,
 static void
 btrace_compute_ftrace_pt (struct thread_info *tp,
 			  const struct btrace_data_pt *btrace,
-			  VEC (bfun_s) **gaps)
+			  std::vector<unsigned int> &gaps)
 {
   internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
 }
@@ -1422,7 +1397,7 @@ btrace_compute_ftrace_pt (struct thread_info *tp,
 
 static void
 btrace_compute_ftrace_1 (struct thread_info *tp, struct btrace_data *btrace,
-			 VEC (bfun_s) **gaps)
+			 std::vector<unsigned int> &gaps)
 {
   DEBUG ("compute ftrace");
 
@@ -1444,11 +1419,11 @@ btrace_compute_ftrace_1 (struct thread_info *tp, struct btrace_data *btrace,
 }
 
 static void
-btrace_finalize_ftrace (struct thread_info *tp, VEC (bfun_s) **gaps)
+btrace_finalize_ftrace (struct thread_info *tp, std::vector<unsigned int> &gaps)
 {
-  if (!VEC_empty (bfun_s, *gaps))
+  if (!gaps.empty ())
     {
-      tp->btrace.ngaps += VEC_length (bfun_s, *gaps);
+      tp->btrace.ngaps += gaps.size ();
       btrace_bridge_gaps (tp, gaps);
     }
 }
@@ -1456,27 +1431,21 @@ btrace_finalize_ftrace (struct thread_info *tp, VEC (bfun_s) **gaps)
 static void
 btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
 {
-  VEC (bfun_s) *gaps;
-  struct cleanup *old_chain;
-
-  gaps = NULL;
-  old_chain = make_cleanup (VEC_cleanup (bfun_s), &gaps);
+  std::vector<unsigned int> gaps;
 
   TRY
     {
-      btrace_compute_ftrace_1 (tp, btrace, &gaps);
+      btrace_compute_ftrace_1 (tp, btrace, gaps);
     }
   CATCH (error, RETURN_MASK_ALL)
     {
-      btrace_finalize_ftrace (tp, &gaps);
+      btrace_finalize_ftrace (tp, gaps);
 
       throw_exception (error);
     }
   END_CATCH
 
-  btrace_finalize_ftrace (tp, &gaps);
-
-  do_cleanups (old_chain);
+  btrace_finalize_ftrace (tp, gaps);
 }
 
 /* Add an entry for the current PC.  */
-- 
2.7.4

^ permalink raw reply	[flat|nested] 20+ messages in thread

* [PATCH 05/11] btrace: Use function segment index in insn iterator.
  2017-02-17 13:27 [PATCH 00/11] btrace: Turn linked list of function call segments into vector Tim Wiederhake
                   ` (4 preceding siblings ...)
  2017-02-17 13:27 ` [PATCH 11/11] [SQUASH] btrace: Cleanup Tim Wiederhake
@ 2017-02-17 13:27 ` Tim Wiederhake
  2017-02-24  9:32   ` Metzger, Markus T
  2017-02-17 13:27 ` [PATCH 01/11] btrace: Use struct btrace_thread_info fields directly Tim Wiederhake
                   ` (5 subsequent siblings)
  11 siblings, 1 reply; 20+ messages in thread
From: Tim Wiederhake @ 2017-02-17 13:27 UTC (permalink / raw)
  To: gdb-patches; +Cc: markus.t.metzger

2017-02-17  Tim Wiederhake  <tim.wiederhake@intel.com>

gdb/ChangeLog
	* btrace.c: (btrace_insn_get, btrace_insn_get_error, btrace_insn_number,
	btrace_insn_begin, btrace_insn_end, btrace_insn_next, btrace_insn_prev,
	btrace_find_insn_by_number): Replaced function segment pointer with
	index.
	(btrace_insn_cmp): Simplify.
	* btrace.h: (struct btrace_insn_iterator) Renamed index to
	insn_index.  Replaced function segment pointer with index into function
	segment vector.
	* record-btrace.c (record_btrace_call_history): Replaced function
	segment pointer use with index.
	(record_btrace_frame_sniffer): Retrieve function call segment through
	vector.
	(record_btrace_set_replay): Remove defunct safety check.

---
 gdb/btrace.c        | 59 +++++++++++++++++++++++++++++++++--------------------
 gdb/btrace.h        |  8 ++++----
 gdb/record-btrace.c |  7 ++++---
 3 files changed, 45 insertions(+), 29 deletions(-)

diff --git a/gdb/btrace.c b/gdb/btrace.c
index 31590ce..1e110cc 100644
--- a/gdb/btrace.c
+++ b/gdb/btrace.c
@@ -2236,8 +2236,8 @@ btrace_insn_get (const struct btrace_insn_iterator *it)
   const struct btrace_function *bfun;
   unsigned int index, end;
 
-  index = it->index;
-  bfun = it->function;
+  index = it->insn_index;
+  bfun = VEC_index (btrace_fun_p, it->btinfo->functions, it->call_index);
 
   /* Check if the iterator points to a gap in the trace.  */
   if (bfun->errcode != 0)
@@ -2256,7 +2256,10 @@ btrace_insn_get (const struct btrace_insn_iterator *it)
 int
 btrace_insn_get_error (const struct btrace_insn_iterator *it)
 {
-  return it->function->errcode;
+  const struct btrace_function *bfun;
+
+  bfun = VEC_index (btrace_fun_p, it->btinfo->functions, it->call_index);
+  return bfun->errcode;
 }
 
 /* See btrace.h.  */
@@ -2264,7 +2267,10 @@ btrace_insn_get_error (const struct btrace_insn_iterator *it)
 unsigned int
 btrace_insn_number (const struct btrace_insn_iterator *it)
 {
-  return it->function->insn_offset + it->index;
+  const struct btrace_function *bfun;
+
+  bfun = VEC_index (btrace_fun_p, it->btinfo->functions, it->call_index);
+  return bfun->insn_offset + it->insn_index;
 }
 
 /* See btrace.h.  */
@@ -2280,8 +2286,8 @@ btrace_insn_begin (struct btrace_insn_iterator *it,
     error (_("No trace."));
 
   it->btinfo = btinfo;
-  it->function = bfun;
-  it->index = 0;
+  it->call_index = 0;
+  it->insn_index = 0;
 }
 
 /* See btrace.h.  */
@@ -2306,8 +2312,8 @@ btrace_insn_end (struct btrace_insn_iterator *it,
     length -= 1;
 
   it->btinfo = btinfo;
-  it->function = bfun;
-  it->index = length;
+  it->call_index = bfun->number - 1;
+  it->insn_index = length;
 }
 
 /* See btrace.h.  */
@@ -2318,9 +2324,9 @@ btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
   const struct btrace_function *bfun;
   unsigned int index, steps;
 
-  bfun = it->function;
+  bfun = VEC_index (btrace_fun_p, it->btinfo->functions, it->call_index);
   steps = 0;
-  index = it->index;
+  index = it->insn_index;
 
   while (stride != 0)
     {
@@ -2386,8 +2392,8 @@ btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
     }
 
   /* Update the iterator.  */
-  it->function = bfun;
-  it->index = index;
+  it->call_index = bfun->number - 1;
+  it->insn_index = index;
 
   return steps;
 }
@@ -2400,9 +2406,9 @@ btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
   const struct btrace_function *bfun;
   unsigned int index, steps;
 
-  bfun = it->function;
+  bfun = VEC_index (btrace_fun_p, it->btinfo->functions, it->call_index);
   steps = 0;
-  index = it->index;
+  index = it->insn_index;
 
   while (stride != 0)
     {
@@ -2444,8 +2450,8 @@ btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
     }
 
   /* Update the iterator.  */
-  it->function = bfun;
-  it->index = index;
+  it->call_index = bfun->number - 1;
+  it->insn_index = index;
 
   return steps;
 }
@@ -2456,12 +2462,21 @@ int
 btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
 		 const struct btrace_insn_iterator *rhs)
 {
-  unsigned int lnum, rnum;
+  gdb_assert (lhs->btinfo == rhs->btinfo);
 
-  lnum = btrace_insn_number (lhs);
-  rnum = btrace_insn_number (rhs);
+  if (lhs->call_index > rhs->call_index)
+    return 1;
+
+  if (lhs->call_index < rhs->call_index)
+    return -1;
+
+  if (lhs->insn_index > rhs->insn_index)
+    return 1;
+
+  if (lhs->insn_index < rhs->insn_index)
+    return -1;
 
-  return (int) (lnum - rnum);
+  return 0;
 }
 
 /* See btrace.h.  */
@@ -2510,8 +2525,8 @@ btrace_find_insn_by_number (struct btrace_insn_iterator *it,
     }
 
   it->btinfo = btinfo;
-  it->function = bfun;
-  it->index = number - bfun->insn_offset;
+  it->call_index = bfun->number - 1;
+  it->insn_index = number - bfun->insn_offset;
   return 1;
 }
 
diff --git a/gdb/btrace.h b/gdb/btrace.h
index c49b114..53df6e9 100644
--- a/gdb/btrace.h
+++ b/gdb/btrace.h
@@ -196,12 +196,12 @@ struct btrace_insn_iterator
   /* The branch trace information for this thread.  Will never be NULL.  */
   const struct btrace_thread_info *btinfo;
 
-  /* The branch trace function segment containing the instruction.
-     Will never be NULL.  */
-  const struct btrace_function *function;
+  /* The index of the function call segment in struct btrace_thread_info's
+     FUNCTIONS vector.  Note that index + 1 == number.  */
+  unsigned int call_index;
 
   /* The index into the function segment's instruction vector.  */
-  unsigned int index;
+  unsigned int insn_index;
 };
 
 /* A branch trace function call iterator.  */
diff --git a/gdb/record-btrace.c b/gdb/record-btrace.c
index ba83be0..83e65e7 100644
--- a/gdb/record-btrace.c
+++ b/gdb/record-btrace.c
@@ -1111,7 +1111,7 @@ record_btrace_call_history (struct target_ops *self, int size, int int_flags)
       if (replay != NULL)
 	{
 	  begin.btinfo = btinfo;
-	  begin.call_index = replay->function->number - 1;
+	  begin.call_index = replay->call_index;
 	}
       else
 	btrace_call_end (&begin, btinfo);
@@ -1692,7 +1692,8 @@ record_btrace_frame_sniffer (const struct frame_unwind *self,
 
       replay = tp->btrace.replay;
       if (replay != NULL)
-	bfun = replay->function;
+	bfun = VEC_index (btrace_fun_p, tp->btrace.functions,
+			  replay->call_index);
     }
   else
     {
@@ -2705,7 +2706,7 @@ record_btrace_set_replay (struct thread_info *tp,
 
   btinfo = &tp->btrace;
 
-  if (it == NULL || it->function == NULL)
+  if (it == NULL)
     record_btrace_stop_replaying (tp);
   else
     {
-- 
2.7.4

^ permalink raw reply	[flat|nested] 20+ messages in thread

* [PATCH 09/11] [SQUASH] btrace: Remove struct btrace_thread_info::{begin,end}.
  2017-02-17 13:27 [PATCH 00/11] btrace: Turn linked list of function call segments into vector Tim Wiederhake
                   ` (8 preceding siblings ...)
  2017-02-17 13:27 ` [PATCH 06/11] [SQUASH] btrace: Save function calls in a vector Tim Wiederhake
@ 2017-02-17 13:27 ` Tim Wiederhake
  2017-02-17 13:27 ` [PATCH 02/11] btrace: Change parameters to use btrace_thread_info Tim Wiederhake
  2017-02-24  9:32 ` [PATCH 00/11] btrace: Turn linked list of function call segments into vector Metzger, Markus T
  11 siblings, 0 replies; 20+ messages in thread
From: Tim Wiederhake @ 2017-02-17 13:27 UTC (permalink / raw)
  To: gdb-patches; +Cc: markus.t.metzger

This patch stands alone for easier review and is meant to be squashed together
for committing.  ChangeLog will be added to the squashed commit.


2017-02-17  Tim Wiederhake  <tim.wiederhake@intel.com>
---
 gdb/btrace.c        | 159 +++++++++++++++++++++++++---------------------------
 gdb/btrace.h        |   1 -
 gdb/record-btrace.c |   2 +-
 3 files changed, 77 insertions(+), 85 deletions(-)

diff --git a/gdb/btrace.c b/gdb/btrace.c
index 701daa3..cd2475d 100644
--- a/gdb/btrace.c
+++ b/gdb/btrace.c
@@ -402,8 +402,8 @@ ftrace_new_return (struct btrace_thread_info *btinfo,
 {
   struct btrace_function *prev, *bfun, *caller;
 
-  prev = btinfo->end;
   bfun = ftrace_new_function (btinfo, mfun, fun);
+  prev = ftrace_find_call_by_number (btinfo, bfun->number - 1);
 
   /* It is important to start at PREV's caller.  Otherwise, we might find
      PREV itself, if PREV is a recursive function.  */
@@ -500,16 +500,17 @@ ftrace_new_switch (struct btrace_thread_info *btinfo,
 static struct btrace_function *
 ftrace_new_gap (struct btrace_thread_info *btinfo, int errcode)
 {
-  struct btrace_function *prev, *bfun;
-
-  prev = btinfo->end;
+  struct btrace_function *bfun;
 
-  /* We hijack prev if it was empty.  */
-  if (prev != NULL && prev->errcode == 0
-      && VEC_empty (btrace_insn_s, prev->insn))
-    bfun = prev;
-  else
+  if (VEC_empty (btrace_fun_s, btinfo->functions))
     bfun = ftrace_new_function (btinfo, NULL, NULL);
+  else
+    {
+      /* We hijack the previous function call segment if it was empty.  */
+      bfun = VEC_last (btrace_fun_s, btinfo->functions);
+      if (bfun->errcode != 0 || !VEC_empty (btrace_insn_s, bfun->insn))
+	bfun = ftrace_new_function (btinfo, NULL, NULL);
+    }
 
   bfun->errcode = errcode;
 
@@ -531,8 +532,6 @@ ftrace_update_function (struct btrace_thread_info *btinfo, CORE_ADDR pc)
   struct btrace_insn *last;
   struct btrace_function *bfun;
 
-  bfun = btinfo->end;
-
   /* Try to determine the function we're in.  We use both types of symbols
      to avoid surprises when we sometimes get a full symbol and sometimes
      only a minimal symbol.  */
@@ -543,8 +542,13 @@ ftrace_update_function (struct btrace_thread_info *btinfo, CORE_ADDR pc)
   if (fun == NULL && mfun == NULL)
     DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));
 
-  /* If we didn't have a function or if we had a gap before, we create one.  */
-  if (bfun == NULL || bfun->errcode != 0)
+  /* If we didn't have a function, we create one.  */
+  if (VEC_empty (btrace_fun_s, btinfo->functions))
+    return ftrace_new_function (btinfo, mfun, fun);
+
+  /* If we had a gap before, we create a function.  */
+  bfun = VEC_last (btrace_fun_s, btinfo->functions);
+  if (bfun->errcode != 0)
     return ftrace_new_function (btinfo, mfun, fun);
 
   /* Check the last instruction, if we have one.
@@ -627,7 +631,7 @@ static void
 ftrace_update_insns (struct btrace_thread_info *btinfo,
 		     const struct btrace_insn *insn)
 {
-  struct btrace_function *bfun = btinfo->end;
+  struct btrace_function *bfun = VEC_last (btrace_fun_s, btinfo->functions);
 
   VEC_safe_push (btrace_insn_s, bfun->insn, insn);
 
@@ -708,28 +712,26 @@ ftrace_fixup_level (struct btrace_thread_info *btinfo,
 static void
 ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
 {
-  struct btrace_function *bfun, *end;
-  int level;
+  struct btrace_function *bfun;
+  int i, length, level;
 
   if (btinfo == NULL)
     return;
 
-  bfun = btinfo->begin;
-  if (bfun == NULL)
+  if (VEC_empty (btrace_fun_s, btinfo->functions))
     return;
 
-  /* The last function segment contains the current instruction, which is not
-     really part of the trace.  If it contains just this one instruction, we
-     stop when we reach it; otherwise, we let the below loop run to the end.  */
-  end = btinfo->end;
-  if (VEC_length (btrace_insn_s, end->insn) > 1)
-    end = NULL;
-
   level = INT_MAX;
-  while (bfun != end)
+  length = VEC_length (btrace_fun_s, btinfo->functions);
+  for (i = 0; VEC_iterate (btrace_fun_s, btinfo->functions, i, bfun); i++)
     {
+      /* The last function segment contains the current instruction, which is
+	 not really part of the trace.  If it contains just this one
+	 instruction, we ignore the segment.  */
+      if (bfun->number == length && VEC_length (btrace_insn_s, bfun->insn) == 1)
+	  continue;
+
       level = std::min (level, bfun->level);
-      bfun = ftrace_find_call_by_number (btinfo, bfun->number + 1);
     }
 
   DEBUG_FTRACE ("setting global level offset: %d", -level);
@@ -1033,9 +1035,13 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
 
   gdbarch = target_gdbarch ();
   btinfo = &tp->btrace;
-  level = btinfo->begin != NULL ? -btinfo->level : INT_MAX;
   blk = VEC_length (btrace_block_s, btrace->blocks);
 
+  if (VEC_empty (btrace_fun_s, btinfo->functions))
+    level = INT_MAX;
+  else
+    level = -btinfo->level;
+
   while (blk != 0)
     {
       btrace_block_s *block;
@@ -1048,6 +1054,7 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
 
       for (;;)
 	{
+	  struct btrace_function *bfun;
 	  struct btrace_insn insn;
 	  int size;
 
@@ -1055,27 +1062,23 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
 	  if (block->end < pc)
 	    {
 	      /* Indicate the gap in the trace.  */
-	      btinfo->end = ftrace_new_gap (btinfo, BDE_BTS_OVERFLOW);
-	      if (btinfo->begin == NULL)
-		btinfo->begin = btinfo->end;
+	      bfun = ftrace_new_gap (btinfo, BDE_BTS_OVERFLOW);
 
-	      VEC_safe_push (bfun_s, *gaps, btinfo->end);
+	      VEC_safe_push (bfun_s, *gaps, bfun);
 
 	      warning (_("Recorded trace may be corrupted at instruction "
-			 "%u (pc = %s)."), btinfo->end->insn_offset - 1,
+			 "%u (pc = %s)."), bfun->insn_offset - 1,
 		       core_addr_to_string_nz (pc));
 
 	      break;
 	    }
 
-	  btinfo->end = ftrace_update_function (btinfo, pc);
-	  if (btinfo->begin == NULL)
-	    btinfo->begin = btinfo->end;
+	  bfun = ftrace_update_function (btinfo, pc);
 
 	  /* Maintain the function level offset.
 	     For all but the last block, we do it here.  */
 	  if (blk != 0)
-	    level = std::min (level, btinfo->end->level);
+	    level = std::min (level, bfun->level);
 
 	  size = 0;
 	  TRY
@@ -1103,12 +1106,12 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
 	    {
 	      /* Indicate the gap in the trace.  We just added INSN so we're
 		 not at the beginning.  */
-	      btinfo->end = ftrace_new_gap (btinfo, BDE_BTS_INSN_SIZE);
+	      bfun = ftrace_new_gap (btinfo, BDE_BTS_INSN_SIZE);
 
-	      VEC_safe_push (bfun_s, *gaps, btinfo->end);
+	      VEC_safe_push (bfun_s, *gaps, bfun);
 
 	      warning (_("Recorded trace may be incomplete at instruction %u "
-			 "(pc = %s)."), btinfo->end->insn_offset - 1,
+			 "(pc = %s)."), bfun->insn_offset - 1,
 		       core_addr_to_string_nz (pc));
 
 	      break;
@@ -1123,7 +1126,7 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
 	     and is not really part of the execution history, it shouldn't
 	     affect the level.  */
 	  if (blk == 0)
-	    level = std::min (level, btinfo->end->level);
+	    level = std::min (level, bfun->level);
 	}
     }
 
@@ -1174,7 +1177,7 @@ ftrace_add_pt (struct pt_insn_decoder *decoder,
 	       struct btrace_thread_info *btinfo,
 	       int *plevel, VEC (bfun_s) **gaps)
 {
-  struct btrace_function *upd;
+  struct btrace_function *bfun, *upd;
   uint64_t offset;
   int errcode;
 
@@ -1200,7 +1203,7 @@ ftrace_add_pt (struct pt_insn_decoder *decoder,
 	    break;
 
 	  /* Look for gaps in the trace - unless we're at the beginning.  */
-	  if (btinfo->begin != NULL)
+	  if (!VEC_empty (btrace_fun_s, btinfo->functions))
 	    {
 	      /* Tracing is disabled and re-enabled each time we enter the
 		 kernel.  Most times, we continue from the same instruction we
@@ -1209,45 +1212,36 @@ ftrace_add_pt (struct pt_insn_decoder *decoder,
 		 from some other instruction.  Indicate this as a trace gap.  */
 	      if (insn.enabled)
 		{
-		  btinfo->end = ftrace_new_gap (btinfo, BDE_PT_DISABLED);
+		  bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED);
 
-		  VEC_safe_push (bfun_s, *gaps, btinfo->end);
+		  VEC_safe_push (bfun_s, *gaps, bfun);
 
 		  pt_insn_get_offset (decoder, &offset);
 
 		  warning (_("Non-contiguous trace at instruction %u (offset "
 			     "= 0x%" PRIx64 ", pc = 0x%" PRIx64 ")."),
-			   btinfo->end->insn_offset - 1, offset, insn.ip);
+			   bfun->insn_offset - 1, offset, insn.ip);
 		}
 	    }
 
 	  /* Indicate trace overflows.  */
 	  if (insn.resynced)
 	    {
-	      btinfo->end = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW);
-	      if (btinfo->begin == NULL)
-		btinfo->begin = btinfo->end;
+	      bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW);
 
-	      VEC_safe_push (bfun_s, *gaps, btinfo->end);
+	      VEC_safe_push (bfun_s, *gaps, bfun);
 
 	      pt_insn_get_offset (decoder, &offset);
 
 	      warning (_("Overflow at instruction %u (offset = 0x%" PRIx64
 			 ", pc = 0x%" PRIx64 ")."),
-		       btinfo->end->insn_offset - 1, offset, insn.ip);
+		       bfun->insn_offset - 1, offset, insn.ip);
 	    }
 
-	  upd = ftrace_update_function (btinfo, insn.ip);
-	  if (upd != btinfo->end)
-	    {
-	      btinfo->end = upd;
-
-	      if (btinfo->begin == NULL)
-		btinfo->begin = upd;
-	    }
+	  bfun = ftrace_update_function (btinfo, insn.ip);
 
 	  /* Maintain the function level offset.  */
-	  *plevel = std::min (*plevel, btinfo->end->level);
+	  *plevel = std::min (*plevel, bfun->level);
 
 	  btinsn.pc = (CORE_ADDR) insn.ip;
 	  btinsn.size = (gdb_byte) insn.size;
@@ -1261,17 +1255,15 @@ ftrace_add_pt (struct pt_insn_decoder *decoder,
 	break;
 
       /* Indicate the gap in the trace.  */
-      btinfo->end = ftrace_new_gap (btinfo, errcode);
-      if (btinfo->begin == NULL)
-	btinfo->begin = btinfo->end;
+      bfun = ftrace_new_gap (btinfo, errcode);
 
-      VEC_safe_push (bfun_s, *gaps, btinfo->end);
+      VEC_safe_push (bfun_s, *gaps, bfun);
 
       pt_insn_get_offset (decoder, &offset);
 
       warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
 		 ", pc = 0x%" PRIx64 "): %s."), errcode,
-	       btinfo->end->insn_offset - 1, offset, insn.ip,
+	       bfun->insn_offset - 1, offset, insn.ip,
 	       pt_errstr (pt_errcode (errcode)));
     }
 }
@@ -1353,7 +1345,10 @@ btrace_compute_ftrace_pt (struct thread_info *tp,
     return;
 
   btinfo = &tp->btrace;
-  level = btinfo->begin != NULL ? -btinfo->level : INT_MAX;
+  if (VEC_empty (btrace_fun_s, btinfo->functions))
+    level = INT_MAX;
+  else
+    level = -btinfo->level;
 
   pt_config_init(&config);
   config.begin = btrace->data;
@@ -1391,11 +1386,14 @@ btrace_compute_ftrace_pt (struct thread_info *tp,
   CATCH (error, RETURN_MASK_ALL)
     {
       /* Indicate a gap in the trace if we quit trace processing.  */
-      if (error.reason == RETURN_QUIT && btinfo->end != NULL)
+      if (error.reason == RETURN_QUIT && !VEC_empty (btrace_fun_s,
+						     btinfo->functions))
 	{
-	  btinfo->end = ftrace_new_gap (btinfo, BDE_PT_USER_QUIT);
+	  struct btrace_function *bfun;
+
+	  bfun = ftrace_new_gap (btinfo, BDE_PT_USER_QUIT);
 
-	  VEC_safe_push (bfun_s, *gaps, btinfo->end);
+	  VEC_safe_push (bfun_s, *gaps, bfun);
 	}
 
       btrace_finalize_ftrace_pt (decoder, tp, level);
@@ -1627,8 +1625,9 @@ btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
   btrace_block_s *first_new_block;
 
   btinfo = &tp->btrace;
-  last_bfun = btinfo->end;
-  gdb_assert (last_bfun != NULL);
+  gdb_assert (!VEC_empty (btrace_fun_s, btinfo->functions));
+
+  last_bfun = VEC_last (btrace_fun_s, btinfo->functions);
   gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));
 
   /* If the existing trace ends with a gap, we just glue the traces
@@ -1695,7 +1694,7 @@ btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
      of just that one instruction.  If we remove it, we might turn the now
      empty btrace function segment into a gap.  But we don't want gaps at
      the beginning.  To avoid this, we remove the entire old trace.  */
-  if (last_bfun == btinfo->begin && VEC_empty (btrace_insn_s, last_bfun->insn))
+  if (last_bfun->number == 1 && VEC_empty (btrace_insn_s, last_bfun->insn))
     btrace_clear (tp);
 
   return 0;
@@ -1857,7 +1856,7 @@ btrace_fetch (struct thread_info *tp)
   cleanup = make_cleanup_btrace_data (&btrace);
 
   /* Let's first try to extend the trace we already have.  */
-  if (btinfo->end != NULL)
+  if (!VEC_empty (btrace_fun_s, btinfo->functions))
     {
       errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
       if (errcode == 0)
@@ -1933,9 +1932,6 @@ btrace_clear (struct thread_info *tp)
     }
 
   VEC_truncate (btrace_fun_s, btinfo->functions, 0);
-
-  btinfo->begin = NULL;
-  btinfo->end = NULL;
   btinfo->ngaps = 0;
 
   /* Must clear the maint data before - it depends on BTINFO->DATA.  */
@@ -2324,10 +2320,7 @@ void
 btrace_insn_begin (struct btrace_insn_iterator *it,
 		   const struct btrace_thread_info *btinfo)
 {
-  const struct btrace_function *bfun;
-
-  bfun = btinfo->begin;
-  if (bfun == NULL)
+  if (VEC_empty (btrace_fun_s, btinfo->functions))
     error (_("No trace."));
 
   it->btinfo = btinfo;
@@ -2344,10 +2337,10 @@ btrace_insn_end (struct btrace_insn_iterator *it,
   const struct btrace_function *bfun;
   unsigned int length;
 
-  bfun = btinfo->end;
-  if (bfun == NULL)
+  if (VEC_empty (btrace_fun_s, btinfo->functions))
     error (_("No trace."));
 
+  bfun = VEC_last (btrace_fun_s, btinfo->functions);
   length = VEC_length (btrace_insn_s, bfun->insn);
 
   /* The last function may either be a gap or it contains the current
@@ -2780,7 +2773,7 @@ btrace_is_empty (struct thread_info *tp)
 
   btinfo = &tp->btrace;
 
-  if (btinfo->begin == NULL)
+  if (VEC_empty (btrace_fun_s, btinfo->functions))
     return 1;
 
   btrace_insn_begin (&begin, btinfo);
diff --git a/gdb/btrace.h b/gdb/btrace.h
index 8f8a7fa..d238d4f 100644
--- a/gdb/btrace.h
+++ b/gdb/btrace.h
@@ -335,7 +335,6 @@ struct btrace_thread_info
      part of the execution history.
      Both will be NULL if there is no branch trace available.  If there is
      branch trace available, both will be non-NULL.  */
-  struct btrace_function *begin;
   struct btrace_function *end;
 
   /* Vector of decoded function call segments in execution flow order.  Note
diff --git a/gdb/record-btrace.c b/gdb/record-btrace.c
index 7ba3844..160424e 100644
--- a/gdb/record-btrace.c
+++ b/gdb/record-btrace.c
@@ -1928,7 +1928,7 @@ record_btrace_start_replaying (struct thread_info *tp)
   replay = NULL;
 
   /* We can't start replaying without trace.  */
-  if (btinfo->begin == NULL)
+  if (VEC_empty (btrace_fun_s, btinfo->functions))
     return NULL;
 
   /* GDB stores the current frame_id when stepping in order to detects steps
-- 
2.7.4

^ permalink raw reply	[flat|nested] 20+ messages in thread

* [PATCH 00/11] btrace: Turn linked list of function call segments into vector
@ 2017-02-17 13:27 Tim Wiederhake
  2017-02-17 13:27 ` [PATCH 07/11] [SQUASH] btrace: Adjust struct btrace_function::up Tim Wiederhake
                   ` (11 more replies)
  0 siblings, 12 replies; 20+ messages in thread
From: Tim Wiederhake @ 2017-02-17 13:27 UTC (permalink / raw)
  To: gdb-patches; +Cc: markus.t.metzger

Hi all,

this series removes the extra list of btrace function call segments in struct
btrace_thread_info.  To achieve this, the doubly linked list of function call
segments in struct btrace_thread_info is replaced by a (GDB) vector.  In some
instances, struct btrace_thread_info is initialized by memset'ing it to 0x00,
so we can't use std::vector (yet).

Patches 1, 2 and 3 are preparation, patches 4 and 5 change the iterators
(struct btrace_call_iterator and struct btrace_insn_iterator) to use indices
instead of pointers, and patches 6 to 11 actually replace the linked list in
struct btrace_thread_info.

Patches 6 to 11 are actually only one patch that is split up for easier review.
As we push more function call segments into the vector while we decode the
trace, the vector may run out of space and reallocate, rendering all pointers
invalid and preventing an incremental change from pointer usage to index usage.

Patch 6 introduces some temporary pre-allocating of memory for this vector,
which is removed in patch 11.  This wastes a lot of memory but allows for
testing each step of the transition.

Regards,
Tim

Tim Wiederhake (11):
  btrace: Use struct btrace_thread_info fields directly.
  btrace: Change parameters to use btrace_thread_info.
  btrace: Add btinfo to instruction interator.
  btrace: Use function segment index in call iterator.
  btrace: Use function segment index in insn iterator.
  [SQUASH] btrace: Save function calls in a vector.
  [SQUASH] btrace: Adjust struct btrace_function::up.
  [SQUASH] btrace: Adjust struct btrace_function::{flow,segment}.
  [SQUASH] btrace: Remove struct btrace_thread_info::{begin,end}.
  [SQUASH] btrace: Remove bfun_s vector.
  [SQUASH] btrace: Cleanup.

 gdb/btrace.c                  | 843 ++++++++++++++++++++----------------------
 gdb/btrace.h                  |  56 ++-
 gdb/python/py-record-btrace.c |  12 +-
 gdb/record-btrace.c           |  32 +-
 4 files changed, 462 insertions(+), 481 deletions(-)

-- 
2.7.4

^ permalink raw reply	[flat|nested] 20+ messages in thread

* [PATCH 11/11] [SQUASH] btrace: Cleanup.
  2017-02-17 13:27 [PATCH 00/11] btrace: Turn linked list of function call segments into vector Tim Wiederhake
                   ` (3 preceding siblings ...)
  2017-02-17 13:27 ` [PATCH 03/11] btrace: Add btinfo to instruction interator Tim Wiederhake
@ 2017-02-17 13:27 ` Tim Wiederhake
  2017-02-17 13:27 ` [PATCH 05/11] btrace: Use function segment index in insn iterator Tim Wiederhake
                   ` (6 subsequent siblings)
  11 siblings, 0 replies; 20+ messages in thread
From: Tim Wiederhake @ 2017-02-17 13:27 UTC (permalink / raw)
  To: gdb-patches; +Cc: markus.t.metzger

This patch stands alone for easier review and is meant to be squashed together
for committing.  ChangeLog will be added to the squashed commit.

2017-02-17  Tim Wiederhake  <tim.wiederhake@intel.com>


---
 gdb/btrace.c | 38 +++++++++++++-------------------------
 1 file changed, 13 insertions(+), 25 deletions(-)

diff --git a/gdb/btrace.c b/gdb/btrace.c
index 859c87f..aa6959d 100644
--- a/gdb/btrace.c
+++ b/gdb/btrace.c
@@ -218,31 +218,28 @@ ftrace_new_function (struct btrace_thread_info *btinfo,
 		     struct minimal_symbol *mfun,
 		     struct symbol *fun)
 {
-  struct btrace_function *prev = NULL, *bfun;
+  struct btrace_function bfun;
 
-  if (!VEC_empty (btrace_fun_s, btinfo->functions))
-    prev = VEC_last (btrace_fun_s, btinfo->functions);
-
-  bfun = VEC_safe_push (btrace_fun_s, btinfo->functions, NULL);
-  memset (bfun, 0, sizeof (*bfun));
+  memset (&bfun, 0, sizeof (bfun));
+  bfun.msym = mfun;
+  bfun.sym = fun;
 
-  bfun->msym = mfun;
-  bfun->sym = fun;
-
-  if (prev == NULL)
+  if (VEC_empty (btrace_fun_s, btinfo->functions))
     {
       /* Start counting at one.  */
-      bfun->number = 1;
-      bfun->insn_offset = 1;
+      bfun.number = 1;
+      bfun.insn_offset = 1;
     }
   else
     {
-      bfun->number = prev->number + 1;
-      bfun->insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
-      bfun->level = prev->level;
+      struct btrace_function *prev = VEC_last (btrace_fun_s, btinfo->functions);
+
+      bfun.number = prev->number + 1;
+      bfun.insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
+      bfun.level = prev->level;
     }
 
-  return bfun;
+  return VEC_safe_push (btrace_fun_s, btinfo->functions, &bfun);
 }
 
 /* Update the UP field of a function segment.  */
@@ -1496,10 +1493,6 @@ btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
   DEBUG ("enable thread %s (%s)", print_thread_id (tp),
 	 target_pid_to_str (tp->ptid));
 
-  /* Temporarily prevent resizing the vector until reworking struct
-     btrace_function is complete.  */
-  VEC_reserve (btrace_fun_s, tp->btrace.functions, 1000000);
-
   tp->btrace.target = target_enable_btrace (tp->ptid, conf);
 
   /* We're done if we failed to enable tracing.  */
@@ -1816,11 +1809,6 @@ btrace_fetch (struct thread_info *tp)
   /* We should not be called on running or exited threads.  */
   gdb_assert (can_access_registers_ptid (tp->ptid));
 
-  /* Temporarily prevent resizing the vector until reworking struct
-     btrace_function is complete.  */
-  if (btinfo->functions == NULL)
-    VEC_reserve (btrace_fun_s, btinfo->functions, 1000000);
-
   btrace_data_init (&btrace);
   cleanup = make_cleanup_btrace_data (&btrace);
 
-- 
2.7.4

^ permalink raw reply	[flat|nested] 20+ messages in thread

* [PATCH 02/11] btrace: Change parameters to use btrace_thread_info.
  2017-02-17 13:27 [PATCH 00/11] btrace: Turn linked list of function call segments into vector Tim Wiederhake
                   ` (9 preceding siblings ...)
  2017-02-17 13:27 ` [PATCH 09/11] [SQUASH] btrace: Remove struct btrace_thread_info::{begin,end} Tim Wiederhake
@ 2017-02-17 13:27 ` Tim Wiederhake
  2017-02-24  9:32   ` Metzger, Markus T
  2017-02-24  9:32 ` [PATCH 00/11] btrace: Turn linked list of function call segments into vector Metzger, Markus T
  11 siblings, 1 reply; 20+ messages in thread
From: Tim Wiederhake @ 2017-02-17 13:27 UTC (permalink / raw)
  To: gdb-patches; +Cc: markus.t.metzger

This prepares the transition from function call segment pointers to indices in
a vector.

2017-02-17  Tim Wiederhake  <tim.wiederhake@intel.com>

gdb/ChangeLog:
	* btrace.c (ftrace_new_function, ftrace_fixup_caller, ftrace_new_call,
	ftrace_new_tailcall, ftrace_find_caller, ftrace_find_call,
	ftrace_new_return, ftrace_new_switch, ftrace_new_gap,
	ftrace_update_function, ftrace_update_insns, ftrace_connect_bfun,
	ftrace_connect_backtrace, ftrace_bridge_gap, btrace_compute_ftrace_bts,
	ftrace_add_pt, btrace_compute_ftrace_pt): Changed to use struct
	btrace_thread_info * as parameter. Adjusted comments where necessary.


---
 gdb/btrace.c | 138 ++++++++++++++++++++++++++++++++---------------------------
 1 file changed, 75 insertions(+), 63 deletions(-)

diff --git a/gdb/btrace.c b/gdb/btrace.c
index 14a16a2..da8e0f7 100644
--- a/gdb/btrace.c
+++ b/gdb/btrace.c
@@ -202,17 +202,18 @@ ftrace_function_switched (const struct btrace_function *bfun,
   return 0;
 }
 
-/* Allocate and initialize a new branch trace function segment.
-   PREV is the chronologically preceding function segment.
-   MFUN and FUN are the symbol information we have for this function.  */
+/* Allocate and initialize a new branch trace function segment at the end of
+   the trace.  MFUN and FUN are the symbol information we have for this
+   function.  */
 
 static struct btrace_function *
-ftrace_new_function (struct btrace_function *prev,
+ftrace_new_function (struct btrace_thread_info *btinfo,
 		     struct minimal_symbol *mfun,
 		     struct symbol *fun)
 {
-  struct btrace_function *bfun;
+  struct btrace_function *prev, *bfun;
 
+  prev = btinfo->end;
   bfun = XCNEW (struct btrace_function);
 
   bfun->msym = mfun;
@@ -258,7 +259,8 @@ ftrace_update_caller (struct btrace_function *bfun,
 /* Fix up the caller for all segments of a function.  */
 
 static void
-ftrace_fixup_caller (struct btrace_function *bfun,
+ftrace_fixup_caller (struct btrace_thread_info *btinfo,
+		     struct btrace_function *bfun,
 		     struct btrace_function *caller,
 		     enum btrace_function_flag flags)
 {
@@ -275,18 +277,17 @@ ftrace_fixup_caller (struct btrace_function *bfun,
 }
 
 /* Add a new function segment for a call.
-   CALLER is the chronologically preceding function segment.
    MFUN and FUN are the symbol information we have for this function.  */
 
 static struct btrace_function *
-ftrace_new_call (struct btrace_function *caller,
+ftrace_new_call (struct btrace_thread_info *btinfo,
 		 struct minimal_symbol *mfun,
 		 struct symbol *fun)
 {
   struct btrace_function *bfun;
 
-  bfun = ftrace_new_function (caller, mfun, fun);
-  bfun->up = caller;
+  bfun = ftrace_new_function (btinfo, mfun, fun);
+  bfun->up = btinfo->end;
   bfun->level += 1;
 
   ftrace_debug (bfun, "new call");
@@ -295,18 +296,17 @@ ftrace_new_call (struct btrace_function *caller,
 }
 
 /* Add a new function segment for a tail call.
-   CALLER is the chronologically preceding function segment.
    MFUN and FUN are the symbol information we have for this function.  */
 
 static struct btrace_function *
-ftrace_new_tailcall (struct btrace_function *caller,
+ftrace_new_tailcall (struct btrace_thread_info *btinfo,
 		     struct minimal_symbol *mfun,
 		     struct symbol *fun)
 {
   struct btrace_function *bfun;
 
-  bfun = ftrace_new_function (caller, mfun, fun);
-  bfun->up = caller;
+  bfun = ftrace_new_function (btinfo, mfun, fun);
+  bfun->up = btinfo->end;
   bfun->level += 1;
   bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
 
@@ -331,7 +331,8 @@ ftrace_get_caller (struct btrace_function *bfun)
    symbol information.  */
 
 static struct btrace_function *
-ftrace_find_caller (struct btrace_function *bfun,
+ftrace_find_caller (struct btrace_thread_info *btinfo,
+		    struct btrace_function *bfun,
 		    struct minimal_symbol *mfun,
 		    struct symbol *fun)
 {
@@ -353,7 +354,8 @@ ftrace_find_caller (struct btrace_function *bfun,
    tail calls ending with a jump).  */
 
 static struct btrace_function *
-ftrace_find_call (struct btrace_function *bfun)
+ftrace_find_call (struct btrace_thread_info *btinfo,
+		  struct btrace_function *bfun)
 {
   for (; bfun != NULL; bfun = bfun->up)
     {
@@ -373,21 +375,21 @@ ftrace_find_call (struct btrace_function *bfun)
 }
 
 /* Add a continuation segment for a function into which we return.
-   PREV is the chronologically preceding function segment.
    MFUN and FUN are the symbol information we have for this function.  */
 
 static struct btrace_function *
-ftrace_new_return (struct btrace_function *prev,
+ftrace_new_return (struct btrace_thread_info *btinfo,
 		   struct minimal_symbol *mfun,
 		   struct symbol *fun)
 {
-  struct btrace_function *bfun, *caller;
+  struct btrace_function *prev, *bfun, *caller;
 
-  bfun = ftrace_new_function (prev, mfun, fun);
+  prev = btinfo->end;
+  bfun = ftrace_new_function (btinfo, mfun, fun);
 
   /* It is important to start at PREV's caller.  Otherwise, we might find
      PREV itself, if PREV is a recursive function.  */
-  caller = ftrace_find_caller (prev->up, mfun, fun);
+  caller = ftrace_find_caller (btinfo, prev->up, mfun, fun);
   if (caller != NULL)
     {
       /* The caller of PREV is the preceding btrace function segment in this
@@ -412,7 +414,7 @@ ftrace_new_return (struct btrace_function *prev,
 	 wrong or that the call is simply not included in the trace.  */
 
       /* Let's search for some actual call.  */
-      caller = ftrace_find_call (prev->up);
+      caller = ftrace_find_call (btinfo, prev->up);
       if (caller == NULL)
 	{
 	  /* There is no call in PREV's back trace.  We assume that the
@@ -426,7 +428,7 @@ ftrace_new_return (struct btrace_function *prev,
 	  bfun->level = prev->level - 1;
 
 	  /* Fix up the call stack for PREV.  */
-	  ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);
+	  ftrace_fixup_caller (btinfo, prev, bfun, BFUN_UP_LINKS_TO_RET);
 
 	  ftrace_debug (bfun, "new return - no caller");
 	}
@@ -452,19 +454,19 @@ ftrace_new_return (struct btrace_function *prev,
 }
 
 /* Add a new function segment for a function switch.
-   PREV is the chronologically preceding function segment.
    MFUN and FUN are the symbol information we have for this function.  */
 
 static struct btrace_function *
-ftrace_new_switch (struct btrace_function *prev,
+ftrace_new_switch (struct btrace_thread_info *btinfo,
 		   struct minimal_symbol *mfun,
 		   struct symbol *fun)
 {
-  struct btrace_function *bfun;
+  struct btrace_function *prev, *bfun;
 
   /* This is an unexplained function switch.  We can't really be sure about the
      call stack, yet the best I can think of right now is to preserve it.  */
-  bfun = ftrace_new_function (prev, mfun, fun);
+  prev = btinfo->end;
+  bfun = ftrace_new_function (btinfo, mfun, fun);
   bfun->up = prev->up;
   bfun->flags = prev->flags;
 
@@ -474,20 +476,21 @@ ftrace_new_switch (struct btrace_function *prev,
 }
 
 /* Add a new function segment for a gap in the trace due to a decode error.
-   PREV is the chronologically preceding function segment.
    ERRCODE is the format-specific error code.  */
 
 static struct btrace_function *
-ftrace_new_gap (struct btrace_function *prev, int errcode)
+ftrace_new_gap (struct btrace_thread_info *btinfo, int errcode)
 {
-  struct btrace_function *bfun;
+  struct btrace_function *prev, *bfun;
+
+  prev = btinfo->end;
 
   /* We hijack prev if it was empty.  */
   if (prev != NULL && prev->errcode == 0
       && VEC_empty (btrace_insn_s, prev->insn))
     bfun = prev;
   else
-    bfun = ftrace_new_function (prev, NULL, NULL);
+    bfun = ftrace_new_function (btinfo, NULL, NULL);
 
   bfun->errcode = errcode;
 
@@ -496,17 +499,20 @@ ftrace_new_gap (struct btrace_function *prev, int errcode)
   return bfun;
 }
 
-/* Update BFUN with respect to the instruction at PC.  This may create new
-   function segments.
+/* Update the current function call segment at the end of the trace with
+   respect to the instruction at PC.  This may create new function segments.
    Return the chronologically latest function segment, never NULL.  */
 
 static struct btrace_function *
-ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
+ftrace_update_function (struct btrace_thread_info *btinfo, CORE_ADDR pc)
 {
   struct bound_minimal_symbol bmfun;
   struct minimal_symbol *mfun;
   struct symbol *fun;
   struct btrace_insn *last;
+  struct btrace_function *bfun;
+
+  bfun = btinfo->end;
 
   /* Try to determine the function we're in.  We use both types of symbols
      to avoid surprises when we sometimes get a full symbol and sometimes
@@ -520,7 +526,7 @@ ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
 
   /* If we didn't have a function or if we had a gap before, we create one.  */
   if (bfun == NULL || bfun->errcode != 0)
-    return ftrace_new_function (bfun, mfun, fun);
+    return ftrace_new_function (btinfo, mfun, fun);
 
   /* Check the last instruction, if we have one.
      We do this check first, since it allows us to fill in the call stack
@@ -548,9 +554,9 @@ ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
 	       different frame id's.  This will confuse stepping.  */
 	    fname = ftrace_print_function_name (bfun);
 	    if (strcmp (fname, "_dl_runtime_resolve") == 0)
-	      return ftrace_new_tailcall (bfun, mfun, fun);
+	      return ftrace_new_tailcall (btinfo, mfun, fun);
 
-	    return ftrace_new_return (bfun, mfun, fun);
+	    return ftrace_new_return (btinfo, mfun, fun);
 	  }
 
 	case BTRACE_INSN_CALL:
@@ -558,7 +564,7 @@ ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
 	  if (last->pc + last->size == pc)
 	    break;
 
-	  return ftrace_new_call (bfun, mfun, fun);
+	  return ftrace_new_call (btinfo, mfun, fun);
 
 	case BTRACE_INSN_JUMP:
 	  {
@@ -568,13 +574,13 @@ ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
 
 	    /* A jump to the start of a function is (typically) a tail call.  */
 	    if (start == pc)
-	      return ftrace_new_tailcall (bfun, mfun, fun);
+	      return ftrace_new_tailcall (btinfo, mfun, fun);
 
 	    /* If we can't determine the function for PC, we treat a jump at
 	       the end of the block as tail call if we're switching functions
 	       and as an intra-function branch if we don't.  */
 	    if (start == 0 && ftrace_function_switched (bfun, mfun, fun))
-	      return ftrace_new_tailcall (bfun, mfun, fun);
+	      return ftrace_new_tailcall (btinfo, mfun, fun);
 
 	    break;
 	  }
@@ -589,18 +595,21 @@ ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
 		    ftrace_print_function_name (bfun),
 		    ftrace_print_filename (bfun));
 
-      return ftrace_new_switch (bfun, mfun, fun);
+      return ftrace_new_switch (btinfo, mfun, fun);
     }
 
   return bfun;
 }
 
-/* Add the instruction at PC to BFUN's instructions.  */
+/* Add the instruction at PC to the instructions of the current function call
+   segment at the end of the trace.  */
 
 static void
-ftrace_update_insns (struct btrace_function *bfun,
+ftrace_update_insns (struct btrace_thread_info *btinfo,
 		     const struct btrace_insn *insn)
 {
+  struct btrace_function *bfun = btinfo->end;
+
   VEC_safe_push (btrace_insn_s, bfun->insn, insn);
 
   if (record_debug > 1)
@@ -704,7 +713,8 @@ ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
    ftrace_connect_backtrace.  */
 
 static void
-ftrace_connect_bfun (struct btrace_function *prev,
+ftrace_connect_bfun (struct btrace_thread_info *btinfo,
+		     struct btrace_function *prev,
 		     struct btrace_function *next)
 {
   DEBUG_FTRACE ("connecting...");
@@ -727,7 +737,7 @@ ftrace_connect_bfun (struct btrace_function *prev,
       if (next->up != NULL)
 	{
 	  DEBUG_FTRACE ("using next's callers");
-	  ftrace_fixup_caller (prev, next->up, next->flags);
+	  ftrace_fixup_caller (btinfo, prev, next->up, next->flags);
 	}
     }
   else if (next->up == NULL)
@@ -735,7 +745,7 @@ ftrace_connect_bfun (struct btrace_function *prev,
       if (prev->up != NULL)
 	{
 	  DEBUG_FTRACE ("using prev's callers");
-	  ftrace_fixup_caller (next, prev->up, prev->flags);
+	  ftrace_fixup_caller (btinfo, next, prev->up, prev->flags);
 	}
     }
   else
@@ -761,7 +771,7 @@ ftrace_connect_bfun (struct btrace_function *prev,
 
 	  DEBUG_FTRACE ("adding prev's tail calls to next");
 
-	  ftrace_fixup_caller (next, prev->up, prev->flags);
+	  ftrace_fixup_caller (btinfo, next, prev->up, prev->flags);
 
 	  for (prev = prev->up; prev != NULL; prev = prev->up)
 	    {
@@ -772,7 +782,7 @@ ftrace_connect_bfun (struct btrace_function *prev,
 		  ftrace_debug (prev, "..top");
 		  ftrace_debug (caller, "..up");
 
-		  ftrace_fixup_caller (prev, caller, flags);
+		  ftrace_fixup_caller (btinfo, prev, caller, flags);
 
 		  /* If we skipped any tail calls, this may move CALLER to a
 		     different function level.
@@ -803,7 +813,8 @@ ftrace_connect_bfun (struct btrace_function *prev,
    ftrace_match_backtrace.  */
 
 static void
-ftrace_connect_backtrace (struct btrace_function *lhs,
+ftrace_connect_backtrace (struct btrace_thread_info *btinfo,
+			  struct btrace_function *lhs,
 			  struct btrace_function *rhs)
 {
   while (lhs != NULL && rhs != NULL)
@@ -819,7 +830,7 @@ ftrace_connect_backtrace (struct btrace_function *lhs,
       lhs = ftrace_get_caller (lhs);
       rhs = ftrace_get_caller (rhs);
 
-      ftrace_connect_bfun (prev, next);
+      ftrace_connect_bfun (btinfo, prev, next);
     }
 }
 
@@ -829,7 +840,8 @@ ftrace_connect_backtrace (struct btrace_function *lhs,
    Returns non-zero if the gap could be bridged, zero otherwise.  */
 
 static int
-ftrace_bridge_gap (struct btrace_function *lhs, struct btrace_function *rhs,
+ftrace_bridge_gap (struct btrace_thread_info *btinfo,
+		   struct btrace_function *lhs, struct btrace_function *rhs,
 		   int min_matches)
 {
   struct btrace_function *best_l, *best_r, *cand_l, *cand_r;
@@ -877,7 +889,7 @@ ftrace_bridge_gap (struct btrace_function *lhs, struct btrace_function *rhs,
      BEST_L to BEST_R as they will already be on the same level.  */
   ftrace_fixup_level (rhs, best_l->level - best_r->level);
 
-  ftrace_connect_backtrace (best_l, best_r);
+  ftrace_connect_backtrace (btinfo, best_l, best_r);
 
   return best_matches;
 }
@@ -935,7 +947,7 @@ btrace_bridge_gaps (struct thread_info *tp, VEC (bfun_s) **gaps)
 	      if (rhs == NULL)
 		continue;
 
-	      bridged = ftrace_bridge_gap (lhs, rhs, min_matches);
+	      bridged = ftrace_bridge_gap (&tp->btrace, lhs, rhs, min_matches);
 
 	      /* Keep track of gaps we were not able to bridge and try again.
 		 If we just pushed them to the end of GAPS we would risk an
@@ -1004,7 +1016,7 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
 	  if (block->end < pc)
 	    {
 	      /* Indicate the gap in the trace.  */
-	      btinfo->end = ftrace_new_gap (btinfo->end, BDE_BTS_OVERFLOW);
+	      btinfo->end = ftrace_new_gap (btinfo, BDE_BTS_OVERFLOW);
 	      if (btinfo->begin == NULL)
 		btinfo->begin = btinfo->end;
 
@@ -1017,7 +1029,7 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
 	      break;
 	    }
 
-	  btinfo->end = ftrace_update_function (btinfo->end, pc);
+	  btinfo->end = ftrace_update_function (btinfo, pc);
 	  if (btinfo->begin == NULL)
 	    btinfo->begin = btinfo->end;
 
@@ -1041,7 +1053,7 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
 	  insn.iclass = ftrace_classify_insn (gdbarch, pc);
 	  insn.flags = 0;
 
-	  ftrace_update_insns (btinfo->end, &insn);
+	  ftrace_update_insns (btinfo, &insn);
 
 	  /* We're done once we pushed the instruction at the end.  */
 	  if (block->end == pc)
@@ -1052,7 +1064,7 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
 	    {
 	      /* Indicate the gap in the trace.  We just added INSN so we're
 		 not at the beginning.  */
-	      btinfo->end = ftrace_new_gap (btinfo->end, BDE_BTS_INSN_SIZE);
+	      btinfo->end = ftrace_new_gap (btinfo, BDE_BTS_INSN_SIZE);
 
 	      VEC_safe_push (bfun_s, *gaps, btinfo->end);
 
@@ -1158,7 +1170,7 @@ ftrace_add_pt (struct pt_insn_decoder *decoder,
 		 from some other instruction.  Indicate this as a trace gap.  */
 	      if (insn.enabled)
 		{
-		  btinfo->end = ftrace_new_gap (btinfo->end, BDE_PT_DISABLED);
+		  btinfo->end = ftrace_new_gap (btinfo, BDE_PT_DISABLED);
 
 		  VEC_safe_push (bfun_s, *gaps, btinfo->end);
 
@@ -1173,7 +1185,7 @@ ftrace_add_pt (struct pt_insn_decoder *decoder,
 	  /* Indicate trace overflows.  */
 	  if (insn.resynced)
 	    {
-	      btinfo->end = ftrace_new_gap (btinfo->end, BDE_PT_OVERFLOW);
+	      btinfo->end = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW);
 	      if (btinfo->begin == NULL)
 		btinfo->begin = btinfo->end;
 
@@ -1186,7 +1198,7 @@ ftrace_add_pt (struct pt_insn_decoder *decoder,
 		       btinfo->end->insn_offset - 1, offset, insn.ip);
 	    }
 
-	  upd = ftrace_update_function (btinfo->end, insn.ip);
+	  upd = ftrace_update_function (btinfo, insn.ip);
 	  if (upd != btinfo->end)
 	    {
 	      btinfo->end = upd;
@@ -1203,14 +1215,14 @@ ftrace_add_pt (struct pt_insn_decoder *decoder,
 	  btinsn.iclass = pt_reclassify_insn (insn.iclass);
 	  btinsn.flags = pt_btrace_insn_flags (&insn);
 
-	  ftrace_update_insns (btinfo->end, &btinsn);
+	  ftrace_update_insns (btinfo, &btinsn);
 	}
 
       if (errcode == -pte_eos)
 	break;
 
       /* Indicate the gap in the trace.  */
-      btinfo->end = ftrace_new_gap (btinfo->end, errcode);
+      btinfo->end = ftrace_new_gap (btinfo, errcode);
       if (btinfo->begin == NULL)
 	btinfo->begin = btinfo->end;
 
@@ -1342,7 +1354,7 @@ btrace_compute_ftrace_pt (struct thread_info *tp,
       /* Indicate a gap in the trace if we quit trace processing.  */
       if (error.reason == RETURN_QUIT && btinfo->end != NULL)
 	{
-	  btinfo->end = ftrace_new_gap (btinfo->end, BDE_PT_USER_QUIT);
+	  btinfo->end = ftrace_new_gap (btinfo, BDE_PT_USER_QUIT);
 
 	  VEC_safe_push (bfun_s, *gaps, btinfo->end);
 	}
-- 
2.7.4

^ permalink raw reply	[flat|nested] 20+ messages in thread

* [PATCH 07/11] [SQUASH] btrace: Adjust struct btrace_function::up.
  2017-02-17 13:27 [PATCH 00/11] btrace: Turn linked list of function call segments into vector Tim Wiederhake
@ 2017-02-17 13:27 ` Tim Wiederhake
  2017-02-24  9:33   ` Metzger, Markus T
  2017-02-17 13:27 ` [PATCH 10/11] [SQUASH] btrace: Remove bfun_s vector Tim Wiederhake
                   ` (10 subsequent siblings)
  11 siblings, 1 reply; 20+ messages in thread
From: Tim Wiederhake @ 2017-02-17 13:27 UTC (permalink / raw)
  To: gdb-patches; +Cc: markus.t.metzger

This patch stands alone for easier review and is meant to be squashed together
for committing.  ChangeLog will be added to the squashed commit.


2017-02-17  Tim Wiederhake  <tim.wiederhake@intel.com>
---
 gdb/btrace.c                  | 138 ++++++++++++++++++++++++------------------
 gdb/btrace.h                  |   6 +-
 gdb/python/py-record-btrace.c |   4 +-
 gdb/record-btrace.c           |  18 +++---
 4 files changed, 97 insertions(+), 69 deletions(-)

diff --git a/gdb/btrace.c b/gdb/btrace.c
index cc22737..880a703 100644
--- a/gdb/btrace.c
+++ b/gdb/btrace.c
@@ -156,6 +156,16 @@ ftrace_call_num_insn (const struct btrace_function* bfun)
   return VEC_length (btrace_insn_s, bfun->insn);
 }
 
+static struct btrace_function *
+ftrace_find_call_by_number (const struct btrace_thread_info *btinfo,
+			    unsigned int number)
+{
+  if (number == 0 || number > VEC_length (btrace_fun_s, btinfo->functions))
+    return NULL;
+
+  return VEC_index (btrace_fun_s, btinfo->functions, number - 1);
+}
+
 /* Return non-zero if BFUN does not match MFUN and FUN,
    return zero otherwise.  */
 
@@ -249,10 +259,10 @@ ftrace_update_caller (struct btrace_function *bfun,
 		      struct btrace_function *caller,
 		      enum btrace_function_flag flags)
 {
-  if (bfun->up != NULL)
+  if (bfun->up != 0)
     ftrace_debug (bfun, "updating caller");
 
-  bfun->up = caller;
+  bfun->up = caller->number;
   bfun->flags = flags;
 
   ftrace_debug (bfun, "set caller");
@@ -287,10 +297,10 @@ ftrace_new_call (struct btrace_thread_info *btinfo,
 		 struct minimal_symbol *mfun,
 		 struct symbol *fun)
 {
-  struct btrace_function *bfun;
+  const unsigned int last = VEC_length (btrace_fun_s, btinfo->functions);
+  struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);
 
-  bfun = ftrace_new_function (btinfo, mfun, fun);
-  bfun->up = btinfo->end;
+  bfun->up = last;
   bfun->level += 1;
 
   ftrace_debug (bfun, "new call");
@@ -306,10 +316,10 @@ ftrace_new_tailcall (struct btrace_thread_info *btinfo,
 		     struct minimal_symbol *mfun,
 		     struct symbol *fun)
 {
-  struct btrace_function *bfun;
+  const unsigned int last = VEC_length (btrace_fun_s, btinfo->functions);
+  struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);
 
-  bfun = ftrace_new_function (btinfo, mfun, fun);
-  bfun->up = btinfo->end;
+  bfun->up = last;
   bfun->level += 1;
   bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
 
@@ -321,57 +331,58 @@ ftrace_new_tailcall (struct btrace_thread_info *btinfo,
 /* Return the caller of BFUN or NULL if there is none.  This function skips
    tail calls in the call chain.  */
 static struct btrace_function *
-ftrace_get_caller (struct btrace_function *bfun)
+ftrace_get_caller (struct btrace_thread_info *btinfo,
+		   struct btrace_function *bfun)
 {
-  for (; bfun != NULL; bfun = bfun->up)
+  for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
     if ((bfun->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
-      return bfun->up;
+      return ftrace_find_call_by_number (btinfo, bfun->up);
 
   return NULL;
 }
 
-/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
-   symbol information.  */
+/* Find the innermost caller with MFUN/FUN symbol information in the back trace
+   of the function call segment with number NUMBER.  */
 
 static struct btrace_function *
 ftrace_find_caller (struct btrace_thread_info *btinfo,
-		    struct btrace_function *bfun,
+		    unsigned int number,
 		    struct minimal_symbol *mfun,
 		    struct symbol *fun)
 {
-  for (; bfun != NULL; bfun = bfun->up)
+  struct btrace_function *bfun;
+
+  while ((bfun = ftrace_find_call_by_number (btinfo, number)) != NULL)
     {
-      /* Skip functions with incompatible symbol information.  */
-      if (ftrace_function_switched (bfun, mfun, fun))
-	continue;
+      if (!ftrace_function_switched (bfun, mfun, fun))
+	break;
 
-      /* This is the function segment we're looking for.  */
-      break;
+      number = bfun->up;
     }
 
   return bfun;
 }
 
-/* Find the innermost caller in the back trace of BFUN, skipping all
-   function segments that do not end with a call instruction (e.g.
-   tail calls ending with a jump).  */
+/* Find the innermost caller in the back trace of the function call segment
+   with number NUMBER, skipping all function call segments that do not end
+   with a call instruction (e.g. tail calls ending with a jump).  */
 
 static struct btrace_function *
-ftrace_find_call (struct btrace_thread_info *btinfo,
-		  struct btrace_function *bfun)
+ftrace_find_call (struct btrace_thread_info *btinfo, unsigned int number)
 {
-  for (; bfun != NULL; bfun = bfun->up)
-    {
-      struct btrace_insn *last;
+  struct btrace_function *bfun;
 
-      /* Skip gaps.  */
-      if (bfun->errcode != 0)
-	continue;
+  while ((bfun = ftrace_find_call_by_number (btinfo, number)) != NULL)
+    {
+      if (bfun->errcode == 0)
+	{
+	  struct btrace_insn *last = VEC_last (btrace_insn_s, bfun->insn);
 
-      last = VEC_last (btrace_insn_s, bfun->insn);
+	  if (last->iclass == BTRACE_INSN_CALL)
+	    break;
+	}
 
-      if (last->iclass == BTRACE_INSN_CALL)
-	break;
+      number = bfun->up;
     }
 
   return bfun;
@@ -425,8 +436,9 @@ ftrace_new_return (struct btrace_thread_info *btinfo,
 
 	  /* Let's find the topmost function and add a new caller for it.
 	     This should handle a series of initial tail calls.  */
-	  while (prev->up != NULL)
-	    prev = prev->up;
+	  for (caller = prev; caller != NULL;
+	       caller = ftrace_find_call_by_number (btinfo, prev->up))
+	    prev = caller;
 
 	  bfun->level = prev->level - 1;
 
@@ -446,7 +458,7 @@ ftrace_new_return (struct btrace_thread_info *btinfo,
 	     on the same level as they are.
 	     This should handle things like schedule () correctly where we're
 	     switching contexts.  */
-	  prev->up = bfun;
+	  prev->up = bfun->number;
 	  prev->flags = BFUN_UP_LINKS_TO_RET;
 
 	  ftrace_debug (bfun, "new return - unknown caller");
@@ -649,7 +661,8 @@ ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
    match.  */
 
 static int
-ftrace_match_backtrace (struct btrace_function *lhs,
+ftrace_match_backtrace (struct btrace_thread_info *btinfo,
+			struct btrace_function *lhs,
 			struct btrace_function *rhs)
 {
   int matches;
@@ -659,8 +672,8 @@ ftrace_match_backtrace (struct btrace_function *lhs,
       if (ftrace_function_switched (lhs, rhs->msym, rhs->sym))
 	return 0;
 
-      lhs = ftrace_get_caller (lhs);
-      rhs = ftrace_get_caller (rhs);
+      lhs = ftrace_get_caller (btinfo, lhs);
+      rhs = ftrace_get_caller (btinfo, rhs);
     }
 
   return matches;
@@ -735,20 +748,26 @@ ftrace_connect_bfun (struct btrace_thread_info *btinfo,
   ftrace_fixup_level (next, prev->level - next->level);
 
   /* If we run out of back trace for one, let's use the other's.  */
-  if (prev->up == NULL)
+  if (prev->up == 0)
     {
-      if (next->up != NULL)
+      btrace_function_flags flags = next->flags;
+
+      next = ftrace_find_call_by_number (btinfo, next->up);
+      if (next != NULL)
 	{
 	  DEBUG_FTRACE ("using next's callers");
-	  ftrace_fixup_caller (btinfo, prev, next->up, next->flags);
+	  ftrace_fixup_caller (btinfo, prev, next, flags);
 	}
     }
-  else if (next->up == NULL)
+  else if (next->up == 0)
     {
-      if (prev->up != NULL)
+      btrace_function_flags flags = prev->flags;
+
+      prev = ftrace_find_call_by_number (btinfo, prev->up);
+      if (prev != NULL)
 	{
 	  DEBUG_FTRACE ("using prev's callers");
-	  ftrace_fixup_caller (btinfo, next, prev->up, prev->flags);
+	  ftrace_fixup_caller (btinfo, next, prev, flags);
 	}
     }
   else
@@ -766,26 +785,27 @@ ftrace_connect_bfun (struct btrace_thread_info *btinfo,
       if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
 	{
 	  struct btrace_function *caller;
-	  btrace_function_flags flags;
+	  btrace_function_flags flags = prev->flags;
 
 	  /* We checked NEXT->UP above so CALLER can't be NULL.  */
-	  caller = next->up;
-	  flags = next->flags;
+	  caller = ftrace_find_call_by_number (btinfo, next->up);
 
 	  DEBUG_FTRACE ("adding prev's tail calls to next");
 
-	  ftrace_fixup_caller (btinfo, next, prev->up, prev->flags);
+	  prev = ftrace_find_call_by_number (btinfo, prev->up);
+	  ftrace_fixup_caller (btinfo, next, prev, flags);
 
-	  for (prev = prev->up; prev != NULL; prev = prev->up)
+	  for (; prev != NULL;
+	       prev = ftrace_find_call_by_number (btinfo, prev->up))
 	    {
 	      /* At the end of PREV's back trace, continue with CALLER.  */
-	      if (prev->up == NULL)
+	      if (prev->up == 0)
 		{
 		  DEBUG_FTRACE ("fixing up link for tailcall chain");
 		  ftrace_debug (prev, "..top");
 		  ftrace_debug (caller, "..up");
 
-		  ftrace_fixup_caller (btinfo, prev, caller, flags);
+		  ftrace_fixup_caller (btinfo, prev, caller, next->flags);
 
 		  /* If we skipped any tail calls, this may move CALLER to a
 		     different function level.
@@ -830,8 +850,8 @@ ftrace_connect_backtrace (struct btrace_thread_info *btinfo,
       prev = lhs;
       next = rhs;
 
-      lhs = ftrace_get_caller (lhs);
-      rhs = ftrace_get_caller (rhs);
+      lhs = ftrace_get_caller (btinfo, lhs);
+      rhs = ftrace_get_caller (btinfo, rhs);
 
       ftrace_connect_bfun (btinfo, prev, next);
     }
@@ -860,12 +880,14 @@ ftrace_bridge_gap (struct btrace_thread_info *btinfo,
  /* We search the back traces of LHS and RHS for valid connections and connect
     the two function segments that give the longest combined back trace.  */
 
-  for (cand_l = lhs; cand_l != NULL; cand_l = ftrace_get_caller (cand_l))
-    for (cand_r = rhs; cand_r != NULL; cand_r = ftrace_get_caller (cand_r))
+  for (cand_l = lhs; cand_l != NULL;
+       cand_l = ftrace_get_caller (btinfo, cand_l))
+    for (cand_r = rhs; cand_r != NULL;
+	 cand_r = ftrace_get_caller (btinfo, cand_r))
       {
 	int matches;
 
-	matches = ftrace_match_backtrace (cand_l, cand_r);
+	matches = ftrace_match_backtrace (btinfo, cand_l, cand_r);
 	if (best_matches < matches)
 	  {
 	    best_matches = matches;
diff --git a/gdb/btrace.h b/gdb/btrace.h
index 92435e7..2b28ff8 100644
--- a/gdb/btrace.h
+++ b/gdb/btrace.h
@@ -152,8 +152,10 @@ struct btrace_function
   /* The previous and next function in control flow order.  */
   struct btrace_func_link flow;
 
-  /* The directly preceding function segment in a (fake) call stack.  */
-  struct btrace_function *up;
+  /* The function segment number of the directly preceding function segment in
+     a (fake) call stack.  Will be zero if there is no such function segment in
+     the record.  */
+  unsigned int up;
 
   /* The instructions in this function segment.
      The instruction vector will be empty if the function segment
diff --git a/gdb/python/py-record-btrace.c b/gdb/python/py-record-btrace.c
index 6158f31..14ad5b7 100644
--- a/gdb/python/py-record-btrace.c
+++ b/gdb/python/py-record-btrace.c
@@ -456,10 +456,10 @@ btpy_call_up (PyObject *self, void *closure)
   if (func == NULL)
     Py_RETURN_NONE;
 
-  if (func->up == NULL)
+  if (func->up == 0)
     Py_RETURN_NONE;
 
-  return btpy_call_new (obj->ptid, func->up->number);
+  return btpy_call_new (obj->ptid, func->up);
 }
 
 /* Implementation of BtraceFunctionCall.prev_sibling [BtraceFunctionCall].
diff --git a/gdb/record-btrace.c b/gdb/record-btrace.c
index 87fbcba..791963c 100644
--- a/gdb/record-btrace.c
+++ b/gdb/record-btrace.c
@@ -1584,7 +1584,7 @@ record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
   bfun = cache->bfun;
   gdb_assert (bfun != NULL);
 
-  if (bfun->up == NULL)
+  if (bfun->up == 0)
     return UNWIND_UNAVAILABLE;
 
   return UNWIND_NO_REASON;
@@ -1643,11 +1643,12 @@ record_btrace_frame_prev_register (struct frame_info *this_frame,
   bfun = cache->bfun;
   gdb_assert (bfun != NULL);
 
-  caller = bfun->up;
-  if (caller == NULL)
+  if (bfun->up == 0)
     throw_error (NOT_AVAILABLE_ERROR,
 		 _("No caller in btrace record history"));
 
+  caller = VEC_index (btrace_fun_s, cache->tp->btrace.functions, bfun->up - 1);
+
   if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
     {
       insn = VEC_index (btrace_insn_s, caller->insn, 0);
@@ -1701,7 +1702,7 @@ record_btrace_frame_sniffer (const struct frame_unwind *self,
 
       callee = btrace_get_frame_function (next);
       if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
-	bfun = callee->up;
+	bfun = VEC_index (btrace_fun_s, tp->btrace.functions, callee->up - 1);
     }
 
   if (bfun == NULL)
@@ -1728,6 +1729,7 @@ record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
 {
   const struct btrace_function *bfun, *callee;
   struct btrace_frame_cache *cache;
+  struct thread_info *tinfo;
   struct frame_info *next;
 
   next = get_next_frame (this_frame);
@@ -1741,16 +1743,18 @@ record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
   if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
     return 0;
 
-  bfun = callee->up;
-  if (bfun == NULL)
+  if (callee->up == 0)
     return 0;
 
+  tinfo = find_thread_ptid (inferior_ptid);
+  bfun = VEC_index (btrace_fun_s, tinfo->btrace.functions, callee->up - 1);
+
   DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
 	 btrace_get_bfun_name (bfun), bfun->level);
 
   /* This is our frame.  Initialize the frame cache.  */
   cache = bfcache_new (this_frame);
-  cache->tp = find_thread_ptid (inferior_ptid);
+  cache->tp = tinfo;
   cache->bfun = bfun;
 
   *this_cache = cache;
-- 
2.7.4

^ permalink raw reply	[flat|nested] 20+ messages in thread

* [PATCH 06/11] [SQUASH] btrace: Save function calls in a vector.
  2017-02-17 13:27 [PATCH 00/11] btrace: Turn linked list of function call segments into vector Tim Wiederhake
                   ` (7 preceding siblings ...)
  2017-02-17 13:27 ` [PATCH 08/11] [SQUASH] btrace: Adjust struct btrace_function::{flow,segment} Tim Wiederhake
@ 2017-02-17 13:27 ` Tim Wiederhake
  2017-02-24  9:33   ` Metzger, Markus T
  2017-02-17 13:27 ` [PATCH 09/11] [SQUASH] btrace: Remove struct btrace_thread_info::{begin,end} Tim Wiederhake
                   ` (2 subsequent siblings)
  11 siblings, 1 reply; 20+ messages in thread
From: Tim Wiederhake @ 2017-02-17 13:27 UTC (permalink / raw)
  To: gdb-patches; +Cc: markus.t.metzger

This patch stands alone for easier review and is meant to be squashed together
for committing.  ChangeLog will be added to the squashed commit.


2017-02-17  Tim Wiederhake  <tim.wiederhake@intel.com>
---
 gdb/btrace.c        | 87 +++++++++++++++++++++++++++++------------------------
 gdb/btrace.h        | 12 +++++---
 gdb/record-btrace.c |  2 +-
 3 files changed, 56 insertions(+), 45 deletions(-)

diff --git a/gdb/btrace.c b/gdb/btrace.c
index 1e110cc..cc22737 100644
--- a/gdb/btrace.c
+++ b/gdb/btrace.c
@@ -204,17 +204,20 @@ ftrace_function_switched (const struct btrace_function *bfun,
 
 /* Allocate and initialize a new branch trace function segment at the end of
    the trace.  MFUN and FUN are the symbol information we have for this
-   function.  */
+   function.  This invalidates all struct btrace_function pointers held.  */
 
 static struct btrace_function *
 ftrace_new_function (struct btrace_thread_info *btinfo,
 		     struct minimal_symbol *mfun,
 		     struct symbol *fun)
 {
-  struct btrace_function *prev, *bfun;
+  struct btrace_function *prev = NULL, *bfun;
 
-  prev = btinfo->end;
-  bfun = XCNEW (struct btrace_function);
+  if (!VEC_empty (btrace_fun_s, btinfo->functions))
+    prev = VEC_last (btrace_fun_s, btinfo->functions);
+
+  bfun = VEC_safe_push (btrace_fun_s, btinfo->functions, NULL);
+  memset (bfun, 0, sizeof (*bfun));
 
   bfun->msym = mfun;
   bfun->sym = fun;
@@ -1490,6 +1493,10 @@ btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
   DEBUG ("enable thread %s (%s)", print_thread_id (tp),
 	 target_pid_to_str (tp->ptid));
 
+  /* Temporarily prevent resizing the vector until reworking struct
+     btrace_function is complete.  */
+  VEC_reserve (btrace_fun_s, tp->btrace.functions, 1000000);
+
   tp->btrace.target = target_enable_btrace (tp->ptid, conf);
 
   /* We're done if we failed to enable tracing.  */
@@ -1550,6 +1557,7 @@ btrace_disable (struct thread_info *tp)
   btp->target = NULL;
 
   btrace_clear (tp);
+  VEC_free (btrace_fun_s, btp->functions);
 }
 
 /* See btrace.h.  */
@@ -1804,6 +1812,11 @@ btrace_fetch (struct thread_info *tp)
   /* We should not be called on running or exited threads.  */
   gdb_assert (can_access_registers_ptid (tp->ptid));
 
+  /* Temporarily prevent resizing the vector until reworking struct
+     btrace_function is complete.  */
+  if (btinfo->functions == NULL)
+    VEC_reserve (btrace_fun_s, btinfo->functions, 1000000);
+
   btrace_data_init (&btrace);
   cleanup = make_cleanup_btrace_data (&btrace);
 
@@ -1843,19 +1856,13 @@ btrace_fetch (struct thread_info *tp)
   /* Compute the trace, provided we have any.  */
   if (!btrace_data_empty (&btrace))
     {
-      struct btrace_function *bfun;
-
       /* Store the raw trace data.  The stored data will be cleared in
 	 btrace_clear, so we always append the new trace.  */
       btrace_data_append (&btinfo->data, &btrace);
       btrace_maint_clear (btinfo);
 
-      VEC_truncate (btrace_fun_p, btinfo->functions, 0);
       btrace_clear_history (btinfo);
       btrace_compute_ftrace (tp, &btrace);
-
-      for (bfun = btinfo->begin; bfun != NULL; bfun = bfun->flow.next)
-	VEC_safe_push (btrace_fun_p, btinfo->functions, bfun);
     }
 
   do_cleanups (cleanup);
@@ -1868,6 +1875,7 @@ btrace_clear (struct thread_info *tp)
 {
   struct btrace_thread_info *btinfo;
   struct btrace_function *it, *trash;
+  unsigned int length, i;
 
   DEBUG ("clear thread %s (%s)", print_thread_id (tp),
 	 target_pid_to_str (tp->ptid));
@@ -1878,17 +1886,18 @@ btrace_clear (struct thread_info *tp)
 
   btinfo = &tp->btrace;
 
-  VEC_free (btrace_fun_p, btinfo->functions);
-
-  it = btinfo->begin;
-  while (it != NULL)
+  length = VEC_length (btrace_fun_s, btinfo->functions);
+  for (i = 0; i < length; ++i)
     {
-      trash = it;
-      it = it->flow.next;
+      struct btrace_function *bfun;
+
+      bfun = VEC_index (btrace_fun_s, btinfo->functions, i);
 
-      xfree (trash);
+      VEC_free (btrace_insn_s, bfun->insn);
     }
 
+  VEC_truncate (btrace_fun_s, btinfo->functions, 0);
+
   btinfo->begin = NULL;
   btinfo->end = NULL;
   btinfo->ngaps = 0;
@@ -2237,7 +2246,7 @@ btrace_insn_get (const struct btrace_insn_iterator *it)
   unsigned int index, end;
 
   index = it->insn_index;
-  bfun = VEC_index (btrace_fun_p, it->btinfo->functions, it->call_index);
+  bfun = VEC_index (btrace_fun_s, it->btinfo->functions, it->call_index);
 
   /* Check if the iterator points to a gap in the trace.  */
   if (bfun->errcode != 0)
@@ -2258,7 +2267,7 @@ btrace_insn_get_error (const struct btrace_insn_iterator *it)
 {
   const struct btrace_function *bfun;
 
-  bfun = VEC_index (btrace_fun_p, it->btinfo->functions, it->call_index);
+  bfun = VEC_index (btrace_fun_s, it->btinfo->functions, it->call_index);
   return bfun->errcode;
 }
 
@@ -2269,7 +2278,7 @@ btrace_insn_number (const struct btrace_insn_iterator *it)
 {
   const struct btrace_function *bfun;
 
-  bfun = VEC_index (btrace_fun_p, it->btinfo->functions, it->call_index);
+  bfun = VEC_index (btrace_fun_s, it->btinfo->functions, it->call_index);
   return bfun->insn_offset + it->insn_index;
 }
 
@@ -2324,7 +2333,7 @@ btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
   const struct btrace_function *bfun;
   unsigned int index, steps;
 
-  bfun = VEC_index (btrace_fun_p, it->btinfo->functions, it->call_index);
+  bfun = VEC_index (btrace_fun_s, it->btinfo->functions, it->call_index);
   steps = 0;
   index = it->insn_index;
 
@@ -2406,7 +2415,7 @@ btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
   const struct btrace_function *bfun;
   unsigned int index, steps;
 
-  bfun = VEC_index (btrace_fun_p, it->btinfo->functions, it->call_index);
+  bfun = VEC_index (btrace_fun_s, it->btinfo->functions, it->call_index);
   steps = 0;
   index = it->insn_index;
 
@@ -2489,16 +2498,16 @@ btrace_find_insn_by_number (struct btrace_insn_iterator *it,
   const struct btrace_function *bfun;
   unsigned int upper, lower;
 
-  if (VEC_empty (btrace_fun_p, btinfo->functions))
+  if (VEC_empty (btrace_fun_s, btinfo->functions))
       return 0;
 
   lower = 0;
-  bfun = VEC_index (btrace_fun_p, btinfo->functions, lower);
+  bfun = VEC_index (btrace_fun_s, btinfo->functions, lower);
   if (number < bfun->insn_offset)
     return 0;
 
-  upper = VEC_length (btrace_fun_p, btinfo->functions) - 1;
-  bfun = VEC_index (btrace_fun_p, btinfo->functions, upper);
+  upper = VEC_length (btrace_fun_s, btinfo->functions) - 1;
+  bfun = VEC_index (btrace_fun_s, btinfo->functions, upper);
   if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
     return 0;
 
@@ -2507,7 +2516,7 @@ btrace_find_insn_by_number (struct btrace_insn_iterator *it,
     {
       const unsigned int average = lower + (upper - lower) / 2;
 
-      bfun = VEC_index (btrace_fun_p, btinfo->functions, average);
+      bfun = VEC_index (btrace_fun_s, btinfo->functions, average);
 
       if (number < bfun->insn_offset)
 	{
@@ -2538,10 +2547,10 @@ btrace_ends_with_single_insn (const struct btrace_thread_info* btinfo)
 {
   const btrace_function *bfun;
 
-  if (VEC_empty (btrace_fun_p, btinfo->functions))
+  if (VEC_empty (btrace_fun_s, btinfo->functions))
     return 0;
 
-  bfun = VEC_last (btrace_fun_p, btinfo->functions);
+  bfun = VEC_last (btrace_fun_s, btinfo->functions);
   return ftrace_call_num_insn (bfun) == 1;
 }
 
@@ -2550,10 +2559,10 @@ btrace_ends_with_single_insn (const struct btrace_thread_info* btinfo)
 const struct btrace_function *
 btrace_call_get (const struct btrace_call_iterator *it)
 {
-  if (it->call_index >= VEC_length (btrace_fun_p, it->btinfo->functions))
+  if (it->call_index >= VEC_length (btrace_fun_s, it->btinfo->functions))
     return NULL;
 
-  return VEC_index (btrace_fun_p, it->btinfo->functions, it->call_index);
+  return VEC_index (btrace_fun_s, it->btinfo->functions, it->call_index);
 }
 
 /* See btrace.h.  */
@@ -2561,7 +2570,7 @@ btrace_call_get (const struct btrace_call_iterator *it)
 unsigned int
 btrace_call_number (const struct btrace_call_iterator *it)
 {
-  const unsigned int length = VEC_length (btrace_fun_p, it->btinfo->functions);
+  const unsigned int length = VEC_length (btrace_fun_s, it->btinfo->functions);
 
   if ((it->call_index == length) && btrace_ends_with_single_insn (it->btinfo))
     return length;
@@ -2575,13 +2584,13 @@ void
 btrace_call_begin (struct btrace_call_iterator *it,
 		   const struct btrace_thread_info *btinfo)
 {
-  if (VEC_empty (btrace_fun_p, btinfo->functions))
+  if (VEC_empty (btrace_fun_s, btinfo->functions))
     error (_("No trace."));
 
   it->btinfo = btinfo;
   it->call_index = 0;
 
-  if ((VEC_length (btrace_fun_p, it->btinfo->functions) == 1)
+  if ((VEC_length (btrace_fun_s, it->btinfo->functions) == 1)
       && (btrace_ends_with_single_insn (btinfo)))
     it->call_index = 1;
 }
@@ -2592,11 +2601,11 @@ void
 btrace_call_end (struct btrace_call_iterator *it,
 		 const struct btrace_thread_info *btinfo)
 {
-  if (VEC_empty (btrace_fun_p, btinfo->functions))
+  if (VEC_empty (btrace_fun_s, btinfo->functions))
     error (_("No trace."));
 
   it->btinfo = btinfo;
-  it->call_index = VEC_length (btrace_fun_p, it->btinfo->functions);
+  it->call_index = VEC_length (btrace_fun_s, it->btinfo->functions);
 }
 
 /* See btrace.h.  */
@@ -2604,7 +2613,7 @@ btrace_call_end (struct btrace_call_iterator *it,
 unsigned int
 btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
 {
-  const unsigned int length = VEC_length (btrace_fun_p, it->btinfo->functions);
+  const unsigned int length = VEC_length (btrace_fun_s, it->btinfo->functions);
 
   if (it->call_index + stride < length - 1)
     {
@@ -2634,7 +2643,7 @@ btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
 unsigned int
 btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
 {
-  const unsigned int length = VEC_length (btrace_fun_p, it->btinfo->functions);
+  const unsigned int length = VEC_length (btrace_fun_s, it->btinfo->functions);
   int steps;
 
   if (length == 0 || stride == 0)
@@ -2677,7 +2686,7 @@ btrace_find_call_by_number (struct btrace_call_iterator *it,
 			    const struct btrace_thread_info *btinfo,
 			    unsigned int number)
 {
-  const unsigned int length = VEC_length (btrace_fun_p, btinfo->functions);
+  const unsigned int length = VEC_length (btrace_fun_s, btinfo->functions);
 
   if ((number == 0) || (number > length))
     return 0;
diff --git a/gdb/btrace.h b/gdb/btrace.h
index 53df6e9..92435e7 100644
--- a/gdb/btrace.h
+++ b/gdb/btrace.h
@@ -187,8 +187,9 @@ struct btrace_function
   btrace_function_flags flags;
 };
 
-typedef struct btrace_function *btrace_fun_p;
-DEF_VEC_P (btrace_fun_p);
+/* A vector of branch trace function segments.  */
+typedef struct btrace_function btrace_fun_s;
+DEF_VEC_O (btrace_fun_s);
 
 /* A branch trace instruction iterator.  */
 struct btrace_insn_iterator
@@ -343,9 +344,10 @@ struct btrace_thread_info
   struct btrace_function *begin;
   struct btrace_function *end;
 
-  /* Vector of pointer to decoded function segments.  These are in execution
-     order with the first element == BEGIN and the last element == END.  */
-  VEC (btrace_fun_p) *functions;
+  /* Vector of decoded function call segments in execution flow order.  Note
+     that the numbering for btrace function segments starts with 1, so function
+     call segment i will be at index (i - 1).  */
+  VEC (btrace_fun_s) *functions;
 
   /* The function level offset.  When added to each function's LEVEL,
      this normalizes the function levels such that the smallest level
diff --git a/gdb/record-btrace.c b/gdb/record-btrace.c
index 83e65e7..87fbcba 100644
--- a/gdb/record-btrace.c
+++ b/gdb/record-btrace.c
@@ -1692,7 +1692,7 @@ record_btrace_frame_sniffer (const struct frame_unwind *self,
 
       replay = tp->btrace.replay;
       if (replay != NULL)
-	bfun = VEC_index (btrace_fun_p, tp->btrace.functions,
+	bfun = VEC_index (btrace_fun_s, tp->btrace.functions,
 			  replay->call_index);
     }
   else
-- 
2.7.4

^ permalink raw reply	[flat|nested] 20+ messages in thread

* [PATCH 03/11] btrace: Add btinfo to instruction iterator.
  2017-02-17 13:27 [PATCH 00/11] btrace: Turn linked list of function call segments into vector Tim Wiederhake
                   ` (2 preceding siblings ...)
  2017-02-17 13:27 ` [PATCH 04/11] btrace: Use function segment index in call iterator Tim Wiederhake
@ 2017-02-17 13:27 ` Tim Wiederhake
  2017-02-24  9:32   ` Metzger, Markus T
  2017-02-17 13:27 ` [PATCH 11/11] [SQUASH] btrace: Cleanup Tim Wiederhake
                   ` (7 subsequent siblings)
  11 siblings, 1 reply; 20+ messages in thread
From: Tim Wiederhake @ 2017-02-17 13:27 UTC (permalink / raw)
  To: gdb-patches; +Cc: markus.t.metzger

2017-02-17  Tim Wiederhake  <tim.wiederhake@intel.com>

gdb/ChangeLog:

	* btrace.c (btrace_insn_begin, btrace_insn_end,
	btrace_find_insn_by_number): Added btinfo to iterator.
	* btrace.h (struct btrace_insn_iterator): Added btinfo.


---
 gdb/btrace.c | 3 +++
 gdb/btrace.h | 3 +++
 2 files changed, 6 insertions(+)

diff --git a/gdb/btrace.c b/gdb/btrace.c
index da8e0f7..5f0eb7a 100644
--- a/gdb/btrace.c
+++ b/gdb/btrace.c
@@ -2279,6 +2279,7 @@ btrace_insn_begin (struct btrace_insn_iterator *it,
   if (bfun == NULL)
     error (_("No trace."));
 
+  it->btinfo = btinfo;
   it->function = bfun;
   it->index = 0;
 }
@@ -2304,6 +2305,7 @@ btrace_insn_end (struct btrace_insn_iterator *it,
   if (length > 0)
     length -= 1;
 
+  it->btinfo = btinfo;
   it->function = bfun;
   it->index = length;
 }
@@ -2507,6 +2509,7 @@ btrace_find_insn_by_number (struct btrace_insn_iterator *it,
       break;
     }
 
+  it->btinfo = btinfo;
   it->function = bfun;
   it->index = number - bfun->insn_offset;
   return 1;
diff --git a/gdb/btrace.h b/gdb/btrace.h
index 07ed10c..f912b04 100644
--- a/gdb/btrace.h
+++ b/gdb/btrace.h
@@ -193,6 +193,9 @@ DEF_VEC_P (btrace_fun_p);
 /* A branch trace instruction iterator.  */
 struct btrace_insn_iterator
 {
+  /* The branch trace information for this thread.  Will never be NULL.  */
+  const struct btrace_thread_info *btinfo;
+
   /* The branch trace function segment containing the instruction.
      Will never be NULL.  */
   const struct btrace_function *function;
-- 
2.7.4

^ permalink raw reply	[flat|nested] 20+ messages in thread

* [PATCH 04/11] btrace: Use function segment index in call iterator.
  2017-02-17 13:27 [PATCH 00/11] btrace: Turn linked list of function call segments into vector Tim Wiederhake
  2017-02-17 13:27 ` [PATCH 07/11] [SQUASH] btrace: Adjust struct btrace_function::up Tim Wiederhake
  2017-02-17 13:27 ` [PATCH 10/11] [SQUASH] btrace: Remove bfun_s vector Tim Wiederhake
@ 2017-02-17 13:27 ` Tim Wiederhake
  2017-02-24  9:32   ` Metzger, Markus T
  2017-02-17 13:27 ` [PATCH 03/11] btrace: Add btinfo to instruction iterator Tim Wiederhake
                   ` (8 subsequent siblings)
  11 siblings, 1 reply; 20+ messages in thread
From: Tim Wiederhake @ 2017-02-17 13:27 UTC (permalink / raw)
  To: gdb-patches; +Cc: markus.t.metzger

2017-02-17  Tim Wiederhake  <tim.wiederhake@intel.com>

gdb/ChangeLog

	* btrace.c (btrace_ends_with_single_insn): New function.
	(btrace_call_get, btrace_call_number, btrace_call_begin,
	btrace_call_end, btrace_call_next, btrace_call_prev,
	btrace_find_call_by_number): Use
	index into call segment vector instead of pointer.
	(btrace_call_cmp): Simplify.
	* btrace.h (struct btrace_call_iterator): Replace function call segment
	pointer with index into vector.
	* record-btrace.c (record_btrace_call_history): Use index instead of
	pointer.


---
 gdb/btrace.c        | 188 +++++++++++++++++++++-------------------------------
 gdb/btrace.h        |   6 +-
 gdb/record-btrace.c |   2 +-
 3 files changed, 78 insertions(+), 118 deletions(-)

diff --git a/gdb/btrace.c b/gdb/btrace.c
index 5f0eb7a..31590ce 100644
--- a/gdb/btrace.c
+++ b/gdb/btrace.c
@@ -2515,12 +2515,30 @@ btrace_find_insn_by_number (struct btrace_insn_iterator *it,
   return 1;
 }
 
+/* Returns a non-zero value if the recording ends with a function call segment
+   that contains only a single (i.e. the current) instruction.  */
+
+static int
+btrace_ends_with_single_insn (const struct btrace_thread_info* btinfo)
+{
+  const btrace_function *bfun;
+
+  if (VEC_empty (btrace_fun_p, btinfo->functions))
+    return 0;
+
+  bfun = VEC_last (btrace_fun_p, btinfo->functions);
+  return ftrace_call_num_insn (bfun) == 1;
+}
+
 /* See btrace.h.  */
 
 const struct btrace_function *
 btrace_call_get (const struct btrace_call_iterator *it)
 {
-  return it->function;
+  if (it->call_index >= VEC_length (btrace_fun_p, it->btinfo->functions))
+    return NULL;
+
+  return VEC_index (btrace_fun_p, it->btinfo->functions, it->call_index);
 }
 
 /* See btrace.h.  */
@@ -2528,28 +2546,12 @@ btrace_call_get (const struct btrace_call_iterator *it)
 unsigned int
 btrace_call_number (const struct btrace_call_iterator *it)
 {
-  const struct btrace_thread_info *btinfo;
-  const struct btrace_function *bfun;
-  unsigned int insns;
-
-  btinfo = it->btinfo;
-  bfun = it->function;
-  if (bfun != NULL)
-    return bfun->number;
-
-  /* For the end iterator, i.e. bfun == NULL, we return one more than the
-     number of the last function.  */
-  bfun = btinfo->end;
-  insns = VEC_length (btrace_insn_s, bfun->insn);
+  const unsigned int length = VEC_length (btrace_fun_p, it->btinfo->functions);
 
-  /* If the function contains only a single instruction (i.e. the current
-     instruction), it will be skipped and its number is already the number
-     we seek.  */
-  if (insns == 1)
-    return bfun->number;
+  if ((it->call_index == length) && btrace_ends_with_single_insn (it->btinfo))
+    return length;
 
-  /* Otherwise, return one more than the number of the last function.  */
-  return bfun->number + 1;
+  return it->call_index + 1;
 }
 
 /* See btrace.h.  */
@@ -2558,14 +2560,15 @@ void
 btrace_call_begin (struct btrace_call_iterator *it,
 		   const struct btrace_thread_info *btinfo)
 {
-  const struct btrace_function *bfun;
-
-  bfun = btinfo->begin;
-  if (bfun == NULL)
+  if (VEC_empty (btrace_fun_p, btinfo->functions))
     error (_("No trace."));
 
   it->btinfo = btinfo;
-  it->function = bfun;
+  it->call_index = 0;
+
+  if ((VEC_length (btrace_fun_p, it->btinfo->functions) == 1)
+      && (btrace_ends_with_single_insn (btinfo)))
+    it->call_index = 1;
 }
 
 /* See btrace.h.  */
@@ -2574,14 +2577,11 @@ void
 btrace_call_end (struct btrace_call_iterator *it,
 		 const struct btrace_thread_info *btinfo)
 {
-  const struct btrace_function *bfun;
-
-  bfun = btinfo->end;
-  if (bfun == NULL)
+  if (VEC_empty (btrace_fun_p, btinfo->functions))
     error (_("No trace."));
 
   it->btinfo = btinfo;
-  it->function = NULL;
+  it->call_index = VEC_length (btrace_fun_p, it->btinfo->functions);
 }
 
 /* See btrace.h.  */
@@ -2589,35 +2589,29 @@ btrace_call_end (struct btrace_call_iterator *it,
 unsigned int
 btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
 {
-  const struct btrace_function *bfun;
-  unsigned int steps;
+  const unsigned int length = VEC_length (btrace_fun_p, it->btinfo->functions);
 
-  bfun = it->function;
-  steps = 0;
-  while (bfun != NULL)
+  if (it->call_index + stride < length - 1)
     {
-      const struct btrace_function *next;
-      unsigned int insns;
-
-      next = bfun->flow.next;
-      if (next == NULL)
-	{
-	  /* Ignore the last function if it only contains a single
-	     (i.e. the current) instruction.  */
-	  insns = VEC_length (btrace_insn_s, bfun->insn);
-	  if (insns == 1)
-	    steps -= 1;
-	}
-
-      if (stride == steps)
-	break;
-
-      bfun = next;
-      steps += 1;
+      it->call_index += stride;
+    }
+  else if (it->call_index + stride == length - 1)
+    {
+      if (btrace_ends_with_single_insn (it->btinfo))
+	it->call_index = length;
+      else
+	it->call_index += stride;
+    }
+  else
+    {
+      if (btrace_ends_with_single_insn (it->btinfo))
+	stride = length - it->call_index - 1;
+      else
+	stride = length - it->call_index;
+      it->call_index = length;
     }
 
-  it->function = bfun;
-  return steps;
+  return stride;
 }
 
 /* See btrace.h.  */
@@ -2625,48 +2619,30 @@ btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
 unsigned int
 btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
 {
-  const struct btrace_thread_info *btinfo;
-  const struct btrace_function *bfun;
-  unsigned int steps;
+  const unsigned int length = VEC_length (btrace_fun_p, it->btinfo->functions);
+  int steps;
+
+  if (length == 0 || stride == 0)
+    return 0;
 
-  bfun = it->function;
   steps = 0;
 
-  if (bfun == NULL)
+  if (it->call_index >= length)
     {
-      unsigned int insns;
-
-      btinfo = it->btinfo;
-      bfun = btinfo->end;
-      if (bfun == NULL)
-	return 0;
-
-      /* Ignore the last function if it only contains a single
-	 (i.e. the current) instruction.  */
-      insns = VEC_length (btrace_insn_s, bfun->insn);
-      if (insns == 1)
-	bfun = bfun->flow.prev;
-
-      if (bfun == NULL)
-	return 0;
+      if (btrace_ends_with_single_insn (it->btinfo))
+	it->call_index = length - 2;
+      else
+	it->call_index = length - 1;
 
-      steps += 1;
+      steps = 1;
+      stride -= 1;
     }
 
-  while (steps < stride)
-    {
-      const struct btrace_function *prev;
+  if (it->call_index < stride)
+    stride = it->call_index;
 
-      prev = bfun->flow.prev;
-      if (prev == NULL)
-	break;
-
-      bfun = prev;
-      steps += 1;
-    }
-
-  it->function = bfun;
-  return steps;
+  it->call_index -= stride;
+  return steps + stride;
 }
 
 /* See btrace.h.  */
@@ -2675,12 +2651,8 @@ int
 btrace_call_cmp (const struct btrace_call_iterator *lhs,
 		 const struct btrace_call_iterator *rhs)
 {
-  unsigned int lnum, rnum;
-
-  lnum = btrace_call_number (lhs);
-  rnum = btrace_call_number (rhs);
-
-  return (int) (lnum - rnum);
+  gdb_assert (lhs->btinfo == rhs->btinfo);
+  return (int) (lhs->call_index - rhs->call_index);
 }
 
 /* See btrace.h.  */
@@ -2690,26 +2662,14 @@ btrace_find_call_by_number (struct btrace_call_iterator *it,
 			    const struct btrace_thread_info *btinfo,
 			    unsigned int number)
 {
-  const struct btrace_function *bfun;
+  const unsigned int length = VEC_length (btrace_fun_p, btinfo->functions);
 
-  for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
-    {
-      unsigned int bnum;
-
-      bnum = bfun->number;
-      if (number == bnum)
-	{
-	  it->btinfo = btinfo;
-	  it->function = bfun;
-	  return 1;
-	}
-
-      /* Functions are ordered and numbered consecutively.  We could bail out
-	 earlier.  On the other hand, it is very unlikely that we search for
-	 a nonexistent function.  */
-  }
+  if ((number == 0) || (number > length))
+    return 0;
 
-  return 0;
+  it->btinfo = btinfo;
+  it->call_index = number - 1;
+  return 1;
 }
 
 /* See btrace.h.  */
diff --git a/gdb/btrace.h b/gdb/btrace.h
index f912b04..c49b114 100644
--- a/gdb/btrace.h
+++ b/gdb/btrace.h
@@ -210,9 +210,9 @@ struct btrace_call_iterator
   /* The branch trace information for this thread.  Will never be NULL.  */
   const struct btrace_thread_info *btinfo;
 
-  /* The branch trace function segment.
-     This will be NULL for the iterator pointing to the end of the trace.  */
-  const struct btrace_function *function;
+  /* The index of the function call segment in struct btrace_thread_info's
+     FUNCTIONS vector.  Note that index + 1 == number.  */
+  unsigned int call_index;
 };
 
 /* Branch trace iteration state for "record instruction-history".  */
diff --git a/gdb/record-btrace.c b/gdb/record-btrace.c
index f7683f2..ba83be0 100644
--- a/gdb/record-btrace.c
+++ b/gdb/record-btrace.c
@@ -1110,8 +1110,8 @@ record_btrace_call_history (struct target_ops *self, int size, int int_flags)
       replay = btinfo->replay;
       if (replay != NULL)
 	{
-	  begin.function = replay->function;
 	  begin.btinfo = btinfo;
+	  begin.call_index = replay->function->number - 1;
 	}
       else
 	btrace_call_end (&begin, btinfo);
-- 
2.7.4

^ permalink raw reply	[flat|nested] 20+ messages in thread

* RE: [PATCH 05/11] btrace: Use function segment index in insn iterator.
  2017-02-17 13:27 ` [PATCH 05/11] btrace: Use function segment index in insn iterator Tim Wiederhake
@ 2017-02-24  9:32   ` Metzger, Markus T
  0 siblings, 0 replies; 20+ messages in thread
From: Metzger, Markus T @ 2017-02-24  9:32 UTC (permalink / raw)
  To: Wiederhake, Tim, gdb-patches

> -----Original Message-----
> From: Wiederhake, Tim
> Sent: Friday, February 17, 2017 2:26 PM
> To: gdb-patches@sourceware.org
> Cc: Metzger, Markus T <markus.t.metzger@intel.com>
> Subject: [PATCH 05/11] btrace: Use function segment index in insn iterator.

Hello Tim,

 
> 2017-02-17  Tim Wiederhake  <tim.wiederhake@intel.com>
> 
> gdb/ChangeLog
> 	* btrace.c: (btrace_insn_get, btrace_insn_get_error,
> btrace_insn_number,
> 	btrace_insn_begin, btrace_insn_end, btrace_insn_next,
> btrace_insn_prev,
> 	btrace_find_insn_by_number): Replaced function segment pointer with
> 	index.

I think this is "Replace function segment ...".  More below.


> diff --git a/gdb/btrace.c b/gdb/btrace.c
> index 31590ce..1e110cc 100644
> --- a/gdb/btrace.c
> +++ b/gdb/btrace.c
> @@ -2236,8 +2236,8 @@ btrace_insn_get (const struct btrace_insn_iterator *it)
>    const struct btrace_function *bfun;
>    unsigned int index, end;
> 
> -  index = it->index;
> -  bfun = it->function;
> +  index = it->insn_index;
> +  bfun = VEC_index (btrace_fun_p, it->btinfo->functions, it->call_index);

Should we assert that IT->CALL_INDEX lies inside the vector's bounds?  More below.


> diff --git a/gdb/btrace.h b/gdb/btrace.h
> index c49b114..53df6e9 100644
> --- a/gdb/btrace.h
> +++ b/gdb/btrace.h
> @@ -196,12 +196,12 @@ struct btrace_insn_iterator
>    /* The branch trace information for this thread.  Will never be NULL.  */
>    const struct btrace_thread_info *btinfo;
> 
> -  /* The branch trace function segment containing the instruction.
> -     Will never be NULL.  */
> -  const struct btrace_function *function;
> +  /* The index of the function call segment in struct btrace_thread_info's
> +     FUNCTIONS vector.  Note that index + 1 == number.  */
> +  unsigned int call_index;

The comment is really referring to the iterator's BTINFO field, isn't it?  Why
not say "The index in BTINFO->FUNCTIONS".  I don't think we need the note
on how the index relates to the function number, here.

With this change, the btrace_insn_iterator really contains all the fields of
btrace_call_iterator.  Should we make it actually contain a btrace_call_iterator?

It might simplify the above code in that it allows us to use btrace_call_get instead
of accessing the BTINFO->FUNCTIONS vector directly.


> @@ -1692,7 +1692,8 @@ record_btrace_frame_sniffer (const struct
> frame_unwind *self,
> 
>        replay = tp->btrace.replay;
>        if (replay != NULL)
> -	bfun = replay->function;
> +	bfun = VEC_index (btrace_fun_p, tp->btrace.functions,
> +			  replay->call_index);

We should use REPLAY->BTINFO or, even better, btrace_call_get.


> @@ -2705,7 +2706,7 @@ record_btrace_set_replay (struct thread_info *tp,
> 
>    btinfo = &tp->btrace;
> 
> -  if (it == NULL || it->function == NULL)
> +  if (it == NULL)
>      record_btrace_stop_replaying (tp);

IT->FUNCTION == NULL checks for the end iterator.  I don't think that we can
simply omit it.

Thanks,
Markus.

Intel Deutschland GmbH
Registered Address: Am Campeon 10-12, 85579 Neubiberg, Germany
Tel: +49 89 99 8853-0, www.intel.de
Managing Directors: Christin Eisenschmid, Christian Lamprechter
Chairperson of the Supervisory Board: Nicole Lau
Registered Office: Munich
Commercial Register: Amtsgericht Muenchen HRB 186928

^ permalink raw reply	[flat|nested] 20+ messages in thread

* RE: [PATCH 04/11] btrace: Use function segment index in call iterator.
  2017-02-17 13:27 ` [PATCH 04/11] btrace: Use function segment index in call iterator Tim Wiederhake
@ 2017-02-24  9:32   ` Metzger, Markus T
  0 siblings, 0 replies; 20+ messages in thread
From: Metzger, Markus T @ 2017-02-24  9:32 UTC (permalink / raw)
  To: Wiederhake, Tim, gdb-patches

> -----Original Message-----
> From: Wiederhake, Tim
> Sent: Friday, February 17, 2017 2:26 PM
> To: gdb-patches@sourceware.org
> Cc: Metzger, Markus T <markus.t.metzger@intel.com>
> Subject: [PATCH 04/11] btrace: Use function segment index in call iterator.

Hello Tim,

> 
> 2017-02-17  Tim Wiederhake  <tim.wiederhake@intel.com>
> 
> gdb/ChangeLog
> 
> 	* btrace.c (btrace_ends_with_single_insn): New function.
> 	(btrace_call_get, btrace_call_number, btrace_call_begin,
> 	btrace_call_end, btrace_call_next, btrace_call_prev,
> 	btrace_find_call_by_number): Use
> 	index into call segment vector instead of pointer.

Most, if not all, should still fit onto the previous line.


> +static int
> +btrace_ends_with_single_insn (const struct btrace_thread_info* btinfo)

The space ' ' goes on the other side of the '*', i.e. "const struct btrace_thread_info *btinfo".


> +{
> +  const btrace_function *bfun;
> +
> +  if (VEC_empty (btrace_fun_p, btinfo->functions))
> +    return 0;
> +
> +  bfun = VEC_last (btrace_fun_p, btinfo->functions);
> +  return ftrace_call_num_insn (bfun) == 1;

Shouldn't we check for gaps?  They also count as one instruction.

 
>  /* See btrace.h.  */
> @@ -2528,28 +2546,12 @@ btrace_call_get (const struct btrace_call_iterator *it)
>  unsigned int
>  btrace_call_number (const struct btrace_call_iterator *it)
>  {
> -  const struct btrace_thread_info *btinfo;
> -  const struct btrace_function *bfun;
> -  unsigned int insns;
> -
> -  btinfo = it->btinfo;
> -  bfun = it->function;
> -  if (bfun != NULL)
> -    return bfun->number;
> -
> -  /* For the end iterator, i.e. bfun == NULL, we return one more than the
> -     number of the last function.  */
> -  bfun = btinfo->end;
> -  insns = VEC_length (btrace_insn_s, bfun->insn);
> +  const unsigned int length = VEC_length (btrace_fun_p, it->btinfo->functions);
> 
> -  /* If the function contains only a single instruction (i.e. the current
> -     instruction), it will be skipped and its number is already the number
> -     we seek.  */
> -  if (insns == 1)
> -    return bfun->number;
> +  if ((it->call_index == length) && btrace_ends_with_single_insn (it->btinfo))
> +    return length;

Please leave the comment or modify it to better match the new code.  This is
otherwise quite hard to read.


>  /* See btrace.h.  */
> @@ -2558,14 +2560,15 @@ void
>  btrace_call_begin (struct btrace_call_iterator *it,
>  		   const struct btrace_thread_info *btinfo)
>  {
> -  const struct btrace_function *bfun;
> -
> -  bfun = btinfo->begin;
> -  if (bfun == NULL)
> +  if (VEC_empty (btrace_fun_p, btinfo->functions))
>      error (_("No trace."));
> 
>    it->btinfo = btinfo;
> -  it->function = bfun;
> +  it->call_index = 0;
> +
> +  if ((VEC_length (btrace_fun_p, it->btinfo->functions) == 1)
> +      && (btrace_ends_with_single_insn (btinfo)))
> +    it->call_index = 1;

We didn't have such a check before.  Why is this needed?  Was this a bug before?


>  /* See btrace.h.  */
> @@ -2589,35 +2589,29 @@ btrace_call_end (struct btrace_call_iterator *it,
>  unsigned int
>  btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
>  {
> -  const struct btrace_function *bfun;
> -  unsigned int steps;
> +  const unsigned int length = VEC_length (btrace_fun_p, it->btinfo->functions);
> 
> -  bfun = it->function;
> -  steps = 0;
> -  while (bfun != NULL)
> +  if (it->call_index + stride < length - 1)
>      {
> -      const struct btrace_function *next;
> -      unsigned int insns;
> -
> -      next = bfun->flow.next;
> -      if (next == NULL)
> -	{
> -	  /* Ignore the last function if it only contains a single
> -	     (i.e. the current) instruction.  */
> -	  insns = VEC_length (btrace_insn_s, bfun->insn);
> -	  if (insns == 1)
> -	    steps -= 1;
> -	}
> -
> -      if (stride == steps)
> -	break;
> -
> -      bfun = next;
> -      steps += 1;
> +      it->call_index += stride;
> +    }

No {} if there's a single statement left.


> +  else if (it->call_index + stride == length - 1)
> +    {
> +      if (btrace_ends_with_single_insn (it->btinfo))
> +	it->call_index = length;
> +      else
> +	it->call_index += stride;
> +    }
> +  else
> +    {
> +      if (btrace_ends_with_single_insn (it->btinfo))
> +	stride = length - it->call_index - 1;
> +      else
> +	stride = length - it->call_index;
> +      it->call_index = length;
>      }

Can we merge the two last cases?

Please add a comment explaining that we're ignoring that last function segment
if it only contains the current instruction.


>  /* See btrace.h.  */
> @@ -2625,48 +2619,30 @@ btrace_call_next (struct btrace_call_iterator *it,
> unsigned int stride)
>  unsigned int
>  btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
>  {
> -  const struct btrace_thread_info *btinfo;
> -  const struct btrace_function *bfun;
> -  unsigned int steps;
> +  const unsigned int length = VEC_length (btrace_fun_p, it->btinfo->functions);
> +  int steps;
> +
> +  if (length == 0 || stride == 0)
> +    return 0;
> 
> -  bfun = it->function;
>    steps = 0;
> 
> -  if (bfun == NULL)
> +  if (it->call_index >= length)

"> length" is really an internal error, isn't it?


>      {
> -      unsigned int insns;
> -
> -      btinfo = it->btinfo;
> -      bfun = btinfo->end;
> -      if (bfun == NULL)
> -	return 0;
> -
> -      /* Ignore the last function if it only contains a single
> -	 (i.e. the current) instruction.  */
> -      insns = VEC_length (btrace_insn_s, bfun->insn);
> -      if (insns == 1)
> -	bfun = bfun->flow.prev;
> -
> -      if (bfun == NULL)
> -	return 0;
> +      if (btrace_ends_with_single_insn (it->btinfo))
> +	it->call_index = length - 2;
> +      else
> +	it->call_index = length - 1;

Please keep the comment or add a new one to explain why we're doing this.

LENGTH - 2 may be negative (or, rather, very big) if the trace only contained
the current instruction.


> -      steps += 1;
> +      steps = 1;
> +      stride -= 1;
>      }

Please add a comment to explain this.


> -  while (steps < stride)
> -    {
> -      const struct btrace_function *prev;
> +  if (it->call_index < stride)
> +    stride = it->call_index;
> 
> -      prev = bfun->flow.prev;
> -      if (prev == NULL)
> -	break;
> -
> -      bfun = prev;
> -      steps += 1;
> -    }
> -
> -  it->function = bfun;
> -  return steps;
> +  it->call_index -= stride;
> +  return steps + stride;
>  }

I think I understand what this is doing but it wasn't obvious.  Maybe the
comment on the STEPS and STRIDE adjustment above suffices to explain it.


>  /* Branch trace iteration state for "record instruction-history".  */
> diff --git a/gdb/record-btrace.c b/gdb/record-btrace.c
> index f7683f2..ba83be0 100644
> --- a/gdb/record-btrace.c
> +++ b/gdb/record-btrace.c
> @@ -1110,8 +1110,8 @@ record_btrace_call_history (struct target_ops *self, int
> size, int int_flags)
>        replay = btinfo->replay;
>        if (replay != NULL)
>  	{
> -	  begin.function = replay->function;
>  	  begin.btinfo = btinfo;
> +	  begin.call_index = replay->function->number - 1;
>  	}

No {} when there's only one statement left.

Thanks,
Markus.

Intel Deutschland GmbH
Registered Address: Am Campeon 10-12, 85579 Neubiberg, Germany
Tel: +49 89 99 8853-0, www.intel.de
Managing Directors: Christin Eisenschmid, Christian Lamprechter
Chairperson of the Supervisory Board: Nicole Lau
Registered Office: Munich
Commercial Register: Amtsgericht Muenchen HRB 186928

^ permalink raw reply	[flat|nested] 20+ messages in thread

* RE: [PATCH 00/11] btrace: Turn linked list of function call segments into vector
  2017-02-17 13:27 [PATCH 00/11] btrace: Turn linked list of function call segments into vector Tim Wiederhake
                   ` (10 preceding siblings ...)
  2017-02-17 13:27 ` [PATCH 02/11] btrace: Change parameters to use btrace_thread_info Tim Wiederhake
@ 2017-02-24  9:32 ` Metzger, Markus T
  11 siblings, 0 replies; 20+ messages in thread
From: Metzger, Markus T @ 2017-02-24  9:32 UTC (permalink / raw)
  To: Wiederhake, Tim, gdb-patches

> -----Original Message-----
> From: Wiederhake, Tim
> Sent: Friday, February 17, 2017 2:26 PM
> To: gdb-patches@sourceware.org
> Cc: Metzger, Markus T <markus.t.metzger@intel.com>
> Subject: [PATCH 00/11] btrace: Turn linked list of function call segments into
> vector

Hello Tim,

Thanks for your patches.  This makes the trace representation a lot more compact.


> this series removes the extra list of btrace function call segments in struct
> btrace_thread_info.  To achieve this, the doubly linked list of function call
> segments in struct btrace_thread_info is replaced by a (GDB) vector.  In some
> instances, struct btrace_thread_info is initialized by memset'ing it to 0x00,
> so we can't use std::vector (yet).

Feel free to submit further patches to fix that ;-)


> Patches 6 to 11 are actually only one patch that is split up for easier review.
> As we push more function call segments in the vector when we decode the trace,
> the vector may run out of space and reallocate, rendering all pointers invalid
> and preventing incremental change from pointer usage to index usage.

Those smaller patches are indeed much easier to review.  I'm wondering if we could
keep the smaller patches.  They're nice as future reference and they would make a
bisect easier, as well.

We can't afford to temporarily resize the vector, though.  But if we could get rid of
the function segment pointers earlier in the series and replace them with function
numbers, this may not be necessary.  See a few more comments in this direction in
replies to individual patches.

Thanks,
Markus.

Intel Deutschland GmbH
Registered Address: Am Campeon 10-12, 85579 Neubiberg, Germany
Tel: +49 89 99 8853-0, www.intel.de
Managing Directors: Christin Eisenschmid, Christian Lamprechter
Chairperson of the Supervisory Board: Nicole Lau
Registered Office: Munich
Commercial Register: Amtsgericht Muenchen HRB 186928

^ permalink raw reply	[flat|nested] 20+ messages in thread

* RE: [PATCH 02/11] btrace: Change parameters to use btrace_thread_info.
  2017-02-17 13:27 ` [PATCH 02/11] btrace: Change parameters to use btrace_thread_info Tim Wiederhake
@ 2017-02-24  9:32   ` Metzger, Markus T
  0 siblings, 0 replies; 20+ messages in thread
From: Metzger, Markus T @ 2017-02-24  9:32 UTC (permalink / raw)
  To: Wiederhake, Tim, gdb-patches

> -----Original Message-----
> From: Wiederhake, Tim
> Sent: Friday, February 17, 2017 2:26 PM
> To: gdb-patches@sourceware.org
> Cc: Metzger, Markus T <markus.t.metzger@intel.com>
> Subject: [PATCH 02/11] btrace: Change parameters to use btrace_thread_info.

Hello Tim,

> gdb/ChangeLog:
> 	* btrace.c (ftrace_new_function, ftrace_fixup_caller, ftrace_new_call,
> 	ftrace_new_tailcall, ftrace_find_caller, ftrace_find_call,
> 	ftrace_new_return, ftrace_new_switch, ftrace_new_gap,
> 	ftrace_update_function, ftrace_update_insns, ftrace_connect_bfun,
> 	ftrace_connect_backtrace, ftrace_bridge_gap,
> btrace_compute_ftrace_bts,
> 	ftrace_add_pt, btrace_compute_ftrace_pt): Changed to use struct
> 	btrace_thread_info * as parameter. Adjusted comments where

Two spaces after '.'.


>    bfun->msym = mfun;
> @@ -258,7 +259,8 @@ ftrace_update_caller (struct btrace_function *bfun,
>  /* Fix up the caller for all segments of a function.  */
> 
>  static void
> -ftrace_fixup_caller (struct btrace_function *bfun,
> +ftrace_fixup_caller (struct btrace_thread_info *btinfo,
> +		     struct btrace_function *bfun,
>  		     struct btrace_function *caller,
>  		     enum btrace_function_flag flags)
>  {

I assume the new argument will be used in later patches.


> @@ -275,18 +277,17 @@ ftrace_fixup_caller (struct btrace_function *bfun,
>  }
> 
>  /* Add a new function segment for a call.
> -   CALLER is the chronologically preceding function segment.
>     MFUN and FUN are the symbol information we have for this function.  */

Please add 'at the end of the trace' like you did for ftrace_new_function.


>    ftrace_debug (bfun, "new call");
> @@ -295,18 +296,17 @@ ftrace_new_call (struct btrace_function *caller,
>  }
> 
>  /* Add a new function segment for a tail call.
> -   CALLER is the chronologically preceding function segment.
>     MFUN and FUN are the symbol information we have for this function.  */

Same here.  More instances below.

 
> -/* Add the instruction at PC to BFUN's instructions.  */
> +/* Add the instruction at PC to the instructions of the current function call
> +   segment at the end of the trace.  */

Just "function segment" without the "call".


> @@ -1017,7 +1029,7 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
>  	      break;
>  	    }
> 
> -	  btinfo->end = ftrace_update_function (btinfo->end, pc);
> +	  btinfo->end = ftrace_update_function (btinfo, pc);
>  	  if (btinfo->begin == NULL)
>  	    btinfo->begin = btinfo->end;

It would be cleaner if we moved this BTINFO->BEGIN update into ftrace_new_function.

Patch 9 will remove BTINFO->BEGIN/END but this would allow us to get rid of most of
the btrace_function pointers before patch 5.  We may not need to squash everything,
which may help later bisects.

We really only need btrace_function pointers for maintaining the list of gaps and the
function level adjustment, and for fixing up callers.  The latter two don't generate new
functions, so we're safe.  And for gaps, we have to switch to indices, anyway.

Thanks,
Markus.

Intel Deutschland GmbH
Registered Address: Am Campeon 10-12, 85579 Neubiberg, Germany
Tel: +49 89 99 8853-0, www.intel.de
Managing Directors: Christin Eisenschmid, Christian Lamprechter
Chairperson of the Supervisory Board: Nicole Lau
Registered Office: Munich
Commercial Register: Amtsgericht Muenchen HRB 186928

^ permalink raw reply	[flat|nested] 20+ messages in thread

* RE: [PATCH 03/11] btrace: Add btinfo to instruction iterator.
  2017-02-17 13:27 ` [PATCH 03/11] btrace: Add btinfo to instruction iterator Tim Wiederhake
@ 2017-02-24  9:32   ` Metzger, Markus T
  0 siblings, 0 replies; 20+ messages in thread
From: Metzger, Markus T @ 2017-02-24  9:32 UTC (permalink / raw)
  To: Wiederhake, Tim, gdb-patches

> -----Original Message-----
> From: Wiederhake, Tim
> Sent: Friday, February 17, 2017 2:26 PM
> To: gdb-patches@sourceware.org
> Cc: Metzger, Markus T <markus.t.metzger@intel.com>
> Subject: [PATCH 03/11] btrace: Add btinfo to instruction iterator.

Hello Tim,

> 	* btrace.c (btrace_insn_begin, btrace_insn_end,
> 	btrace_find_insn_by_number): Added btinfo to iterator.
> 	* btrace.h (struct btrace_insn_iterator): Added btinfo.

I think this is "Add foo to bar".


Looks good to me, otherwise.

Thanks,
Markus.
Intel Deutschland GmbH
Registered Address: Am Campeon 10-12, 85579 Neubiberg, Germany
Tel: +49 89 99 8853-0, www.intel.de
Managing Directors: Christin Eisenschmid, Christian Lamprechter
Chairperson of the Supervisory Board: Nicole Lau
Registered Office: Munich
Commercial Register: Amtsgericht Muenchen HRB 186928

^ permalink raw reply	[flat|nested] 20+ messages in thread

* RE: [PATCH 06/11] [SQUASH] btrace: Save function calls in a vector.
  2017-02-17 13:27 ` [PATCH 06/11] [SQUASH] btrace: Save function calls in a vector Tim Wiederhake
@ 2017-02-24  9:33   ` Metzger, Markus T
  0 siblings, 0 replies; 20+ messages in thread
From: Metzger, Markus T @ 2017-02-24  9:33 UTC (permalink / raw)
  To: Wiederhake, Tim, gdb-patches

> -----Original Message-----
> From: Wiederhake, Tim
> Sent: Friday, February 17, 2017 2:26 PM
> To: gdb-patches@sourceware.org
> Cc: Metzger, Markus T <markus.t.metzger@intel.com>
> Subject: [PATCH 06/11] [SQUASH] btrace: Save function calls in a vector.

Hello Tim,

> This patch stands alone for easier review and is meant to be squashed together
> for committing.  ChangeLog will be added to the squashed commit.

Thanks.


>  /* Allocate and initialize a new branch trace function segment at the end of
>     the trace.  MFUN and FUN are the symbol information we have for this
> -   function.  */
> +   function.  This invalidates all struct btrace_function pointers held.  */

I understand that this is necessary but if we could get rid of those pointers before,
it wouldn't be that bad.  See my comments on patch 2.

Not sure if it really works but if it does, we could avoid the temporary resizing
of the functions vector and leave those patches separate.


>  static struct btrace_function *
>  ftrace_new_function (struct btrace_thread_info *btinfo,
>  		     struct minimal_symbol *mfun,
>  		     struct symbol *fun)
>  {
> -  struct btrace_function *prev, *bfun;
> +  struct btrace_function *prev = NULL, *bfun;
> 
> -  prev = btinfo->end;
> -  bfun = XCNEW (struct btrace_function);
> +  if (!VEC_empty (btrace_fun_s, btinfo->functions))
> +    prev = VEC_last (btrace_fun_s, btinfo->functions);
> +
> +  bfun = VEC_safe_push (btrace_fun_s, btinfo->functions, NULL);
> +  memset (bfun, 0, sizeof (*bfun));

This assumes that we're not reallocating the vector.  That's why we allocate a huge
vector initially.

We could also just take VEC_length, then add the new element, and refer to its
predecessor via the stored length - or use VEC_length - 1 after adding the new
element.

We'd still break other pointers that someone stores across that call, but at least
this function would be OK.  We can probably take care of (most) other pointers,
as well.


>    /* We're done if we failed to enable tracing.  */
> @@ -1550,6 +1557,7 @@ btrace_disable (struct thread_info *tp)
>    btp->target = NULL;
> 
>    btrace_clear (tp);
> +  VEC_free (btrace_fun_s, btp->functions);

Shouldn't we do this in btrace_clear?

  
> +  VEC_truncate (btrace_fun_s, btinfo->functions, 0);

Should this be VEC_free?
 
  
>  /* A branch trace instruction iterator.  */
>  struct btrace_insn_iterator
> @@ -343,9 +344,10 @@ struct btrace_thread_info
>    struct btrace_function *begin;
>    struct btrace_function *end;
> 
> -  /* Vector of pointer to decoded function segments.  These are in execution
> -     order with the first element == BEGIN and the last element == END.  */
> -  VEC (btrace_fun_p) *functions;
> +  /* Vector of decoded function call segments in execution flow order.  Note
> +     that the numbering for btrace function segments starts with 1, so function
> +     call segment i will be at index (i - 1).  */
> +  VEC (btrace_fun_s) *functions;

"Vector of function segments in ...".

Thanks,
Markus.

Intel Deutschland GmbH
Registered Address: Am Campeon 10-12, 85579 Neubiberg, Germany
Tel: +49 89 99 8853-0, www.intel.de
Managing Directors: Christin Eisenschmid, Christian Lamprechter
Chairperson of the Supervisory Board: Nicole Lau
Registered Office: Munich
Commercial Register: Amtsgericht Muenchen HRB 186928

^ permalink raw reply	[flat|nested] 20+ messages in thread

* RE: [PATCH 07/11] [SQUASH] btrace: Adjust struct btrace_function::up.
  2017-02-17 13:27 ` [PATCH 07/11] [SQUASH] btrace: Adjust struct btrace_function::up Tim Wiederhake
@ 2017-02-24  9:33   ` Metzger, Markus T
  0 siblings, 0 replies; 20+ messages in thread
From: Metzger, Markus T @ 2017-02-24  9:33 UTC (permalink / raw)
  To: Wiederhake, Tim, gdb-patches

> -----Original Message-----
> From: Wiederhake, Tim
> Sent: Friday, February 17, 2017 2:26 PM
> To: gdb-patches@sourceware.org
> Cc: Metzger, Markus T <markus.t.metzger@intel.com>
> Subject: [PATCH 07/11] [SQUASH] btrace: Adjust struct btrace_function::up.

Hello Tim,

> +static struct btrace_function *
> +ftrace_find_call_by_number (const struct btrace_thread_info *btinfo,
> +			    unsigned int number)
> +{
> +  if (number == 0 || number > VEC_length (btrace_fun_s, btinfo->functions))
> +    return NULL;
> +
> +  return VEC_index (btrace_fun_s, btinfo->functions, number - 1);
> +}

This new function needs a comment.


> -/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
> -   symbol information.  */
> +/* Find the innermost caller with MFUN/FUN symbol information in the back
> trace
> +   of the function call segment with number NUMBER.  */

Just "function segment" without the "call".  More below.


>  static struct btrace_function *
>  ftrace_find_caller (struct btrace_thread_info *btinfo,
> -		    struct btrace_function *bfun,
> +		    unsigned int number,
>  		    struct minimal_symbol *mfun,
>  		    struct symbol *fun)
>  {
> -  for (; bfun != NULL; bfun = bfun->up)
> +  struct btrace_function *bfun;
> +
> +  while ((bfun = ftrace_find_call_by_number (btinfo, number)) != NULL)
>      {
> -      /* Skip functions with incompatible symbol information.  */
> -      if (ftrace_function_switched (bfun, mfun, fun))
> -	continue;
> +      if (!ftrace_function_switched (bfun, mfun, fun))
> +	break;
> 
> -      /* This is the function segment we're looking for.  */
> -      break;
> +      number = bfun->up;
>      }
> 
>    return bfun;
>  }

Eventually, I think we'd want most functions to take and return indices or function
numbers instead of btrace_function pointers.  They can get the actual btrace_function
in the body.

This will reduce the lifetime of btrace_function pointers and make it easier to deal
with vector reallocations.

If we added helpers to get the up, segment.prev, and segment.next numbers from a
function number, most won't need a btrace_function pointer, at all.  They could work
solely on the function numbers.


> -  caller = bfun->up;
> -  if (caller == NULL)
> +  if (bfun->up == 0)
>      throw_error (NOT_AVAILABLE_ERROR,
>  		 _("No caller in btrace record history"));
> 
> +  caller = VEC_index (btrace_fun_s, cache->tp->btrace.functions, bfun->up - 1);

We should add functions to btrace.h to translate function numbers into
const struct btrace_function *.

Thanks,
Markus.

Intel Deutschland GmbH
Registered Address: Am Campeon 10-12, 85579 Neubiberg, Germany
Tel: +49 89 99 8853-0, www.intel.de
Managing Directors: Christin Eisenschmid, Christian Lamprechter
Chairperson of the Supervisory Board: Nicole Lau
Registered Office: Munich
Commercial Register: Amtsgericht Muenchen HRB 186928

^ permalink raw reply	[flat|nested] 20+ messages in thread

* RE: [PATCH 08/11] [SQUASH] btrace: Adjust struct btrace_function::{flow,segment}.
  2017-02-17 13:27 ` [PATCH 08/11] [SQUASH] btrace: Adjust struct btrace_function::{flow,segment} Tim Wiederhake
@ 2017-02-24  9:33   ` Metzger, Markus T
  0 siblings, 0 replies; 20+ messages in thread
From: Metzger, Markus T @ 2017-02-24  9:33 UTC (permalink / raw)
  To: Wiederhake, Tim, gdb-patches

> -----Original Message-----
> From: Wiederhake, Tim
> Sent: Friday, February 17, 2017 2:26 PM
> To: gdb-patches@sourceware.org
> Cc: Metzger, Markus T <markus.t.metzger@intel.com>
> Subject: [PATCH 08/11] [SQUASH] btrace: Adjust struct
> btrace_function::{flow,segment}.

Hello Tim,


> -  /* The previous and next segment belonging to the same function.
> -     If a function calls another function, the former will have at least
> -     two segments: one before the call and another after the return.  */
> -  struct btrace_func_link segment;
> -
> -  /* The previous and next function in control flow order.  */
> -  struct btrace_func_link flow;
> +  /* The function segment numbers of the previous and next segment belonging
> to
> +     the same function.  If a function calls another function, the former will
> +     have at least two segments: one before the call and another after the
> +     return.  Will be zero if there is no such function segment.  */
> +  unsigned int prev_segment;
> +  unsigned int next_segment;

Since we don't need FLOW.PREV/NEXT anymore, I'd omit _SEGMENT in the name
and just call it PREV and NEXT.  The comment will describe what they mean.

If we manage to not needing to squash the patches, I'd keep the two parts separate.

 
> -  for (; bfun != NULL; bfun = bfun->flow.next)
> -    bfun->level += adjustment;
> +  while (bfun != NULL)
> +    {
> +      bfun->level += adjustment;
> +      bfun = ftrace_find_call_by_number (btinfo, bfun->number + 1);
> +    }

Could we do a VEC_iterate instead?


>    level = INT_MAX;
> -  for (; bfun != end; bfun = bfun->flow.next)
> -    level = std::min (level, bfun->level);
> +  while (bfun != end)
> +    {
> +      level = std::min (level, bfun->level);
> +      bfun = ftrace_find_call_by_number (btinfo, bfun->number + 1);
> +    }

Same here.

Thanks,
Markus.

Intel Deutschland GmbH
Registered Address: Am Campeon 10-12, 85579 Neubiberg, Germany
Tel: +49 89 99 8853-0, www.intel.de
Managing Directors: Christin Eisenschmid, Christian Lamprechter
Chairperson of the Supervisory Board: Nicole Lau
Registered Office: Munich
Commercial Register: Amtsgericht Muenchen HRB 186928

^ permalink raw reply	[flat|nested] 20+ messages in thread

end of thread, other threads:[~2017-02-24  9:33 UTC | newest]

Thread overview: 20+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-02-17 13:27 [PATCH 00/11] btrace: Turn linked list of function call segments into vector Tim Wiederhake
2017-02-17 13:27 ` [PATCH 07/11] [SQUASH] btrace: Adjust struct btrace_function::up Tim Wiederhake
2017-02-24  9:33   ` Metzger, Markus T
2017-02-17 13:27 ` [PATCH 10/11] [SQUASH] btrace: Remove bfun_s vector Tim Wiederhake
2017-02-17 13:27 ` [PATCH 04/11] btrace: Use function segment index in call iterator Tim Wiederhake
2017-02-24  9:32   ` Metzger, Markus T
2017-02-17 13:27 ` [PATCH 03/11] btrace: Add btinfo to instruction iterator Tim Wiederhake
2017-02-24  9:32   ` Metzger, Markus T
2017-02-17 13:27 ` [PATCH 11/11] [SQUASH] btrace: Cleanup Tim Wiederhake
2017-02-17 13:27 ` [PATCH 05/11] btrace: Use function segment index in insn iterator Tim Wiederhake
2017-02-24  9:32   ` Metzger, Markus T
2017-02-17 13:27 ` [PATCH 01/11] btrace: Use struct btrace_thread_info fields directly Tim Wiederhake
2017-02-17 13:27 ` [PATCH 08/11] [SQUASH] btrace: Adjust struct btrace_function::{flow,segment} Tim Wiederhake
2017-02-24  9:33   ` Metzger, Markus T
2017-02-17 13:27 ` [PATCH 06/11] [SQUASH] btrace: Save function calls in a vector Tim Wiederhake
2017-02-24  9:33   ` Metzger, Markus T
2017-02-17 13:27 ` [PATCH 09/11] [SQUASH] btrace: Remove struct btrace_thread_info::{begin,end} Tim Wiederhake
2017-02-17 13:27 ` [PATCH 02/11] btrace: Change parameters to use btrace_thread_info Tim Wiederhake
2017-02-24  9:32   ` Metzger, Markus T
2017-02-24  9:32 ` [PATCH 00/11] btrace: Turn linked list of function call segments into vector Metzger, Markus T

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).