public inbox for gdb-patches@sourceware.org
* [PATCH v3] gdb: c++ify btrace_target_info
@ 2023-09-08 10:53 Markus Metzger
  2023-09-08 14:31 ` Simon Marchi
  0 siblings, 1 reply; 7+ messages in thread
From: Markus Metzger @ 2023-09-08 10:53 UTC (permalink / raw)
  To: gdb-patches; +Cc: simon.marchi, vries

Following the example of private_thread_info and private_inferior, turn
struct btrace_target_info into a small class hierarchy.

Fixes PR gdb/30751.
---
 gdb/nat/linux-btrace.c     | 159 +++++++++++++++++--------------------
 gdb/nat/linux-btrace.h     |  60 ++++----------
 gdb/remote.c               |  24 ++----
 gdbsupport/btrace-common.h |  18 ++++-
 4 files changed, 112 insertions(+), 149 deletions(-)
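
For readers less familiar with the private_thread_info / private_inferior
pattern, here is a rough standalone sketch of the shape of the change: a
small polymorphic base owned by common code, a target-specific derived
class, and a downcast helper at the native-target boundary.  All names
below are invented for illustration, and plain static_cast stands in for
gdb::checked_static_cast; this is not GDB code.

  #include <iostream>
  #include <memory>

  /* Common part, analogous to btrace_target_info in btrace-common.h.  */
  struct example_base_info
  {
    explicit example_base_info (int id) : id (id) {}

    /* Allow deleting derived objects through a base pointer.  */
    virtual ~example_base_info () = default;

    int id;
  };

  /* Target-specific part, analogous to linux_btrace_target_info.  */
  struct example_linux_info final : public example_base_info
  {
    explicit example_linux_info (int id) : example_base_info (id) {}

    /* E.g. a perf event file descriptor.  */
    int file = -1;
  };

  /* Downcast at the native-target boundary, analogous to
     get_linux_btrace_target_info.  */
  static example_linux_info *
  get_linux_info (example_base_info *base)
  {
    return static_cast<example_linux_info *> (base);
  }

  int
  main ()
  {
    /* Common code holds only the base pointer; the derived object is
       still destroyed correctly through the virtual destructor.  */
    std::unique_ptr<example_base_info> info (new example_linux_info (42));
    std::cout << get_linux_info (info.get ())->file << '\n';
    return 0;
  }

The same idea applies to the remote target below, which now allocates the
plain btrace_target_info base with new/delete instead of XCNEW/xfree.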

diff --git a/gdb/nat/linux-btrace.c b/gdb/nat/linux-btrace.c
index c5b3f1c93cf..672b25063de 100644
--- a/gdb/nat/linux-btrace.c
+++ b/gdb/nat/linux-btrace.c
@@ -277,7 +277,7 @@ perf_event_sample_ok (const struct perf_event_sample *sample)
    part at the end and its upper part at the beginning of the buffer.  */
 
 static std::vector<btrace_block> *
-perf_event_read_bts (struct btrace_target_info* tinfo, const uint8_t *begin,
+perf_event_read_bts (btrace_target_info *tinfo, const uint8_t *begin,
 		     const uint8_t *end, const uint8_t *start, size_t size)
 {
   std::vector<btrace_block> *btrace = new std::vector<btrace_block>;
@@ -447,12 +447,19 @@ diagnose_perf_event_open_fail ()
   error (_("Failed to start recording: %s"), safe_strerror (errno));
 }
 
+/* Get the linux version of a btrace_target_info.  */
+
+static linux_btrace_target_info *
+get_linux_btrace_target_info (btrace_target_info *gtinfo)
+{
+  return gdb::checked_static_cast<linux_btrace_target_info *> (gtinfo);
+}
+
 /* Enable branch tracing in BTS format.  */
 
 static struct btrace_target_info *
 linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
 {
-  struct btrace_tinfo_bts *bts;
   size_t size, pages;
   __u64 data_offset;
   int pid, pg;
@@ -460,31 +467,29 @@ linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
   if (!cpu_supports_bts ())
     error (_("BTS support has been disabled for the target cpu."));
 
-  gdb::unique_xmalloc_ptr<btrace_target_info> tinfo
-    (XCNEW (btrace_target_info));
-  tinfo->ptid = ptid;
+  std::unique_ptr<linux_btrace_target_info> tinfo
+    { new linux_btrace_target_info { ptid } };
 
   tinfo->conf.format = BTRACE_FORMAT_BTS;
-  bts = &tinfo->variant.bts;
 
-  bts->attr.size = sizeof (bts->attr);
-  bts->attr.type = PERF_TYPE_HARDWARE;
-  bts->attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
-  bts->attr.sample_period = 1;
+  tinfo->attr.size = sizeof (tinfo->attr);
+  tinfo->attr.type = PERF_TYPE_HARDWARE;
+  tinfo->attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
+  tinfo->attr.sample_period = 1;
 
   /* We sample from and to address.  */
-  bts->attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;
+  tinfo->attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;
 
-  bts->attr.exclude_kernel = 1;
-  bts->attr.exclude_hv = 1;
-  bts->attr.exclude_idle = 1;
+  tinfo->attr.exclude_kernel = 1;
+  tinfo->attr.exclude_hv = 1;
+  tinfo->attr.exclude_idle = 1;
 
   pid = ptid.lwp ();
   if (pid == 0)
     pid = ptid.pid ();
 
   errno = 0;
-  scoped_fd fd (syscall (SYS_perf_event_open, &bts->attr, pid, -1, -1, 0));
+  scoped_fd fd (syscall (SYS_perf_event_open, &tinfo->attr, pid, -1, -1, 0));
   if (fd.get () < 0)
     diagnose_perf_event_open_fail ();
 
@@ -552,12 +557,12 @@ linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
     }
 #endif /* defined (PERF_ATTR_SIZE_VER5) */
 
-  bts->bts.size = size;
-  bts->bts.data_head = &header->data_head;
-  bts->bts.mem = (const uint8_t *) data.release () + data_offset;
-  bts->bts.last_head = 0ull;
-  bts->header = header;
-  bts->file = fd.release ();
+  tinfo->pev.size = size;
+  tinfo->pev.data_head = &header->data_head;
+  tinfo->pev.mem = (const uint8_t *) data.release () + data_offset;
+  tinfo->pev.last_head = 0ull;
+  tinfo->header = header;
+  tinfo->file = fd.release ();
 
   tinfo->conf.bts.size = (unsigned int) size;
   return tinfo.release ();
@@ -604,7 +609,6 @@ perf_event_pt_event_type ()
 static struct btrace_target_info *
 linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
 {
-  struct btrace_tinfo_pt *pt;
   size_t pages;
   int pid, pg;
 
@@ -612,22 +616,20 @@ linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
   if (pid == 0)
     pid = ptid.pid ();
 
-  gdb::unique_xmalloc_ptr<btrace_target_info> tinfo
-    (XCNEW (btrace_target_info));
-  tinfo->ptid = ptid;
+  std::unique_ptr<linux_btrace_target_info> tinfo
+    { new linux_btrace_target_info (ptid) };
 
   tinfo->conf.format = BTRACE_FORMAT_PT;
-  pt = &tinfo->variant.pt;
 
-  pt->attr.size = sizeof (pt->attr);
-  pt->attr.type = perf_event_pt_event_type ();
+  tinfo->attr.size = sizeof (tinfo->attr);
+  tinfo->attr.type = perf_event_pt_event_type ();
 
-  pt->attr.exclude_kernel = 1;
-  pt->attr.exclude_hv = 1;
-  pt->attr.exclude_idle = 1;
+  tinfo->attr.exclude_kernel = 1;
+  tinfo->attr.exclude_hv = 1;
+  tinfo->attr.exclude_idle = 1;
 
   errno = 0;
-  scoped_fd fd (syscall (SYS_perf_event_open, &pt->attr, pid, -1, -1, 0));
+  scoped_fd fd (syscall (SYS_perf_event_open, &tinfo->attr, pid, -1, -1, 0));
   if (fd.get () < 0)
     diagnose_perf_event_open_fail ();
 
@@ -687,14 +689,14 @@ linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
   if (pages == 0)
     error (_("Failed to map trace buffer: %s."), safe_strerror (errno));
 
-  pt->pt.size = aux.size ();
-  pt->pt.mem = (const uint8_t *) aux.release ();
-  pt->pt.data_head = &header->aux_head;
-  pt->header = (struct perf_event_mmap_page *) data.release ();
-  gdb_assert (pt->header == header);
-  pt->file = fd.release ();
+  tinfo->pev.size = aux.size ();
+  tinfo->pev.mem = (const uint8_t *) aux.release ();
+  tinfo->pev.data_head = &header->aux_head;
+  tinfo->header = (struct perf_event_mmap_page *) data.release ();
+  gdb_assert (tinfo->header == header);
+  tinfo->file = fd.release ();
 
-  tinfo->conf.pt.size = (unsigned int) pt->pt.size;
+  tinfo->conf.pt.size = (unsigned int) tinfo->pev.size;
   return tinfo.release ();
 }
 
@@ -731,83 +733,74 @@ linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
 
 /* Disable BTS tracing.  */
 
-static enum btrace_error
-linux_disable_bts (struct btrace_tinfo_bts *tinfo)
+static void
+linux_disable_bts (struct linux_btrace_target_info *tinfo)
 {
-  munmap((void *) tinfo->header, tinfo->bts.size + PAGE_SIZE);
+  munmap ((void *) tinfo->header, tinfo->pev.size + PAGE_SIZE);
   close (tinfo->file);
-
-  return BTRACE_ERR_NONE;
 }
 
 /* Disable Intel Processor Trace tracing.  */
 
-static enum btrace_error
-linux_disable_pt (struct btrace_tinfo_pt *tinfo)
+static void
+linux_disable_pt (struct linux_btrace_target_info *tinfo)
 {
-  munmap((void *) tinfo->pt.mem, tinfo->pt.size);
-  munmap((void *) tinfo->header, PAGE_SIZE);
+  munmap ((void *) tinfo->pev.mem, tinfo->pev.size);
+  munmap ((void *) tinfo->header, PAGE_SIZE);
   close (tinfo->file);
-
-  return BTRACE_ERR_NONE;
 }
 
 /* See linux-btrace.h.  */
 
 enum btrace_error
-linux_disable_btrace (struct btrace_target_info *tinfo)
+linux_disable_btrace (struct btrace_target_info *gtinfo)
 {
-  enum btrace_error errcode;
+  linux_btrace_target_info *tinfo
+    = get_linux_btrace_target_info (gtinfo);
 
-  errcode = BTRACE_ERR_NOT_SUPPORTED;
   switch (tinfo->conf.format)
     {
     case BTRACE_FORMAT_NONE:
-      break;
+      return BTRACE_ERR_NOT_SUPPORTED;
 
     case BTRACE_FORMAT_BTS:
-      errcode = linux_disable_bts (&tinfo->variant.bts);
-      break;
+      linux_disable_bts (tinfo);
+      delete tinfo;
+      return BTRACE_ERR_NONE;
 
     case BTRACE_FORMAT_PT:
-      errcode = linux_disable_pt (&tinfo->variant.pt);
-      break;
+      linux_disable_pt (tinfo);
+      delete tinfo;
+      return BTRACE_ERR_NONE;
     }
 
-  if (errcode == BTRACE_ERR_NONE)
-    xfree (tinfo);
-
-  return errcode;
+  return BTRACE_ERR_NOT_SUPPORTED;
 }
 
 /* Read branch trace data in BTS format for the thread given by TINFO into
    BTRACE using the TYPE reading method.  */
 
 static enum btrace_error
-linux_read_bts (struct btrace_data_bts *btrace,
-		struct btrace_target_info *tinfo,
+linux_read_bts (btrace_data_bts *btrace, linux_btrace_target_info *tinfo,
 		enum btrace_read_type type)
 {
-  struct perf_event_buffer *pevent;
   const uint8_t *begin, *end, *start;
   size_t buffer_size, size;
   __u64 data_head = 0, data_tail;
   unsigned int retries = 5;
 
-  pevent = &tinfo->variant.bts.bts;
-
   /* For delta reads, we return at least the partial last block containing
      the current PC.  */
-  if (type == BTRACE_READ_NEW && !perf_event_new_data (pevent))
+  if (type == BTRACE_READ_NEW && !perf_event_new_data (&tinfo->pev))
     return BTRACE_ERR_NONE;
 
-  buffer_size = pevent->size;
-  data_tail = pevent->last_head;
+  buffer_size = tinfo->pev.size;
+  data_tail = tinfo->pev.last_head;
 
   /* We may need to retry reading the trace.  See below.  */
   while (retries--)
     {
-      data_head = *pevent->data_head;
+      data_head = *tinfo->pev.data_head;
 
       /* Delete any leftover trace from the previous iteration.  */
       delete btrace->blocks;
@@ -845,13 +838,13 @@ linux_read_bts (struct btrace_data_bts *btrace,
 	}
 
       /* Data_head keeps growing; the buffer itself is circular.  */
-      begin = pevent->mem;
+      begin = tinfo->pev.mem;
       start = begin + data_head % buffer_size;
 
       if (data_head <= buffer_size)
 	end = start;
       else
-	end = begin + pevent->size;
+	end = begin + tinfo->pev.size;
 
       btrace->blocks = perf_event_read_bts (tinfo, begin, end, start, size);
 
@@ -860,11 +853,11 @@ linux_read_bts (struct btrace_data_bts *btrace,
 	 kernel might be writing the last branch trace records.
 
 	 Let's check whether the data head moved while we read the trace.  */
-      if (data_head == *pevent->data_head)
+      if (data_head == *tinfo->pev.data_head)
 	break;
     }
 
-  pevent->last_head = data_head;
+  tinfo->pev.last_head = data_head;
 
   /* Prune the incomplete last block (i.e. the first one of inferior execution)
      if we're not doing a delta read.  There is no way of filling in its zeroed
@@ -887,14 +880,9 @@ linux_fill_btrace_pt_config (struct btrace_data_pt_config *conf)
    given by TINFO into BTRACE using the TYPE reading method.  */
 
 static enum btrace_error
-linux_read_pt (struct btrace_data_pt *btrace,
-	       struct btrace_target_info *tinfo,
+linux_read_pt (btrace_data_pt *btrace, linux_btrace_target_info *tinfo,
 	       enum btrace_read_type type)
 {
-  struct perf_event_buffer *pt;
-
-  pt = &tinfo->variant.pt.pt;
-
   linux_fill_btrace_pt_config (&btrace->config);
 
   switch (type)
@@ -905,12 +893,12 @@ linux_read_pt (struct btrace_data_pt *btrace,
       return BTRACE_ERR_NOT_SUPPORTED;
 
     case BTRACE_READ_NEW:
-      if (!perf_event_new_data (pt))
+      if (!perf_event_new_data (&tinfo->pev))
 	return BTRACE_ERR_NONE;
 
       /* Fall through.  */
     case BTRACE_READ_ALL:
-      perf_event_read_all (pt, &btrace->data, &btrace->size);
+      perf_event_read_all (&tinfo->pev, &btrace->data, &btrace->size);
       return BTRACE_ERR_NONE;
     }
 
@@ -921,9 +909,12 @@ linux_read_pt (struct btrace_data_pt *btrace,
 
 enum btrace_error
 linux_read_btrace (struct btrace_data *btrace,
-		   struct btrace_target_info *tinfo,
+		   struct btrace_target_info *gtinfo,
 		   enum btrace_read_type type)
 {
+  linux_btrace_target_info *tinfo
+    = get_linux_btrace_target_info (gtinfo);
+
   switch (tinfo->conf.format)
     {
     case BTRACE_FORMAT_NONE:
diff --git a/gdb/nat/linux-btrace.h b/gdb/nat/linux-btrace.h
index ab69647c591..6505a7bc05b 100644
--- a/gdb/nat/linux-btrace.h
+++ b/gdb/nat/linux-btrace.h
@@ -23,6 +23,7 @@
 #define NAT_LINUX_BTRACE_H
 
 #include "gdbsupport/btrace-common.h"
+#include "gdbsupport/gdb-checked-static-cast.h"
 #if HAVE_LINUX_PERF_EVENT_H
 #  include <linux/perf_event.h>
 #endif
@@ -45,60 +46,27 @@ struct perf_event_buffer
   /* The data_head value from the last read.  */
   __u64 last_head;
 };
+#endif /* HAVE_LINUX_PERF_EVENT_H */
 
-/* Branch trace target information for BTS tracing.  */
-struct btrace_tinfo_bts
+/* Branch trace target information per thread.  */
+struct linux_btrace_target_info final : public btrace_target_info
 {
-  /* The Linux perf_event configuration for collecting the branch trace.  */
-  struct perf_event_attr attr;
-
-  /* The perf event file.  */
-  int file;
+  linux_btrace_target_info (ptid_t ptid)
+    : btrace_target_info (ptid)
+    {}
 
-  /* The perf event configuration page. */
-  volatile struct perf_event_mmap_page *header;
-
-  /* The BTS perf event buffer.  */
-  struct perf_event_buffer bts;
-};
-
-/* Branch trace target information for Intel Processor Trace
-   tracing.  */
-struct btrace_tinfo_pt
-{
+#if HAVE_LINUX_PERF_EVENT_H
   /* The Linux perf_event configuration for collecting the branch trace.  */
-  struct perf_event_attr attr;
+  struct perf_event_attr attr {};
 
   /* The perf event file.  */
-  int file;
+  int file = -1;
 
-  /* The perf event configuration page. */
-  volatile struct perf_event_mmap_page *header;
+  /* The perf event configuration page.  */
+  volatile struct perf_event_mmap_page *header = nullptr;
 
-  /* The trace perf event buffer.  */
-  struct perf_event_buffer pt;
-};
-#endif /* HAVE_LINUX_PERF_EVENT_H */
-
-/* Branch trace target information per thread.  */
-struct btrace_target_info
-{
-  /* The ptid of this thread.  */
-  ptid_t ptid;
-
-  /* The obtained branch trace configuration.  */
-  struct btrace_config conf;
-
-#if HAVE_LINUX_PERF_EVENT_H
-  /* The branch tracing format specific information.  */
-  union
-  {
-    /* CONF.FORMAT == BTRACE_FORMAT_BTS.  */
-    struct btrace_tinfo_bts bts;
-
-    /* CONF.FORMAT == BTRACE_FORMAT_PT.  */
-    struct btrace_tinfo_pt pt;
-  } variant;
+  /* The perf event buffer containing the trace data.  */
+  struct perf_event_buffer pev {};
 #endif /* HAVE_LINUX_PERF_EVENT_H */
 };
 
diff --git a/gdb/remote.c b/gdb/remote.c
index 55f2fc3b6b5..ba81c5b0b6f 100644
--- a/gdb/remote.c
+++ b/gdb/remote.c
@@ -14423,15 +14423,6 @@ parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
 #endif  /* !defined (HAVE_LIBEXPAT) */
 }
 
-struct btrace_target_info
-{
-  /* The ptid of the traced thread.  */
-  ptid_t ptid;
-
-  /* The obtained branch trace configuration.  */
-  struct btrace_config conf;
-};
-
 /* Reset our idea of our target's btrace configuration.  */
 
 static void
@@ -14502,7 +14493,7 @@ remote_target::btrace_sync_conf (const btrace_config *conf)
 /* Read TP's btrace configuration from the target and store it into CONF.  */
 
 static void
-btrace_read_config (thread_info *tp, struct btrace_config *conf)
+btrace_read_config (thread_info *tp, btrace_config *conf)
 {
   /* target_read_stralloc relies on INFERIOR_PTID.  */
   scoped_restore_current_thread restore_thread;
@@ -14564,9 +14555,8 @@ remote_target::remote_btrace_maybe_reopen ()
 		      btrace_format_string (rs->btrace_config.format));
 	}
 
-      tp->btrace.target = XCNEW (struct btrace_target_info);
-      tp->btrace.target->ptid = tp->ptid;
-      tp->btrace.target->conf = rs->btrace_config;
+      tp->btrace.target
+	= new btrace_target_info { tp->ptid, rs->btrace_config };
     }
 }
 
@@ -14576,7 +14566,6 @@ struct btrace_target_info *
 remote_target::enable_btrace (thread_info *tp,
 			      const struct btrace_config *conf)
 {
-  struct btrace_target_info *tinfo = NULL;
   struct packet_config *packet = NULL;
   struct remote_state *rs = get_remote_state ();
   char *buf = rs->buf.data ();
@@ -14620,8 +14609,7 @@ remote_target::enable_btrace (thread_info *tp,
 	       target_pid_to_str (ptid).c_str ());
     }
 
-  tinfo = XCNEW (struct btrace_target_info);
-  tinfo->ptid = ptid;
+  btrace_target_info *tinfo = new btrace_target_info { ptid };
 
   /* If we fail to read the configuration, we lose some information, but the
      tracing itself is not impacted.  */
@@ -14667,7 +14655,7 @@ remote_target::disable_btrace (struct btrace_target_info *tinfo)
 	       target_pid_to_str (tinfo->ptid).c_str ());
     }
 
-  xfree (tinfo);
+  delete tinfo;
 }
 
 /* Teardown branch tracing.  */
@@ -14676,7 +14664,7 @@ void
 remote_target::teardown_btrace (struct btrace_target_info *tinfo)
 {
   /* We must not talk to the target during teardown.  */
-  xfree (tinfo);
+  delete tinfo;
 }
 
 /* Read the branch trace.  */
diff --git a/gdbsupport/btrace-common.h b/gdbsupport/btrace-common.h
index e287c93a6c1..b4defdea84c 100644
--- a/gdbsupport/btrace-common.h
+++ b/gdbsupport/btrace-common.h
@@ -214,7 +214,23 @@ struct btrace_data
 };
 
 /* Target specific branch trace information.  */
-struct btrace_target_info;
+struct btrace_target_info
+{
+  btrace_target_info (ptid_t ptid) : ptid (ptid)
+    {}
+
+  btrace_target_info (ptid_t ptid, btrace_config conf)
+    : ptid (ptid), conf (conf)
+    {}
+
+  /* The ptid of this thread.  */
+  ptid_t ptid {};
+
+  /* The obtained branch trace configuration.  */
+  btrace_config conf {};
+
+  virtual ~btrace_target_info () = default;
+};
 
 /* Enumeration of btrace read types.  */
 
-- 
2.34.1


Thread overview: 7+ messages
2023-09-08 10:53 [PATCH v3] gdb: c++ify btrace_target_info Markus Metzger
2023-09-08 14:31 ` Simon Marchi
2023-09-11  6:18   ` Metzger, Markus T
2023-09-11  9:17     ` Andrew Burgess
2023-09-11  9:53       ` Metzger, Markus T
2023-09-11 14:25         ` Andrew Burgess
2023-09-11 15:27           ` Simon Marchi
