public inbox for gdb-patches@sourceware.org
 help / color / mirror / Atom feed
* [pushed v2 0/2] Fix debugging multi inferiors using the ROCm runtime
@ 2023-07-31 14:30 Lancelot SIX
  2023-07-31 14:30 ` [pushed v2 1/2] gdb/testsuite/rocm: Add the hip_devices_support_debug_multi_process proc Lancelot SIX
  2023-07-31 14:30 ` [pushed v2 2/2] gdb/amdgpu: Fix debugging multiple inferiors using the ROCm runtime Lancelot SIX
  0 siblings, 2 replies; 3+ messages in thread
From: Lancelot SIX @ 2023-07-31 14:30 UTC (permalink / raw)
  To: gdb-patches; +Cc: lsix, Lancelot SIX

Hi,

Here are the patches addressing Pedro's comments on
https://sourceware.org/pipermail/gdb-patches/2023-June/200582.html.  As
the changes from V1 are minor, and following Pedro's recommendation,
I have pushed those 2 patches.

I have rebased and re-tested the patches.

Best,
Lancelot.

Lancelot Six (2):
  gdb/testsuite/rocm: Add the hip_devices_support_debug_multi_process
    proc
  gdb/amdgpu: Fix debugging multiple inferiors using the ROCm runtime

 gdb/amd-dbgapi-target.c                       |   8 +-
 gdb/testsuite/gdb.rocm/multi-inferior-gpu.cpp | 113 ++++++++++++++++++
 gdb/testsuite/gdb.rocm/multi-inferior-gpu.exp |  89 ++++++++++++++
 gdb/testsuite/lib/rocm.exp                    |  20 ++++
 4 files changed, 227 insertions(+), 3 deletions(-)
 create mode 100644 gdb/testsuite/gdb.rocm/multi-inferior-gpu.cpp
 create mode 100644 gdb/testsuite/gdb.rocm/multi-inferior-gpu.exp


base-commit: 6a6e82dc75afc3d6d72e925ad562fba8f297251c
-- 
2.34.1


^ permalink raw reply	[flat|nested] 3+ messages in thread

* [pushed v2 1/2] gdb/testsuite/rocm: Add the hip_devices_support_debug_multi_process proc
  2023-07-31 14:30 [pushed v2 0/2] Fix debugging multi inferiors using the ROCm runtime Lancelot SIX
@ 2023-07-31 14:30 ` Lancelot SIX
  2023-07-31 14:30 ` [pushed v2 2/2] gdb/amdgpu: Fix debugging multiple inferiors using the ROCm runtime Lancelot SIX
  1 sibling, 0 replies; 3+ messages in thread
From: Lancelot SIX @ 2023-07-31 14:30 UTC (permalink / raw)
  To: gdb-patches; +Cc: lsix, Lancelot Six, Pedro Alves

From: Lancelot Six <lancelot.six@amd.com>

It is not possible to debug multiple processes simultaneously on all
generations of AMDGPU devices.  As some tests will need to debug
multiple inferiors using AMDGPU devices, we need to ensure that all
devices available have the required capability.  Failing to do so would
result in GDB not being able to debug all inferiors properly.

Add the hip_devices_support_debug_multi_process helper function used to
ensure that all devices available can debug multiple processes.

Approved-By: Pedro Alves <pedro@palves.net>
---
 gdb/testsuite/lib/rocm.exp | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/gdb/testsuite/lib/rocm.exp b/gdb/testsuite/lib/rocm.exp
index 389d73bcaa5..98a3b308228 100644
--- a/gdb/testsuite/lib/rocm.exp
+++ b/gdb/testsuite/lib/rocm.exp
@@ -166,3 +166,23 @@ proc with_rocm_gpu_lock { body } {
 	return -code $code $result
     }
 }
+
+# Return true if all the devices support debugging multiple processes
+# using the GPU.
+
+proc hip_devices_support_debug_multi_process {} {
+    set unsupported_targets \
+	{gfx900 gfx906 gfx908 gfx1010 gfx1011 gfx1012 gfx1030 gfx1031 gfx1032}
+
+    set targets [hcc_amdgpu_targets]
+    if { [llength $targets] == 0 } {
+	return 0
+    }
+
+    foreach target $targets {
+	if { [lsearch -exact $unsupported_targets $target] != -1 } {
+	    return 0
+	}
+    }
+    return 1
+}
-- 
2.34.1


^ permalink raw reply	[flat|nested] 3+ messages in thread

* [pushed v2 2/2] gdb/amdgpu: Fix debugging multiple inferiors using the ROCm runtime
  2023-07-31 14:30 [pushed v2 0/2] Fix debugging multi inferiors using the ROCm runtime Lancelot SIX
  2023-07-31 14:30 ` [pushed v2 1/2] gdb/testsuite/rocm: Add the hip_devices_support_debug_multi_process proc Lancelot SIX
@ 2023-07-31 14:30 ` Lancelot SIX
  1 sibling, 0 replies; 3+ messages in thread
From: Lancelot SIX @ 2023-07-31 14:30 UTC (permalink / raw)
  To: gdb-patches; +Cc: lsix, Lancelot Six, Pedro Alves

From: Lancelot Six <lancelot.six@amd.com>

When debugging a multi-process application where a parent spawns
multiple child processes using the ROCm runtime, I see the following
assertion failure:

    ../../gdb/amd-dbgapi-target.c:1071: internal-error: process_one_event: Assertion `runtime_state == AMD_DBGAPI_RUNTIME_STATE_UNLOADED' failed.
    A problem internal to GDB has been detected,
    further debugging may prove unreliable.
    ----- Backtrace -----
    0x556e9a318540 gdb_internal_backtrace_1
            ../../gdb/bt-utils.c:122
    0x556e9a318540 _Z22gdb_internal_backtracev
            ../../gdb/bt-utils.c:168
    0x556e9a730224 internal_vproblem
            ../../gdb/utils.c:396
    0x556e9a7304e0 _Z15internal_verrorPKciS0_P13__va_list_tag
            ../../gdb/utils.c:476
    0x556e9a87aeb4 _Z18internal_error_locPKciS0_z
            ../../gdbsupport/errors.cc:58
    0x556e9a29f446 process_one_event
            ../../gdb/amd-dbgapi-target.c:1071
    0x556e9a29f446 process_event_queue
            ../../gdb/amd-dbgapi-target.c:1156
    0x556e9a29faf2 _ZN17amd_dbgapi_target4waitE6ptid_tP17target_waitstatus10enum_flagsI16target_wait_flagE
            ../../gdb/amd-dbgapi-target.c:1262
    0x556e9a6b0965 _Z11target_wait6ptid_tP17target_waitstatus10enum_flagsI16target_wait_flagE
            ../../gdb/target.c:2586
    0x556e9a4c221f do_target_wait_1
            ../../gdb/infrun.c:3876
    0x556e9a4d8489 operator()
            ../../gdb/infrun.c:3935
    0x556e9a4d8489 do_target_wait
            ../../gdb/infrun.c:3964
    0x556e9a4d8489 _Z20fetch_inferior_eventv
            ../../gdb/infrun.c:4365
    0x556e9a87b915 gdb_wait_for_event
            ../../gdbsupport/event-loop.cc:694
    0x556e9a87c3a9 gdb_wait_for_event
            ../../gdbsupport/event-loop.cc:593
    0x556e9a87c3a9 _Z16gdb_do_one_eventi
            ../../gdbsupport/event-loop.cc:217
    0x556e9a521689 start_event_loop
            ../../gdb/main.c:412
    0x556e9a521689 captured_command_loop
            ../../gdb/main.c:476
    0x556e9a523c04 captured_main
            ../../gdb/main.c:1320
    0x556e9a523c04 _Z8gdb_mainP18captured_main_args
            ../../gdb/main.c:1339
    0x556e9a24b1bf main
            ../../gdb/gdb.c:32
    ---------------------
    ../../gdb/amd-dbgapi-target.c:1071: internal-error: process_one_event: Assertion `runtime_state == AMD_DBGAPI_RUNTIME_STATE_UNLOADED' failed.
    A problem internal to GDB has been detected,

Before diving into why this error appears, let's explore how things are
expected to work in normal circumstances.  When a process being debugged
starts using the ROCm runtime, the following happens:

- The runtime registers itself to the driver.
- The driver creates a "runtime loaded" event and notifies the debugger
  that a new event is available by writing to a file descriptor which is
  registered in GDB's main event loop.
- GDB core calls the callback associated with this file descriptor
  (dbgapi_notifier_handler).  Because the amd-dbgapi-target is not
  pushed at this point, the handler pulls the "runtime loaded" event
  from the driver (this is the only event which can be available at this
  point) and eventually pushes the amd-dbgapi-target on the inferior's
  target stack.

In a nutshell, this is the expected AMDGPU runtime activation process.

From there, when new events are available regarding the GPU threads, the
same file descriptor is written to.  The callback sees that the
amd-dbgapi-target is pushed so marks the amd_dbgapi_async_event_handler.
This will later cause amd_dbgapi_target::wait to be called.  The wait
method pulls all the available events from the driver and handles them.
The wait method returns the information conveyed by the first event, the
other events are cached for later calls of the wait method.

Note that because we are under the wait method, we know that the
amd-dbgapi-target is pushed on the inferior target stack.  This implies
that the runtime activation event has been seen already.  As a
consequence, we cannot receive another event indicating that the runtime
gets activated.  This is what the failing assertion checks.

In the case when we have multiple inferiors however, there is a flaw in
what has been described above.  If one inferior (let's call it inferior
1) already has the amd-dbgapi-target pushed to its target stack and
another inferior (inferior 2) activates the ROCm runtime, here is what
can happen:

- The driver creates the runtime activation for inferior 2 and writes to
  the associated file descriptor.
- GDB has inferior 1 selected and calls target_wait for some reason.
- This prompts amd_dbgapi_target::wait to be called.  The method pulls
  all events from the driver, including the runtime activation event for
  inferior 2, leading to the assertion failure.

The fix for this problem is simple.  To avoid such a problem, we need to
make sure that amd_dbgapi_target::wait only pulls events for the current
inferior from the driver.  This is what this patch implements.

This patch also includes a testcase which could fail before this patch.

This patch has been tested on a system with multiple GPUs which had more
chances to reproduce the original bug.  It has also been tested on top
of the downstream ROCgdb port which has more AMDGPU related tests.  The
testcase has been tested with `make check check-read1 check-readmore`.

Approved-By: Pedro Alves <pedro@palves.net>
---
 gdb/amd-dbgapi-target.c                       |   8 +-
 gdb/testsuite/gdb.rocm/multi-inferior-gpu.cpp | 113 ++++++++++++++++++
 gdb/testsuite/gdb.rocm/multi-inferior-gpu.exp |  89 ++++++++++++++
 3 files changed, 207 insertions(+), 3 deletions(-)
 create mode 100644 gdb/testsuite/gdb.rocm/multi-inferior-gpu.cpp
 create mode 100644 gdb/testsuite/gdb.rocm/multi-inferior-gpu.exp

diff --git a/gdb/amd-dbgapi-target.c b/gdb/amd-dbgapi-target.c
index 40f24b5fc2f..e90628c8183 100644
--- a/gdb/amd-dbgapi-target.c
+++ b/gdb/amd-dbgapi-target.c
@@ -148,7 +148,7 @@ struct amd_dbgapi_inferior_info
 };
 
 static amd_dbgapi_event_id_t process_event_queue
-  (amd_dbgapi_process_id_t process_id = AMD_DBGAPI_PROCESS_NONE,
+  (amd_dbgapi_process_id_t process_id,
    amd_dbgapi_event_kind_t until_event_kind = AMD_DBGAPI_EVENT_KIND_NONE);
 
 static const target_info amd_dbgapi_target_info = {
@@ -1255,8 +1255,10 @@ amd_dbgapi_target::wait (ptid_t ptid, struct target_waitstatus *ws,
   std::tie (event_ptid, gpu_waitstatus) = consume_one_event (ptid.pid ());
   if (event_ptid == minus_one_ptid)
     {
-      /* Drain the events from the amd_dbgapi and preserve the ordering.  */
-      process_event_queue ();
+      /* Drain the events for the current inferior from the amd_dbgapi and
+	 preserve the ordering.  */
+      auto info = get_amd_dbgapi_inferior_info (current_inferior ());
+      process_event_queue (info->process_id, AMD_DBGAPI_EVENT_KIND_NONE);
 
       std::tie (event_ptid, gpu_waitstatus) = consume_one_event (ptid.pid ());
       if (event_ptid == minus_one_ptid)
diff --git a/gdb/testsuite/gdb.rocm/multi-inferior-gpu.cpp b/gdb/testsuite/gdb.rocm/multi-inferior-gpu.cpp
new file mode 100644
index 00000000000..ca869233b58
--- /dev/null
+++ b/gdb/testsuite/gdb.rocm/multi-inferior-gpu.cpp
@@ -0,0 +1,113 @@
+/* This testcase is part of GDB, the GNU debugger.
+
+   Copyright 2023 Free Software Foundation, Inc.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 3 of the License, or
+   (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
+
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <hip/hip_runtime.h>
+
+#define CHECK(cmd)                                                           \
+  {                                                                          \
+    hipError_t error = cmd;                                                  \
+    if (error != hipSuccess)                                                 \
+      {                                                                      \
+	fprintf (stderr, "error: '%s'(%d) at %s:%d\n",                       \
+		 hipGetErrorString (error), error, __FILE__, __LINE__);      \
+	exit (EXIT_FAILURE);                                                 \
+      }                                                                      \
+  }
+
+__global__ void
+kern ()
+{
+  asm ("s_sleep 1");
+}
+
+/* Spawn one child process per detected GPU.  */
+
+static int
+parent (int argc, char **argv)
+{
+  /* Identify how many GPUs we have, and spawn one child for each.  */
+  int num_devices;
+  CHECK (hipGetDeviceCount (&num_devices));
+
+  /* Break here.  */
+
+  for (int i = 0; i < num_devices; i++)
+    {
+      char n[32] = {};
+      snprintf (n, sizeof (n), "%d", i);
+      pid_t pid = fork ();
+      if (pid == -1)
+	{
+	  perror ("Fork failed");
+	  return -1;
+	}
+
+      if (pid == 0)
+	{
+	  /* Exec to force the child to re-initialize the ROCm runtime.  */
+	  if (execl (argv[0], argv[0], n) == -1)
+	    {
+	      perror ("Failed to exec");
+	      return -1;
+	    }
+	}
+    }
+
+  /* Wait for all children.  */
+  while (true)
+    {
+      int ws;
+      pid_t ret = waitpid (-1, &ws, 0);
+      if (ret == -1 && errno == ECHILD)
+	break;
+    }
+
+  /* Last break here.  */
+  return 0;
+}
+
+static int
+child (int argc, char **argv)
+{
+  int dev_number;
+  if (sscanf (argv[1], "%d", &dev_number) != 1)
+    {
+      fprintf (stderr, "Invalid argument \"%s\"\n", argv[1]);
+      return -1;
+    }
+
+  CHECK (hipSetDevice (dev_number));
+  kern<<<1, 1>>> ();
+  hipDeviceSynchronize ();
+  return 0;
+}
+
+/* When called with no argument, identify how many AMDGPU devices are
+   available on the system and spawn one worker process per GPU.  If a
+   command-line argument is provided, it is the index of the GPU to use.  */
+
+int
+main (int argc, char **argv)
+{
+  if (argc <= 1)
+    return parent (argc, argv);
+  else
+    return child (argc, argv);
+}
diff --git a/gdb/testsuite/gdb.rocm/multi-inferior-gpu.exp b/gdb/testsuite/gdb.rocm/multi-inferior-gpu.exp
new file mode 100644
index 00000000000..18b4172ff09
--- /dev/null
+++ b/gdb/testsuite/gdb.rocm/multi-inferior-gpu.exp
@@ -0,0 +1,89 @@
+# Copyright 2023 Free Software Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+# This test checks that GDB can debug multiple inferiors which all use
+# the ROCm runtime.
+
+load_lib rocm.exp
+
+standard_testfile .cpp
+
+require allow_hipcc_tests
+require hip_devices_support_debug_multi_process
+
+if {[build_executable "failed to prepare" $testfile $srcfile {debug hip}]} {
+    return
+}
+
+proc do_test {} {
+    clean_restart $::binfile
+    gdb_test_no_output "set non-stop on"
+    gdb_test_no_output "set detach-on-fork off"
+    gdb_test_no_output "set follow-fork parent"
+
+    with_rocm_gpu_lock {
+	gdb_breakpoint [gdb_get_line_number "Break here"]
+	gdb_breakpoint kern allow-pending
+	gdb_breakpoint [gdb_get_line_number "Last break here"]
+
+	# Run until we reach the first breakpoint where we can figure
+	# out how many children will be spawned.
+	gdb_test "run" "hit Breakpoint.*"
+
+	set num_children [get_integer_valueof "num_devices" 0]
+	set bp_to_see $num_children
+	set stopped_gpu_threads [list]
+
+	gdb_test_multiple "continue -a &" "continue to gpu breakpoints" {
+	    -re "Continuing\.\r\n$::gdb_prompt " {
+		pass $gdb_test_name
+	    }
+	}
+
+	gdb_test_multiple "" "wait for gpu stops" {
+	    -re "Thread ($::decimal\.$::decimal)\[^\r\n\]* hit Breakpoint\[^\r\n\]*, kern \(\)\[^\r\n\]*\r\n" {
+		lappend stopped_gpu_threads $expect_out(1,string)
+		incr bp_to_see -1
+		if {$bp_to_see != 0} {
+		    exp_continue
+		} else {
+		    pass $gdb_test_name
+		}
+	    }
+	}
+
+	# Continue all the GPU kernels so all the child processes can reach exit.
+	foreach thread $stopped_gpu_threads {
+	    set infnumber [lindex [split $thread .] 0]
+	    gdb_test "thread $thread" "Switching to thread.*"
+	    gdb_test_multiple "continue $thread" "" {
+		-re "\\\[Inferior $infnumber \[^\n\r\]* exited normally\\]\r\n$::gdb_prompt " {
+		    pass $gdb_test_name
+		}
+	    }
+	}
+
+	gdb_test_multiple "" "reach breakpoint in main" {
+	    -re "hit Breakpoint.*parent" {
+		pass $gdb_test_name
+	    }
+	}
+	# Select main inferior
+	gdb_test "inferior 1" "Switching to inferior 1.*"
+	gdb_continue_to_end "" "continue -a" 1
+    }
+}
+
+do_test
-- 
2.34.1


^ permalink raw reply	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2023-07-31 14:46 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-07-31 14:30 [pushed v2 0/2] Fix debugging multi inferiors using the ROCm runtime Lancelot SIX
2023-07-31 14:30 ` [pushed v2 1/2] gdb/testsuite/rocm: Add the hip_devices_support_debug_multi_process proc Lancelot SIX
2023-07-31 14:30 ` [pushed v2 2/2] gdb/amdgpu: Fix debugging multiple inferiors using the ROCm runtime Lancelot SIX

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).