public inbox for
 help / color / mirror / Atom feed
From: Kevin Buettner <>
Subject: [commit] Fix problem in which singlestep operations were apparently ignored
Date: Wed, 09 Nov 2005 02:33:00 -0000	[thread overview]
Message-ID: <20051108193308.6b895a95@ironwood.lan> (raw)

I've just committed the patch below.

This patch fixes a problem in which a singlestep operation is
apparently ignored.  This would occur when GDB would tell RDA to
singlestep.  RDA would tell the appropriate thread to singlestep and
the rest of the threads to continue.  If, prior to the singlestepped
thread actually getting scheduled, one of the other threads were to
get an internal signal, say related to thread creation, the singlestep
would never actually occur.  The thread that was supposed to have been
singlestepped would instead be continued.

My fix for this problem is to record in struct lwp when a singlestep
is attempted.  If a SIGTRAP is received for the singlestep thread,
then the flag is cleared.  OTOH, if an attempt is made to continue one
of the threads which is marked as singlestepping, it will instead be
singlestepped.

I've been able to easily reproduce this bug by doing repeated "next"
operations through the thread creation loop in the linux-dp program.
On i386, the bug would actually manifest itself as a SIGSEGV due to
a decr_pc_after_break problem.  (Which suggests that there are also
bugs in GDB.  A week or so ago, I had myself convinced that there were
at least three bugs, one in RDA, and at least two in GDB.)  On other
architectures which have a zero decr-pc-after-break value, the singlestep
would apparently turn into a continue.

Understanding the cause of this problem was very difficult and
resulted in the enhanced diagnostics that I committed a short while
ago.  I would not have been able to gain an understanding of this
problem without being able to print out a thread's PC value prior to a
singlestep and then again when the thread was stopped.  (That's my
justification for cluttering the lwp-pool code with that extra `serv'
parameter.)

	* lwp-pool.c (struct lwp): Add new member `do_step'.
	(wait_and_handle): Clear `do_step' when a SIGTRAP is received.
	(continue_or_step_lwp): New function.
	(lwp_pool_continue_all, lwp_pool_continue_lwp): Call
	continue_or_step_lwp() instead of continue_lwp().
	(lwp_pool_singlestep_lwp): Set `do_step' after a successful call to
	singlestep_lwp().

Index: lwp-pool.c
RCS file: /cvs/src/src/rda/unix/lwp-pool.c,v
retrieving revision 1.3
diff -u -p -r1.3 lwp-pool.c
--- lwp-pool.c	8 Nov 2005 21:58:36 -0000	1.3
+++ lwp-pool.c	8 Nov 2005 22:19:39 -0000
@@ -320,6 +320,12 @@ struct lwp
   /* If STATE is one of the lwp_state_*_interesting states, then
      STATUS is the interesting wait status.  */
   int status;
+  /* Indicates the stepping status.  We must be prepared to step the
+     given lwp upon continue since it's possible to get thread notification
+     signals prior to a step actually occurring.  Receipt of a SIGTRAP is
+     sufficient to clear this flag.  */
+  int do_step;
@@ -879,6 +885,12 @@ wait_and_handle (struct gdbserv *serv, s
       stopsig = WSTOPSIG (status);
+      if (stopsig == SIGTRAP)
+	{
+	  /* No longer stepping once a SIGTRAP is received.  */
+	  l->do_step = 0;
+	}
       switch (l->state)
 	case lwp_state_uninitialized:
@@ -1205,6 +1217,18 @@ lwp_pool_stop_all (struct gdbserv *serv)
+continue_or_step_lwp (struct gdbserv *serv, struct lwp *l, int sig)
+  int status;
+  if (l->do_step)
+    status = singlestep_lwp (serv, l->pid, sig);
+  else
+    status = continue_lwp (l->pid, sig);
+  return status;
 lwp_pool_continue_all (struct gdbserv *serv)
@@ -1237,7 +1261,7 @@ lwp_pool_continue_all (struct gdbserv *s
 	    case lwp_state_stopped:
-	      if (continue_lwp (l->pid, 0) == 0)
+	      if (continue_or_step_lwp (serv, l, 0) == 0)
 		l->state = lwp_state_running;
@@ -1265,7 +1289,7 @@ lwp_pool_continue_all (struct gdbserv *s
                   l->state = lwp_state_running_stop_pending;
                   if (check_stop_pending (serv, l) == 0)
-                      if (continue_lwp (l->pid, 0) == 0)
+                      if (continue_or_step_lwp (serv, l, 0) == 0)
                         l->state = lwp_state_running;
@@ -1314,7 +1338,7 @@ lwp_pool_continue_lwp (struct gdbserv *s
     case lwp_state_stopped:
-      result = continue_lwp (l->pid, signal);
+      result = continue_or_step_lwp (serv, l, signal);
       if (result == 0)
         l->state = lwp_state_running;
@@ -1334,7 +1358,7 @@ lwp_pool_continue_lwp (struct gdbserv *s
           l->state = lwp_state_running_stop_pending;
           if (check_stop_pending (serv, l) == 0)
-              if (continue_lwp (l->pid, 0) == 0)
+              if (continue_or_step_lwp (serv, l, 0) == 0)
                 l->state = lwp_state_running;
@@ -1385,7 +1409,10 @@ lwp_pool_singlestep_lwp (struct gdbserv 
     case lwp_state_stopped:
       result = singlestep_lwp (serv, l->pid, signal);
       if (result == 0)
-        l->state = lwp_state_running;
+	{
+	  l->state = lwp_state_running;
+	  l->do_step = 1;
+	}
     case lwp_state_stopped_stop_pending:
@@ -1404,7 +1431,10 @@ lwp_pool_singlestep_lwp (struct gdbserv 
           if (check_stop_pending (serv, l) == 0)
               if (singlestep_lwp (serv, l->pid, 0) == 0)
-                l->state = lwp_state_running;
+		{
+		  l->state = lwp_state_running;
+		  l->do_step = 1;
+		}

                 reply	other threads:[~2005-11-09  2:33 UTC|newest]

Thread overview: [no followups] expand[flat|nested]  mbox.gz  Atom feed

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20051108193308.6b895a95@ironwood.lan

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).