public inbox for gdb-patches@sourceware.org
 help / color / mirror / Atom feed
* [patch 0/3] Displaced stepping for 16-bit Thumb instructions
@ 2010-12-25 14:17 Yao Qi
  2010-12-25 14:22 ` [patch 1/3] " Yao Qi
                   ` (8 more replies)
  0 siblings, 9 replies; 66+ messages in thread
From: Yao Qi @ 2010-12-25 14:17 UTC (permalink / raw)
  To: gdb-patches

Displaced stepping doesn't work for Thumb instructions so far.  This set
of patches adds support for displaced stepping of 16-bit Thumb
instructions.  There are many more 32-bit Thumb instructions than 16-bit
Thumb instructions, so it takes more time to support 32-bit Thumb
instructions.  I'd like to send these three patches first for review.
Once these three are done, it is straightforward to support 32-bit
Thumb instructions.

Regression tested these three patches along with another pending patch
on armv7l-unknown-linux-gnueabi.
http://sourceware.org/ml/gdb-patches/2010-12/msg00427.html

No regressions and some test failures are fixed.
-FAIL: gdb.base/moribund-step.exp: running to main in runto
-FAIL: gdb.mi/mi-nonstop-exit.exp: mi runto main (timeout)
-FAIL: gdb.mi/mi-nonstop.exp: mi runto main (timeout)
-FAIL: gdb.mi/mi-ns-stale-regcache.exp: mi runto main (timeout)
-FAIL: gdb.mi/mi-nsintrall.exp: mi runto main (timeout)
-FAIL: gdb.mi/mi-nsmoribund.exp: mi runto main (timeout)
-FAIL: gdb.mi/mi-nsthrexec.exp: mi runto main (timeout)

-- 
Yao (齐尧)

^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [patch 1/3] Displaced stepping for 16-bit Thumb instructions
  2010-12-25 14:17 [patch 0/3] Displaced stepping for 16-bit Thumb instructions Yao Qi
@ 2010-12-25 14:22 ` Yao Qi
  2011-02-17 19:09   ` Ulrich Weigand
  2010-12-25 17:09 ` [patch 2/3] " Yao Qi
                   ` (7 subsequent siblings)
  8 siblings, 1 reply; 66+ messages in thread
From: Yao Qi @ 2010-12-25 14:22 UTC (permalink / raw)
  To: gdb-patches

[-- Attachment #1: Type: text/plain, Size: 91 bytes --]

Patch 1 is about refactoring, without any effect on functionality.

-- 
Yao (齐尧)

[-- Attachment #2: arm_disp_step_refactor_for_thumb_p1.patch --]
[-- Type: text/x-patch, Size: 4415 bytes --]

2010-12-25  Yao Qi  <yao@codesourcery.com>

	* gdb/arm-tdep.c (arm_displaced_step_copy_insn): Move code to ...
	(arm_process_displaced_insn): ... here.  Remove parameter INSN.
	(thumb_process_displaced_insn): New.
	* gdb/arm-linux-tdep.c (arm_linux_displaced_step_copy_insn): Update
	call to arm_process_displaced_insn.
	* gdb/arm-tdep.h : Update declaration of arm_process_displaced_insn.

diff --git a/gdb/arm-linux-tdep.c b/gdb/arm-linux-tdep.c
index 4758ded..06f386a 100644
--- a/gdb/arm-linux-tdep.c
+++ b/gdb/arm-linux-tdep.c
@@ -913,18 +913,10 @@ arm_linux_displaced_step_copy_insn (struct gdbarch *gdbarch,
     }
   else
     {
-      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
-      uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order);
-
-      if (debug_displaced)
-	fprintf_unfiltered (gdb_stdlog, "displaced: stepping insn %.8lx "
-			    "at %.8lx\n", (unsigned long) insn,
-			    (unsigned long) from);
-
       /* Override the default handling of SVC instructions.  */
       dsc->u.svc.copy_svc_os = arm_linux_copy_svc;
 
-      arm_process_displaced_insn (gdbarch, insn, from, to, regs, dsc);
+      arm_process_displaced_insn (gdbarch, from, to, regs, dsc);
     }
 
   arm_displaced_init_closure (gdbarch, from, to, dsc);
diff --git a/gdb/arm-tdep.c b/gdb/arm-tdep.c
index d4013c6..64aa500 100644
--- a/gdb/arm-tdep.c
+++ b/gdb/arm-tdep.c
@@ -5837,16 +5837,22 @@ decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
   else
     return copy_undef (gdbarch, insn, dsc);  /* Possibly unreachable.  */
 }
+static void
+thumb_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
+			      CORE_ADDR to, struct regcache *regs,
+			      struct displaced_step_closure *dsc)
+{
+  error (_("Displaced stepping is only supported in ARM mode"));
+}
 
 void
-arm_process_displaced_insn (struct gdbarch *gdbarch, uint32_t insn,
-			    CORE_ADDR from, CORE_ADDR to, struct regcache *regs,
+arm_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
+			    CORE_ADDR to, struct regcache *regs,
 			    struct displaced_step_closure *dsc)
 {
   int err = 0;
-
-  if (!displaced_in_arm_mode (regs))
-    error (_("Displaced stepping is only supported in ARM mode"));
+  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
+  uint32_t insn;
 
   /* Most displaced instructions use a 1-instruction scratch space, so set this
      here and override below if/when necessary.  */
@@ -5856,6 +5862,15 @@ arm_process_displaced_insn (struct gdbarch *gdbarch, uint32_t insn,
   dsc->cleanup = NULL;
   dsc->wrote_to_pc = 0;
 
+  if (!displaced_in_arm_mode (regs))
+    return thumb_process_displaced_insn (gdbarch, from, to, regs, dsc);
+
+  insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: stepping insn %.8lx "
+			"at %.8lx\n", (unsigned long) insn,
+			(unsigned long) from);
+
   if ((insn & 0xf0000000) == 0xf0000000)
     err = decode_unconditional (gdbarch, insn, regs, dsc);
   else switch (((insn & 0x10) >> 4) | ((insn & 0xe000000) >> 24))
@@ -5926,15 +5941,7 @@ arm_displaced_step_copy_insn (struct gdbarch *gdbarch,
 {
   struct displaced_step_closure *dsc
     = xmalloc (sizeof (struct displaced_step_closure));
-  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
-  uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
-
-  if (debug_displaced)
-    fprintf_unfiltered (gdb_stdlog, "displaced: stepping insn %.8lx "
-			"at %.8lx\n", (unsigned long) insn,
-			(unsigned long) from);
-
-  arm_process_displaced_insn (gdbarch, insn, from, to, regs, dsc);
+  arm_process_displaced_insn (gdbarch, from, to, regs, dsc);
   arm_displaced_init_closure (gdbarch, from, to, dsc);
 
   return dsc;
diff --git a/gdb/arm-tdep.h b/gdb/arm-tdep.h
index 61cdb5d..cfb85ff 100644
--- a/gdb/arm-tdep.h
+++ b/gdb/arm-tdep.h
@@ -284,9 +284,8 @@ enum pc_write_style
 };
 
 extern void
-  arm_process_displaced_insn (struct gdbarch *gdbarch, uint32_t insn,
-			      CORE_ADDR from, CORE_ADDR to,
-			      struct regcache *regs,
+  arm_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
+			      CORE_ADDR to, struct regcache *regs,
 			      struct displaced_step_closure *dsc);
 extern void
   arm_displaced_init_closure (struct gdbarch *gdbarch, CORE_ADDR from,

^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [patch 2/3] Displaced stepping for 16-bit Thumb instructions
  2010-12-25 14:17 [patch 0/3] Displaced stepping for 16-bit Thumb instructions Yao Qi
  2010-12-25 14:22 ` [patch 1/3] " Yao Qi
@ 2010-12-25 17:09 ` Yao Qi
  2011-02-17 19:46   ` Ulrich Weigand
  2010-12-25 17:54 ` [patch 3/3] " Yao Qi
                   ` (6 subsequent siblings)
  8 siblings, 1 reply; 66+ messages in thread
From: Yao Qi @ 2010-12-25 17:09 UTC (permalink / raw)
  To: gdb-patches

[-- Attachment #1: Type: text/plain, Size: 270 bytes --]

The current implementation of displaced stepping on ARM assumes a fixed
32-bit instruction size.  Patch 2 rewrites some infrastructure code to
be ready to handle non-32-bit instructions.  This patch doesn't change
any GDB functionality either.

-- 
Yao (齐尧)

[-- Attachment #2: arm_disp_step_refactor_for_thumb_p2.patch --]
[-- Type: text/x-patch, Size: 15201 bytes --]

2010-12-25  Yao Qi  <yao@codesourcery.com>

	Handle both 32-bit and 16-bit insns for displaced stepping.

	* gdb/arm-tdep.h (struct displaced_step_closure): Add new field
	modinsns.
	Remove field modinsn.
	(RECORD_MOD_32BIT_INSN): New macro.
	(RECORD_MOD_16BIT_INSN): New macro.
	(RECORD_MOD_INSN): New macro.
	* gdb/arm-tdep.c (arm_displaced_step_breakpoint_offset): New.
	(cleanup_branch): Replace magic number by macros.
	(copy_unmodified): Save modified insns by RECORD_MOD_32BIT_INSN.
	(copy_preload): Likewise.
	(copy_preload_reg): Likewise.
	(copy_copro_load_store): Likewise.
	(copy_b_bl_blx): Likewise.
	(copy_bx_blx_reg): Likewise.
	(copy_alu_imm): Likewise.
	(copy_alu_reg): Likewise.
	(copy_alu_shifted_reg): Likewise.
	(copy_extra_ld_st): Likewise.
	(copy_ldr_str_ldrb_strb): Likewise.
	(copy_block_xfer): Likewise.
	(copy_svc): Likewise.
	(copy_undef): Likewise.
	(copy_unpred): Likewise.
	(decode_svc_copro): Likewise.
	(arm_displaced_init_closure): Handle both 32bit and 16bit insns.
	(arm_displaced_step_fixup): Likewise.
	* gdb/arm-linux-tdep.c (arm_linux_copy_svc): Save modified insns by
	RECORD_MOD_32BIT_INSN.
	(arm_catch_kernel_helper_return): Likewise.

diff --git a/gdb/arm-linux-tdep.c b/gdb/arm-linux-tdep.c
index 06f386a..b20b44f 100644
--- a/gdb/arm-linux-tdep.c
+++ b/gdb/arm-linux-tdep.c
@@ -827,7 +827,7 @@ arm_linux_copy_svc (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
      Cleanup: if pc lands in scratch space, pc <- insn_addr + 4
               else leave pc alone.  */
 
-  dsc->modinsn[0] = insn;
+  RECORD_MOD_32BIT_INSN (0, insn);
 
   dsc->cleanup = &arm_linux_cleanup_svc;
   /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
@@ -885,7 +885,7 @@ arm_catch_kernel_helper_return (struct gdbarch *gdbarch, CORE_ADDR from,
 		       CANNOT_WRITE_PC);
   write_memory_unsigned_integer (to + 8, 4, byte_order, from);
 
-  dsc->modinsn[0] = 0xe59ef004;  /* ldr pc, [lr, #4].  */
+  RECORD_MOD_32BIT_INSN (0, 0xe59ef004); /* ldr pc, [lr, #4].  */
 }
 
 /* Linux-specific displaced step instruction copying function.  Detects when
diff --git a/gdb/arm-tdep.c b/gdb/arm-tdep.c
index 64aa500..0e97674 100644
--- a/gdb/arm-tdep.c
+++ b/gdb/arm-tdep.c
@@ -357,6 +357,20 @@ arm_find_mapping_symbol (CORE_ADDR memaddr, CORE_ADDR *start)
 static CORE_ADDR arm_get_next_pc_raw (struct frame_info *frame, 
 				      CORE_ADDR pc, int insert_bkpt);
 
+/* Return the offset of breakpoint instruction that should be put in copy
+   area.  */
+static int
+arm_displaced_step_breakpoint_offset (struct displaced_step_closure * dsc)
+{
+  int i, size;
+  for (i = 0, size = 0; i < dsc->numinsns; i++)
+    {
+      gdb_assert (dsc->modinsns[i].size == 2 ||dsc->modinsns[i].size == 4);
+      size += dsc->modinsns[i].size;
+    }
+  return size;
+}
+
 /* Determine if the program counter specified in MEMADDR is in a Thumb
    function.  This function should be called for addresses unrelated to
    any executing frame; otherwise, prefer arm_frame_is_thumb.  */
@@ -4328,7 +4342,7 @@ copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
 			"opcode/class '%s' unmodified\n", (unsigned long) insn,
 			iname);
 
-  dsc->modinsn[0] = insn;
+  RECORD_MOD_32BIT_INSN (0, insn);
 
   return 0;
 }
@@ -4371,7 +4385,7 @@ copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
 
   dsc->u.preload.immed = 1;
 
-  dsc->modinsn[0] = insn & 0xfff0ffff;
+  RECORD_MOD_32BIT_INSN (0, (insn & 0xfff0ffff));
 
   dsc->cleanup = &cleanup_preload;
 
@@ -4411,7 +4425,7 @@ copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
 
   dsc->u.preload.immed = 0;
 
-  dsc->modinsn[0] = (insn & 0xfff0fff0) | 0x1;
+  RECORD_MOD_32BIT_INSN (0, ((insn & 0xfff0fff0) | 0x1));
 
   dsc->cleanup = &cleanup_preload;
 
@@ -4464,7 +4478,7 @@ copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
   dsc->u.ldst.writeback = bit (insn, 25);
   dsc->u.ldst.rn = rn;
 
-  dsc->modinsn[0] = insn & 0xfff0ffff;
+  RECORD_MOD_32BIT_INSN (0, (insn & 0xfff0ffff));
 
   dsc->cleanup = &cleanup_copro_load_store;
 
@@ -4489,11 +4503,11 @@ cleanup_branch (struct gdbarch *gdbarch, struct regcache *regs,
 
   if (dsc->u.branch.link)
     {
-      ULONGEST pc = displaced_read_reg (regs, from, 15);
-      displaced_write_reg (regs, dsc, 14, pc - 4, CANNOT_WRITE_PC);
+      ULONGEST pc = displaced_read_reg (regs, from, ARM_PC_REGNUM);
+      displaced_write_reg (regs, dsc, ARM_LR_REGNUM, pc - 4, CANNOT_WRITE_PC);
     }
 
-  displaced_write_reg (regs, dsc, 15, dsc->u.branch.dest, write_pc);
+  displaced_write_reg (regs, dsc, ARM_PC_REGNUM, dsc->u.branch.dest, write_pc);
 }
 
 /* Copy B/BL/BLX instructions with immediate destinations.  */
@@ -4536,7 +4550,7 @@ copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
   dsc->u.branch.exchange = exchange;
   dsc->u.branch.dest = from + 8 + offset;
 
-  dsc->modinsn[0] = ARM_NOP;
+  RECORD_MOD_32BIT_INSN (0, ARM_NOP);
 
   dsc->cleanup = &cleanup_branch;
 
@@ -4574,7 +4588,7 @@ copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
   dsc->u.branch.link = link;
   dsc->u.branch.exchange = 1;
 
-  dsc->modinsn[0] = ARM_NOP;
+  RECORD_MOD_32BIT_INSN (0, ARM_NOP);
 
   dsc->cleanup = &cleanup_branch;
 
@@ -4633,9 +4647,9 @@ copy_alu_imm (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
   dsc->rd = rd;
 
   if (is_mov)
-    dsc->modinsn[0] = insn & 0xfff00fff;
+    RECORD_MOD_32BIT_INSN (0, (insn & 0xfff00fff));
   else
-    dsc->modinsn[0] = (insn & 0xfff00fff) | 0x10000;
+    RECORD_MOD_32BIT_INSN (0, ((insn & 0xfff00fff) | 0x10000));
 
   dsc->cleanup = &cleanup_alu_imm;
 
@@ -4702,9 +4716,9 @@ copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
   dsc->rd = rd;
 
   if (is_mov)
-    dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x2;
+    RECORD_MOD_32BIT_INSN (0, ((insn & 0xfff00ff0) | 0x2));
   else
-    dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x10002;
+    RECORD_MOD_32BIT_INSN (0, ((insn & 0xfff00ff0) | 0x10002));
 
   dsc->cleanup = &cleanup_alu_reg;
 
@@ -4776,9 +4790,9 @@ copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
   dsc->rd = rd;
 
   if (is_mov)
-    dsc->modinsn[0] = (insn & 0xfff000f0) | 0x302;
+    RECORD_MOD_32BIT_INSN (0, ((insn & 0xfff000f0) | 0x302));
   else
-    dsc->modinsn[0] = (insn & 0xfff000f0) | 0x10302;
+    RECORD_MOD_32BIT_INSN (0, ((insn & 0xfff000f0) | 0x10302));
 
   dsc->cleanup = &cleanup_alu_shifted_reg;
 
@@ -4902,12 +4916,12 @@ copy_extra_ld_st (struct gdbarch *gdbarch, uint32_t insn, int unpriveleged,
     /* {ldr,str}<width><cond> rt, [rt2,] [rn, #imm]
 	->
        {ldr,str}<width><cond> r0, [r1,] [r2, #imm].  */
-    dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
+    RECORD_MOD_32BIT_INSN (0, ((insn & 0xfff00fff) | 0x20000));
   else
     /* {ldr,str}<width><cond> rt, [rt2,] [rn, +/-rm]
 	->
        {ldr,str}<width><cond> r0, [r1,] [r2, +/-r3].  */
-    dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
+    RECORD_MOD_32BIT_INSN (0, ((insn & 0xfff00ff0) | 0x20003));
 
   dsc->cleanup = load[opcode] ? &cleanup_load : &cleanup_store;
 
@@ -4982,32 +4996,32 @@ copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
 	/* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
 	   ->
 	   {ldr,str}[b]<cond> r0, [r2, #imm].  */
-	dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
+	RECORD_MOD_32BIT_INSN (0, ((insn & 0xfff00fff) | 0x20000));
       else
 	/* {ldr,str}[b]<cond> rt, [rn, rm], etc.
 	   ->
 	   {ldr,str}[b]<cond> r0, [r2, r3].  */
-	dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
+	RECORD_MOD_32BIT_INSN (0, ((insn & 0xfff00ff0) | 0x20003));
     }
   else
     {
       /* We need to use r4 as scratch.  Make sure it's restored afterwards.  */
       dsc->u.ldst.restore_r4 = 1;
 
-      dsc->modinsn[0] = 0xe58ff014;  /* str pc, [pc, #20].  */
-      dsc->modinsn[1] = 0xe59f4010;  /* ldr r4, [pc, #16].  */
-      dsc->modinsn[2] = 0xe044400f;  /* sub r4, r4, pc.  */
-      dsc->modinsn[3] = 0xe2844008;  /* add r4, r4, #8.  */
-      dsc->modinsn[4] = 0xe0800004;  /* add r0, r0, r4.  */
+      RECORD_MOD_32BIT_INSN (0, 0xe58ff014); /* str pc, [pc, #20].  */
+      RECORD_MOD_32BIT_INSN (1, 0xe59f4010); /* ldr r4, [pc, #16].  */
+      RECORD_MOD_32BIT_INSN (2, 0xe044400f); /* sub r4, r4, pc.  */
+      RECORD_MOD_32BIT_INSN (3, 0xe2844008); /* add r4, r4, #8.  */
+      RECORD_MOD_32BIT_INSN (4, 0xe0800004);  /* add r0, r0, r4.  */
 
       /* As above.  */
       if (immed)
-	dsc->modinsn[5] = (insn & 0xfff00fff) | 0x20000;
+	RECORD_MOD_32BIT_INSN (5, ((insn & 0xfff00fff) | 0x20000));
       else
-	dsc->modinsn[5] = (insn & 0xfff00ff0) | 0x20003;
+	RECORD_MOD_32BIT_INSN (5, ((insn & 0xfff00ff0) | 0x20003));
 
-      dsc->modinsn[6] = 0x0;  /* breakpoint location.  */
-      dsc->modinsn[7] = 0x0;  /* scratch space.  */
+      RECORD_MOD_32BIT_INSN (6, 0x00); /* breakpoint location.  */
+      RECORD_MOD_32BIT_INSN (7, 0x00); /* scratch space.  */
 
       dsc->numinsns = 6;
     }
@@ -5278,7 +5292,7 @@ copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
 	     instruction (which might not behave perfectly in all cases, but
 	     these instructions should be rare enough for that not to matter
 	     too much).  */
-	  dsc->modinsn[0] = ARM_NOP;
+	  RECORD_MOD_32BIT_INSN (0, ARM_NOP);
 
 	  dsc->cleanup = &cleanup_block_load_all;
 	}
@@ -5322,7 +5336,8 @@ copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
 				"list %.4x\n"), rn, writeback ? "!" : "",
 				(int) insn & 0xffff, new_regmask);
 
-	  dsc->modinsn[0] = (insn & ~0xffff) | (new_regmask & 0xffff);
+	  RECORD_MOD_32BIT_INSN (0,
+				 ((insn & ~0xffff) | (new_regmask & 0xffff)));
 
 	  dsc->cleanup = &cleanup_block_load_pc;
 	}
@@ -5335,7 +5350,7 @@ copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
 	 Doing things this way has the advantage that we can auto-detect
 	 the offset of the PC write (which is architecture-dependent) in
 	 the cleanup routine.  */
-      dsc->modinsn[0] = insn;
+      RECORD_MOD_32BIT_INSN (0, insn);
 
       dsc->cleanup = &cleanup_block_store_pc;
     }
@@ -5378,7 +5393,7 @@ copy_svc (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
      Insn: unmodified svc.
      Cleanup: pc <- insn_addr + 4.  */
 
-  dsc->modinsn[0] = insn;
+  RECORD_MOD_32BIT_INSN (0, insn);
 
   dsc->cleanup = &cleanup_svc;
   /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
@@ -5398,7 +5413,7 @@ copy_undef (struct gdbarch *gdbarch, uint32_t insn,
     fprintf_unfiltered (gdb_stdlog, "displaced: copying undefined insn %.8lx\n",
 			(unsigned long) insn);
 
-  dsc->modinsn[0] = insn;
+  RECORD_MOD_32BIT_INSN (0, insn);
 
   return 0;
 }
@@ -5413,7 +5428,7 @@ copy_unpred (struct gdbarch *gdbarch, uint32_t insn,
     fprintf_unfiltered (gdb_stdlog, "displaced: copying unpredictable insn "
 			"%.8lx\n", (unsigned long) insn);
 
-  dsc->modinsn[0] = insn;
+  RECORD_MOD_32BIT_INSN (0, insn);
 
   return 0;
 }
@@ -5837,6 +5852,7 @@ decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
   else
     return copy_undef (gdbarch, insn, dsc);  /* Possibly unreachable.  */
 }
+
 static void
 thumb_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
 			      CORE_ADDR to, struct regcache *regs,
@@ -5901,6 +5917,10 @@ arm_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
 		    _("arm_process_displaced_insn: Instruction decode error"));
 }
 
+static const unsigned char * arm_breakpoint_from_pc (struct gdbarch *gdbarch,
+						     CORE_ADDR *pcptr,
+						     int *lenptr);
+
 /* Actually set up the scratch space for a displaced instruction.  */
 
 void
@@ -5908,23 +5928,44 @@ arm_displaced_init_closure (struct gdbarch *gdbarch, CORE_ADDR from,
 			    CORE_ADDR to, struct displaced_step_closure *dsc)
 {
   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
-  unsigned int i;
+  unsigned int i, len, offset;
   enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
 
+  offset = 0;
   /* Poke modified instruction(s).  */
   for (i = 0; i < dsc->numinsns; i++)
     {
+      unsigned long insn;
+      if (dsc->modinsns[i].size == 4)
+	insn = dsc->modinsns[i].insn.a;
+      else if (dsc->modinsns[i].size == 2)
+	insn = dsc->modinsns[i].insn.t;
+      else
+	gdb_assert (0);
+
       if (debug_displaced)
-	fprintf_unfiltered (gdb_stdlog, "displaced: writing insn %.8lx at "
-			    "%.8lx\n", (unsigned long) dsc->modinsn[i],
-			    (unsigned long) to + i * 4);
-      write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
-				     dsc->modinsn[i]);
+	{
+	  fprintf_unfiltered (gdb_stdlog, "displaced: writing insn ");
+	  if (dsc->modinsns[i].size == 4)
+	    fprintf_unfiltered (gdb_stdlog, "%.8lx",
+				dsc->modinsns[i].insn.a);
+	  else if (dsc->modinsns[i].size == 2)
+	    fprintf_unfiltered (gdb_stdlog, "%.4x",
+				dsc->modinsns[i].insn.t);
+
+	  fprintf_unfiltered (gdb_stdlog, " at %.8lx\n",
+			      (unsigned long) to + offset);
+	}
+      write_memory_unsigned_integer (to + offset, dsc->modinsns[i].size,
+				     byte_order_for_code,
+				     insn);
+      offset += dsc->modinsns[i].size;
     }
 
   /* Put breakpoint afterwards.  */
-  write_memory (to + dsc->numinsns * 4, tdep->arm_breakpoint,
-		tdep->arm_breakpoint_size);
+  write_memory (to + arm_displaced_step_breakpoint_offset (dsc),
+		arm_breakpoint_from_pc (gdbarch, &from, &len),
+		len);
 
   if (debug_displaced)
     fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
@@ -5960,7 +6001,11 @@ arm_displaced_step_fixup (struct gdbarch *gdbarch,
     dsc->cleanup (gdbarch, regs, dsc);
 
   if (!dsc->wrote_to_pc)
-    regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, dsc->insn_addr + 4);
+    {
+      struct frame_info *fi = get_current_frame ();
+      regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
+				      arm_get_next_pc_raw(fi, dsc->insn_addr, 0));
+    }
 }
 
 #include "bfd-in2.h"
diff --git a/gdb/arm-tdep.h b/gdb/arm-tdep.h
index cfb85ff..dd8aba8 100644
--- a/gdb/arm-tdep.h
+++ b/gdb/arm-tdep.h
@@ -204,6 +204,22 @@ struct gdbarch_tdep
 
 /* Structures used for displaced stepping.  */
 
+#define RECORD_MOD_INSN(INDEX, MODE, INSN) \
+  dsc->modinsns[INDEX].insn.MODE = INSN;\
+  /* dsc->modinsn[INDEX] = INSN */
+
+#define RECORD_MOD_32BIT_INSN(INDEX, INSN) do \
+{\
+  RECORD_MOD_INSN(INDEX, a, INSN);\
+  dsc->modinsns[INDEX].size = 4;\
+ } while (0)
+
+#define RECORD_MOD_16BIT_INSN(INDEX, INSN) do \
+{\
+  RECORD_MOD_INSN(INDEX, t, INSN);\
+  dsc->modinsns[INDEX].size = 2;\
+ } while (0)
+
 /* The maximum number of temporaries available for displaced instructions.  */
 #define DISPLACED_TEMPS			16
 /* The maximum number of modified instructions generated for one single-stepped
@@ -262,7 +278,17 @@ struct displaced_step_closure
 			  struct displaced_step_closure *dsc);
     } svc;
   } u;
-  unsigned long modinsn[DISPLACED_MODIFIED_INSNS];
+
+  struct insn
+  {
+    union
+    {
+      unsigned long a;
+      unsigned short t;
+    }insn;
+    unsigned short size;
+  }modinsns[DISPLACED_MODIFIED_INSNS];
+
   int numinsns;
   CORE_ADDR insn_addr;
   CORE_ADDR scratch_base;

^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [patch 3/3] Displaced stepping for 16-bit Thumb instructions
  2010-12-25 14:17 [patch 0/3] Displaced stepping for 16-bit Thumb instructions Yao Qi
  2010-12-25 14:22 ` [patch 1/3] " Yao Qi
  2010-12-25 17:09 ` [patch 2/3] " Yao Qi
@ 2010-12-25 17:54 ` Yao Qi
  2010-12-27 15:15   ` Yao Qi
  2011-02-17 20:55   ` Ulrich Weigand
  2010-12-29  5:48 ` [patch 0/3] Displaced stepping " Yao Qi
                   ` (5 subsequent siblings)
  8 siblings, 2 replies; 66+ messages in thread
From: Yao Qi @ 2010-12-25 17:54 UTC (permalink / raw)
  To: gdb-patches

[-- Attachment #1: Type: text/plain, Size: 296 bytes --]

Patch 3 is about supporting 16-bit Thumb displaced stepping.  In this
patch, we decode 16-bit instructions and process them.  We also leave a
slot for 32-bit Thumb instructions, and put an error there.

Test cases are updated accordingly for some PC-related instructions.

-- 
Yao (齐尧)

[-- Attachment #2: arm_disp_step_thumb_16bit.patch --]
[-- Type: text/x-patch, Size: 28664 bytes --]

gdb/
2010-12-25  Yao Qi  <yao@codesourcery.com>

	Displaced stepping support for 16-bit Thumb insns.

	* gdb/arm-tdep.c (THUMB_NOP): New macro.
	(displaced_read_reg): Support Thumb mode.
	(thumb_copy_unmodified_16bit): New.
	(cleanup_branch): Move some code to ...
	(cleanup_branch_1): ... here.  New.  Support Thumb mode.
	(cleanup_cbz_cbnz): New.
	(copy_b_bl_blx): Move some code to ...
	(arm_copy_b_bl_blx): ... here.  New.
	(thumb_copy_b): New.
	(copy_bx_blx_reg): Move some code to ...
	(arm_copy_bx_blx_reg): ... here.  New.
	(thumb_copy_bx_blx_reg): New.
	(decode_unconditional): Update caller.
	(decode_miscellaneous): Likewise.
	(decode_b_bl_ldmstm): Likewise.
	(copy_ldr_str_ldrb_strb): Replace magic number with macro.
	(thumb_decode_dp): New.
	(thumb_decode_pc_relative): New.
	(thumb_copy_16bit_ldr_literal): New.
	(thumb_copy_cbnz_cbz): New.
	(thumb_process_displaced_16bit_insn): New.
	(thumb_process_displaced_32bit_insn): New.

gdb/testsuite/
2010-12-25  Yao Qi  <yao@codesourcery.com>

	* gdb.arch/arm-disp-step.S: Test cbnz/cbz, adr and ldr.
	* gdb.arch/arm-disp-step.exp: Likewise.

diff --git a/gdb/arm-tdep.c b/gdb/arm-tdep.c
index 93c4e50..4d766c9 100644
--- a/gdb/arm-tdep.c
+++ b/gdb/arm-tdep.c
@@ -4326,6 +4326,9 @@ arm_adjust_breakpoint_address (struct gdbarch *gdbarch, CORE_ADDR bpaddr)
 
 /* NOP instruction (mov r0, r0).  */
 #define ARM_NOP				0xe1a00000
+#define THUMB_NOP				0x4600
+
+static int displaced_in_arm_mode (struct regcache *regs);
 
 /* Helper for register reads for displaced stepping.  In particular, this
    returns the PC as it would be seen by the instruction at its original
@@ -4338,10 +4341,15 @@ displaced_read_reg (struct regcache *regs, CORE_ADDR from, int regno)
 
   if (regno == 15)
     {
+      if (displaced_in_arm_mode (regs))
+	from += 8;
+      else
+	from += 6;
+
       if (debug_displaced)
 	fprintf_unfiltered (gdb_stdlog, "displaced: read pc value %.8lx\n",
-			    (unsigned long) from + 8);
-      return (ULONGEST) from + 8;  /* Pipeline offset.  */
+			    (unsigned long) from);
+      return (ULONGEST) from;  /* Pipeline offset.  */
     }
   else
     {
@@ -4530,6 +4538,21 @@ copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
   return 0;
 }
 
+static int
+thumb_copy_unmodified_16bit (struct gdbarch *gdbarch, unsigned int insn,
+			     const char *iname,
+			     struct displaced_step_closure *dsc)
+{
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x, "
+			"opcode/class '%s' unmodified\n", insn,
+			iname);
+
+  RECORD_MOD_16BIT_INSN (0, insn);
+
+  return 0;
+}
+
 /* Preload instructions with immediate offset.  */
 
 static void
@@ -4668,16 +4691,11 @@ copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
   return 0;
 }
 
-/* Clean up branch instructions (actually perform the branch, by setting
-   PC).  */
-
 static void
-cleanup_branch (struct gdbarch *gdbarch, struct regcache *regs,
-		struct displaced_step_closure *dsc)
+cleanup_branch_1 (struct gdbarch *gdbarch, struct regcache *regs,
+		  struct displaced_step_closure *dsc, unsigned int branch_taken)
 {
   ULONGEST from = dsc->insn_addr;
-  uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
-  int branch_taken = condition_true (dsc->u.branch.cond, status);
   enum pc_write_style write_pc = dsc->u.branch.exchange
 				 ? BX_WRITE_PC : BRANCH_WRITE_PC;
 
@@ -4687,29 +4705,45 @@ cleanup_branch (struct gdbarch *gdbarch, struct regcache *regs,
   if (dsc->u.branch.link)
     {
       ULONGEST pc = displaced_read_reg (regs, from, ARM_PC_REGNUM);
-      displaced_write_reg (regs, dsc, ARM_LR_REGNUM, pc - 4, CANNOT_WRITE_PC);
+
+      if (displaced_in_arm_mode (regs))
+	displaced_write_reg (regs, dsc, ARM_LR_REGNUM, pc - 4, CANNOT_WRITE_PC);
+      else
+	displaced_write_reg (regs, dsc, ARM_LR_REGNUM, (pc - 2) | 1u,
+			     CANNOT_WRITE_PC);
     }
 
   displaced_write_reg (regs, dsc, ARM_PC_REGNUM, dsc->u.branch.dest, write_pc);
 }
 
+/* Clean up branch instructions (actually perform the branch, by setting
+   PC).  */
+static void
+cleanup_branch(struct gdbarch *gdbarch, struct regcache *regs,
+	       struct displaced_step_closure *dsc)
+{
+  ULONGEST from = dsc->insn_addr;
+  uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
+  int branch_taken = condition_true (dsc->u.branch.cond, status);
+
+  cleanup_branch_1 (gdbarch, regs, dsc, branch_taken);
+}
+
+static void
+cleanup_cbz_cbnz(struct gdbarch *gdbarch, struct regcache *regs,
+	       struct displaced_step_closure *dsc)
+{
+  cleanup_branch_1 (gdbarch, regs, dsc, dsc->u.branch.cond);
+}
+
 /* Copy B/BL/BLX instructions with immediate destinations.  */
 
 static int
-copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
-	       struct regcache *regs, struct displaced_step_closure *dsc)
+copy_b_bl_blx (struct gdbarch *gdbarch, unsigned int cond, int exchange,
+	       int link, long offset, struct regcache *regs,
+	       struct displaced_step_closure *dsc)
 {
-  unsigned int cond = bits (insn, 28, 31);
-  int exchange = (cond == 0xf);
-  int link = exchange || bit (insn, 24);
   CORE_ADDR from = dsc->insn_addr;
-  long offset;
-
-  if (debug_displaced)
-    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s immediate insn "
-			"%.8lx\n", (exchange) ? "blx" : (link) ? "bl" : "b",
-			(unsigned long) insn);
-
   /* Implement "BL<cond> <label>" as:
 
      Preparation: cond <- instruction condition
@@ -4718,6 +4752,40 @@ copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
 
      B<cond> similar, but don't set r14 in cleanup.  */
 
+
+  dsc->u.branch.cond = cond;
+  dsc->u.branch.link = link;
+  dsc->u.branch.exchange = exchange;
+
+  if (arm_pc_is_thumb (gdbarch, from))
+    {
+      /* Plus the size of THUMB_NOP and B/BL/BLX.  */
+      dsc->u.branch.dest = from + 2 + 4 + offset;
+      RECORD_MOD_16BIT_INSN (0, THUMB_NOP);
+    }
+  else
+    {
+      dsc->u.branch.dest = from + 8 + offset;
+      RECORD_MOD_32BIT_INSN (0, ARM_NOP);
+    }
+
+  dsc->cleanup = &cleanup_branch;
+
+  return 0;
+}
+static int
+arm_copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
+		   struct regcache *regs, struct displaced_step_closure *dsc)
+{
+  unsigned int cond = bits (insn, 28, 31);
+  int exchange = (cond == 0xf);
+  int link = exchange || bit (insn, 24);
+  long offset;
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s immediate insn "
+			"%.8lx\n", (exchange) ? "blx" : (link) ? "bl" : "b",
+			(unsigned long) insn);
   if (exchange)
     /* For BLX, set bit 0 of the destination.  The cleanup_branch function will
        then arrange the switch into Thumb mode.  */
@@ -4728,12 +4796,40 @@ copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
   if (bit (offset, 25))
     offset = offset | ~0x3ffffff;
 
+  return copy_b_bl_blx (gdbarch, cond, exchange, link, offset, regs, dsc);
+}
+
+static int
+thumb_copy_b (struct gdbarch *gdbarch, unsigned short insn,
+	      struct displaced_step_closure *dsc)
+{
+  unsigned int cond = 0;
+  int offset = 0;
+  unsigned short bit_12_15 = bits (insn, 12, 15);
+  CORE_ADDR from = dsc->insn_addr;
+
+  if (bit_12_15 == 0xd)
+    {
+      offset = sbits (insn, 0, 7);
+      cond = bits (insn, 8, 11);
+    }
+  else if (bit_12_15 == 0xe)
+    {
+       offset = sbits (insn, 0, 10);
+       cond = INST_AL;
+    }
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog,
+			"displaced: copying b immediate insn %.4x "
+			"with offset %d\n", insn, offset);
+
   dsc->u.branch.cond = cond;
-  dsc->u.branch.link = link;
-  dsc->u.branch.exchange = exchange;
-  dsc->u.branch.dest = from + 8 + offset;
+  dsc->u.branch.link = 0;
+  dsc->u.branch.exchange = 0;
+  dsc->u.branch.dest = from + 4 + offset;
 
-  RECORD_MOD_32BIT_INSN (0, ARM_NOP);
+  RECORD_MOD_16BIT_INSN (0, THUMB_NOP);
 
   dsc->cleanup = &cleanup_branch;
 
@@ -4743,19 +4839,12 @@ copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
 /* Copy BX/BLX with register-specified destinations.  */
 
 static int
-copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
-		 struct regcache *regs, struct displaced_step_closure *dsc)
+copy_bx_blx_reg (struct gdbarch *gdbarch, unsigned int cond, int link,
+		 unsigned int rm, struct regcache *regs,
+		 struct displaced_step_closure *dsc)
 {
-  unsigned int cond = bits (insn, 28, 31);
-  /* BX:  x12xxx1x
-     BLX: x12xxx3x.  */
-  int link = bit (insn, 5);
-  unsigned int rm = bits (insn, 0, 3);
   CORE_ADDR from = dsc->insn_addr;
-
-  if (debug_displaced)
-    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s register insn "
-			"%.8lx\n", (link) ? "blx" : "bx", (unsigned long) insn);
+  int is_thumb = arm_pc_is_thumb (gdbarch, from);
 
   /* Implement {BX,BLX}<cond> <reg>" as:
 
@@ -4767,16 +4856,56 @@ copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
 
   dsc->u.branch.dest = displaced_read_reg (regs, from, rm);
 
-  dsc->u.branch.cond = cond;
   dsc->u.branch.link = link;
   dsc->u.branch.exchange = 1;
 
-  RECORD_MOD_32BIT_INSN (0, ARM_NOP);
+  if (is_thumb)
+    {
+      /* Always true for thumb.  */
+      dsc->u.branch.cond = INST_AL;
+      RECORD_MOD_16BIT_INSN (0, THUMB_NOP);
+    }
+  else
+    {
+      dsc->u.branch.cond = cond;
+      RECORD_MOD_32BIT_INSN (0, ARM_NOP);
+    }
 
   dsc->cleanup = &cleanup_branch;
 
   return 0;
 }
+static int
+arm_copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
+		     struct regcache *regs, struct displaced_step_closure *dsc)
+{
+  unsigned int cond = bits (insn, 28, 31);
+  /* BX:  x12xxx1x
+     BLX: x12xxx3x.  */
+  int link = bit (insn, 5);
+  unsigned int rm = bits (insn, 0, 3);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s register insn "
+			"%.8lx\n", (link) ? "blx" : "bx", (unsigned long) insn);
+
+  return copy_bx_blx_reg (gdbarch, cond, link, rm, regs, dsc);
+}
+
+static int
+thumb_copy_bx_blx_reg (struct gdbarch *gdbarch, uint16_t insn,
+		       struct regcache *regs,
+		       struct displaced_step_closure *dsc)
+{
+  int link = bit (insn, 7);
+  unsigned int rm = bits (insn, 3, 6);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s register insn "
+			"%.4x\n", (link) ? "blx" : "bx", (unsigned short) insn);
+
+  return copy_bx_blx_reg (gdbarch, INST_AL, link, rm, regs, dsc);
+}
 
 /* Copy/cleanup arithmetic/logic instruction with immediate RHS. */
 
@@ -5171,7 +5300,7 @@ copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
      Otherwise we don't know what value to write for PC, since the offset is
      architecture-dependent (sometimes PC+8, sometimes PC+12).  */
 
-  if (load || rt != 15)
+  if (load || rt != ARM_PC_REGNUM)
     {
       dsc->u.ldst.restore_r4 = 0;
 
@@ -5694,7 +5823,7 @@ decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
       return copy_unmodified (gdbarch, insn, "rfe", dsc);
 
     case 0x4: case 0x5: case 0x6: case 0x7:
-      return copy_b_bl_blx (gdbarch, insn, regs, dsc);
+      return arm_copy_b_bl_blx (gdbarch, insn, regs, dsc);
 
     case 0x8:
       switch ((insn & 0xe00000) >> 21)
@@ -5777,7 +5906,7 @@ decode_miscellaneous (struct gdbarch *gdbarch, uint32_t insn,
 
     case 0x1:
       if (op == 0x1)  /* bx.  */
-	return copy_bx_blx_reg (gdbarch, insn, regs, dsc);
+	return arm_copy_bx_blx_reg (gdbarch, insn, regs, dsc);
       else if (op == 0x3)
 	return copy_unmodified (gdbarch, insn, "clz", dsc);
       else
@@ -5791,8 +5920,8 @@ decode_miscellaneous (struct gdbarch *gdbarch, uint32_t insn,
 	return copy_undef (gdbarch, insn, dsc);
 
     case 0x3:
-      if (op == 0x1)
-	return copy_bx_blx_reg (gdbarch, insn, regs, dsc);  /* blx register.  */
+      if (op == 0x1) /* blx register.  */
+	return arm_copy_bx_blx_reg (gdbarch, insn, regs, dsc);
       else
 	return copy_undef (gdbarch, insn, dsc);
 
@@ -5955,7 +6084,7 @@ decode_b_bl_ldmstm (struct gdbarch *gdbarch, int32_t insn,
 		    struct regcache *regs, struct displaced_step_closure *dsc)
 {
   if (bit (insn, 25))
-    return copy_b_bl_blx (gdbarch, insn, regs, dsc);
+    return arm_copy_b_bl_blx (gdbarch, insn, regs, dsc);
   else
     return copy_block_xfer (gdbarch, insn, regs, dsc);
 }
@@ -6036,12 +6165,231 @@ decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
     return copy_undef (gdbarch, insn, dsc);  /* Possibly unreachable.  */
 }
 
+static int
+thumb_decode_dp (struct gdbarch *gdbarch, unsigned short insn,
+		 struct displaced_step_closure *dsc)
+{
+  /* 16-bit data-processing insns are not related to PC.  */
+  return thumb_copy_unmodified_16bit (gdbarch, insn,"data-processing", dsc);
+}
+
+static int
+thumb_decode_pc_relative (struct gdbarch *gdbarch, unsigned short insn,
+			  struct regcache *regs,
+			  struct displaced_step_closure *dsc)
+{
+  unsigned int rd = bits (insn, 8, 10);
+  unsigned int imm8 = bits (insn, 0, 7);
+  CORE_ADDR from = dsc->insn_addr;
+  int val;
+
+  /* ADR Rd, #imm8
+
+     Rewrite as:
+
+     Preparation: Rd <- PC
+     Insn: ADD Rd, #imm8
+     Cleanup: Null.
+   */
+
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog,
+			"displaced: copying thumb adr r%d, #%d insn %.4x\n",
+			rd, imm8, insn);
+
+  /* Rd <- PC */
+  val = displaced_read_reg (regs, from, ARM_PC_REGNUM);
+  displaced_write_reg (regs, dsc, rd, val, CANNOT_WRITE_PC);
+
+  /* ADDS Rd, #imm8 */
+  RECORD_MOD_32BIT_INSN (0, 0x3000 | (rd << 8) | imm8);
+
+  return 0;
+}
+
+static int
+thumb_copy_16bit_ldr_literal (struct gdbarch *gdbarch, unsigned short insn1,
+			      struct regcache *regs,
+			      struct displaced_step_closure *dsc)
+{
+  unsigned int rt = bits (insn1, 8, 7);
+  unsigned int pc;
+  int imm8 = sbits (insn1, 0, 7);
+  CORE_ADDR from = dsc->insn_addr;
+
+  /* LDR Rd, #imm8
+
+     Rewrite as:
+
+     Preparation: tmp2 <- R2, tmp3 <- R3, R2 <- PC, R3 <- #imm8;
+                  if (Rd is not R0) tmp0 <- R0;
+     Insn: LDR R0, [R2, R3];
+     Cleanup: R2 <- tmp2, R3 <- tmp3,
+              if (Rd is not R0) Rd <- R0, R0 <- tmp0 */
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying thumb ldr literal "
+			"insn %.4x\n", insn1);
+
+  dsc->tmp[0] = displaced_read_reg (regs, from, 0);
+  dsc->tmp[2] = displaced_read_reg (regs, from, 2);
+  dsc->tmp[3] = displaced_read_reg (regs, from, 3);
+  pc = displaced_read_reg (regs, from, ARM_PC_REGNUM);
+
+  displaced_write_reg (regs, dsc, 2, pc, CANNOT_WRITE_PC);
+  displaced_write_reg (regs, dsc, 3, imm8, CANNOT_WRITE_PC);
+
+  dsc->rd = rt;
+  dsc->u.ldst.xfersize = 4;
+  dsc->u.ldst.rn = 0;
+  dsc->u.ldst.immed = 0;
+  dsc->u.ldst.writeback = 0;
+  dsc->u.ldst.restore_r4 = 0;
+
+  RECORD_MOD_16BIT_INSN (0, 0x58d0); /* ldr r0, [r2, r3]*/
+
+  dsc->cleanup = &cleanup_load;
+
+  return 0;
+}
+
+static int
+thumb_copy_cbnz_cbz (struct gdbarch *gdbarch, unsigned short insn1,
+		     struct regcache *regs,
+		     struct displaced_step_closure *dsc)
+{
+  int non_zero = bit (insn1, 11);
+  unsigned int imm5 = (bit (insn1, 9) << 6) | (bits (insn1, 3, 7) << 1);
+  CORE_ADDR from = dsc->insn_addr;
+  int rn = bits (insn1, 0, 2);
+  int rn_val = displaced_read_reg (regs, from, rn);
+
+  dsc->u.branch.cond = (rn_val && non_zero) || (!rn_val && !non_zero);
+  dsc->u.branch.link = 0;
+  dsc->u.branch.exchange = 0;
+
+  dsc->u.branch.dest = from + 4 + imm5;
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s [r%d = 0x%x]"
+			" insn %.4x to %.8lx\n", non_zero ? "cbnz" : "cbz",
+			rn, rn_val, insn1, dsc->u.branch.dest);
+
+  RECORD_MOD_16BIT_INSN (0, THUMB_NOP);
+
+  dsc->cleanup = &cleanup_cbz_cbnz;
+  return 0;
+}
+
+static void
+thumb_process_displaced_16bit_insn (struct gdbarch *gdbarch,
+				    unsigned short insn1, CORE_ADDR to,
+				    struct regcache *regs,
+				    struct displaced_step_closure *dsc)
+{
+  unsigned short op_bit_12_15 = bits (insn1, 12, 15);
+  unsigned short op_bit_10_11 = bits (insn1, 10, 11);
+  int err = 0;
+
+  /* 16-bit thumb instructions.  */
+  switch (op_bit_12_15)
+    {
+      /* Shift (immediate), add, subtract, move and compare.  */
+    case 0: case 1: case 2: case 3:
+      err = thumb_copy_unmodified_16bit (gdbarch, insn1,"", dsc);
+      break;
+    case 4:
+      switch (op_bit_10_11)
+	{
+	case 0: /* Data-processing */
+	  err = thumb_decode_dp (gdbarch, insn1, dsc);
+	  break;
+	case 1: /* Special data instructions and branch and exchange */
+	  {
+	    unsigned short op = bits (insn1, 7, 9);
+	    if (op == 6 || op == 7) /* BX or BLX */
+	      err = thumb_copy_bx_blx_reg (gdbarch, insn1, regs, dsc);
+	    else
+	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "special data",
+						 dsc);
+	  }
+	  break;
+	default: /* LDR (literal) */
+	  err = thumb_copy_16bit_ldr_literal (gdbarch, insn1, regs, dsc);
+	}
+      break;
+    case 5: case 6: case 7: case 8: case 9: /* Load/Store single data item */
+      err = thumb_copy_unmodified_16bit (gdbarch, insn1,"ldr/str", dsc);
+      break;
+    case 10:
+      if (op_bit_10_11 < 2) /* Generate PC-relative address */
+	err = thumb_decode_pc_relative (gdbarch, insn1, regs, dsc);
+      else /* Generate SP-relative address */
+	err = thumb_copy_unmodified_16bit (gdbarch, insn1,"sp-relative", dsc);
+      break;
+    case 11: /* Misc 16-bit instructions */
+      {
+	switch (bits (insn1, 8, 11))
+	  {
+	  case 1: case 3:  case 9: case 11: /* CBNZ, CBZ */
+	    err = thumb_copy_cbnz_cbz (gdbarch, insn1, regs, dsc);
+	    break;
+	  default:
+	    err = thumb_copy_unmodified_16bit (gdbarch, insn1,"", dsc);
+	  }
+      }
+      break;
+    case 12:
+      if (op_bit_10_11 < 2) /* Store multiple registers */
+	err = thumb_copy_unmodified_16bit (gdbarch, insn1,"stm", dsc);
+      else /* Load multiple registers */
+	err = thumb_copy_unmodified_16bit (gdbarch, insn1,"ldm", dsc);
+      break;
+    case 13: /* Conditional branch and supervisor call */
+      if (bits (insn1, 9, 11) != 7) /* conditional branch */
+	err = thumb_copy_b (gdbarch, insn1, dsc);
+      else
+	err = thumb_copy_unmodified_16bit (gdbarch, insn1,"svc", dsc);
+      break;
+    case 14: /* Unconditional branch */
+      err = thumb_copy_b (gdbarch, insn1, dsc);
+      break;
+    default:
+      internal_error (__FILE__, __LINE__,
+		      _("thumb_process_displaced_insn: Instruction decode error"));
+    }
+
+  if (err)
+    internal_error (__FILE__, __LINE__,
+		    _("thumb_process_displaced_insn: Instruction decode error"));
+}
+
+static void
+thumb_process_displaced_32bit_insn (struct gdbarch *gdbarch, CORE_ADDR from,
+				    CORE_ADDR to, struct regcache *regs,
+				    struct displaced_step_closure *dsc)
+{
+  error (_("Displaced stepping is only supported in ARM mode and Thumb 16bit instructions"));
+}
+
 static void
 thumb_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
 			      CORE_ADDR to, struct regcache *regs,
 			      struct displaced_step_closure *dsc)
 {
-  error (_("Displaced stepping is only supported in ARM mode"));
+  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
+  unsigned short insn1
+    = read_memory_unsigned_integer (from, 2, byte_order_for_code);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: process thumb insn %.4x "
+			"at %.8lx\n", insn1, (unsigned long) from);
+
+  if ((bits (insn1, 13, 15) == 7) && (bits (insn1, 11, 12)))
+    thumb_process_displaced_32bit_insn(gdbarch, from, to, regs, dsc);
+  else
+    thumb_process_displaced_16bit_insn(gdbarch, insn1, to, regs, dsc);
 }
 
 void
diff --git a/gdb/testsuite/gdb.arch/arm-disp-step.S b/gdb/testsuite/gdb.arch/arm-disp-step.S
index d748718..2e5b7ba 100644
--- a/gdb/testsuite/gdb.arch/arm-disp-step.S
+++ b/gdb/testsuite/gdb.arch/arm-disp-step.S
@@ -48,6 +48,26 @@ test_ret_end:
 	bl test_ldm_stm_pc
 #endif
 
+	/* Test ldrX literal in ARM and Thumb-2 */
+#if !defined (__thumb__)
+	bl test_ldr_literal
+#endif
+
+	/* Test ldr literal in Thumb */
+#if defined(__thumb__)
+	bl test_ldr_literal_16
+#endif
+
+	/* Test cbnz/cbz in Thumb-2 */
+#if defined(__thumb2__)
+	bl test_cbz_cbnz
+#endif
+
+	/* Test adr in Thumb and Thumb-2 */
+#if defined(__thumb) || defined(__thumb2__)
+	bl test_adr
+#endif
+	
 	/* Return */
 	mov     sp, r7
 	sub     sp, sp, #4
@@ -118,3 +138,75 @@ test_ldm_stm_pc_ret:
 	.word	test_ldm_stm_pc_ret
 	.size test_ldm_stm_pc, .-test_ldm_stm_pc
 #endif
+	
+#if !defined (__thumb__)
+	.global test_ldr_literal
+	.type test_ldr_literal, %function
+test_ldr_literal:
+	ldrh	r0, [pc]
+	.global test_ldrsb_literal
+test_ldrsb_literal:
+	ldrsb	r0, [pc]
+	.global test_ldrsh_literal
+test_ldrsh_literal:
+	ldrsh	r0, [pc]
+	.global test_ldr_literal_end
+test_ldr_literal_end:
+	bx lr
+	.size test_ldr_literal, .-test_ldr_literal
+#endif
+
+#if defined(__thumb__)
+	.global test_ldr_literal_16
+	.code   16
+	.thumb_func
+test_ldr_literal_16:
+	ldr	r0, .L2
+	.global test_ldr_literal_16_end
+test_ldr_literal_16_end:
+	bx lr
+	.align	2
+.L2:
+	.word	test_ldr_literal_16
+	.size test_ldr_literal_16, .-test_ldr_literal_16
+#endif
+
+#if defined(__thumb2__)
+	.global test_cbz_cbnz
+	.code   16
+	.thumb_func
+test_cbz_cbnz:
+	movs 	r0, #0
+	.global test_zero_cbnz
+test_zero_cbnz:
+	cbnz	r0, .L3
+	.global test_zero_cbz
+test_zero_cbz:
+	cbz	r0, .L3
+.L3:
+	movs	r0, #1
+	.global test_non_zero_cbz
+test_non_zero_cbz:
+	cbz	r0, .L4
+	.global test_non_zero_cbnz
+test_non_zero_cbnz:
+	cbnz	r0, .L4
+	nop
+.L4:
+	.global test_cbz_cbnz_end
+test_cbz_cbnz_end:
+	bx lr
+	.size test_cbz_cbnz, .-test_cbz_cbnz
+#endif
+
+#if defined(__thumb) || defined(__thumb2__)
+	.global test_adr
+	.code   16
+	.thumb_func
+test_adr:
+	adr	r0, #1
+	.global test_adr_end
+test_adr_end:
+	bx lr
+	.size test_adr, .-test_adr
+#endif
\ No newline at end of file
diff --git a/gdb/testsuite/gdb.arch/arm-disp-step.exp b/gdb/testsuite/gdb.arch/arm-disp-step.exp
index 826f728..51b7951 100644
--- a/gdb/testsuite/gdb.arch/arm-disp-step.exp
+++ b/gdb/testsuite/gdb.arch/arm-disp-step.exp
@@ -37,6 +37,22 @@ if { [gdb_compile "${srcdir}/${subdir}/${srcfile}" "${binfile}" executable [list
     return -1
 }
 
+# Try to resume program with displaced stepping.  If displaced stepping is
+# not supported, turn it off, and continue.
+
+proc try_continue_with_displaced_step { msg loc } {
+    gdb_test_no_output "set displaced-stepping on"
+    gdb_test_multiple "continue" "continue to test_call_end" {
+	-re ".*$loc.*" {
+	    pass "continue to $msg"
+	}
+	-re "Displaced stepping is only supported in.*" {
+	    gdb_test_no_output "set displaced-stepping off"
+	    gdb_test "continue" ".*"
+	    kfail "gdb/NNNN" $msg
+	}
+    }
+}
 
 #########################################
 # Test ldm/stm related to PC.
@@ -50,7 +66,11 @@ proc test_ldm_stm_pc {} {
 	}
 	-re "Function \"test_ldm_stm_pc\" not defined\..*Make breakpoint pending on future shared library load.*y or .n.. $" {
 	    gdb_test "n" "" "Test case is compiled in Thumb mode"
-	    return
+	    return 0
+	}
+	-re "No symbol.*" {
+	    pass "break test_ldm_stm_pc"
+	    return 0
 	}
     }
 
@@ -68,10 +88,75 @@ proc test_ldm_stm_pc {} {
     gdb_continue_to_breakpoint "continue to test_ldm_stm_pc_ret" \
 	".*bx lr.*"
 }
+
+#########################################
+# Test ldrX literal
+proc test_ldr_literal {} {
+    global srcfile
+    # Try to set breakpoint on test_ldm_stm_pc.  If symbol 'test_ldm_stm_pc'
+    # can't be resolved, test case is compiled in Thumb mode, skip it.
+    gdb_test_multiple "break *test_ldr_literal" "break test_ldr_literal" {
+	-re "Breakpoint.*at.* file .*$srcfile, line.*" {
+	    pass "break test_ldr_literal"
+	}
+	-re "Function \"test_ldr_literal\" not defined\..*Make breakpoint pending on future shared library load.*y or .n.. $" {
+	    gdb_test "n" "" "Test case is compiled in Thumb mode"
+	    return 0
+	}
+	-re "No symbol.*" {
+	    return 0
+	}
+    }
+
+    gdb_test "break *test_ldrsb_literal" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_ldrsb_literal"
+    gdb_test "break *test_ldrsh_literal" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_ldrsh_literal"
+    gdb_test "break *test_ldr_literal_end" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_test_ldr_literal_end"
+
+    gdb_continue_to_breakpoint "continue to test_ldr_literal" \
+	".*ldrh.*r0\,.*\[pc\].*"
+    gdb_continue_to_breakpoint "continue to test_ldrsb_literal" \
+	".*ldrsb.*r0\,.*\[pc\].*"
+    gdb_continue_to_breakpoint "continue to test_ldrsh_literal" \
+	".*ldrsh.*r0\,.*\[pc\].*"
+    gdb_continue_to_breakpoint "continue to test_ldr_literal_ret" \
+	".*bx lr.*"
+
+    gdb_test_multiple "break test_ldr_literal_16" "break test_ldr_literal_16" {
+	-re "Breakpoint.*at.* file .*$srcfile, line.*" {
+	    pass "break test_ldr_literal"
+	}
+	-re "Function \"test_ldr_literal_16\" not defined\..*Make breakpoint pending on future shared library load.*y or .n.. $" {
+	    gdb_test "n" "" "skip"
+	    return 0
+	}
+    }
+    
+    gdb_test "break *test_ldr_literal_16_end" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_test_ldr_literal_16_end"
+
+    gdb_continue_to_breakpoint "continue to test_ldr_literal" \
+	".*ldr.*r0\,.*L2.*"
+    gdb_continue_to_breakpoint "continue to test_ldrsb_literal" \
+	".*bx lr.*"
+}
+
 ##########################################
 # Test call/ret.
 proc test_call_ret {} {
     global srcfile
+    global testfile
+
+    gdb_test "break *test_call" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_call"
+
     gdb_test "break *test_call_end" \
 	"Breakpoint.*at.* file .*$srcfile, line.*" \
 	"break test_call_end"
@@ -82,8 +167,10 @@ proc test_call_ret {} {
 	"Breakpoint.*at.* file .*$srcfile, line.*" \
 	"break test_ret_end"
 
-    gdb_continue_to_breakpoint "continue to test_call_end" \
-	".*@ Location test_call_end.*"
+    try_continue_with_displaced_step "test_call" "bl test_call_subr"
+    try_continue_with_displaced_step "test_call_end" \
+	"@ Location test_call_end"
+
     gdb_continue_to_breakpoint "continue to test_ret" \
 	".*bx lr.*"
     gdb_continue_to_breakpoint "continue to test_ret_end" \
@@ -122,7 +209,66 @@ proc test_ldr_from_pc {} {
 
     gdb_continue_to_breakpoint "continue to test_ldr_pc" \
 	".*ldr.*r1\,.*\[pc, #0\].*"
-    gdb_continue_to_breakpoint "continue to Lbranch" \
+    gdb_continue_to_breakpoint "continue to test_ldr_pc_ret" \
+	".*bx lr.*"
+}
+
+#########################################
+
+# Test cbz and cbnz
+proc test_cbz_cbnz {} {
+    global srcfile
+
+    gdb_test_multiple "break *test_zero_cbnz" "break test_zero_cbnz" {
+	-re "Breakpoint.*at.* file .*$srcfile, line.*" {
+	    pass "break test_ldr_literal"
+	}
+	-re "No symbol.*" {
+	    return 0
+	}
+    }
+
+    gdb_test "break *test_zero_cbz" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_zero_cbz"
+    gdb_test "break *test_non_zero_cbnz" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_non_zero_cbnz"
+    gdb_test "break *test_non_zero_cbz" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_non_zero_cbz"
+
+    gdb_continue_to_breakpoint "continue to test_zero_cbnz" \
+	".*cbnz.*r0\,.*\.L3.*"
+    gdb_continue_to_breakpoint "continue to test_zero_cbz" \
+	".*cbz.*r0\,.*\.L3.*"
+    gdb_continue_to_breakpoint "continue to test_non_zero_cbz" \
+	".*cbz.*r0\,.*\.L4.*"
+    gdb_continue_to_breakpoint "continue to test_non_zero_cbnz" \
+	".*cbnz.*r0\,.*\.L4.*"
+}
+
+# Test adr
+
+proc test_adr {} {
+    global srcfile
+
+    gdb_test_multiple "break *test_adr" "break test_adr" {
+	-re "Breakpoint.*at.* file .*$srcfile, line.*" {
+	    pass "break test_adr"
+	}
+	-re "No symbol.*" {
+	    return 0
+	}
+    }
+
+    gdb_test "break *test_adr_end" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_adr_end"
+
+    gdb_continue_to_breakpoint "continue to test_adr" \
+	".*adr.*r0\,.*#1.*"
+    gdb_continue_to_breakpoint "continue to test_adr_end" \
 	".*bx lr.*"
 }
 
@@ -143,20 +289,6 @@ if ![runto_main] then {
 gdb_test_no_output "set displaced-stepping on"
 gdb_test "show displaced-stepping" ".* displaced stepping .* is on.*"
 
-gdb_test "break *test_call" \
-	"Breakpoint.*at.* file .*$srcfile, line.*" \
-	"break test_call"
-
-gdb_test_multiple "continue" "continue to test_call" {
-	-re ".*bl test_call_subr.*" {
-	    pass "continue to test_call"
-	}
-	-re "Displaced stepping is only supported in" {
-	    kfail "gdb/NNNN" $testfile
-	    return
-	}
-    }
-
 test_call_ret
 
 test_branch
@@ -165,6 +297,11 @@ test_ldr_from_pc
 
 test_ldm_stm_pc
 
+test_ldr_literal
+
+test_cbz_cbnz
+
+test_adr
 ##########################################
 
 # Done, run program to exit.

^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [patch 3/3] Displaced stepping for 16-bit Thumb instructions
  2010-12-25 17:54 ` [patch 3/3] " Yao Qi
@ 2010-12-27 15:15   ` Yao Qi
  2011-02-17 20:55   ` Ulrich Weigand
  1 sibling, 0 replies; 66+ messages in thread
From: Yao Qi @ 2010-12-27 15:15 UTC (permalink / raw)
  To: gdb-patches

[-- Attachment #1: Type: text/plain, Size: 618 bytes --]

On 12/25/2010 10:21 PM, Yao Qi wrote:
> gdb/testsuite/
> 2010-12-25  Yao Qi  <yao@codesourcery.com>
> 
> 	* gdb.arch/arm-disp-step.S: Test cbnz/cbz, adr and ldr.
> 	* gdb.arch/arm-disp-step.exp: Likewise.

In my previous patch, displaced stepping was turned off in test_call_ret,
but I forgot to turn it on again.  This patch addresses that problem by
replacing the original gdb_continue_to_breakpoint with
try_continue_with_displaced_step, which turns displaced stepping on and
makes sure GDB can do displaced stepping on that instruction, so that
displaced stepping is not left turned off.

-- 
Yao (齐尧)

[-- Attachment #2: arm_disp_step_thumb_16bit_test_1227.patch --]
[-- Type: text/x-patch, Size: 9180 bytes --]

diff --git a/gdb/testsuite/gdb.arch/arm-disp-step.S b/gdb/testsuite/gdb.arch/arm-disp-step.S
index d748718..2e5b7ba 100644
--- a/gdb/testsuite/gdb.arch/arm-disp-step.S
+++ b/gdb/testsuite/gdb.arch/arm-disp-step.S
@@ -48,6 +48,26 @@ test_ret_end:
 	bl test_ldm_stm_pc
 #endif
 
+	/* Test ldrX literal in ARM and Thumb-2 */
+#if !defined (__thumb__)
+	bl test_ldr_literal
+#endif
+
+	/* Test ldr literal in Thumb */
+#if defined(__thumb__)
+	bl test_ldr_literal_16
+#endif
+
+	/* Test cbnz/cbz in Thumb-2 */
+#if defined(__thumb2__)
+	bl test_cbz_cbnz
+#endif
+
+	/* Test adr in Thumb and Thumb-2 */
+#if defined(__thumb) || defined(__thumb2__)
+	bl test_adr
+#endif
+	
 	/* Return */
 	mov     sp, r7
 	sub     sp, sp, #4
@@ -118,3 +138,75 @@ test_ldm_stm_pc_ret:
 	.word	test_ldm_stm_pc_ret
 	.size test_ldm_stm_pc, .-test_ldm_stm_pc
 #endif
+	
+#if !defined (__thumb__)
+	.global test_ldr_literal
+	.type test_ldr_literal, %function
+test_ldr_literal:
+	ldrh	r0, [pc]
+	.global test_ldrsb_literal
+test_ldrsb_literal:
+	ldrsb	r0, [pc]
+	.global test_ldrsh_literal
+test_ldrsh_literal:
+	ldrsh	r0, [pc]
+	.global test_ldr_literal_end
+test_ldr_literal_end:
+	bx lr
+	.size test_ldr_literal, .-test_ldr_literal
+#endif
+
+#if defined(__thumb__)
+	.global test_ldr_literal_16
+	.code   16
+	.thumb_func
+test_ldr_literal_16:
+	ldr	r0, .L2
+	.global test_ldr_literal_16_end
+test_ldr_literal_16_end:
+	bx lr
+	.align	2
+.L2:
+	.word	test_ldr_literal_16
+	.size test_ldr_literal_16, .-test_ldr_literal_16
+#endif
+
+#if defined(__thumb2__)
+	.global test_cbz_cbnz
+	.code   16
+	.thumb_func
+test_cbz_cbnz:
+	movs 	r0, #0
+	.global test_zero_cbnz
+test_zero_cbnz:
+	cbnz	r0, .L3
+	.global test_zero_cbz
+test_zero_cbz:
+	cbz	r0, .L3
+.L3:
+	movs	r0, #1
+	.global test_non_zero_cbz
+test_non_zero_cbz:
+	cbz	r0, .L4
+	.global test_non_zero_cbnz
+test_non_zero_cbnz:
+	cbnz	r0, .L4
+	nop
+.L4:
+	.global test_cbz_cbnz_end
+test_cbz_cbnz_end:
+	bx lr
+	.size test_cbz_cbnz, .-test_cbz_cbnz
+#endif
+
+#if defined(__thumb) || defined(__thumb2__)
+	.global test_adr
+	.code   16
+	.thumb_func
+test_adr:
+	adr	r0, #1
+	.global test_adr_end
+test_adr_end:
+	bx lr
+	.size test_adr, .-test_adr
+#endif
\ No newline at end of file
diff --git a/gdb/testsuite/gdb.arch/arm-disp-step.exp b/gdb/testsuite/gdb.arch/arm-disp-step.exp
index 826f728..a8d2adc 100644
--- a/gdb/testsuite/gdb.arch/arm-disp-step.exp
+++ b/gdb/testsuite/gdb.arch/arm-disp-step.exp
@@ -37,6 +37,22 @@ if { [gdb_compile "${srcdir}/${subdir}/${srcfile}" "${binfile}" executable [list
     return -1
 }
 
+# Try to resume program with displaced stepping.  If displaced stepping is
+# not supported, turn it off, and continue.
+
+proc try_continue_with_displaced_step { msg loc } {
+    gdb_test_no_output "set displaced-stepping on"
+    gdb_test_multiple "continue" "continue to $msg" {
+	-re ".*$loc.*" {
+	    pass "continue to $msg"
+	}
+	-re "Displaced stepping is only supported in.*" {
+	    gdb_test_no_output "set displaced-stepping off"
+	    gdb_test "continue" ".*"
+	    kfail "gdb/NNNN" $msg
+	}
+    }
+}
 
 #########################################
 # Test ldm/stm related to PC.
@@ -50,7 +66,11 @@ proc test_ldm_stm_pc {} {
 	}
 	-re "Function \"test_ldm_stm_pc\" not defined\..*Make breakpoint pending on future shared library load.*y or .n.. $" {
 	    gdb_test "n" "" "Test case is compiled in Thumb mode"
-	    return
+	    return 0
+	}
+	-re "No symbol.*" {
+	    pass "break test_ldm_stm_pc"
+	    return 0
 	}
     }
 
@@ -68,10 +88,75 @@ proc test_ldm_stm_pc {} {
     gdb_continue_to_breakpoint "continue to test_ldm_stm_pc_ret" \
 	".*bx lr.*"
 }
+
+#########################################
+# Test ldrX literal
+proc test_ldr_literal {} {
+    global srcfile
+    # Try to set breakpoint on test_ldm_stm_pc.  If symbol 'test_ldm_stm_pc'
+    # can't be resolved, test case is compiled in Thumb mode, skip it.
+    gdb_test_multiple "break *test_ldr_literal" "break test_ldr_literal" {
+	-re "Breakpoint.*at.* file .*$srcfile, line.*" {
+	    pass "break test_ldr_literal"
+	}
+	-re "Function \"test_ldr_literal\" not defined\..*Make breakpoint pending on future shared library load.*y or .n.. $" {
+	    gdb_test "n" "" "Test case is compiled in Thumb mode"
+	    return 0
+	}
+	-re "No symbol.*" {
+	    return 0
+	}
+    }
+
+    gdb_test "break *test_ldrsb_literal" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_ldrsb_literal"
+    gdb_test "break *test_ldrsh_literal" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_ldrsh_literal"
+    gdb_test "break *test_ldr_literal_end" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_test_ldr_literal_end"
+
+    gdb_continue_to_breakpoint "continue to test_ldr_literal" \
+	".*ldrh.*r0\,.*\[pc\].*"
+    gdb_continue_to_breakpoint "continue to test_ldrsb_literal" \
+	".*ldrsb.*r0\,.*\[pc\].*"
+    gdb_continue_to_breakpoint "continue to test_ldrsh_literal" \
+	".*ldrsh.*r0\,.*\[pc\].*"
+    gdb_continue_to_breakpoint "continue to test_ldr_literal_ret" \
+	".*bx lr.*"
+
+    gdb_test_multiple "break test_ldr_literal_16" "break test_ldr_literal_16" {
+	-re "Breakpoint.*at.* file .*$srcfile, line.*" {
+	    pass "break test_ldr_literal"
+	}
+	-re "Function \"test_ldr_literal_16\" not defined\..*Make breakpoint pending on future shared library load.*y or .n.. $" {
+	    gdb_test "n" "" "skip"
+	    return 0
+	}
+    }
+    
+    gdb_test "break *test_ldr_literal_16_end" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_test_ldr_literal_16_end"
+
+    gdb_continue_to_breakpoint "continue to test_ldr_literal" \
+	".*ldr.*r0\,.*L2.*"
+    gdb_continue_to_breakpoint "continue to test_ldrsb_literal" \
+	".*bx lr.*"
+}
+
 ##########################################
 # Test call/ret.
 proc test_call_ret {} {
     global srcfile
+    global testfile
+
+    gdb_test "break *test_call" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_call"
+
     gdb_test "break *test_call_end" \
 	"Breakpoint.*at.* file .*$srcfile, line.*" \
 	"break test_call_end"
@@ -82,10 +167,12 @@ proc test_call_ret {} {
 	"Breakpoint.*at.* file .*$srcfile, line.*" \
 	"break test_ret_end"
 
-    gdb_continue_to_breakpoint "continue to test_call_end" \
-	".*@ Location test_call_end.*"
-    gdb_continue_to_breakpoint "continue to test_ret" \
+    try_continue_with_displaced_step "test_call" "bl test_call_subr"
+    try_continue_with_displaced_step "test_call_end" \
+	"@ Location test_call_end"
+    try_continue_with_displaced_step "test_ret" \
 	".*bx lr.*"
+
     gdb_continue_to_breakpoint "continue to test_ret_end" \
 	".*@ Location test_ret_end.*"
 }
@@ -122,7 +209,66 @@ proc test_ldr_from_pc {} {
 
     gdb_continue_to_breakpoint "continue to test_ldr_pc" \
 	".*ldr.*r1\,.*\[pc, #0\].*"
-    gdb_continue_to_breakpoint "continue to Lbranch" \
+    gdb_continue_to_breakpoint "continue to test_ldr_pc_ret" \
+	".*bx lr.*"
+}
+
+#########################################
+
+# Test cbz and cbnz
+proc test_cbz_cbnz {} {
+    global srcfile
+
+    gdb_test_multiple "break *test_zero_cbnz" "break test_zero_cbnz" {
+	-re "Breakpoint.*at.* file .*$srcfile, line.*" {
+	    pass "break test_ldr_literal"
+	}
+	-re "No symbol.*" {
+	    return 0
+	}
+    }
+
+    gdb_test "break *test_zero_cbz" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_zero_cbz"
+    gdb_test "break *test_non_zero_cbnz" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_non_zero_cbnz"
+    gdb_test "break *test_non_zero_cbz" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_non_zero_cbz"
+
+    gdb_continue_to_breakpoint "continue to test_zero_cbnz" \
+	".*cbnz.*r0\,.*\.L3.*"
+    gdb_continue_to_breakpoint "continue to test_zero_cbz" \
+	".*cbz.*r0\,.*\.L3.*"
+    gdb_continue_to_breakpoint "continue to test_non_zero_cbz" \
+	".*cbz.*r0\,.*\.L4.*"
+    gdb_continue_to_breakpoint "continue to test_non_zero_cbnz" \
+	".*cbnz.*r0\,.*\.L4.*"
+}
+
+# Test adr
+
+proc test_adr {} {
+    global srcfile
+
+    gdb_test_multiple "break *test_adr" "break test_adr" {
+	-re "Breakpoint.*at.* file .*$srcfile, line.*" {
+	    pass "break test_adr"
+	}
+	-re "No symbol.*" {
+	    return 0
+	}
+    }
+
+    gdb_test "break *test_adr_end" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_adr_end"
+
+    try_continue_with_displaced_step "test_adr" \
+	"adr.*r0\,.*#1"
+    try_continue_with_displaced_step "test_adr_end" \
 	".*bx lr.*"
 }
 
@@ -143,20 +289,6 @@ if ![runto_main] then {
 gdb_test_no_output "set displaced-stepping on"
 gdb_test "show displaced-stepping" ".* displaced stepping .* is on.*"
 
-gdb_test "break *test_call" \
-	"Breakpoint.*at.* file .*$srcfile, line.*" \
-	"break test_call"
-
-gdb_test_multiple "continue" "continue to test_call" {
-	-re ".*bl test_call_subr.*" {
-	    pass "continue to test_call"
-	}
-	-re "Displaced stepping is only supported in" {
-	    kfail "gdb/NNNN" $testfile
-	    return
-	}
-    }
-
 test_call_ret
 
 test_branch
@@ -165,6 +297,11 @@ test_ldr_from_pc
 
 test_ldm_stm_pc
 
+test_ldr_literal
+
+test_cbz_cbnz
+
+test_adr
 ##########################################
 
 # Done, run program to exit.

^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [patch 0/3] Displaced stepping for 16-bit Thumb instructions
  2010-12-25 14:17 [patch 0/3] Displaced stepping for 16-bit Thumb instructions Yao Qi
                   ` (2 preceding siblings ...)
  2010-12-25 17:54 ` [patch 3/3] " Yao Qi
@ 2010-12-29  5:48 ` Yao Qi
  2011-01-13 12:38 ` Yao Qi
                   ` (4 subsequent siblings)
  8 siblings, 0 replies; 66+ messages in thread
From: Yao Qi @ 2010-12-29  5:48 UTC (permalink / raw)
  To: gdb-patches

On 12/25/2010 10:03 PM, Yao Qi wrote:
> 
> Regression tested these three patches along with another pending patch
> on armv7l-unknown-linux-gnueabi.

As Pedro suggested, I ran the whole test suite with displaced stepping
enabled.  (I forced displaced stepping on by hacking infrun.c to
set can_use_displaced_stepping to can_use_displaced_stepping_on.  Are there
any other methods to enable displaced stepping?  I think 'make check
RUNTESTFLAGS='"../gdb -ex "set displaced on""'' might work, but I didn't
try.)

In total, we got three sets of test results (run with the options
-mthumb/-march=armv5t on an ARM board whose system library is
compiled in ARM mode); they are

base         : GDB cvs trunk,
thumb_disp   : GDB cvs trunk with three patches applied,
thumb_disp_on: GDB cvs trunk with three patches applied, and displaced
stepping turned on

Comparing thumb_disp and thumb_disp_on, the test results are the same.
 Comparing base and thumb_disp, some failures in
gdb.base/structs.exp are fixed, which I did not expect.

There are three new failures, which came from `gdb_test_no_output "set
displaced-stepping off"'
FAIL: gdb.sum:gdb.arch/arm-disp-step.exp: set displaced-stepping off [#2]
FAIL: gdb.sum:gdb.arch/arm-disp-step.exp: set displaced-stepping off [#3]
FAIL: gdb.sum:gdb.arch/arm-disp-step.exp: set displaced-stepping off [#4]

> +proc try_continue_with_displaced_step { msg loc } {
> +    gdb_test_no_output "set displaced-stepping on"
> +    gdb_test_multiple "continue" "continue to $msg" {
> +	-re ".*$loc.*" {
> +	    pass "continue to $msg"
> +	}
> +	-re "Displaced stepping is only supported in.*" {
> +	    gdb_test_no_output "set displaced-stepping off"

Fix these failures by changing this line to `gdb_test "set
displaced-stepping off" ""'

Comments?

-- 
Yao (齐尧)

^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [patch 0/3] Displaced stepping for 16-bit Thumb instructions
  2010-12-25 14:17 [patch 0/3] Displaced stepping for 16-bit Thumb instructions Yao Qi
                   ` (3 preceding siblings ...)
  2010-12-29  5:48 ` [patch 0/3] Displaced stepping " Yao Qi
@ 2011-01-13 12:38 ` Yao Qi
  2011-02-10  6:48 ` Ping 2 " Yao Qi
                   ` (3 subsequent siblings)
  8 siblings, 0 replies; 66+ messages in thread
From: Yao Qi @ 2011-01-13 12:38 UTC (permalink / raw)
  To: gdb-patches

On 12/25/2010 08:03 AM, Yao Qi wrote:
> Displaced stepping doesn't work for Thumb instructions so far.  This set
> of patches are about support displaced stepping of 16-bit Thumb
> instructions.  There are much more 32-bit Thumb instructions than 16-bit
> Thumb instructions, so it takes more time to support 32-bit Thumb
> instructions.  I'd like to send these three patches first to review.
> Once these three are done, it is straight forward to support 32-bit
> Thumb instructions.
>
> Regression tested these three patches along with another pending patch
> on armv7l-unknown-linux-gnueabi.
> http://sourceware.org/ml/gdb-patches/2010-12/msg00427.html
>
> No regressions and some test failures are fixed.
> -FAIL: gdb.base/moribund-step.exp: running to main in runto
> -FAIL: gdb.mi/mi-nonstop-exit.exp: mi runto main (timeout)
> -FAIL: gdb.mi/mi-nonstop.exp: mi runto main (timeout)
> -FAIL: gdb.mi/mi-ns-stale-regcache.exp: mi runto main (timeout)
> -FAIL: gdb.mi/mi-nsintrall.exp: mi runto main (timeout)
> -FAIL: gdb.mi/mi-nsmoribund.exp: mi runto main (timeout)
> -FAIL: gdb.mi/mi-nsthrexec.exp: mi runto main (timeout)
Ping.

-- 
Yao Qi

^ permalink raw reply	[flat|nested] 66+ messages in thread

* Ping 2 [patch 0/3] Displaced stepping for 16-bit Thumb instructions
  2010-12-25 14:17 [patch 0/3] Displaced stepping for 16-bit Thumb instructions Yao Qi
                   ` (4 preceding siblings ...)
  2011-01-13 12:38 ` Yao Qi
@ 2011-02-10  6:48 ` Yao Qi
  2011-02-26 17:50 ` Displaced stepping 0002: refactor and create some copy helpers Yao Qi
                   ` (2 subsequent siblings)
  8 siblings, 0 replies; 66+ messages in thread
From: Yao Qi @ 2011-02-10  6:48 UTC (permalink / raw)
  To: gdb-patches

On 12/25/2010 10:03 PM, Yao Qi wrote:
> Displaced stepping doesn't work for Thumb instructions so far.  This set
> of patches are about support displaced stepping of 16-bit Thumb
> instructions.  There are much more 32-bit Thumb instructions than 16-bit
> Thumb instructions, so it takes more time to support 32-bit Thumb
> instructions.  I'd like to send these three patches first to review.
> Once these three are done, it is straight forward to support 32-bit
> Thumb instructions.
> 
> Regression tested these three patches along with another pending patch
> on armv7l-unknown-linux-gnueabi.
> http://sourceware.org/ml/gdb-patches/2010-12/msg00427.html
> 
> No regressions and some test failures are fixed.
> -FAIL: gdb.base/moribund-step.exp: running to main in runto
> -FAIL: gdb.mi/mi-nonstop-exit.exp: mi runto main (timeout)
> -FAIL: gdb.mi/mi-nonstop.exp: mi runto main (timeout)
> -FAIL: gdb.mi/mi-ns-stale-regcache.exp: mi runto main (timeout)
> -FAIL: gdb.mi/mi-nsintrall.exp: mi runto main (timeout)
> -FAIL: gdb.mi/mi-nsmoribund.exp: mi runto main (timeout)
> -FAIL: gdb.mi/mi-nsthrexec.exp: mi runto main (timeout)
> 

Ping.  Could anyone have a look at these patches?

http://sourceware.org/ml/gdb-patches/2010-12/msg00457.html

-- 
Yao (齐尧)

^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [patch 1/3] Displaced stepping for 16-bit Thumb instructions
  2010-12-25 14:22 ` [patch 1/3] " Yao Qi
@ 2011-02-17 19:09   ` Ulrich Weigand
  0 siblings, 0 replies; 66+ messages in thread
From: Ulrich Weigand @ 2011-02-17 19:09 UTC (permalink / raw)
  To: Yao Qi; +Cc: gdb-patches

Yao Qi wrote:

> 	* gdb/arm-tdep.c (arm_displaced_step_copy_insn): Move code to ...
> 	(arm_process_displaced_insn): .. here. Remove parameter INSN.
> 	(thumb_process_displaced_insn): New.
> 	* gdb/arm-linux-tdep.c (arm_linux_displaced_step_copy_insn): Update
> 	call to arm_process_displaced_insn.
> 	* gdb/arm-tdep.h : Update declaration of arm_process_displaced_insn.

This is OK.

Thanks,
Ulrich

-- 
  Dr. Ulrich Weigand
  GNU Toolchain for Linux on System z and Cell BE
  Ulrich.Weigand@de.ibm.com

^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [patch 2/3] Displaced stepping for 16-bit Thumb instructions
  2010-12-25 17:09 ` [patch 2/3] " Yao Qi
@ 2011-02-17 19:46   ` Ulrich Weigand
  2011-02-18  6:33     ` Yao Qi
  0 siblings, 1 reply; 66+ messages in thread
From: Ulrich Weigand @ 2011-02-17 19:46 UTC (permalink / raw)
  To: Yao Qi; +Cc: gdb-patches

Yao Qi wrote:

> Current implementation of displaced stepping in ARM assumes instruction
> size is fixed 32-bit.  Patch 2 is to rewrite some infrastructure code to
> be ready to handle non-32-bit instructions.  This patch doesn't change
> any GDB functionality either.

Thanks for working on this!

> 	* gdb/arm-tdep.h (struct displaced_step_closure): Add new field
> 	modinsns.
> 	Remove field modinsn.
> 	(RECORD_MOD_32BIT_INSN): New macro.
> 	(RECORD_MOD_16BIT_INSN): New macro.
> 	(RECORD_MOD_INSN): New macro.

> -  unsigned long modinsn[DISPLACED_MODIFIED_INSNS];
> +
> +  struct insn
> +  {
> +    union
> +    {
> +      unsigned long a;
> +      unsigned short t;
> +    }insn;
> +    unsigned short size;
> +  }modinsns[DISPLACED_MODIFIED_INSNS];
> +

I don't think this is the right way to go.  You cannot have a mixture of
ARM and Thumb instructions in a single modinsn block, and if you have
Thumb instructions, they all need to be transfered in 16-bit chunks,
even the 32-bit Thumb2 instructions, to get the endian conversion right.

So I think you should rather keep a single modinsn array of unsigned long.
When filling it in, ARM instructions are handled as today, 16-bit Thumb
instructions are likewise just filled into one modinsn slot, and 32-bit
Thumb instructions are filled into two modinsn slots.

When copying the modinsn array out to the target, each slot is transfered
as 4 bytes in ARM mode, and as 2 bytes in Thumb mode.  To know in which
mode you are, it is probably best to have a single flag in the struct
displaced_step_closure that indicated whether it is ARM or Thumb; this
flag would be set once at the start of arm_process_displaced_insn, and
used throughout the code whereever we need to know the mode.

This approach would make most of the changes in this patch obsolete.

> 	(cleanup_branch): Replace magic number by macros.

> -      ULONGEST pc = displaced_read_reg (regs, from, 15);
> -      displaced_write_reg (regs, dsc, 14, pc - 4, CANNOT_WRITE_PC);
> +      ULONGEST pc = displaced_read_reg (regs, from, ARM_PC_REGNUM);
> +      displaced_write_reg (regs, dsc, ARM_LR_REGNUM, pc - 4, CANNOT_WRITE_PC);

I'm not sure about this change -- other callers just pass in plain
register numbers as well ...  Either those should all be changed,
or none of them.  In any case, this is really an unrelated change,
and should be done -if at all- in a separate patch.

> @@ -5908,23 +5928,44 @@ arm_displaced_init_closure (struct gdbarch *gdbarch, CORE_ADDR from,
>  			    CORE_ADDR to, struct displaced_step_closure *dsc)
>  {
>    struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
> -  unsigned int i;
> +  unsigned int i, len, offset;
>    enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
>  
> +  offset = 0;
>    /* Poke modified instruction(s).  */
>    for (i = 0; i < dsc->numinsns; i++)
>      {
> +      unsigned long insn;
> +      if (dsc->modinsns[i].size == 4)
> +	insn = dsc->modinsns[i].insn.a;
> +      else if (dsc->modinsns[i].size == 2)
> +	insn = dsc->modinsns[i].insn.t;
> +      else
> +	gdb_assert (0);
> +
>        if (debug_displaced)
> -	fprintf_unfiltered (gdb_stdlog, "displaced: writing insn %.8lx at "
> -			    "%.8lx\n", (unsigned long) dsc->modinsn[i],
> -			    (unsigned long) to + i * 4);
> -      write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
> -				     dsc->modinsn[i]);
> +	{
> +	  fprintf_unfiltered (gdb_stdlog, "displaced: writing insn ");
> +	  if (dsc->modinsns[i].size == 4)
> +	    fprintf_unfiltered (gdb_stdlog, "%.8lx",
> +				dsc->modinsns[i].insn.a);
> +	  else if (dsc->modinsns[i].size == 2)
> +	    fprintf_unfiltered (gdb_stdlog, "%.4x",
> +				dsc->modinsns[i].insn.t);
> +
> +	  fprintf_unfiltered (gdb_stdlog, " at %.8lx\n",
> +			      (unsigned long) to + offset);
> +	}
> +      write_memory_unsigned_integer (to + offset, dsc->modinsns[i].size,
> +				     byte_order_for_code,
> +				     insn);
> +      offset += dsc->modinsns[i].size;
>      }

As indicated above, this should just copy dsc->numinsns chunks of size 4
in ARM mode, and of size 2 in Thumb mode.

>    /* Put breakpoint afterwards.  */
> -  write_memory (to + dsc->numinsns * 4, tdep->arm_breakpoint,
> -		tdep->arm_breakpoint_size);
> +  write_memory (to + arm_displaced_step_breakpoint_offset (dsc),
> +		arm_breakpoint_from_pc (gdbarch, &from, &len),
> +		len);

Calling arm_breakpoint_from_pc is not a good idea, since this calls
arm_pc_is_thumb, which may end up getting a wrong result.  Since we
already know whether we're in ARM or Thumb mode, you should just
emit either tdep->arm_breakpoint or tdep->thumb_breakpoint.  (Since
we're not *replacing* any instruction here, there is never a need
to use the Thumb-2 breakpoint.)

> @@ -5960,7 +6001,11 @@ arm_displaced_step_fixup (struct gdbarch *gdbarch,
>      dsc->cleanup (gdbarch, regs, dsc);
>  
>    if (!dsc->wrote_to_pc)
> -    regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, dsc->insn_addr + 4);
> +    {
> +      struct frame_info *fi = get_current_frame ();
> +      regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
> +				      arm_get_next_pc_raw(fi, dsc->insn_addr, 0));
> +    }

Hmm, arm_get_next_pc_raw tries to follow branches etc, which is probably
not what we want here.  Again, I'd rather just check ARM vs. Thumb state
(in Thumb mode we could then check the instruction to see whether it is
a 16-bit or 32-bit instruction --- or even better, the original decoding
step could have just set a flag in dsc).

Bye,
Ulrich

-- 
  Dr. Ulrich Weigand
  GNU Toolchain for Linux on System z and Cell BE
  Ulrich.Weigand@de.ibm.com

^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [patch 3/3] Displaced stepping for 16-bit Thumb instructions
  2010-12-25 17:54 ` [patch 3/3] " Yao Qi
  2010-12-27 15:15   ` Yao Qi
@ 2011-02-17 20:55   ` Ulrich Weigand
  2011-02-18  7:30     ` Yao Qi
  2011-02-28  2:04     ` Displaced stepping 0003: " Yao Qi
  1 sibling, 2 replies; 66+ messages in thread
From: Ulrich Weigand @ 2011-02-17 20:55 UTC (permalink / raw)
  To: Yao Qi; +Cc: gdb-patches

Yao Qi wrote:

> Patch 3 is about supporting 16-bit Thumb displaced stepping.  In this
> patch, we decode 16-bit instruction, and process them.  We also leave a
> slot for 32-bit Thumb instructions, and put an error there.

Thanks.  Any thoughts how much more work the 32-bit instructions would be?
I'm not sure it is a good idea to support Thumb only partially; with the
current setup, you will fail immediately when debugging Thumb, but with
the patch, you might happen to step over a couple of 16-bit instructions
before hitting the first 32-bit instruction ...   (That may not be all
that bad, just wondering ...)

> Test cases are updated accordingly for some PC-related instructions.

I haven't looked in detail at the test cases yet, but here some
comments on the code so far:

> 	(cleanup_branch): Move some code to ...
> 	(cleanup_branch_1): ... here.  New.  Support Thumb mode.
> 	(cleanup_cbz_cbnz): New.
> 	(copy_b_bl_blx): Move some code to ...
> 	(arm_copy_b_bl_blx): ... here.  New.
> 	(thumb_copy_b): New.
> 	(copy_bx_blx_reg): Move some code to ...
> 	(arm_copy_bx_blx_reg): ... here.  New.
> 	(thumb_copy_bx_blx_reg): New.

I'm not sure I like those refactorings ...   It seems to me a nicer way
would be to re-use the cleanup_ routines unchanged for both ARM and Thumb
(because those only depend on the instruction *semantics*, not encoding),
but use fully separate copy_ routines for ARM and Thumb, since those are
really all about decoding the instruction format.

>	(thumb_decode_dp): New.
>	(thumb_decode_pc_relative): New.
>	(thumb_copy_16bit_ldr_literal): New.
>	(thumb_copy_cbnz_cbz): New.
>	(thumb_process_displaced_16bit_insn): New.
>	(thumb_process_displaced_32bit_insn): New.

Just a general note on the ordering of routines in the file: the existing
code has first all the cleanup and copy routines, followed by all the
higher-level decode routines.  I think it would be cleaner to keep it
the same way for the Thumb routines.  In fact, I think it might even make
sense to have the Thumb copy routines next to the corresponding ARM routines,
such that all the copy routines that install a particular cleanup routine
are in fact close by that cleanup routine.  The decode routines can then
come at the end (after the ARM decode routines).

> @@ -4338,10 +4341,15 @@ displaced_read_reg (struct regcache *regs, CORE_ADDR from, int regno)
>  
>    if (regno == 15)
>      {
> +      if (displaced_in_arm_mode (regs))
> +	from += 8;
> +      else
> +	from += 6;

I think the 6 is wrong, it should be 4.  From the ARM manual:

- When executing an ARM instruction, PC reads as the address of the
  current instruction plus 8.
- When executing a Thumb instruction, PC reads as the address of the
  current instruction plus 4.

>        if (debug_displaced)
>  	fprintf_unfiltered (gdb_stdlog, "displaced: read pc value %.8lx\n",
> -			    (unsigned long) from + 8);
> -      return (ULONGEST) from + 8;  /* Pipeline offset.  */
> +			    (unsigned long) from);
> +      return (ULONGEST) from;  /* Pipeline offset.  */

The "pipeline offset" comment refers to the + 8 (or +4); as this is now
moved further up, the comment should move with it.

> +/* Clean up branch instructions (actually perform the branch, by setting
> +   PC).  */
> +static void
> +cleanup_branch(struct gdbarch *gdbarch, struct regcache *regs,
> +	       struct displaced_step_closure *dsc)
> +{
> +  ULONGEST from = dsc->insn_addr;
> +  uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
> +  int branch_taken = condition_true (dsc->u.branch.cond, status);
> +
> +  cleanup_branch_1 (gdbarch, regs, dsc, branch_taken);
> +}
> +
> +static void
> +cleanup_cbz_cbnz(struct gdbarch *gdbarch, struct regcache *regs,
> +	       struct displaced_step_closure *dsc)
> +{
> +  cleanup_branch_1 (gdbarch, regs, dsc, dsc->u.branch.cond);
> +}

I think this is unnecessary: copy_cbz_cnbz ought to be able to use
cleanup_branch as-is.  If the branch is taken, it should just set
dsc->u.branch.cond to INSN_AL; if the branch is not taken, it
should simply not use any cleanup at all since no further action
is required.

> @@ -4718,6 +4752,40 @@ copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
>  
>       B<cond> similar, but don't set r14 in cleanup.  */
>  
> +
> +  dsc->u.branch.cond = cond;
> +  dsc->u.branch.link = link;
> +  dsc->u.branch.exchange = exchange;
> +
> +  if (arm_pc_is_thumb (gdbarch, from))

You should never use arm_pc_is_thumb here; the heuristics it applies are
completely unnecessary, since we know in which mode we are, and may just
result in the wrong outcome.

In any case, as discussed above, this ought to be two separate copy
routines, one for ARM mode and one for Thumb mode anyway.

> +    {
> +      /* Plus the size of THUMB_NOP and B/BL/BLX.  */
> +      dsc->u.branch.dest = from + 2 + 4 + offset;
> +      RECORD_MOD_16BIT_INSN (0, THUMB_NOP);

The + 2 doesn't look right to me.   The offset is relative to the
PC, which is -see above- "from + 8" in ARM mode and "from + 4" in
Thumb mode.  I don't see how the size of the THUMB_NOP is involved
at all here ...

> +static int
> +thumb_decode_dp (struct gdbarch *gdbarch, unsigned short insn,
> +		 struct displaced_step_closure *dsc)
> +{
> +  /* 16-bit data-processing insns are not related to PC.  */
> +  return thumb_copy_unmodified_16bit (gdbarch, insn,"data-processing", dsc);
> +}

This doesn't need to be a separate function, I guess ...


> +static int
> +thumb_decode_pc_relative (struct gdbarch *gdbarch, unsigned short insn,
> +			  struct regcache *regs,
> +			  struct displaced_step_closure *dsc)

This seems to be a copy routine, not a decode routine ...

> +  /* ADDS Rd, #imm8 */
> +  RECORD_MOD_32BIT_INSN (0, 0x3000 | (rd << 8) | imm8);

Should be 16BIT (but see my earlier mail on the usefulness of
those macros in the first place ...).

> +static void
> +thumb_process_displaced_16bit_insn (struct gdbarch *gdbarch,
> +				    unsigned short insn1, CORE_ADDR to,

I don't think this needs TO.

> +				    struct regcache *regs,
> +				    struct displaced_step_closure *dsc)
> +{
> +  unsigned short op_bit_12_15 = bits (insn1, 12, 15);
> +  unsigned short op_bit_10_11 = bits (insn1, 10, 11);
> +  int err = 0;
> +
> +  /* 16-bit thumb instructions.  */
> +  switch (op_bit_12_15)
> +    {
> +      /* Shift (imme), add, subtract, move and compare*/
> +    case 0: case 1: case 2: case 3:
> +      err = thumb_copy_unmodified_16bit (gdbarch, insn1,"", dsc);
> +      break;
> +    case 4:
> +      switch (op_bit_10_11)
> +	{
> +	case 0: /* Data-processing */
> +	  err = thumb_decode_dp (gdbarch, insn1, dsc);
> +	  break;
> +	case 1: /* Special data instructions and branch and exchange */
> +	  {
> +	    unsigned short op = bits (insn1, 7, 9);
> +	    if (op == 6 || op == 7) /* BX or BLX */
> +	      err = thumb_copy_bx_blx_reg (gdbarch, insn1, regs, dsc);
> +	    else
> +	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "special data",
> +						 dsc);

These include the ADD / MOV / CMP high register instructions, which
can access the PC, so they'd need special treatment

> +	  }
> +	  break;
> +	default: /* LDR (literal) */
> +	  err = thumb_copy_16bit_ldr_literal (gdbarch, insn1, regs, dsc);
> +	}
> +      break;
> +    case 5: case 6: case 7: case 8: case 9: /* Load/Store single data item */
> +      err = thumb_copy_unmodified_16bit (gdbarch, insn1,"ldr/str", dsc);
> +      break;
> +    case 10:
> +      if (op_bit_10_11 < 2) /* Generate PC-relative address */
> +	err = thumb_decode_pc_relative (gdbarch, insn1, regs, dsc);
> +      else /* Generate SP-relative address */
> +	err = thumb_copy_unmodified_16bit (gdbarch, insn1,"sp-relative", dsc);
> +      break;
> +    case 11: /* Misc 16-bit instructions */
> +      {
> +	switch (bits (insn1, 8, 11))
> +	  {
> +	  case 1: case 3:  case 9: case 11: /* CBNZ, CBZ */
> +	    err = thumb_copy_cbnz_cbz (gdbarch, insn1, regs, dsc);
> +	    break;
> +	  default:
> +	    err = thumb_copy_unmodified_16bit (gdbarch, insn1,"", dsc);

Hmm, what about IT ?

> +	  }
> +      }
> +      break;
> +    case 12:
> +      if (op_bit_10_11 < 2) /* Store multiple registers */
> +	err = thumb_copy_unmodified_16bit (gdbarch, insn1,"stm", dsc);
> +      else /* Load multiple registers */
> +	err = thumb_copy_unmodified_16bit (gdbarch, insn1,"ldm", dsc);
> +      break;
> +    case 13: /* Conditional branch and supervisor call */
> +      if (bits (insn1, 9, 11) != 7) /* conditional branch */
> +	err = thumb_copy_b (gdbarch, insn1, dsc);
> +      else
> +	err = thumb_copy_unmodified_16bit (gdbarch, insn1,"svc", dsc);

There is special handling in arm-linux-tdep.c for ARM SVC instructions.
Don't we need this for Thumb SVC's as well?

> +      break;
> +    case 14: /* Unconditional branch */
> +      err = thumb_copy_b (gdbarch, insn1, dsc);
> +      break;
> +    default:
> +      internal_error (__FILE__, __LINE__,
> +		      _("thumb_process_displaced_insn: Instruction decode error"));
> +    }
> +
> +  if (err)
> +    internal_error (__FILE__, __LINE__,
> +		    _("thumb_process_displaced_insn: Instruction decode error"));
> +}
> +
> +static void
> +thumb_process_displaced_32bit_insn (struct gdbarch *gdbarch, CORE_ADDR from,
> +				    CORE_ADDR to, struct regcache *regs,
> +				    struct displaced_step_closure *dsc)

This really needs neither FROM nor TO, but should rather get the two parts
of the insn (to avoid duplicate memory accesses).
> +{
> +  error (_("Displaced stepping is only supported in ARM mode and Thumb 16bit instructions"));
> +}
> +
>  static void
>  thumb_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
>  			      CORE_ADDR to, struct regcache *regs,
>  			      struct displaced_step_closure *dsc)
>  {
> -  error (_("Displaced stepping is only supported in ARM mode"));
> +  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
> +  unsigned short insn1
> +    = read_memory_unsigned_integer (from, 2, byte_order_for_code);
> +
> +  if (debug_displaced)
> +    fprintf_unfiltered (gdb_stdlog, "displaced: process thumb insn %.4x "
> +			"at %.8lx\n", insn1, (unsigned long) from);
> +
> +  if ((bits (insn1, 13, 15) == 7) && (bits (insn1, 11, 12)))

You should just use thumb_insn_size ...

> +    thumb_process_displaced_32bit_insn(gdbarch, from, to, regs, dsc);
> +  else
> +    thumb_process_displaced_16bit_insn(gdbarch, insn1, to, regs, dsc);
>  }


Bye,
Ulrich

-- 
  Dr. Ulrich Weigand
  GNU Toolchain for Linux on System z and Cell BE
  Ulrich.Weigand@de.ibm.com

^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [patch 2/3] Displaced stepping for 16-bit Thumb instructions
  2011-02-17 19:46   ` Ulrich Weigand
@ 2011-02-18  6:33     ` Yao Qi
  2011-02-18 12:18       ` Ulrich Weigand
  0 siblings, 1 reply; 66+ messages in thread
From: Yao Qi @ 2011-02-18  6:33 UTC (permalink / raw)
  To: Ulrich Weigand; +Cc: gdb-patches

On 02/18/2011 03:22 AM, Ulrich Weigand wrote:
> Yao Qi wrote:
> 
> 
>> -  unsigned long modinsn[DISPLACED_MODIFIED_INSNS];
>> +
>> +  struct insn
>> +  {
>> +    union
>> +    {
>> +      unsigned long a;
>> +      unsigned short t;
>> +    }insn;
>> +    unsigned short size;
>> +  }modinsns[DISPLACED_MODIFIED_INSNS];
>> +
> 
> I don't think this is the right way to go.  You cannot have a mixture of
> ARM and Thumb instructions in a single modinsn block, and if you have
> Thumb instructions, they all need to be transfered in 16-bit chunks,
> even the 32-bit Thumb2 instructions, to get the endian conversion right.
> 

I don't have a mixture of ARM and Thumb instructions in a single modinsn
block.  When displace stepping 16-bit instructions, modinsn[].insn.t is
used to record 16-bit instructions and all instructions in copy area are
16-bit also.  In 32-bit case, modinsn[].insn.a is used, and all
instructions in copy area are 32-bit.

> So I think you should rather keep a single modinsn array of unsigned long.
> When filling it in, ARM instructions are handled as today, 16-bit Thumb
> instructions are likewise just filled into one modinsn slot, and 32-bit
> Thumb instructions are filled into two modinsn slots.
> 
> When copying the modinsn array out to the target, each slot is transfered
> as 4 bytes in ARM mode, and as 2 bytes in Thumb mode.  To know in which
> mode you are, it is probably best to have a single flag in the struct
> displaced_step_closure that indicated whether it is ARM or Thumb; this
> flag would be set once at the start of arm_process_displaced_insn, and
> used throughout the code whereever we need to know the mode.

The reason I propose a union here is to avoid too many byte
operations when recording instructions and copying them to the copy area.  The
union will waste some space in the 16-bit instruction case, but IMO that
doesn't matter too much.

I agree that we should use a single flag for the mode, and remove the size
field from struct insn.

The changes in `struct displaced_step_closure' is like this,

 -  unsigned long modinsn[DISPLACED_MODIFIED_INSNS];
 +
 +  unsigned short flag; /* indicates the mode of instructions in
MODINSNS.  */
 +    union
 +    {
 +      unsigned long a;
 +      unsigned short t;
 +    }modinsns[DISPLACED_MODIFIED_INSNS];

Do you agree on this proposed data structure?  We need an agreement on
this basic data structure before I start to write/change the rest of
patches.

> 
> This approach would make most of the changes in this patch obsolete.
> 
>> 	(cleanup_branch): Replace magic number by macros.
> 
>> -      ULONGEST pc = displaced_read_reg (regs, from, 15);
>> -      displaced_write_reg (regs, dsc, 14, pc - 4, CANNOT_WRITE_PC);
>> +      ULONGEST pc = displaced_read_reg (regs, from, ARM_PC_REGNUM);
>> +      displaced_write_reg (regs, dsc, ARM_LR_REGNUM, pc - 4, CANNOT_WRITE_PC);
> 
> I'm not sure about this change -- other callers just pass in plain
> register numbers as well ...  Either those should all be changed,
> or none of them.  In any case, this is really an unrelated change,
> and should be done -if at all- in a separate patch.
> 

I'll remove this chunk from my patch, and create another patch specific
to this 'magic number' problem separately.


> 
>>    /* Put breakpoint afterwards.  */
>> -  write_memory (to + dsc->numinsns * 4, tdep->arm_breakpoint,
>> -		tdep->arm_breakpoint_size);
>> +  write_memory (to + arm_displaced_step_breakpoint_offset (dsc),
>> +		arm_breakpoint_from_pc (gdbarch, &from, &len),
>> +		len);
> 
> Calling arm_breakpoint_from_pc is not a good idea, since this calls
> arm_pc_is_thumb, which may end up getting a wrong result.  Since we
> already know whether we're in ARM or Thumb mode, you should just
> emit either tdep->arm_breakpoint or tdep->thumb_breakpoint.  (Since
> we're not *replacing* any instruction here, there is never a need
> to use the Thumb-2 breakpoint.)
> 

Yes, we've already known the mode.  We can use either
tdep->arm_breakpoint or tdep->thumb_breakpoint directly.

>> @@ -5960,7 +6001,11 @@ arm_displaced_step_fixup (struct gdbarch *gdbarch,
>>      dsc->cleanup (gdbarch, regs, dsc);
>>  
>>    if (!dsc->wrote_to_pc)
>> -    regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, dsc->insn_addr + 4);
>> +    {
>> +      struct frame_info *fi = get_current_frame ();
>> +      regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
>> +				      arm_get_next_pc_raw(fi, dsc->insn_addr, 0));
>> +    }
> 
> Hmm, arm_get_next_pc_raw tries to follow branches etc, which is probably
> not what we want here.  Again, I'd rather just check ARM vs. Thumb state
> (in Thumb mode we could then check the instruction to see whether it is
> a 16-bit or 32-bit instruction --- or even better, the original decoding
> step could have just set a flag in dsc).

`if (!dsc->wrote_to_pc)' guards against following a branch in this
case.  However, since we already know the mode, we can adjust the PC directly,
without bothering with the complicated arm_get_next_pc_raw.

-- 
Yao (齐尧)

^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [patch 3/3] Displaced stepping for 16-bit Thumb instructions
  2011-02-17 20:55   ` Ulrich Weigand
@ 2011-02-18  7:30     ` Yao Qi
  2011-02-18 13:25       ` Ulrich Weigand
  2011-02-28  2:04     ` Displaced stepping 0003: " Yao Qi
  1 sibling, 1 reply; 66+ messages in thread
From: Yao Qi @ 2011-02-18  7:30 UTC (permalink / raw)
  To: Ulrich Weigand; +Cc: gdb-patches

On 02/18/2011 04:11 AM, Ulrich Weigand wrote:
> Thanks.  Any thoughts how much more work the 32-bit instructions would be?

I have a patch for displaced stepping of 32-bit Thumb instructions in my
local tree, and plan to submit it once the `16-bit' patch is approved.

> I'm not sure it is a good idea to support Thumb only partially; with the
> current setup, you will fail immediately when debugging Thumb, but with
> the patch, you might happen to step over a couple of 16-bit instructions
> before hitting the first 32-bit instruction ...   (That may not be all
> that bad, just wondering ...)

So, my plan is,
1. Get "patch 2/3" agreed and approved,
2. Revise "patch 3/3" per your comments in last mail, and "patch 2/3".
3. Revise patch for `32-bit' thumb instructions in my local tree, and
submit.

-- 
Yao (齐尧)

^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [patch 2/3] Displaced stepping for 16-bit Thumb instructions
  2011-02-18  6:33     ` Yao Qi
@ 2011-02-18 12:18       ` Ulrich Weigand
  2011-02-21  7:41         ` Yao Qi
  0 siblings, 1 reply; 66+ messages in thread
From: Ulrich Weigand @ 2011-02-18 12:18 UTC (permalink / raw)
  To: Yao Qi; +Cc: gdb-patches

Yao Qi wrote:
> On 02/18/2011 03:22 AM, Ulrich Weigand wrote:
> > I don't think this is the right way to go.  You cannot have a mixture of
> > ARM and Thumb instructions in a single modinsn block, and if you have
> > Thumb instructions, they all need to be transfered in 16-bit chunks,
> > even the 32-bit Thumb2 instructions, to get the endian conversion right.
> 
> I don't have a mixture of ARM and Thumb instructions in a single modinsn
> block.  When displace stepping 16-bit instructions, modinsn[].insn.t is
> used to record 16-bit instructions and all instructions in copy area are
> 16-bit also.  In 32-bit case, modinsn[].insn.a is used, and all
> instructions in copy area are 32-bit.

I'm not sure if I understood you correctly, but 32-bit Thumb2 instructions
must be transfered in 2 16-bit chunks to get the endian conversion right,
just as is done everywhere else in arm-tdep.c.

But maybe this discussion can wait until we see the Thumb2 patch ...

> The reason I propose a union here is to try to avoid too-many byte
> operations during recording instructions and copying to copy area.  The
> union will waste some space in 16-bit instructions case, but IMO, it
> doesn't matter too much.

Are you talking about operations accessing the target, or computations
done on host?   If the former, the choice of data type for modinsns will
not affect that at all.   If the latter, I don't think there will be
any measurable difference either way ...
 
> I agree that we should a single flag for mode, and remove field size
> from struct insn.
> 
> The changes in `struct displaced_step_closure' is like this,
> 
>  -  unsigned long modinsn[DISPLACED_MODIFIED_INSNS];
>  +
>  +  unsigned short flag; /* indicates the mode of instructions in
> MODINSNS.  */
>  +    union
>  +    {
>  +      unsigned long a;
>  +      unsigned short t;
>  +    }modinsns[DISPLACED_MODIFIED_INSNS];
> 
> Do you agree on this proposed data structure?  We need an agreement on
> this basic data structure before I start to write/change the rest of
> patches.

Well, I just don't see the point.  The arm-tdep code usually does:

   dsc->modinsn[...] = <some integer expression>

Code generated from that will not be significantly different whether
the underlying type of dsc->modinsn is short, int, or long; on some
host platforms, having the destination type short will actually require
extra conversion steps ...

Because I don't see any actual performance benefit, I'd argue for keeping
the source code simple.

> I'll remove this chunk from my patch, and create another patch specific
> to this 'magic number' problem separately.
[snip]
> Yes, we've already known the mode.  We can use either
> tdep->arm_breakpoint or tdep->thumb_breakpoint directly.
[snip]
> `if (!dsc->wrote_to_pc)' guard that we will not follow branch in this
> case.  However, since we've known the mode, we can adjust pc directly,
> without bothering complicated arm_get_next_pc_raw.

OK, thanks!

Bye,
Ulrich

-- 
  Dr. Ulrich Weigand
  GNU Toolchain for Linux on System z and Cell BE
  Ulrich.Weigand@de.ibm.com

^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [patch 3/3] Displaced stepping for 16-bit Thumb instructions
  2011-02-18  7:30     ` Yao Qi
@ 2011-02-18 13:25       ` Ulrich Weigand
  0 siblings, 0 replies; 66+ messages in thread
From: Ulrich Weigand @ 2011-02-18 13:25 UTC (permalink / raw)
  To: Yao Qi; +Cc: gdb-patches

Yao Qi wrote:
> On 02/18/2011 04:11 AM, Ulrich Weigand wrote:
> > Thanks.  Any thoughts how much more work the 32-bit instructions would be?
> 
> I've had a patch for 32-bit Thumb instructions displaced stepping in my
> local tree, and planned to submit once `16-bit' patch is approved.

Great, thanks!

> So, my plan is,
> 1. Get "patch 2/3" agreed and approved,
> 2. Revise "patch 3/3" per your comments in last mail, and "patch 2/3".
> 3. Revise patch for `32-bit' thumb instructions in my local tree, and
> submit.

Sounds good to me ...

Bye,
Ulrich

-- 
  Dr. Ulrich Weigand
  GNU Toolchain for Linux on System z and Cell BE
  Ulrich.Weigand@de.ibm.com

^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [patch 2/3] Displaced stepping for 16-bit Thumb instructions
  2011-02-18 12:18       ` Ulrich Weigand
@ 2011-02-21  7:41         ` Yao Qi
  2011-02-21 20:14           ` Ulrich Weigand
  0 siblings, 1 reply; 66+ messages in thread
From: Yao Qi @ 2011-02-21  7:41 UTC (permalink / raw)
  To: Ulrich Weigand; +Cc: gdb-patches

On 02/18/2011 08:17 PM, Ulrich Weigand wrote:
> I'm not sure if I understood you correctly, but 32-bit Thumb2 instructions
> must be transfered in 2 16-bit chunks to get the endian conversion right,
> just as is done everywhere else in arm-tdep.c.
> 
> But maybe this discussion can wait until we see the Thumb2 patch ...
> 

Yes, I did that in my Thumb-2 patch.

/* Combine two 16 bit instructions to a 32-bit format.  Some 32-bit Thumb
   instructions have the same encodings as ARM counterparts.  */
#define COMBINE_16BIT_TO_32BIT_INSN(INSN1, INSN2) \
  ((INSN1 << 16) | INSN2)


>> > The reason I propose a union here is to try to avoid too-many byte
>> > operations during recording instructions and copying to copy area.  The
>> > union will waste some space in 16-bit instructions case, but IMO, it
>> > doesn't matter too much.
> Are you talking about operations accessing the target, or computations
> done on host?   If the former, the choice of data type for modinsns will
> not affect that at all.   If the latter, I don't think there will be
> any measurable difference either way ...
>  

The latter one, I think.

>> > I agree that we should a single flag for mode, and remove field size
>> > from struct insn.
>> > 
>> > The changes in `struct displaced_step_closure' is like this,
>> > 
>> >  -  unsigned long modinsn[DISPLACED_MODIFIED_INSNS];
>> >  +
>> >  +  unsigned short flag; /* indicates the mode of instructions in
>> > MODINSNS.  */
>> >  +    union
>> >  +    {
>> >  +      unsigned long a;
>> >  +      unsigned short t;
>> >  +    }modinsns[DISPLACED_MODIFIED_INSNS];
>> > 
>> > Do you agree on this proposed data structure?  We need an agreement on
>> > this basic data structure before I start to write/change the rest of
>> > patches.
> Well, I just don't see the point.  The arm-tdep code usually does:
> 
>    dsc->modinsn[...] = <some integer expression>
> 
> Code generated from that will not be significantly different whether
> the underlying type of dsc->modinsn is short, int, or long; on some
> host platforms, having the destination type short will actually require
> extra conversion steps ...
> 

Yes.  If I understand you correctly, modinsn is a 'unsigned long' array.
  * ARM instruction occupies one slot with flag `ARM',
  * Thumb 16 bit instruction occupies one slot with flag `Thumb'
  * Thumb 32-bit instruction occupies *two* slots with flag `Thumb',
That works, I think.

I just recall one extra benefit of my original approach is about sharing
some copy_* routines for Thumb 32-bit instructions and ARM instructions.
 In ARM manual, I noticed that some encodings of 32-bit Thumb-2
instructions are the same ARM counterparts (such as preload preload_reg,
and svc_copro), so that their copy routines can be shared.

In order to hide the difference of ARM instructions and 32-bit Thumb-2
instructions in these copy_* routines,  two parts of 32-bit Thumb
instructions are combined as a `32-bit instruction' via macro
COMBINE_16BIT_TO_32BIT_INSN.  Inside these copy_* routines, we don't
have to worry about whether they are ARM instructions or 32-bit Thumb instructions.

In my proposed approach, each instruction only occupies one slot,
  * ARM instruction, with instruction size `4-byte',
  * Thumb 16 bit instruction, with instruction size `2-byte'
  * Thumb 32-bit instruction, converted and with instruction size `4-byte',

Do you think this benefit is strong enough to convince us to adapt to my
original approach  to `complicate source code' to some extent?

-- 
Yao (齐尧)

^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [patch 2/3] Displaced stepping for 16-bit Thumb instructions
  2011-02-21  7:41         ` Yao Qi
@ 2011-02-21 20:14           ` Ulrich Weigand
  2011-02-25 18:09             ` Yao Qi
  0 siblings, 1 reply; 66+ messages in thread
From: Ulrich Weigand @ 2011-02-21 20:14 UTC (permalink / raw)
  To: Yao Qi; +Cc: gdb-patches

Yao Qi wrote:

> Yes.  If I understand you correctly, modinsn is a 'unsigned long' array.
>   * ARM instruction occupies one slot with flag `ARM',
>   * Thumb 16 bit instruction occupies one slot with flag `Thumb'
>   * Thumb 32-bit instruction occupies *two* slots with flag `Thumb',
> That works, I think.

Yes, that's what I suggested.

> I just recall one extra benefit of my original approach is about sharing
> some copy_* routines for Thumb 32-bit instructions and ARM instructions.
>  In ARM manual, I noticed that some encodings of 32-bit Thumb-2
> instructions are the same ARM counterparts (such as preload preload_reg,
> and svc_copro), so that their copy routines can be shared.

Huh.  Sharing copy routines seems a bit dangerous, because it only works
if the copy routine only looks at those bits that are identical between
the ARM and Thumb encoding (e.g. even for all the NEON instructions, the
position of the U bit is different), *and* the copy routine always only
copies instructions in the correct format into the output buffer.

While this may happen to work in certain cases (e.g. if we look only at
a couple of bits and then do copy_unmodified), it seems a bit fragile
as errors may be easily introduced by future changes.

Having clearly separate copy routines for ARM and Thumb seems preferable
to me, therefore; even if it leads to small amount of duplication.  (If
we're talking *large* amounts of duplicated code here, it might be
possible to factor out some commonalities into helpers ...)

Bye,
Ulrich

-- 
  Dr. Ulrich Weigand
  GNU Toolchain for Linux on System z and Cell BE
  Ulrich.Weigand@de.ibm.com

^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [patch 2/3] Displaced stepping for 16-bit Thumb instructions
  2011-02-21 20:14           ` Ulrich Weigand
@ 2011-02-25 18:09             ` Yao Qi
  2011-02-25 20:17               ` Ulrich Weigand
  0 siblings, 1 reply; 66+ messages in thread
From: Yao Qi @ 2011-02-25 18:09 UTC (permalink / raw)
  To: Ulrich Weigand; +Cc: gdb-patches

[-- Attachment #1: Type: text/plain, Size: 683 bytes --]

On 02/22/2011 04:09 AM, Ulrich Weigand wrote:
>> > Yes.  If I understand you correctly, modinsn is a 'unsigned long' array.
>> >   * ARM instruction occupies one slot with flag `ARM',
>> >   * Thumb 16 bit instruction occupies one slot with flag `Thumb'
>> >   * Thumb 32-bit instruction occupies *two* slots with flag `Thumb',
>> > That works, I think.
> Yes, that's what I suggested.
> 

This new patch implements what we discussed above.  There is a minor
difference on rule #3.  "Thumb 32-bit instruction occupies *two* slots
with flag `Thumb-2'", because we have to choose breakpoint type (thumb
breakpoint or thumb-2 breakpoint) according to this flag.

-- 
Yao (齐尧)

[-- Attachment #2: arm-disp-step-02-macros-0225.patch --]
[-- Type: text/x-patch, Size: 14970 bytes --]

gdb/

	* arm-tdep.h (struct displaced_step_closure): New field insn_mode.
	(RECORD_ARM_MODE_INSN, RECORD_THUMB_MODE_INSN, RECORD_THUMB2_MODE_INSN): New macro.
	* arm-tdep.c (copy_unmodified): Save modified insns by RECORD_ARM_MODE_INSN.
	(copy_preload, copy_preload_reg, copy_copro_load_store, copy_b_bl_blx): Likewise.
	(copy_bx_blx_reg, copy_alu_imm, copy_alu_reg, copy_alu_shifted_reg): Likewise.
	(copy_extra_ld_st, copy_ldr_str_ldrb_strb, copy_block_xfer): Likewise.
	(copy_svc, copy_undef, copy_unpred): Likewise.
	(displaced_read_reg): Handle both ARM and Thumb mode when reading PC.
	(arm_displaced_init_closure): Handle both 32bit and 16bit insns.
	(arm_displaced_step_fixup): Likewise.
	* arm-linux-tdep.c (arm_linux_copy_svc): Save modified insns by RECORD_ARM_MODE_INSN.
	(arm_catch_kernel_helper_return): Likewise.

diff --git a/gdb/arm-linux-tdep.c b/gdb/arm-linux-tdep.c
index ff649d6..75a4ea4 100644
--- a/gdb/arm-linux-tdep.c
+++ b/gdb/arm-linux-tdep.c
@@ -827,7 +827,7 @@ arm_linux_copy_svc (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
      Cleanup: if pc lands in scratch space, pc <- insn_addr + 4
               else leave pc alone.  */
 
-  dsc->modinsn[0] = insn;
+  RECORD_ARM_MODE_INSN (0, insn);
 
   dsc->cleanup = &arm_linux_cleanup_svc;
   /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
@@ -885,7 +885,7 @@ arm_catch_kernel_helper_return (struct gdbarch *gdbarch, CORE_ADDR from,
 		       CANNOT_WRITE_PC);
   write_memory_unsigned_integer (to + 8, 4, byte_order, from);
 
-  dsc->modinsn[0] = 0xe59ef004;  /* ldr pc, [lr, #4].  */
+  RECORD_ARM_MODE_INSN (0, 0xe59ef004); /* ldr pc, [lr, #4].  */
 }
 
 /* Linux-specific displaced step instruction copying function.  Detects when
diff --git a/gdb/arm-tdep.c b/gdb/arm-tdep.c
index f0e9435..ea4452f 100644
--- a/gdb/arm-tdep.c
+++ b/gdb/arm-tdep.c
@@ -5106,6 +5106,8 @@ arm_adjust_breakpoint_address (struct gdbarch *gdbarch, CORE_ADDR bpaddr)
 /* NOP instruction (mov r0, r0).  */
 #define ARM_NOP				0xe1a00000
 
+static int displaced_in_arm_mode (struct regcache *regs);
+
 /* Helper for register reads for displaced stepping.  In particular, this
    returns the PC as it would be seen by the instruction at its original
    location.  */
@@ -5117,10 +5119,21 @@ displaced_read_reg (struct regcache *regs, CORE_ADDR from, int regno)
 
   if (regno == 15)
     {
+      /* Compute pipeline offset:
+	 - When executing an ARM instruction, PC reads as the address of the
+	 current instruction plus 8.
+	 - When executing a Thumb instruction, PC reads as the address of the
+	 current instruction plus 4.  */
+
+      if (displaced_in_arm_mode (regs))
+	from += 8;
+      else
+	from += 4;
+
       if (debug_displaced)
 	fprintf_unfiltered (gdb_stdlog, "displaced: read pc value %.8lx\n",
-			    (unsigned long) from + 8);
-      return (ULONGEST) from + 8;  /* Pipeline offset.  */
+			    (unsigned long) from);
+      return (ULONGEST) from;  /* Pipeline offset.  */
     }
   else
     {
@@ -5306,7 +5319,7 @@ copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
 			"opcode/class '%s' unmodified\n", (unsigned long) insn,
 			iname);
 
-  dsc->modinsn[0] = insn;
+  RECORD_ARM_MODE_INSN (0, insn);
 
   return 0;
 }
@@ -5349,7 +5362,7 @@ copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
 
   dsc->u.preload.immed = 1;
 
-  dsc->modinsn[0] = insn & 0xfff0ffff;
+  RECORD_ARM_MODE_INSN (0, (insn & 0xfff0ffff));
 
   dsc->cleanup = &cleanup_preload;
 
@@ -5390,7 +5403,7 @@ copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn,
 
   dsc->u.preload.immed = 0;
 
-  dsc->modinsn[0] = (insn & 0xfff0fff0) | 0x1;
+  RECORD_ARM_MODE_INSN (0, ((insn & 0xfff0fff0) | 0x1));
 
   dsc->cleanup = &cleanup_preload;
 
@@ -5443,7 +5456,7 @@ copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
   dsc->u.ldst.writeback = bit (insn, 25);
   dsc->u.ldst.rn = rn;
 
-  dsc->modinsn[0] = insn & 0xfff0ffff;
+  RECORD_ARM_MODE_INSN (0, (insn & 0xfff0ffff));
 
   dsc->cleanup = &cleanup_copro_load_store;
 
@@ -5515,7 +5528,7 @@ copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
   dsc->u.branch.exchange = exchange;
   dsc->u.branch.dest = from + 8 + offset;
 
-  dsc->modinsn[0] = ARM_NOP;
+  RECORD_ARM_MODE_INSN (0, ARM_NOP);
 
   dsc->cleanup = &cleanup_branch;
 
@@ -5554,7 +5567,7 @@ copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
   dsc->u.branch.link = link;
   dsc->u.branch.exchange = 1;
 
-  dsc->modinsn[0] = ARM_NOP;
+  RECORD_ARM_MODE_INSN (0, ARM_NOP);
 
   dsc->cleanup = &cleanup_branch;
 
@@ -5613,9 +5626,9 @@ copy_alu_imm (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
   dsc->rd = rd;
 
   if (is_mov)
-    dsc->modinsn[0] = insn & 0xfff00fff;
+    RECORD_ARM_MODE_INSN (0, (insn & 0xfff00fff));
   else
-    dsc->modinsn[0] = (insn & 0xfff00fff) | 0x10000;
+    RECORD_ARM_MODE_INSN (0, ((insn & 0xfff00fff) | 0x10000));
 
   dsc->cleanup = &cleanup_alu_imm;
 
@@ -5682,9 +5695,9 @@ copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
   dsc->rd = rd;
 
   if (is_mov)
-    dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x2;
+    RECORD_ARM_MODE_INSN (0, ((insn & 0xfff00ff0) | 0x2));
   else
-    dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x10002;
+    RECORD_ARM_MODE_INSN (0, ((insn & 0xfff00ff0) | 0x10002));
 
   dsc->cleanup = &cleanup_alu_reg;
 
@@ -5757,9 +5770,9 @@ copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
   dsc->rd = rd;
 
   if (is_mov)
-    dsc->modinsn[0] = (insn & 0xfff000f0) | 0x302;
+    RECORD_ARM_MODE_INSN (0, ((insn & 0xfff000f0) | 0x302));
   else
-    dsc->modinsn[0] = (insn & 0xfff000f0) | 0x10302;
+    RECORD_ARM_MODE_INSN (0, ((insn & 0xfff000f0) | 0x10302));
 
   dsc->cleanup = &cleanup_alu_shifted_reg;
 
@@ -5883,12 +5896,12 @@ copy_extra_ld_st (struct gdbarch *gdbarch, uint32_t insn, int unpriveleged,
     /* {ldr,str}<width><cond> rt, [rt2,] [rn, #imm]
 	->
        {ldr,str}<width><cond> r0, [r1,] [r2, #imm].  */
-    dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
+    RECORD_ARM_MODE_INSN (0, ((insn & 0xfff00fff) | 0x20000));
   else
     /* {ldr,str}<width><cond> rt, [rt2,] [rn, +/-rm]
 	->
        {ldr,str}<width><cond> r0, [r1,] [r2, +/-r3].  */
-    dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
+    RECORD_ARM_MODE_INSN (0, ((insn & 0xfff00ff0) | 0x20003));
 
   dsc->cleanup = load[opcode] ? &cleanup_load : &cleanup_store;
 
@@ -5971,32 +5984,31 @@ copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
 	/* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
 	   ->
 	   {ldr,str}[b]<cond> r0, [r2, #imm].  */
-	dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
+	RECORD_ARM_MODE_INSN (0, ((insn & 0xfff00fff) | 0x20000));
       else
 	/* {ldr,str}[b]<cond> rt, [rn, rm], etc.
 	   ->
 	   {ldr,str}[b]<cond> r0, [r2, r3].  */
-	dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
+	RECORD_ARM_MODE_INSN (0, ((insn & 0xfff00ff0) | 0x20003));
     }
   else
     {
       /* We need to use r4 as scratch.  Make sure it's restored afterwards.  */
       dsc->u.ldst.restore_r4 = 1;
-      dsc->modinsn[0] = 0xe92d8000;  /* push {pc} */
-      dsc->modinsn[1] = 0xe8bd0010;  /* pop  {r4} */
-      dsc->modinsn[2] = 0xe044400f;  /* sub r4, r4, pc.  */
-      dsc->modinsn[3] = 0xe2844008;  /* add r4, r4, #8.  */
-      dsc->modinsn[4] = 0xe0800004;  /* add r0, r0, r4.  */
+      RECORD_ARM_MODE_INSN (0, 0xe92d8000); /* push {pc} */
+      RECORD_ARM_MODE_INSN (1, 0xe8bd0010); /* pop  {r4} */
+      RECORD_ARM_MODE_INSN (2, 0xe044400f); /* sub r4, r4, pc.  */
+      RECORD_ARM_MODE_INSN (3, 0xe2844008); /* add r4, r4, #8.  */
+      RECORD_ARM_MODE_INSN (4, 0xe0800004);  /* add r0, r0, r4.  */
 
       /* As above.  */
       if (immed)
-	dsc->modinsn[5] = (insn & 0xfff00fff) | 0x20000;
+	RECORD_ARM_MODE_INSN (5, ((insn & 0xfff00fff) | 0x20000));
       else
-	dsc->modinsn[5] = (insn & 0xfff00ff0) | 0x20003;
-
-      dsc->modinsn[6] = 0x0;  /* breakpoint location.  */
-      dsc->modinsn[7] = 0x0;  /* scratch space.  */
+	RECORD_ARM_MODE_INSN (5, ((insn & 0xfff00ff0) | 0x20003));
 
+      RECORD_ARM_MODE_INSN (6, 0x00); /* breakpoint location.  */
+      RECORD_ARM_MODE_INSN (7, 0x00); /* scratch space.  */
       dsc->numinsns = 6;
     }
 
@@ -6268,7 +6280,7 @@ copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
 	     instruction (which might not behave perfectly in all cases, but
 	     these instructions should be rare enough for that not to matter
 	     too much).  */
-	  dsc->modinsn[0] = ARM_NOP;
+	  RECORD_ARM_MODE_INSN (0, ARM_NOP);
 
 	  dsc->cleanup = &cleanup_block_load_all;
 	}
@@ -6312,7 +6324,8 @@ copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
 				"list %.4x\n"), rn, writeback ? "!" : "",
 				(int) insn & 0xffff, new_regmask);
 
-	  dsc->modinsn[0] = (insn & ~0xffff) | (new_regmask & 0xffff);
+	  RECORD_ARM_MODE_INSN (0,
+				 ((insn & ~0xffff) | (new_regmask & 0xffff)));
 
 	  dsc->cleanup = &cleanup_block_load_pc;
 	}
@@ -6325,7 +6338,7 @@ copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
 	 Doing things this way has the advantage that we can auto-detect
 	 the offset of the PC write (which is architecture-dependent) in
 	 the cleanup routine.  */
-      dsc->modinsn[0] = insn;
+      RECORD_ARM_MODE_INSN (0, insn);
 
       dsc->cleanup = &cleanup_block_store_pc;
     }
@@ -6368,7 +6381,7 @@ copy_svc (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
      Insn: unmodified svc.
      Cleanup: pc <- insn_addr + 4.  */
 
-  dsc->modinsn[0] = insn;
+  RECORD_ARM_MODE_INSN (0, insn);
 
   dsc->cleanup = &cleanup_svc;
   /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
@@ -6389,7 +6402,7 @@ copy_undef (struct gdbarch *gdbarch, uint32_t insn,
 			"displaced: copying undefined insn %.8lx\n",
 			(unsigned long) insn);
 
-  dsc->modinsn[0] = insn;
+  RECORD_ARM_MODE_INSN (0, insn);
 
   return 0;
 }
@@ -6404,7 +6417,7 @@ copy_unpred (struct gdbarch *gdbarch, uint32_t insn,
     fprintf_unfiltered (gdb_stdlog, "displaced: copying unpredictable insn "
 			"%.8lx\n", (unsigned long) insn);
 
-  dsc->modinsn[0] = insn;
+  RECORD_ARM_MODE_INSN (0, insn);
 
   return 0;
 }
@@ -6904,23 +6917,59 @@ arm_displaced_init_closure (struct gdbarch *gdbarch, CORE_ADDR from,
 			    CORE_ADDR to, struct displaced_step_closure *dsc)
 {
   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
-  unsigned int i;
+  unsigned int i, len, offset;
   enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
+  int size = (dsc->insn_mode == ARM ? 4 : 2 );
+  const unsigned char *bkp_insn;
 
+  offset = 0;
   /* Poke modified instruction(s).  */
   for (i = 0; i < dsc->numinsns; i++)
     {
+      unsigned long insn;
+
+
       if (debug_displaced)
-	fprintf_unfiltered (gdb_stdlog, "displaced: writing insn %.8lx at "
-			    "%.8lx\n", (unsigned long) dsc->modinsn[i],
-			    (unsigned long) to + i * 4);
-      write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
+	{
+	  fprintf_unfiltered (gdb_stdlog, "displaced: writing insn ");
+	  if (size == 4)
+	    fprintf_unfiltered (gdb_stdlog, "%.8lx",
+				dsc->modinsn[i]);
+	  else if (size == 2)
+	    fprintf_unfiltered (gdb_stdlog, "%.4x",
+				(unsigned short)dsc->modinsn[i]);
+
+	  fprintf_unfiltered (gdb_stdlog, " at %.8lx\n",
+			      (unsigned long) to + offset);
+
+	}
+      write_memory_unsigned_integer (to + offset, size,
+				     byte_order_for_code,
 				     dsc->modinsn[i]);
+      offset += size;
     }
 
+  /* Choose the correct breakpoint instruction.  */
+  if (dsc->insn_mode == ARM)
+    {
+      bkp_insn = tdep->arm_breakpoint;
+      len = tdep->arm_breakpoint_size;
+    }
+  else if (dsc->insn_mode == THUMB_2)
+    {
+      bkp_insn = tdep->thumb2_breakpoint;
+      len = tdep->thumb2_breakpoint_size;
+    }
+  else
+    {
+  /* If this insn is Thumb instruction, we should place thumb breakpoint.
+     If this insn is 16-bit Thumb-2 instruction, we should place Thumb-2
+     breakpoint.  Now, we place Thumb breakpoint in a unique way.  */
+      bkp_insn = tdep->thumb_breakpoint;
+      len = tdep->thumb_breakpoint_size;
+    }
   /* Put breakpoint afterwards.  */
-  write_memory (to + dsc->numinsns * 4, tdep->arm_breakpoint,
-		tdep->arm_breakpoint_size);
+  write_memory (to + offset, bkp_insn, len);
 
   if (debug_displaced)
     fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
@@ -6956,7 +7005,26 @@ arm_displaced_step_fixup (struct gdbarch *gdbarch,
     dsc->cleanup (gdbarch, regs, dsc);
 
   if (!dsc->wrote_to_pc)
-    regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, dsc->insn_addr + 4);
+    {
+      CORE_ADDR next_pc;
+      if (dsc->insn_mode == ARM)
+	next_pc = dsc->insn_addr + 4;
+      else if (dsc->insn_mode == THUMB)
+	next_pc = dsc->insn_addr + 2;
+      else
+	{
+	  struct frame_info *fi = get_current_frame ();
+	  enum bfd_endian byte_order_for_code
+	    = gdbarch_byte_order_for_code (gdbarch);
+	  unsigned short inst1
+	    = read_memory_unsigned_integer (dsc->insn_addr, 2,
+					    byte_order_for_code);
+
+	  next_pc =  dsc->insn_addr + thumb_insn_size (inst1);
+	}
+
+      regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, next_pc);
+    }
 }
 
 #include "bfd-in2.h"
diff --git a/gdb/arm-tdep.h b/gdb/arm-tdep.h
index ef02002..1bf7314 100644
--- a/gdb/arm-tdep.h
+++ b/gdb/arm-tdep.h
@@ -202,8 +202,32 @@ struct gdbarch_tdep
   CORE_ADDR (*syscall_next_pc) (struct frame_info *frame);
 };
 
+enum INSN_MODE {ARM, THUMB, THUMB_2};
+
 /* Structures used for displaced stepping.  */
 
+/* Record an ARM mode instruction in one slot.  */
+#define RECORD_ARM_MODE_INSN(INDEX, INSN) do \
+{\
+  dsc->modinsn[INDEX] = INSN;\
+  dsc->insn_mode = ARM;\
+ } while (0)
+
+#define RECORD_THUMB_MODE_INSN(INDEX, INSN) do \
+{\
+  dsc->modinsn[INDEX] = INSN;\
+  dsc->insn_mode = THUMB;\
+ } while (0)
+
+/* Record the two parts of 32-bit Thumb-2 instruction. Each part occupies
+   one array element.  */
+#define RECORD_THUMB2_MODE_INSN(INDEX, INSN1, INSN2) do \
+{ \
+  dsc->modinsn[INDEX] = INSN1;\
+  dsc->modinsn[INDEX + 1] = INSN2;\
+  dsc->insn_mode = THUMB_2;\
+} while (0)
+
 /* The maximum number of temporaries available for displaced instructions.  */
 #define DISPLACED_TEMPS			16
 /* The maximum number of modified instructions generated for one single-stepped
@@ -262,6 +286,14 @@ struct displaced_step_closure
 			  struct displaced_step_closure *dsc);
     } svc;
   } u;
+
+  /* The mode of instructions copied in array MODINSN.  */
+  enum INSN_MODE insn_mode;
+
+  /* The slots in the array is used in this way below,
+     - ARM instruction occupies one slot with flag `ARM',
+     - Thumb 16 bit instruction occupies one slot with flag `Thumb'
+     - Thumb 32-bit instruction occupies *two* slots with flag `Thumb'.  */
   unsigned long modinsn[DISPLACED_MODIFIED_INSNS];
   int numinsns;
   CORE_ADDR insn_addr;

^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [patch 2/3] Displaced stepping for 16-bit Thumb instructions
  2011-02-25 18:09             ` Yao Qi
@ 2011-02-25 20:17               ` Ulrich Weigand
  2011-02-26 14:07                 ` Yao Qi
  0 siblings, 1 reply; 66+ messages in thread
From: Ulrich Weigand @ 2011-02-25 20:17 UTC (permalink / raw)
  To: Yao Qi; +Cc: gdb-patches

Yao Qi wrote:

> This new patch implements what we discussed above.  There is a minor
> difference on rule #3.  "Thumb 32-bit instruction occupies *two* slots
> with flag `Thumb-2'", because we have to choose breakpoint type (thumb
> breakpoint or thumb-2 breakpoint) according to this flag.

Actually, there's no need to get complicated w.r.t. breakpoints.
The only reason for using a Thumb-2 breakpoint is if we *replace*
an existing 32-bit instruction and don't want to mess up instruction
stream parsing.  (E.g. if the breakpoint is under an IT block and
happens to be skipped, instruction execution would continue with
the "second half" of the replaced instruction if we had used just
a regular Thumb breakpoint.)

However, with displaced stepping, we construct a full instruction
sequence from scratch.  In this case, we can just always use a
16-bit Thumb breakpoint instruction.  (In fact, throughout the
instruction sequence we construct, we can freely intermix 16-bit
and 32-bit instructions.  We just cannot intermix ARM and Thumb,
of course.)

> +      /* Compute pipeline offset:
> +	 - When executing an ARM instruction, PC reads as the address of the
> +	 current instruction plus 8.
> +	 - When executing a Thumb instruction, PC reads as the address of the
> +	 current instruction plus 4.  */
> +
> +      if (displaced_in_arm_mode (regs))
> +	from += 8;
> +      else
> +	from += 4;
> +
>        if (debug_displaced)
>  	fprintf_unfiltered (gdb_stdlog, "displaced: read pc value %.8lx\n",
> -			    (unsigned long) from + 8);
> -      return (ULONGEST) from + 8;  /* Pipeline offset.  */
> +			    (unsigned long) from);
> +      return (ULONGEST) from;  /* Pipeline offset.  */

Just remove the comment from that last line here; the offset is now
handled above.

>        dsc->u.ldst.restore_r4 = 1;
> -      dsc->modinsn[0] = 0xe92d8000;  /* push {pc} */
> -      dsc->modinsn[1] = 0xe8bd0010;  /* pop  {r4} */
> -      dsc->modinsn[2] = 0xe044400f;  /* sub r4, r4, pc.  */
> -      dsc->modinsn[3] = 0xe2844008;  /* add r4, r4, #8.  */
> -      dsc->modinsn[4] = 0xe0800004;  /* add r0, r0, r4.  */
> +      RECORD_ARM_MODE_INSN (0, 0xe92d8000); /* push {pc} */
> +      RECORD_ARM_MODE_INSN (1, 0xe8bd0010); /* pop  {r4} */
> +      RECORD_ARM_MODE_INSN (2, 0xe044400f); /* sub r4, r4, pc.  */
> +      RECORD_ARM_MODE_INSN (3, 0xe2844008); /* add r4, r4, #8.  */
> +      RECORD_ARM_MODE_INSN (4, 0xe0800004);  /* add r0, r0, r4.  */
>  
>        /* As above.  */
>        if (immed)
> -	dsc->modinsn[5] = (insn & 0xfff00fff) | 0x20000;
> +	RECORD_ARM_MODE_INSN (5, ((insn & 0xfff00fff) | 0x20000));
>        else
> -	dsc->modinsn[5] = (insn & 0xfff00ff0) | 0x20003;
> -
> -      dsc->modinsn[6] = 0x0;  /* breakpoint location.  */
> -      dsc->modinsn[7] = 0x0;  /* scratch space.  */
> +	RECORD_ARM_MODE_INSN (5, ((insn & 0xfff00ff0) | 0x20003));
>  
> +      RECORD_ARM_MODE_INSN (6, 0x00); /* breakpoint location.  */
> +      RECORD_ARM_MODE_INSN (7, 0x00); /* scratch space.  */

This reminds me: after your latest patch in that area, we do not
actually use any scratch space in the instruction stream any more,
so this could be removed ...

> +    {
> +      CORE_ADDR next_pc;
> +      if (dsc->insn_mode == ARM)
> +	next_pc = dsc->insn_addr + 4;
> +      else if (dsc->insn_mode == THUMB)
> +	next_pc = dsc->insn_addr + 2;
> +      else
> +	{
> +	  struct frame_info *fi = get_current_frame ();
> +	  enum bfd_endian byte_order_for_code
> +	    = gdbarch_byte_order_for_code (gdbarch);
> +	  unsigned short inst1
> +	    = read_memory_unsigned_integer (dsc->insn_addr, 2,
> +					    byte_order_for_code);
> +
> +	  next_pc =  dsc->insn_addr + thumb_insn_size (inst1);
> +	}

Huh?  Shouldn't we know this already?  See below ...  [*]

> +enum INSN_MODE {ARM, THUMB, THUMB_2};
> +
>  /* Structures used for displaced stepping.  */
>  
> +/* Record an ARM mode instruction in one slot.  */
> +#define RECORD_ARM_MODE_INSN(INDEX, INSN) do \
> +{\
> +  dsc->modinsn[INDEX] = INSN;\
> +  dsc->insn_mode = ARM;\
> + } while (0)

This doesn't really make sense to me; note how the insn_mode flag
gets overwritten every time one of the macros is used.

Rather, the insn_mode flag should be set *once*, at very top
of arm_process_displaced_insn, and it should indicate the mode
of the *original* instruction we're replacing (whether it is
ARM, 16-bit Thumb, or 32-bit Thumb).

If we do that, we don't have to re-examine the original
instruction as above at [*].

[ In fact, it might be even easier to replace insn_mode with
  *two* separate fields:

  * insn_size  holds the size (4 or 2) of the *original* insn
  * is_thumb   is true if the original insn (and thus all
               replacement insns) are Thumb instead of ARM ]

> +  /* The mode of instructions copied in array MODINSN.  */
> +  enum INSN_MODE insn_mode;
> +
> +  /* The slots in the array is used in this way below,
> +     - ARM instruction occupies one slot with flag `ARM',
> +     - Thumb 16 bit instruction occupies one slot with flag `Thumb'
> +     - Thumb 32-bit instruction occupies *two* slots with flag `Thumb'.  */
>    unsigned long modinsn[DISPLACED_MODIFIED_INSNS];

So this should rather say: "insn_mode" is the mode of the
original instruction.   If insn_mode is ARM, then each entry of
modinsn holds 4 bytes corresponding to one ARM instruction;
if insn_mode is a Thumb mode, then each modinsn slot holds
2 bytes corresponding to either a 16-bit Thumb instruction
or one half of a 32-bit Thumb instruction.

Bye,
Ulrich

-- 
  Dr. Ulrich Weigand
  GNU Toolchain for Linux on System z and Cell BE
  Ulrich.Weigand@de.ibm.com

^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [patch 2/3] Displaced stepping for 16-bit Thumb instructions
  2011-02-25 20:17               ` Ulrich Weigand
@ 2011-02-26 14:07                 ` Yao Qi
  2011-02-28 17:37                   ` Ulrich Weigand
  0 siblings, 1 reply; 66+ messages in thread
From: Yao Qi @ 2011-02-26 14:07 UTC (permalink / raw)
  To: Ulrich Weigand; +Cc: gdb-patches

[-- Attachment #1: Type: text/plain, Size: 4462 bytes --]

On 02/26/2011 03:22 AM, Ulrich Weigand wrote:
> Yao Qi wrote:
> 
>> This new patch implements what we discussed above.  There is a minor
>> difference on rule #3.  "Thumb 32-bit instruction occupies *two* slots
>> with flag `Thumb-2'", because we have to choose breakpoint type (thumb
>> breakpoint or thumb-2 breakpoint) according to this flag.
> 
> Actually, there's no need to get complicated w.r.t. breakpoints.
> The only reason for using a Thumb-2 breakpoint is if we *replace*
> an existing 32-bit instruction and don't want to mess up instruction
> stream parsing.  (E.g. if the breakpoint is under an IT block and
> happens to be skipped, instruction execution would continue with
> the "second half" of the replaced instruction if we had used just
> a regular Thumb breakpoint.)
> 
> However, with displaced stepping, we construct a full instruction
> sequence from scratch.  In this case, we can just always use a
> 16-bit Thumb breakpoint instruction.  (In fact, throughout the
> instruction sequence we construct, we can freely intermix 16-bit
> and 32-bit instructions.  We just cannot intermix ARM and Thumb,
> of course.)
> 

Hmm, I think you are right.  Fixed it in my new patch.

>> +      /* Compute pipeline offset:
>> +	 - When executing an ARM instruction, PC reads as the address of the
>> +	 current instruction plus 8.
>> +	 - When executing a Thumb instruction, PC reads as the address of the
>> +	 current instruction plus 4.  */
>> +
>> +      if (displaced_in_arm_mode (regs))
>> +	from += 8;
>> +      else
>> +	from += 4;
>> +
>>        if (debug_displaced)
>>  	fprintf_unfiltered (gdb_stdlog, "displaced: read pc value %.8lx\n",
>> -			    (unsigned long) from + 8);
>> -      return (ULONGEST) from + 8;  /* Pipeline offset.  */
>> +			    (unsigned long) from);
>> +      return (ULONGEST) from;  /* Pipeline offset.  */
> 
> Just remove the comment from that last line here; the offset is now
> handled above.
> 

Fixed.

>>        dsc->u.ldst.restore_r4 = 1;
>> -      dsc->modinsn[0] = 0xe92d8000;  /* push {pc} */
>> -      dsc->modinsn[1] = 0xe8bd0010;  /* pop  {r4} */
>> -      dsc->modinsn[2] = 0xe044400f;  /* sub r4, r4, pc.  */
>> -      dsc->modinsn[3] = 0xe2844008;  /* add r4, r4, #8.  */
>> -      dsc->modinsn[4] = 0xe0800004;  /* add r0, r0, r4.  */
>> +      RECORD_ARM_MODE_INSN (0, 0xe92d8000); /* push {pc} */
>> +      RECORD_ARM_MODE_INSN (1, 0xe8bd0010); /* pop  {r4} */
>> +      RECORD_ARM_MODE_INSN (2, 0xe044400f); /* sub r4, r4, pc.  */
>> +      RECORD_ARM_MODE_INSN (3, 0xe2844008); /* add r4, r4, #8.  */
>> +      RECORD_ARM_MODE_INSN (4, 0xe0800004);  /* add r0, r0, r4.  */
>>  
>>        /* As above.  */
>>        if (immed)
>> -	dsc->modinsn[5] = (insn & 0xfff00fff) | 0x20000;
>> +	RECORD_ARM_MODE_INSN (5, ((insn & 0xfff00fff) | 0x20000));
>>        else
>> -	dsc->modinsn[5] = (insn & 0xfff00ff0) | 0x20003;
>> -
>> -      dsc->modinsn[6] = 0x0;  /* breakpoint location.  */
>> -      dsc->modinsn[7] = 0x0;  /* scratch space.  */
>> +	RECORD_ARM_MODE_INSN (5, ((insn & 0xfff00ff0) | 0x20003));
>>  
>> +      RECORD_ARM_MODE_INSN (6, 0x00); /* breakpoint location.  */
>> +      RECORD_ARM_MODE_INSN (7, 0x00); /* scratch space.  */
> 
> This reminds me: after your latest patch in that area, we do not
> actually use any scratch space in the instruction stream any more,
> so this could be removed ...
> 

Oh, yes.  I'll remove it in another patch.

>> +    {
>> +      CORE_ADDR next_pc;
>> +      if (dsc->insn_mode == ARM)
>> +	next_pc = dsc->insn_addr + 4;
>> +      else if (dsc->insn_mode == THUMB)
>> +	next_pc = dsc->insn_addr + 2;
>> +      else
>> +	{
>> +	  struct frame_info *fi = get_current_frame ();
>> +	  enum bfd_endian byte_order_for_code
>> +	    = gdbarch_byte_order_for_code (gdbarch);
>> +	  unsigned short inst1
>> +	    = read_memory_unsigned_integer (dsc->insn_addr, 2,
>> +					    byte_order_for_code);
>> +
>> +	  next_pc =  dsc->insn_addr + thumb_insn_size (inst1);
>> +	}
> 
> Huh?  Shouldn't we know this already?  See below ...  [*]
> 
> 
> [ In fact, it might be even easier to replace insn_mode with
>   *two* separate fields:
> 
>   * insn_size  holds the size (4 or 2) of the *original* insn
>   * is_thumb   is true if the original insn (and thus all
>                replacement insns) are Thumb instead of ARM ]
> 

OK, two new fields are added in struct displaced_step_closure.  The
computation of next_pc is simplified.

-- 
Yao (齐尧)

[-- Attachment #2: 0001-refactor-to-handle-both-32-bit-and-16-bit.patch --]
[-- Type: text/x-patch, Size: 14602 bytes --]

gdb/

	* arm-tdep.h (struct displaced_step_closure): New fields insn_size and is_thumb.
	(RECORD_ARM_MODE_INSN, RECORD_THUMB_MODE_INSN, RECORD_THUMB2_MODE_INSN): New macro.
	* arm-tdep.c (copy_unmodified): Save modified insns by RECORD_ARM_MODE_INSN.
	(copy_preload, copy_preload_reg, copy_copro_load_store, copy_b_bl_blx): Likewise.
	(copy_bx_blx_reg, copy_alu_imm, copy_alu_reg, copy_alu_shifted_reg): Likewise.
	(copy_extra_ld_st, copy_ldr_str_ldrb_strb, copy_block_xfer): Likewise.
	(copy_svc, copy_undef, copy_unpred): Likewise.
	(displaced_read_reg): Handle both ARM and Thumb mode when reading PC.
	(arm_displaced_init_closure): Handle both 32bit and 16bit insns.
	(arm_displaced_step_fixup): Likewise.
	* arm-linux-tdep.c (arm_linux_copy_svc): Save modified insns by RECORD_ARM_MODE_INSN.
	(arm_catch_kernel_helper_return): Likewise.

---
 gdb/arm-linux-tdep.c |    4 +-
 gdb/arm-tdep.c       |  127 +++++++++++++++++++++++++++++++++----------------
 gdb/arm-tdep.h       |   30 ++++++++++++
 3 files changed, 117 insertions(+), 44 deletions(-)

diff --git a/gdb/arm-linux-tdep.c b/gdb/arm-linux-tdep.c
index ff649d6..75a4ea4 100644
--- a/gdb/arm-linux-tdep.c
+++ b/gdb/arm-linux-tdep.c
@@ -827,7 +827,7 @@ arm_linux_copy_svc (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
      Cleanup: if pc lands in scratch space, pc <- insn_addr + 4
               else leave pc alone.  */
 
-  dsc->modinsn[0] = insn;
+  RECORD_ARM_MODE_INSN (0, insn);
 
   dsc->cleanup = &arm_linux_cleanup_svc;
   /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
@@ -885,7 +885,7 @@ arm_catch_kernel_helper_return (struct gdbarch *gdbarch, CORE_ADDR from,
 		       CANNOT_WRITE_PC);
   write_memory_unsigned_integer (to + 8, 4, byte_order, from);
 
-  dsc->modinsn[0] = 0xe59ef004;  /* ldr pc, [lr, #4].  */
+  RECORD_ARM_MODE_INSN (0, 0xe59ef004); /* ldr pc, [lr, #4].  */
 }
 
 /* Linux-specific displaced step instruction copying function.  Detects when
diff --git a/gdb/arm-tdep.c b/gdb/arm-tdep.c
index f0e9435..d1f5d7b 100644
--- a/gdb/arm-tdep.c
+++ b/gdb/arm-tdep.c
@@ -5106,6 +5106,8 @@ arm_adjust_breakpoint_address (struct gdbarch *gdbarch, CORE_ADDR bpaddr)
 /* NOP instruction (mov r0, r0).  */
 #define ARM_NOP				0xe1a00000
 
+static int displaced_in_arm_mode (struct regcache *regs);
+
 /* Helper for register reads for displaced stepping.  In particular, this
    returns the PC as it would be seen by the instruction at its original
    location.  */
@@ -5117,10 +5119,21 @@ displaced_read_reg (struct regcache *regs, CORE_ADDR from, int regno)
 
   if (regno == 15)
     {
+      /* Compute pipeline offset:
+	 - When executing an ARM instruction, PC reads as the address of the
+	 current instruction plus 8.
+	 - When executing a Thumb instruction, PC reads as the address of the
+	 current instruction plus 4.  */
+
+      if (displaced_in_arm_mode (regs))
+	from += 8;
+      else
+	from += 4;
+
       if (debug_displaced)
 	fprintf_unfiltered (gdb_stdlog, "displaced: read pc value %.8lx\n",
-			    (unsigned long) from + 8);
-      return (ULONGEST) from + 8;  /* Pipeline offset.  */
+			    (unsigned long) from);
+      return (ULONGEST) from;
     }
   else
     {
@@ -5306,7 +5319,7 @@ copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
 			"opcode/class '%s' unmodified\n", (unsigned long) insn,
 			iname);
 
-  dsc->modinsn[0] = insn;
+  RECORD_ARM_MODE_INSN (0, insn);
 
   return 0;
 }
@@ -5349,7 +5362,7 @@ copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
 
   dsc->u.preload.immed = 1;
 
-  dsc->modinsn[0] = insn & 0xfff0ffff;
+  RECORD_ARM_MODE_INSN (0, (insn & 0xfff0ffff));
 
   dsc->cleanup = &cleanup_preload;
 
@@ -5390,7 +5403,7 @@ copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn,
 
   dsc->u.preload.immed = 0;
 
-  dsc->modinsn[0] = (insn & 0xfff0fff0) | 0x1;
+  RECORD_ARM_MODE_INSN (0, ((insn & 0xfff0fff0) | 0x1));
 
   dsc->cleanup = &cleanup_preload;
 
@@ -5443,7 +5456,7 @@ copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
   dsc->u.ldst.writeback = bit (insn, 25);
   dsc->u.ldst.rn = rn;
 
-  dsc->modinsn[0] = insn & 0xfff0ffff;
+  RECORD_ARM_MODE_INSN (0, (insn & 0xfff0ffff));
 
   dsc->cleanup = &cleanup_copro_load_store;
 
@@ -5515,7 +5528,7 @@ copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
   dsc->u.branch.exchange = exchange;
   dsc->u.branch.dest = from + 8 + offset;
 
-  dsc->modinsn[0] = ARM_NOP;
+  RECORD_ARM_MODE_INSN (0, ARM_NOP);
 
   dsc->cleanup = &cleanup_branch;
 
@@ -5554,7 +5567,7 @@ copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
   dsc->u.branch.link = link;
   dsc->u.branch.exchange = 1;
 
-  dsc->modinsn[0] = ARM_NOP;
+  RECORD_ARM_MODE_INSN (0, ARM_NOP);
 
   dsc->cleanup = &cleanup_branch;
 
@@ -5613,9 +5626,9 @@ copy_alu_imm (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
   dsc->rd = rd;
 
   if (is_mov)
-    dsc->modinsn[0] = insn & 0xfff00fff;
+    RECORD_ARM_MODE_INSN (0, (insn & 0xfff00fff));
   else
-    dsc->modinsn[0] = (insn & 0xfff00fff) | 0x10000;
+    RECORD_ARM_MODE_INSN (0, ((insn & 0xfff00fff) | 0x10000));
 
   dsc->cleanup = &cleanup_alu_imm;
 
@@ -5682,9 +5695,9 @@ copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
   dsc->rd = rd;
 
   if (is_mov)
-    dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x2;
+    RECORD_ARM_MODE_INSN (0, ((insn & 0xfff00ff0) | 0x2));
   else
-    dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x10002;
+    RECORD_ARM_MODE_INSN (0, ((insn & 0xfff00ff0) | 0x10002));
 
   dsc->cleanup = &cleanup_alu_reg;
 
@@ -5757,9 +5770,9 @@ copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
   dsc->rd = rd;
 
   if (is_mov)
-    dsc->modinsn[0] = (insn & 0xfff000f0) | 0x302;
+    RECORD_ARM_MODE_INSN (0, ((insn & 0xfff000f0) | 0x302));
   else
-    dsc->modinsn[0] = (insn & 0xfff000f0) | 0x10302;
+    RECORD_ARM_MODE_INSN (0, ((insn & 0xfff000f0) | 0x10302));
 
   dsc->cleanup = &cleanup_alu_shifted_reg;
 
@@ -5883,12 +5896,12 @@ copy_extra_ld_st (struct gdbarch *gdbarch, uint32_t insn, int unpriveleged,
     /* {ldr,str}<width><cond> rt, [rt2,] [rn, #imm]
 	->
        {ldr,str}<width><cond> r0, [r1,] [r2, #imm].  */
-    dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
+    RECORD_ARM_MODE_INSN (0, ((insn & 0xfff00fff) | 0x20000));
   else
     /* {ldr,str}<width><cond> rt, [rt2,] [rn, +/-rm]
 	->
        {ldr,str}<width><cond> r0, [r1,] [r2, +/-r3].  */
-    dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
+    RECORD_ARM_MODE_INSN (0, ((insn & 0xfff00ff0) | 0x20003));
 
   dsc->cleanup = load[opcode] ? &cleanup_load : &cleanup_store;
 
@@ -5971,32 +5984,31 @@ copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
 	/* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
 	   ->
 	   {ldr,str}[b]<cond> r0, [r2, #imm].  */
-	dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
+	RECORD_ARM_MODE_INSN (0, ((insn & 0xfff00fff) | 0x20000));
       else
 	/* {ldr,str}[b]<cond> rt, [rn, rm], etc.
 	   ->
 	   {ldr,str}[b]<cond> r0, [r2, r3].  */
-	dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
+	RECORD_ARM_MODE_INSN (0, ((insn & 0xfff00ff0) | 0x20003));
     }
   else
     {
       /* We need to use r4 as scratch.  Make sure it's restored afterwards.  */
       dsc->u.ldst.restore_r4 = 1;
-      dsc->modinsn[0] = 0xe92d8000;  /* push {pc} */
-      dsc->modinsn[1] = 0xe8bd0010;  /* pop  {r4} */
-      dsc->modinsn[2] = 0xe044400f;  /* sub r4, r4, pc.  */
-      dsc->modinsn[3] = 0xe2844008;  /* add r4, r4, #8.  */
-      dsc->modinsn[4] = 0xe0800004;  /* add r0, r0, r4.  */
+      RECORD_ARM_MODE_INSN (0, 0xe92d8000); /* push {pc} */
+      RECORD_ARM_MODE_INSN (1, 0xe8bd0010); /* pop  {r4} */
+      RECORD_ARM_MODE_INSN (2, 0xe044400f); /* sub r4, r4, pc.  */
+      RECORD_ARM_MODE_INSN (3, 0xe2844008); /* add r4, r4, #8.  */
+      RECORD_ARM_MODE_INSN (4, 0xe0800004);  /* add r0, r0, r4.  */
 
       /* As above.  */
       if (immed)
-	dsc->modinsn[5] = (insn & 0xfff00fff) | 0x20000;
+	RECORD_ARM_MODE_INSN (5, ((insn & 0xfff00fff) | 0x20000));
       else
-	dsc->modinsn[5] = (insn & 0xfff00ff0) | 0x20003;
-
-      dsc->modinsn[6] = 0x0;  /* breakpoint location.  */
-      dsc->modinsn[7] = 0x0;  /* scratch space.  */
+	RECORD_ARM_MODE_INSN (5, ((insn & 0xfff00ff0) | 0x20003));
 
+      RECORD_ARM_MODE_INSN (6, 0x00); /* breakpoint location.  */
+      RECORD_ARM_MODE_INSN (7, 0x00); /* scratch space.  */
       dsc->numinsns = 6;
     }
 
@@ -6268,7 +6280,7 @@ copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
 	     instruction (which might not behave perfectly in all cases, but
 	     these instructions should be rare enough for that not to matter
 	     too much).  */
-	  dsc->modinsn[0] = ARM_NOP;
+	  RECORD_ARM_MODE_INSN (0, ARM_NOP);
 
 	  dsc->cleanup = &cleanup_block_load_all;
 	}
@@ -6312,7 +6324,8 @@ copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
 				"list %.4x\n"), rn, writeback ? "!" : "",
 				(int) insn & 0xffff, new_regmask);
 
-	  dsc->modinsn[0] = (insn & ~0xffff) | (new_regmask & 0xffff);
+	  RECORD_ARM_MODE_INSN (0,
+				 ((insn & ~0xffff) | (new_regmask & 0xffff)));
 
 	  dsc->cleanup = &cleanup_block_load_pc;
 	}
@@ -6325,7 +6338,7 @@ copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
 	 Doing things this way has the advantage that we can auto-detect
 	 the offset of the PC write (which is architecture-dependent) in
 	 the cleanup routine.  */
-      dsc->modinsn[0] = insn;
+      RECORD_ARM_MODE_INSN (0, insn);
 
       dsc->cleanup = &cleanup_block_store_pc;
     }
@@ -6368,7 +6381,7 @@ copy_svc (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
      Insn: unmodified svc.
      Cleanup: pc <- insn_addr + 4.  */
 
-  dsc->modinsn[0] = insn;
+  RECORD_ARM_MODE_INSN (0, insn);
 
   dsc->cleanup = &cleanup_svc;
   /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
@@ -6389,7 +6402,7 @@ copy_undef (struct gdbarch *gdbarch, uint32_t insn,
 			"displaced: copying undefined insn %.8lx\n",
 			(unsigned long) insn);
 
-  dsc->modinsn[0] = insn;
+  RECORD_ARM_MODE_INSN (0, insn);
 
   return 0;
 }
@@ -6404,7 +6417,7 @@ copy_unpred (struct gdbarch *gdbarch, uint32_t insn,
     fprintf_unfiltered (gdb_stdlog, "displaced: copying unpredictable insn "
 			"%.8lx\n", (unsigned long) insn);
 
-  dsc->modinsn[0] = insn;
+  RECORD_ARM_MODE_INSN (0, insn);
 
   return 0;
 }
@@ -6861,6 +6874,8 @@ arm_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
   if (!displaced_in_arm_mode (regs))
     return thumb_process_displaced_insn (gdbarch, from, to, regs, dsc);
 
+  dsc->is_thumb = 0;
+  dsc->insn_size = 4;
   insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
   if (debug_displaced)
     fprintf_unfiltered (gdb_stdlog, "displaced: stepping insn %.8lx "
@@ -6904,23 +6919,49 @@ arm_displaced_init_closure (struct gdbarch *gdbarch, CORE_ADDR from,
 			    CORE_ADDR to, struct displaced_step_closure *dsc)
 {
   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
-  unsigned int i;
+  unsigned int i, len, offset;
   enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
+  int size = dsc->insn_size;
+  const unsigned char *bkp_insn;
 
+  offset = 0;
   /* Poke modified instruction(s).  */
   for (i = 0; i < dsc->numinsns; i++)
     {
       if (debug_displaced)
-	fprintf_unfiltered (gdb_stdlog, "displaced: writing insn %.8lx at "
-			    "%.8lx\n", (unsigned long) dsc->modinsn[i],
-			    (unsigned long) to + i * 4);
-      write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
+	{
+	  fprintf_unfiltered (gdb_stdlog, "displaced: writing insn ");
+	  if (size == 4)
+	    fprintf_unfiltered (gdb_stdlog, "%.8lx",
+				dsc->modinsn[i]);
+	  else if (size == 2)
+	    fprintf_unfiltered (gdb_stdlog, "%.4x",
+				(unsigned short)dsc->modinsn[i]);
+
+	  fprintf_unfiltered (gdb_stdlog, " at %.8lx\n",
+			      (unsigned long) to + offset);
+
+	}
+      write_memory_unsigned_integer (to + offset, size,
+				     byte_order_for_code,
 				     dsc->modinsn[i]);
+      offset += size;
+    }
+
+  /* Choose the correct breakpoint instruction.  */
+  if (dsc->is_thumb)
+    {
+      bkp_insn = tdep->thumb_breakpoint;
+      len = tdep->thumb_breakpoint_size;
+    }
+  else
+    {
+      bkp_insn = tdep->arm_breakpoint;
+      len = tdep->arm_breakpoint_size;
     }
 
   /* Put breakpoint afterwards.  */
-  write_memory (to + dsc->numinsns * 4, tdep->arm_breakpoint,
-		tdep->arm_breakpoint_size);
+  write_memory (to + offset, bkp_insn, len);
 
   if (debug_displaced)
     fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
@@ -6956,7 +6997,9 @@ arm_displaced_step_fixup (struct gdbarch *gdbarch,
     dsc->cleanup (gdbarch, regs, dsc);
 
   if (!dsc->wrote_to_pc)
-    regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, dsc->insn_addr + 4);
+    regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
+				    dsc->insn_addr + dsc->insn_size);
+
 }
 
 #include "bfd-in2.h"
diff --git a/gdb/arm-tdep.h b/gdb/arm-tdep.h
index ef02002..1b3f9e4 100644
--- a/gdb/arm-tdep.h
+++ b/gdb/arm-tdep.h
@@ -204,6 +204,25 @@ struct gdbarch_tdep
 
 /* Structures used for displaced stepping.  */
 
+/* Record an ARM mode instruction in one slot.  */
+#define RECORD_ARM_MODE_INSN(INDEX, INSN) do \
+{\
+  dsc->modinsn[INDEX] = INSN;\
+ } while (0)
+
+#define RECORD_THUMB_MODE_INSN(INDEX, INSN) do \
+{\
+  dsc->modinsn[INDEX] = INSN;\
+ } while (0)
+
+/* Record the two parts of 32-bit Thumb-2 instruction. Each part occupies
+   one array element.  */
+#define RECORD_THUMB2_MODE_INSN(INDEX, INSN1, INSN2) do \
+{ \
+  dsc->modinsn[INDEX] = INSN1;\
+  dsc->modinsn[INDEX + 1] = INSN2;\
+} while (0)
+
 /* The maximum number of temporaries available for displaced instructions.  */
 #define DISPLACED_TEMPS			16
 /* The maximum number of modified instructions generated for one single-stepped
@@ -262,6 +281,17 @@ struct displaced_step_closure
 			  struct displaced_step_closure *dsc);
     } svc;
   } u;
+
+  /* The size of original instruction, 2 or 4.  */
+  unsigned int insn_size;
+  /* True if the original insn (and thus all replacement insns) are Thumb
+     instead of ARM.   */
+  unsigned int is_thumb;
+
+  /* The slots in the array is used in this way below,
+     - ARM instruction occupies one slot,
+     - Thumb 16 bit instruction occupies one slot,
+     - Thumb 32-bit instruction occupies *two* slots, one part for each.  */
   unsigned long modinsn[DISPLACED_MODIFIED_INSNS];
   int numinsns;
   CORE_ADDR insn_addr;
-- 
1.7.0.4


^ permalink raw reply	[flat|nested] 66+ messages in thread

* Displaced stepping 0002: refactor and create some copy helpers
  2010-12-25 14:17 [patch 0/3] Displaced stepping for 16-bit Thumb instructions Yao Qi
                   ` (5 preceding siblings ...)
  2011-02-10  6:48 ` Ping 2 " Yao Qi
@ 2011-02-26 17:50 ` Yao Qi
  2011-02-28 17:53   ` Ulrich Weigand
  2011-02-28  2:15 ` Displaced stepping 0004: wip: 32-bit Thumb instructions Yao Qi
  2011-03-24 13:49 ` [try 2nd 0/8] Displaced stepping for " Yao Qi
  8 siblings, 1 reply; 66+ messages in thread
From: Yao Qi @ 2011-02-26 17:50 UTC (permalink / raw)
  To: gdb-patches

[-- Attachment #1: Type: text/plain, Size: 204 bytes --]

The patch continues to refactor code, in order to
 1) make copy functions separate for ARM and Thumb,
 2) define some copy helper functions for some ARM and Thumb-2 instructions.

-- 
Yao (齐尧)

[-- Attachment #2: 0002-refactor-and-create-some-copy-helpers.patch --]
[-- Type: text/x-patch, Size: 55151 bytes --]

	Refactor code.  Split copy_* routines to mode-dependent part (arm_copy_*)
	and mode-independent part (copy_*).  Define some copy helpers.
	
	* arm-tdep.c (THUMB_NOP): New macro.
	(union instruction_instance): New.
	(copy_unmodified): Renamed to arm_copy_unmodified.
	(arm_copy_unmodified): New.
	(copy_preload, copy_preload_reg, copy_b_bl_blx): Move mode-dependent
	part and leave mode-independent part.
	(copy_bx_blx_reg copy_ldr_str_ldrb_strb, copy_alu_reg): Likewise.
	(copy_block_xfer, copy_copro_load_store): Likewise.
	(arm_copy_preload, arm_copy_preload_reg, arm_copy_b_bl_blx): Mode-dependent
	part.
	(arm_copy_bx_blx_reg, arm_copy_ldr_str_ldrb_strb): Likewise.
	(arm_copy_alu_reg, arm_copy_block_xfer): Likewise.
	(copy_undef): Renamed to arm_copy_undef.
	(arm_copy_undef): New.
	(copy_unpred): Renamed to arm_copy_unpred.
	(arm_copy_unpred): New.
	(arm_copy_alu_shifted_reg): Renamed from copy_alu_shifted_reg.
	(arm_copy_unmodified_helper, arm_copy_undef_helper): Copy helpers for ARM.
	(arm_copy_copro_load_store_helper, arm_copy_ldm_with_pc_helper): Likewise.
	(arm_copy_svc_helper): Likewise.
	(copy_svc): Delete.
	(decode_misc_memhint_neon, decode_unconditional): Update callers.
	(decode_dp_misc, decode_miscellaneous, decode_ld_st_word_ubyte): Likewise.
	(decode_media, decode_b_bl_ldmstm, decode_ext_reg_ld_st): Likewise.
	(decode_svc_copro, copy_alu_imm, copy_extra_ld_st): Likewise.
	(arm_decode_svc_copro): Renamed from decode_svc_copro.  Call copy
	helpers routine.
	(arm_process_displaced_insn): Update callers to decode_svc_copro.
---
 gdb/arm-tdep.c |  926 +++++++++++++++++++++++++++++++++-----------------------
 1 files changed, 554 insertions(+), 372 deletions(-)

diff --git a/gdb/arm-tdep.c b/gdb/arm-tdep.c
index d1f5d7b..2d06d8e 100644
--- a/gdb/arm-tdep.c
+++ b/gdb/arm-tdep.c
@@ -5105,6 +5105,7 @@ arm_adjust_breakpoint_address (struct gdbarch *gdbarch, CORE_ADDR bpaddr)
 
 /* NOP instruction (mov r0, r0).  */
 #define ARM_NOP				0xe1a00000
+#define THUMB_NOP				0x4600
 
 static int displaced_in_arm_mode (struct regcache *regs);
 
@@ -5310,9 +5311,16 @@ insn_references_pc (uint32_t insn, uint32_t bitmask)
 /* The simplest copy function.  Many instructions have the same effect no
    matter what address they are executed at: in those cases, use this.  */
 
+union instruction_instance
+{
+  uint32_t _32_bit;
+  uint16_t _16_bit[2];
+};
+
+/* Copy ARM instruction without any modification.  */
 static int
-copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
-		 const char *iname, struct displaced_step_closure *dsc)
+arm_copy_unmodified (uint32_t insn, const char *iname,
+		     struct displaced_step_closure *dsc)
 {
   if (debug_displaced)
     fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx, "
@@ -5336,20 +5344,12 @@ cleanup_preload (struct gdbarch *gdbarch,
 }
 
 static int
-copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
+copy_preload (struct gdbarch *gdbarch, unsigned int rn, struct regcache *regs,
 	      struct displaced_step_closure *dsc)
 {
-  unsigned int rn = bits (insn, 16, 19);
   ULONGEST rn_val;
   CORE_ADDR from = dsc->insn_addr;
 
-  if (!insn_references_pc (insn, 0x000f0000ul))
-    return copy_unmodified (gdbarch, insn, "preload", dsc);
-
-  if (debug_displaced)
-    fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
-			(unsigned long) insn);
-
   /* Preload instructions:
 
      {pli/pld} [rn, #+/-imm]
@@ -5362,32 +5362,39 @@ copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
 
   dsc->u.preload.immed = 1;
 
-  RECORD_ARM_MODE_INSN (0, (insn & 0xfff0ffff));
-
   dsc->cleanup = &cleanup_preload;
 
   return 0;
 }
 
-/* Preload instructions with register offset.  */
-
 static int
-copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn,
-		  struct regcache *regs,
-		  struct displaced_step_closure *dsc)
+arm_copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
+	      struct displaced_step_closure *dsc)
 {
   unsigned int rn = bits (insn, 16, 19);
-  unsigned int rm = bits (insn, 0, 3);
-  ULONGEST rn_val, rm_val;
-  CORE_ADDR from = dsc->insn_addr;
 
-  if (!insn_references_pc (insn, 0x000f000ful))
-    return copy_unmodified (gdbarch, insn, "preload reg", dsc);
+  if (!insn_references_pc (insn, 0x000f0000ul))
+    return arm_copy_unmodified (insn, "preload", dsc);
 
   if (debug_displaced)
     fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
 			(unsigned long) insn);
 
+  RECORD_ARM_MODE_INSN (0, (insn & 0xfff0ffff));
+
+  return copy_preload (gdbarch, rn, regs, dsc);
+}
+
+/* Preload instructions with register offset.  */
+
+static int
+copy_preload_reg (struct gdbarch *gdbarch, unsigned int rn, unsigned int rm,
+		  struct regcache *regs,
+		  struct displaced_step_closure *dsc)
+{
+  ULONGEST rn_val, rm_val;
+  CORE_ADDR from = dsc->insn_addr;
+
   /* Preload register-offset instructions:
 
      {pli/pld} [rn, rm {, shift}]
@@ -5400,15 +5407,31 @@ copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn,
   rm_val = displaced_read_reg (regs, from, rm);
   displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
   displaced_write_reg (regs, dsc, 1, rm_val, CANNOT_WRITE_PC);
-
   dsc->u.preload.immed = 0;
 
-  RECORD_ARM_MODE_INSN (0, ((insn & 0xfff0fff0) | 0x1));
-
   dsc->cleanup = &cleanup_preload;
 
   return 0;
 }
+static int
+arm_copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn,
+		      struct regcache *regs,
+		      struct displaced_step_closure *dsc)
+{
+  unsigned int rn = bits (insn, 16, 19);
+  unsigned int rm = bits (insn, 0, 3);
+
+  if (!insn_references_pc (insn, 0x000f000ful))
+    return arm_copy_unmodified (insn, "preload reg", dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
+			(unsigned long) insn);
+
+  RECORD_ARM_MODE_INSN (0, ((insn & 0xfff0fff0) | 0x1));
+
+  return copy_preload_reg (gdbarch, rn, rm, regs, dsc);
+}
 
 /* Copy/cleanup coprocessor load and store instructions.  */
 
@@ -5426,21 +5449,14 @@ cleanup_copro_load_store (struct gdbarch *gdbarch,
 }
 
 static int
-copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
+copy_copro_load_store (struct gdbarch *gdbarch, unsigned int rn,
 		       struct regcache *regs,
 		       struct displaced_step_closure *dsc)
 {
-  unsigned int rn = bits (insn, 16, 19);
+
   ULONGEST rn_val;
   CORE_ADDR from = dsc->insn_addr;
 
-  if (!insn_references_pc (insn, 0x000f0000ul))
-    return copy_unmodified (gdbarch, insn, "copro load/store", dsc);
-
-  if (debug_displaced)
-    fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
-			"load/store insn %.8lx\n", (unsigned long) insn);
-
   /* Coprocessor load/store instructions:
 
      {stc/stc2} [<Rn>, #+/-imm]  (and other immediate addressing modes)
@@ -5453,11 +5469,6 @@ copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
   rn_val = displaced_read_reg (regs, from, rn);
   displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
 
-  dsc->u.ldst.writeback = bit (insn, 25);
-  dsc->u.ldst.rn = rn;
-
-  RECORD_ARM_MODE_INSN (0, (insn & 0xfff0ffff));
-
   dsc->cleanup = &cleanup_copro_load_store;
 
   return 0;
@@ -5465,10 +5476,9 @@ copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
 
 /* Clean up branch instructions (actually perform the branch, by setting
    PC).  */
-
 static void
 cleanup_branch (struct gdbarch *gdbarch, struct regcache *regs,
-		struct displaced_step_closure *dsc)
+	       struct displaced_step_closure *dsc)
 {
   ULONGEST from = dsc->insn_addr;
   uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
@@ -5482,29 +5492,25 @@ cleanup_branch (struct gdbarch *gdbarch, struct regcache *regs,
   if (dsc->u.branch.link)
     {
       ULONGEST pc = displaced_read_reg (regs, from, 15);
-      displaced_write_reg (regs, dsc, 14, pc - 4, CANNOT_WRITE_PC);
+
+      if (displaced_in_arm_mode (regs))
+	displaced_write_reg (regs, dsc, 14, pc - 4, CANNOT_WRITE_PC);
+      else
+	displaced_write_reg (regs, dsc, 14, (pc - 2) | 1u,
+			     CANNOT_WRITE_PC);
     }
 
-  displaced_write_reg (regs, dsc, 15, dsc->u.branch.dest, write_pc);
+  displaced_write_reg (regs, dsc, ARM_PC_REGNUM, dsc->u.branch.dest, write_pc);
 }
 
 /* Copy B/BL/BLX instructions with immediate destinations.  */
 
 static int
-copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
-	       struct regcache *regs, struct displaced_step_closure *dsc)
+copy_b_bl_blx (struct gdbarch *gdbarch, unsigned int cond, int exchange,
+	       int link, long offset, struct regcache *regs,
+	       struct displaced_step_closure *dsc)
 {
-  unsigned int cond = bits (insn, 28, 31);
-  int exchange = (cond == 0xf);
-  int link = exchange || bit (insn, 24);
   CORE_ADDR from = dsc->insn_addr;
-  long offset;
-
-  if (debug_displaced)
-    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s immediate insn "
-			"%.8lx\n", (exchange) ? "blx" : (link) ? "bl" : "b",
-			(unsigned long) insn);
-
   /* Implement "BL<cond> <label>" as:
 
      Preparation: cond <- instruction condition
@@ -5513,6 +5519,29 @@ copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
 
      B<cond> similar, but don't set r14 in cleanup.  */
 
+
+  dsc->u.branch.cond = cond;
+  dsc->u.branch.link = link;
+  dsc->u.branch.exchange = exchange;
+  dsc->cleanup = &cleanup_branch;
+
+  return 0;
+}
+
+/* Copy B/BL/BLX ARM instructions with immediate destinations.  */
+static int
+arm_copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
+		   struct regcache *regs, struct displaced_step_closure *dsc)
+{
+  unsigned int cond = bits (insn, 28, 31);
+  int exchange = (cond == 0xf);
+  int link = exchange || bit (insn, 24);
+  long offset;
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s immediate insn "
+			"%.8lx\n", (exchange) ? "blx" : (link) ? "bl" : "b",
+			(unsigned long) insn);
   if (exchange)
     /* For BLX, set bit 0 of the destination.  The cleanup_branch function will
        then arrange the switch into Thumb mode.  */
@@ -5523,36 +5552,21 @@ copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
   if (bit (offset, 25))
     offset = offset | ~0x3ffffff;
 
-  dsc->u.branch.cond = cond;
-  dsc->u.branch.link = link;
-  dsc->u.branch.exchange = exchange;
-  dsc->u.branch.dest = from + 8 + offset;
-
+  dsc->u.branch.dest = dsc->insn_addr + 8 + offset;
   RECORD_ARM_MODE_INSN (0, ARM_NOP);
 
-  dsc->cleanup = &cleanup_branch;
-
-  return 0;
+  return copy_b_bl_blx (gdbarch, cond, exchange, link, offset, regs, dsc);
 }
 
 /* Copy BX/BLX with register-specified destinations.  */
 
 static int
-copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
-		 struct regcache *regs, struct displaced_step_closure *dsc)
+copy_bx_blx_reg (struct gdbarch *gdbarch, unsigned int cond, int link,
+		 unsigned int rm, struct regcache *regs,
+		 struct displaced_step_closure *dsc)
 {
-  unsigned int cond = bits (insn, 28, 31);
-  /* BX:  x12xxx1x
-     BLX: x12xxx3x.  */
-  int link = bit (insn, 5);
-  unsigned int rm = bits (insn, 0, 3);
   CORE_ADDR from = dsc->insn_addr;
 
-  if (debug_displaced)
-    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s register insn "
-			"%.8lx\n", (link) ? "blx" : "bx",
-			(unsigned long) insn);
-
   /* Implement {BX,BLX}<cond> <reg>" as:
 
      Preparation: cond <- instruction condition
@@ -5563,17 +5577,38 @@ copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
 
   dsc->u.branch.dest = displaced_read_reg (regs, from, rm);
 
-  dsc->u.branch.cond = cond;
   dsc->u.branch.link = link;
   dsc->u.branch.exchange = 1;
 
-  RECORD_ARM_MODE_INSN (0, ARM_NOP);
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, " %s r%d -> 0x%.8lx\n",
+			(link) ? "blx" : "bx", rm, dsc->u.branch.dest);
 
   dsc->cleanup = &cleanup_branch;
 
   return 0;
 }
 
+static int
+arm_copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
+		     struct regcache *regs, struct displaced_step_closure *dsc)
+{
+  unsigned int cond = bits (insn, 28, 31);
+  /* BX:  x12xxx1x
+     BLX: x12xxx3x.  */
+  int link = bit (insn, 5);
+  unsigned int rm = bits (insn, 0, 3);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx",
+			(unsigned long) insn);
+
+  dsc->u.branch.cond = cond;
+  RECORD_ARM_MODE_INSN (0, ARM_NOP);
+
+  return copy_bx_blx_reg (gdbarch, cond, link, rm, regs, dsc);
+}
+
 /* Copy/cleanup arithmetic/logic instruction with immediate RHS.  */
 
 static void
@@ -5598,7 +5633,7 @@ copy_alu_imm (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
   CORE_ADDR from = dsc->insn_addr;
 
   if (!insn_references_pc (insn, 0x000ff000ul))
-    return copy_unmodified (gdbarch, insn, "ALU immediate", dsc);
+    return arm_copy_unmodified (insn, "ALU immediate", dsc);
 
   if (debug_displaced)
     fprintf_unfiltered (gdb_stdlog, "displaced: copying immediate %s insn "
@@ -5652,25 +5687,18 @@ cleanup_alu_reg (struct gdbarch *gdbarch,
   displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
 }
 
+
 static int
-copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
-	      struct displaced_step_closure *dsc)
+copy_alu_reg (struct gdbarch *gdbarch, struct regcache *regs,
+	      struct displaced_step_closure *dsc, unsigned int reg_ids[])
 {
-  unsigned int rn = bits (insn, 16, 19);
-  unsigned int rm = bits (insn, 0, 3);
-  unsigned int rd = bits (insn, 12, 15);
-  unsigned int op = bits (insn, 21, 24);
-  int is_mov = (op == 0xd);
+  unsigned int rn = reg_ids[1];
+  unsigned int rm = reg_ids[2];
+  unsigned int rd = reg_ids[0];
+
   ULONGEST rd_val, rn_val, rm_val;
   CORE_ADDR from = dsc->insn_addr;
 
-  if (!insn_references_pc (insn, 0x000ff00ful))
-    return copy_unmodified (gdbarch, insn, "ALU reg", dsc);
-
-  if (debug_displaced)
-    fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.8lx\n",
-			is_mov ? "move" : "ALU", (unsigned long) insn);
-
   /* Instruction is of form:
 
      <op><cond> rd, [rn,] rm [, <shift>]
@@ -5694,14 +5722,35 @@ copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
   displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
   dsc->rd = rd;
 
+  dsc->cleanup = &cleanup_alu_reg;
+
+  return 0;
+}
+
+static int
+arm_copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
+		  struct displaced_step_closure *dsc)
+{
+  unsigned int reg_ids[3];
+  unsigned int op = bits (insn, 21, 24);
+  int is_mov = (op == 0xd);
+
+  reg_ids[1] = bits (insn, 16, 19); /* Rn */
+  reg_ids[2] = bits (insn, 0, 3); /* Rm */
+  reg_ids[0] = bits (insn, 12, 15); /* Rd */
+  if (!insn_references_pc (insn, 0x000ff00ful))
+    return arm_copy_unmodified (insn, "ALU reg", dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.8lx\n",
+			is_mov ? "move" : "ALU", (unsigned long) insn);
+
   if (is_mov)
     RECORD_ARM_MODE_INSN (0, ((insn & 0xfff00ff0) | 0x2));
   else
     RECORD_ARM_MODE_INSN (0, ((insn & 0xfff00ff0) | 0x10002));
 
-  dsc->cleanup = &cleanup_alu_reg;
-
-  return 0;
+  return copy_alu_reg (gdbarch, regs, dsc, reg_ids);
 }
 
 /* Cleanup/copy arithmetic/logic insns with shifted register RHS.  */
@@ -5721,7 +5770,7 @@ cleanup_alu_shifted_reg (struct gdbarch *gdbarch,
 }
 
 static int
-copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
+arm_copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
 		      struct regcache *regs,
 		      struct displaced_step_closure *dsc)
 {
@@ -5735,7 +5784,7 @@ copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
   CORE_ADDR from = dsc->insn_addr;
 
   if (!insn_references_pc (insn, 0x000fff0ful))
-    return copy_unmodified (gdbarch, insn, "ALU shifted reg", dsc);
+    return arm_copy_unmodified (insn, "ALU shifted reg", dsc);
 
   if (debug_displaced)
     fprintf_unfiltered (gdb_stdlog, "displaced: copying shifted reg %s insn "
@@ -5852,7 +5901,7 @@ copy_extra_ld_st (struct gdbarch *gdbarch, uint32_t insn, int unpriveleged,
   CORE_ADDR from = dsc->insn_addr;
 
   if (!insn_references_pc (insn, 0x000ff00ful))
-    return copy_unmodified (gdbarch, insn, "extra load/store", dsc);
+    return arm_copy_unmodified (insn, "extra load/store", dsc);
 
   if (debug_displaced)
     fprintf_unfiltered (gdb_stdlog, "displaced: copying %sextra load/store "
@@ -5911,49 +5960,32 @@ copy_extra_ld_st (struct gdbarch *gdbarch, uint32_t insn, int unpriveleged,
 /* Copy byte/word loads and stores.  */
 
 static int
-copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
-			struct regcache *regs,
+copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, struct regcache *regs,
 			struct displaced_step_closure *dsc, int load, int byte,
-			int usermode)
+			int usermode, int writeback, int rm)
 {
-  int immed = !bit (insn, 25);
-  unsigned int rt = bits (insn, 12, 15);
-  unsigned int rn = bits (insn, 16, 19);
-  unsigned int rm = bits (insn, 0, 3);  /* Only valid if !immed.  */
   ULONGEST rt_val, rn_val, rm_val = 0;
   CORE_ADDR from = dsc->insn_addr;
 
-  if (!insn_references_pc (insn, 0x000ff00ful))
-    return copy_unmodified (gdbarch, insn, "load/store", dsc);
-
-  if (debug_displaced)
-    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s%s insn %.8lx\n",
-			load ? (byte ? "ldrb" : "ldr")
-			     : (byte ? "strb" : "str"), usermode ? "t" : "",
-			(unsigned long) insn);
-
   dsc->tmp[0] = displaced_read_reg (regs, from, 0);
   dsc->tmp[2] = displaced_read_reg (regs, from, 2);
-  if (!immed)
+  if (!dsc->u.ldst.immed)
     dsc->tmp[3] = displaced_read_reg (regs, from, 3);
   if (!load)
     dsc->tmp[4] = displaced_read_reg (regs, from, 4);
 
-  rt_val = displaced_read_reg (regs, from, rt);
-  rn_val = displaced_read_reg (regs, from, rn);
-  if (!immed)
+  rt_val = displaced_read_reg (regs, from, dsc->rd);
+  rn_val = displaced_read_reg (regs, from, dsc->u.ldst.rn);
+  if (!dsc->u.ldst.immed)
     rm_val = displaced_read_reg (regs, from, rm);
 
   displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
   displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
-  if (!immed)
+  if (!dsc->u.ldst.immed)
     displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
 
-  dsc->rd = rt;
   dsc->u.ldst.xfersize = byte ? 1 : 4;
-  dsc->u.ldst.rn = rn;
-  dsc->u.ldst.immed = immed;
-  dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
+  dsc->u.ldst.writeback = writeback;
 
   /* To write PC we can do:
 
@@ -5976,7 +6008,41 @@ copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
      of this can be found in Section "Saving from r15" in
      http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0204g/Cihbjifh.html */
 
-  if (load || rt != 15)
+  dsc->cleanup = load ? &cleanup_load : &cleanup_store;
+
+  return 0;
+}
+
+static int
+arm_copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
+			    struct regcache *regs,
+			    struct displaced_step_closure *dsc,
+			    int load, int byte, int usermode)
+{
+  int immed = !bit (insn, 25);
+  unsigned int rt = bits (insn, 12, 15);
+  unsigned int rn = bits (insn, 16, 19);
+  unsigned int rm = bits (insn, 0, 3);  /* Only valid if !immed.  */
+
+  if (!insn_references_pc (insn, 0x000ff00ful))
+    return arm_copy_unmodified (insn, "load/store", dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog,
+			"displaced: copying %s%s r%d [r%d] insn %.8lx\n",
+			load ? (byte ? "ldrb" : "ldr")
+			     : (byte ? "strb" : "str"), usermode ? "t" : "",
+			rt, rn,
+			(unsigned long) insn);
+
+  dsc->rd = rt;
+  dsc->u.ldst.rn = rn;
+  dsc->u.ldst.immed = immed;
+
+  copy_ldr_str_ldrb_strb (gdbarch, regs, dsc, load, byte, usermode,
+			  (bit (insn, 24) == 0 || bit (insn, 21) != 0), rm);
+
+  if (load || rt != ARM_PC_REGNUM)
     {
       dsc->u.ldst.restore_r4 = 0;
 
@@ -5995,8 +6061,9 @@ copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
     {
       /* We need to use r4 as scratch.  Make sure it's restored afterwards.  */
       dsc->u.ldst.restore_r4 = 1;
-      RECORD_ARM_MODE_INSN (0, 0xe92d8000); /* push {pc} */
-      RECORD_ARM_MODE_INSN (1, 0xe8bd0010); /* pop  {r4} */
+
+      RECORD_ARM_MODE_INSN (0, 0xe92d8000);  /* push {pc} */
+      RECORD_ARM_MODE_INSN (1, 0xe8bd0010);  /* pop  {r4} */
       RECORD_ARM_MODE_INSN (2, 0xe044400f); /* sub r4, r4, pc.  */
       RECORD_ARM_MODE_INSN (3, 0xe2844008); /* add r4, r4, #8.  */
       RECORD_ARM_MODE_INSN (4, 0xe0800004);  /* add r0, r0, r4.  */
@@ -6009,12 +6076,11 @@ copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
 
       RECORD_ARM_MODE_INSN (6, 0x00); /* breakpoint location.  */
       RECORD_ARM_MODE_INSN (7, 0x00); /* scratch space.  */
+
       dsc->numinsns = 6;
     }
 
-  dsc->cleanup = load ? &cleanup_load : &cleanup_store;
-
-  return 0;
+    return 0;
 }
 
 /* Cleanup LDM instructions with fully-populated register list.  This is an
@@ -6228,105 +6294,193 @@ cleanup_block_load_pc (struct gdbarch *gdbarch,
     }
 }
 
+/* Copy helper functions.  */
+typedef int (*copy_undef_helper)(union instruction_instance,
+				 struct displaced_step_closure *);
+typedef int (*copy_unmodified_helper)(union instruction_instance, const char*,
+				      struct displaced_step_closure *);
+typedef int (*copy_copro_load_store_helper)(struct gdbarch *,
+					    union instruction_instance,
+					    struct regcache *,
+					    struct displaced_step_closure *);
+typedef int (*copy_ldm_with_pc_helper) (union instruction_instance,
+					struct displaced_step_closure *,
+					struct regcache *);
+typedef int (*copy_svc_helper)(struct gdbarch *,
+			       union instruction_instance, CORE_ADDR,
+			       struct regcache *,
+			       struct displaced_step_closure *);
+
+/* Define helpers for ARM.  */
+static int
+arm_copy_unmodified_helper (union instruction_instance insn, const char *iname,
+			    struct displaced_step_closure *dsc)
+{
+  return arm_copy_unmodified (insn._32_bit, iname, dsc);
+}
+
+static int arm_copy_undef (uint32_t insn, struct displaced_step_closure *dsc);
+
+static int
+arm_copy_undef_helper (union instruction_instance ii,
+		       struct displaced_step_closure *dsc)
+{
+  return arm_copy_undef (ii._32_bit, dsc);
+}
+
+static int
+arm_copy_copro_load_store_helper (struct gdbarch *gdbarch,
+				  union instruction_instance insn,
+				  struct regcache *regs,
+				  struct displaced_step_closure *dsc)
+{
+  unsigned int rn = bits (insn._32_bit, 16, 19);
+  if (rn != ARM_PC_REGNUM)
+    return arm_copy_unmodified (insn._32_bit, "copro load/store", dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
+			"load/store insn %.8lx\n",
+			(unsigned long) insn._32_bit);
+
+  dsc->u.ldst.writeback = bit (insn._32_bit, 25);
+  dsc->u.ldst.rn = rn;
+
+  RECORD_ARM_MODE_INSN (0, (insn._32_bit & 0xfff0ffff));
+
+  return copy_copro_load_store (gdbarch, rn, regs, dsc);
+}
+
+static int
+arm_copy_ldm_with_pc_helper (union instruction_instance insn,
+			     struct displaced_step_closure *dsc,
+			     struct regcache *regs)
+{
+  /* LDM of a list of registers which includes PC.  Implement by
+     rewriting the list of registers to be transferred into a
+     contiguous chunk r0...rX before doing the transfer, then shuffling
+     registers into the correct places in the cleanup routine.  */
+  unsigned int regmask = dsc->u.block.regmask;
+  unsigned int num_in_list = bitcount (regmask), new_regmask;
+  unsigned int from = 0, i;
+
+  for (i = 0; i < num_in_list; i++)
+    dsc->tmp[i] = displaced_read_reg (regs, from, i);
+
+  /* Writeback makes things complicated.  We need to avoid clobbering
+     the base register with one of the registers in our modified
+     register list, but just using a different register can't work in
+     all cases, e.g.:
+
+     ldm r14!, {r0-r13,pc}
+
+     which would need to be rewritten as:
+
+     ldm rN!, {r0-r14}
+
+     but that can't work, because there's no free register for N.
+
+     Solve this by turning off the writeback bit, and emulating
+     writeback manually in the cleanup routine.  */
+
+  if (dsc->u.block.writeback)
+    insn._32_bit &= ~(1 << 21);
+
+  new_regmask = (1 << num_in_list) - 1;
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
+			"{..., pc}: original reg list %.4x, modified "
+			"list %.4x\n"), dsc->u.block.rn,
+			dsc->u.block.writeback ? "!" : "",
+			(int) dsc->u.block.regmask,
+			new_regmask);
+
+  /* In Thumb encoding, bit 13 should be always zero.  */
+  if (displaced_in_arm_mode (regs))
+    new_regmask &= 0xffff;
+  else
+    new_regmask &= 0xdfff;
+
+  RECORD_ARM_MODE_INSN (0,
+			 ((insn._32_bit & ~0xffff) | (new_regmask & 0xffff)));
+
+  return 0;
+}
+
+static void cleanup_svc (struct gdbarch *gdbarch, struct regcache *regs,
+			 struct displaced_step_closure *dsc);
+static int
+arm_copy_svc_helper (struct gdbarch *gdbarch, union instruction_instance insn,
+		     CORE_ADDR to, struct regcache *regs,
+		     struct displaced_step_closure *dsc)
+{
+  CORE_ADDR from = dsc->insn_addr;
+
+  /* Allow OS-specific code to override SVC handling.  */
+  if (dsc->u.svc.copy_svc_os)
+    return dsc->u.svc.copy_svc_os (gdbarch, insn._32_bit, to, regs, dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.8lx\n",
+			(unsigned long) insn._32_bit);
+
+  /* Preparation: none.
+     Insn: unmodified svc.
+     Cleanup: pc <- insn_addr + 4.  */
+
+  RECORD_ARM_MODE_INSN (0, insn._32_bit);
+
+  dsc->cleanup = &cleanup_svc;
+  /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
+     instruction.  */
+  dsc->wrote_to_pc = 1;
+
+  return 0;
+}
+
+
+/* Helper definition is done.  */
+
+
 /* Handle ldm/stm, apart from some tricky cases which are unlikely to occur
    in user-level code (in particular exception return, ldm rn, {...pc}^).  */
 
 static int
-copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
+copy_block_xfer (struct gdbarch *gdbarch, union instruction_instance insn,
+		 struct regcache *regs,
+		 copy_unmodified_helper copy_unmodified,
+		 copy_ldm_with_pc_helper copy_ldm_with_pc,
 		 struct displaced_step_closure *dsc)
 {
-  int load = bit (insn, 20);
-  int user = bit (insn, 22);
-  int increment = bit (insn, 23);
-  int before = bit (insn, 24);
-  int writeback = bit (insn, 21);
-  int rn = bits (insn, 16, 19);
   CORE_ADDR from = dsc->insn_addr;
 
-  /* Block transfers which don't mention PC can be run directly
-     out-of-line.  */
-  if (rn != 15 && (insn & 0x8000) == 0)
-    return copy_unmodified (gdbarch, insn, "ldm/stm", dsc);
-
-  if (rn == 15)
+  if (dsc->u.block.rn == 15)
     {
       warning (_("displaced: Unpredictable LDM or STM with "
 		 "base register r15"));
-      return copy_unmodified (gdbarch, insn, "unpredictable ldm/stm", dsc);
+      return copy_unmodified (insn, "unpredictable ldm/stm", dsc);
     }
 
-  if (debug_displaced)
-    fprintf_unfiltered (gdb_stdlog, "displaced: copying block transfer insn "
-			"%.8lx\n", (unsigned long) insn);
 
-  dsc->u.block.xfer_addr = displaced_read_reg (regs, from, rn);
-  dsc->u.block.rn = rn;
-
-  dsc->u.block.load = load;
-  dsc->u.block.user = user;
-  dsc->u.block.increment = increment;
-  dsc->u.block.before = before;
-  dsc->u.block.writeback = writeback;
-  dsc->u.block.cond = bits (insn, 28, 31);
-
-  dsc->u.block.regmask = insn & 0xffff;
-
-  if (load)
+  dsc->u.block.xfer_addr = displaced_read_reg (regs, from, dsc->u.block.rn);
+  if (dsc->u.block.load)
     {
-      if ((insn & 0xffff) == 0xffff)
+      if (dsc->u.block.regmask == 0xffff)
 	{
 	  /* LDM with a fully-populated register list.  This case is
 	     particularly tricky.  Implement for now by fully emulating the
 	     instruction (which might not behave perfectly in all cases, but
 	     these instructions should be rare enough for that not to matter
-	     too much).  */
+	     too much).  This case is only valid in ARM encoding, so no need
+	     to worry about Thumb encoding here.  */
 	  RECORD_ARM_MODE_INSN (0, ARM_NOP);
 
 	  dsc->cleanup = &cleanup_block_load_all;
 	}
       else
 	{
-	  /* LDM of a list of registers which includes PC.  Implement by
-	     rewriting the list of registers to be transferred into a
-	     contiguous chunk r0...rX before doing the transfer, then shuffling
-	     registers into the correct places in the cleanup routine.  */
-	  unsigned int regmask = insn & 0xffff;
-	  unsigned int num_in_list = bitcount (regmask), new_regmask, bit = 1;
-	  unsigned int to = 0, from = 0, i, new_rn;
-
-	  for (i = 0; i < num_in_list; i++)
-	    dsc->tmp[i] = displaced_read_reg (regs, from, i);
-
-	  /* Writeback makes things complicated.  We need to avoid clobbering
-	     the base register with one of the registers in our modified
-	     register list, but just using a different register can't work in
-	     all cases, e.g.:
-
-	       ldm r14!, {r0-r13,pc}
-
-	     which would need to be rewritten as:
-
-	       ldm rN!, {r0-r14}
-
-	     but that can't work, because there's no free register for N.
-
-	     Solve this by turning off the writeback bit, and emulating
-	     writeback manually in the cleanup routine.  */
-
-	  if (writeback)
-	    insn &= ~(1 << 21);
-
-	  new_regmask = (1 << num_in_list) - 1;
-
-	  if (debug_displaced)
-	    fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
-				"{..., pc}: original reg list %.4x, modified "
-				"list %.4x\n"), rn, writeback ? "!" : "",
-				(int) insn & 0xffff, new_regmask);
-
-	  RECORD_ARM_MODE_INSN (0,
-				 ((insn & ~0xffff) | (new_regmask & 0xffff)));
-
+	  copy_ldm_with_pc (insn, dsc, regs);
 	  dsc->cleanup = &cleanup_block_load_pc;
 	}
     }
@@ -6338,7 +6492,7 @@ copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
 	 Doing things this way has the advantage that we can auto-detect
 	 the offset of the PC write (which is architecture-dependent) in
 	 the cleanup routine.  */
-      RECORD_ARM_MODE_INSN (0, insn);
+      RECORD_ARM_MODE_INSN (0, insn._32_bit);
 
       dsc->cleanup = &cleanup_block_store_pc;
     }
@@ -6346,56 +6500,64 @@ copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
   return 0;
 }
 
+static int
+arm_copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn,
+		     struct regcache *regs, struct displaced_step_closure *dsc)
+{
+  int load = bit (insn, 20);
+  int user = bit (insn, 22);
+  int increment = bit (insn, 23);
+  int before = bit (insn, 24);
+  int writeback = bit (insn, 21);
+  int rn = bits (insn, 16, 19);
+  union instruction_instance ii;
+
+  /* Block transfers which don't mention PC can be run directly
+     out-of-line.  */
+  if (rn != 15 && (insn & 0x8000) == 0)
+    return arm_copy_unmodified (insn, "ldm/stm", dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying block transfer insn "
+			"%.8lx\n", (unsigned long) insn);
+
+  dsc->u.block.rn = rn;
+
+  dsc->u.block.load = load;
+  dsc->u.block.user = user;
+  dsc->u.block.increment = increment;
+  dsc->u.block.before = before;
+  dsc->u.block.writeback = writeback;
+
+  dsc->u.block.cond = bits (insn, 28, 31);
+  dsc->u.block.regmask = insn & 0xffff;
+
+  ii._32_bit = insn;
+  return copy_block_xfer (gdbarch, ii, regs, arm_copy_unmodified_helper,
+			  arm_copy_ldm_with_pc_helper, dsc);
+}
+
 /* Cleanup/copy SVC (SWI) instructions.  These two functions are overridden
    for Linux, where some SVC instructions must be treated specially.  */
 
 static void
 cleanup_svc (struct gdbarch *gdbarch, struct regcache *regs,
-	     struct displaced_step_closure *dsc)
+	     struct displaced_step_closure *dsc)
 {
   CORE_ADDR from = dsc->insn_addr;
   CORE_ADDR resume_addr = from + 4;
 
   if (debug_displaced)
     fprintf_unfiltered (gdb_stdlog, "displaced: cleanup for svc, resume at "
-			"%.8lx\n", (unsigned long) resume_addr);
+			"%.8lx\n", (unsigned long) resume_addr);
 
   displaced_write_reg (regs, dsc, ARM_PC_REGNUM, resume_addr, BRANCH_WRITE_PC);
 }
 
-static int
-copy_svc (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
-	  struct regcache *regs, struct displaced_step_closure *dsc)
-{
-  CORE_ADDR from = dsc->insn_addr;
-
-  /* Allow OS-specific code to override SVC handling.  */
-  if (dsc->u.svc.copy_svc_os)
-    return dsc->u.svc.copy_svc_os (gdbarch, insn, to, regs, dsc);
-
-  if (debug_displaced)
-    fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.8lx\n",
-			(unsigned long) insn);
-
-  /* Preparation: none.
-     Insn: unmodified svc.
-     Cleanup: pc <- insn_addr + 4.  */
-
-  RECORD_ARM_MODE_INSN (0, insn);
-
-  dsc->cleanup = &cleanup_svc;
-  /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
-     instruction.  */
-  dsc->wrote_to_pc = 1;
-
-  return 0;
-}
-
 /* Copy undefined instructions.  */
 
 static int
-copy_undef (struct gdbarch *gdbarch, uint32_t insn,
-	    struct displaced_step_closure *dsc)
+arm_copy_undef (uint32_t insn, struct displaced_step_closure *dsc)
 {
   if (debug_displaced)
     fprintf_unfiltered (gdb_stdlog,
@@ -6410,12 +6572,12 @@ copy_undef (struct gdbarch *gdbarch, uint32_t insn,
 /* Copy unpredictable instructions.  */
 
 static int
-copy_unpred (struct gdbarch *gdbarch, uint32_t insn,
+arm_copy_unpred (struct gdbarch *gdbarch, uint32_t insn,
 	     struct displaced_step_closure *dsc)
 {
   if (debug_displaced)
     fprintf_unfiltered (gdb_stdlog, "displaced: copying unpredictable insn "
-			"%.8lx\n", (unsigned long) insn);
+			"%.8lx\n", (unsigned long) insn);
 
   RECORD_ARM_MODE_INSN (0, insn);
 
@@ -6434,54 +6596,54 @@ decode_misc_memhint_neon (struct gdbarch *gdbarch, uint32_t insn,
   unsigned int rn = bits (insn, 16, 19);
 
   if (op1 == 0x10 && (op2 & 0x2) == 0x0 && (rn & 0xe) == 0x0)
-    return copy_unmodified (gdbarch, insn, "cps", dsc);
+    return arm_copy_unmodified (insn, "cps", dsc);
   else if (op1 == 0x10 && op2 == 0x0 && (rn & 0xe) == 0x1)
-    return copy_unmodified (gdbarch, insn, "setend", dsc);
+    return arm_copy_unmodified (insn, "setend", dsc);
   else if ((op1 & 0x60) == 0x20)
-    return copy_unmodified (gdbarch, insn, "neon dataproc", dsc);
+    return arm_copy_unmodified (insn, "neon dataproc", dsc);
   else if ((op1 & 0x71) == 0x40)
-    return copy_unmodified (gdbarch, insn, "neon elt/struct load/store", dsc);
+    return arm_copy_unmodified (insn, "neon elt/struct load/store", dsc);
   else if ((op1 & 0x77) == 0x41)
-    return copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
+    return arm_copy_unmodified (insn, "unallocated mem hint", dsc);
   else if ((op1 & 0x77) == 0x45)
-    return copy_preload (gdbarch, insn, regs, dsc);  /* pli.  */
+    return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pli.  */
   else if ((op1 & 0x77) == 0x51)
     {
       if (rn != 0xf)
-	return copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
+	return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
       else
-	return copy_unpred (gdbarch, insn, dsc);
+	return arm_copy_unpred (gdbarch, insn, dsc);
     }
   else if ((op1 & 0x77) == 0x55)
-    return copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
+    return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
   else if (op1 == 0x57)
     switch (op2)
       {
-      case 0x1: return copy_unmodified (gdbarch, insn, "clrex", dsc);
-      case 0x4: return copy_unmodified (gdbarch, insn, "dsb", dsc);
-      case 0x5: return copy_unmodified (gdbarch, insn, "dmb", dsc);
-      case 0x6: return copy_unmodified (gdbarch, insn, "isb", dsc);
-      default: return copy_unpred (gdbarch, insn, dsc);
+      case 0x1: return arm_copy_unmodified (insn, "clrex", dsc);
+      case 0x4: return arm_copy_unmodified (insn, "dsb", dsc);
+      case 0x5: return arm_copy_unmodified (insn, "dmb", dsc);
+      case 0x6: return arm_copy_unmodified (insn, "isb", dsc);
+      default: return arm_copy_unpred (gdbarch, insn, dsc);
       }
   else if ((op1 & 0x63) == 0x43)
-    return copy_unpred (gdbarch, insn, dsc);
+    return arm_copy_unpred (gdbarch, insn, dsc);
   else if ((op2 & 0x1) == 0x0)
     switch (op1 & ~0x80)
       {
       case 0x61:
-	return copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
+	return arm_copy_unmodified (insn, "unallocated mem hint", dsc);
       case 0x65:
-	return copy_preload_reg (gdbarch, insn, regs, dsc);  /* pli reg.  */
+	return arm_copy_preload_reg (gdbarch, insn, regs, dsc);  /* pli reg.  */
       case 0x71: case 0x75:
         /* pld/pldw reg.  */
-	return copy_preload_reg (gdbarch, insn, regs, dsc);
+	return arm_copy_preload_reg (gdbarch, insn, regs, dsc);
       case 0x63: case 0x67: case 0x73: case 0x77:
-	return copy_unpred (gdbarch, insn, dsc);
+	return arm_copy_unpred (gdbarch, insn, dsc);
       default:
-	return copy_undef (gdbarch, insn, dsc);
+	return arm_copy_undef (insn, dsc);
       }
   else
-    return copy_undef (gdbarch, insn, dsc);  /* Probably unreachable.  */
+    return arm_copy_undef (insn, dsc);  /* Probably unreachable.  */
 }
 
 static int
@@ -6495,26 +6657,28 @@ decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
   else switch (((insn & 0x7000000) >> 23) | ((insn & 0x100000) >> 20))
     {
     case 0x0: case 0x2:
-      return copy_unmodified (gdbarch, insn, "srs", dsc);
+      return arm_copy_unmodified (insn, "srs", dsc);
 
     case 0x1: case 0x3:
-      return copy_unmodified (gdbarch, insn, "rfe", dsc);
+      return arm_copy_unmodified (insn, "rfe", dsc);
 
     case 0x4: case 0x5: case 0x6: case 0x7:
-      return copy_b_bl_blx (gdbarch, insn, regs, dsc);
+      return arm_copy_b_bl_blx (gdbarch, insn, regs, dsc);
 
     case 0x8:
       switch ((insn & 0xe00000) >> 21)
 	{
 	case 0x1: case 0x3: case 0x4: case 0x5: case 0x6: case 0x7:
 	  /* stc/stc2.  */
-	  return copy_copro_load_store (gdbarch, insn, regs, dsc);
+	  return arm_copy_copro_load_store_helper (gdbarch,
+						   (union instruction_instance) insn,
+						   regs, dsc);
 
 	case 0x2:
-	  return copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
+	  return arm_copy_unmodified (insn, "mcrr/mcrr2", dsc);
 
 	default:
-	  return copy_undef (gdbarch, insn, dsc);
+	  return arm_copy_undef (insn, dsc);
 	}
 
     case 0x9:
@@ -6524,46 +6688,46 @@ decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
 	  {
 	  case 0x1: case 0x3:
 	    /* ldc/ldc2 imm (undefined for rn == pc).  */
-	    return rn_f ? copy_undef (gdbarch, insn, dsc)
-			: copy_copro_load_store (gdbarch, insn, regs, dsc);
+	    return rn_f ? arm_copy_undef (insn, dsc)
+			: arm_copy_copro_load_store_helper (gdbarch, (union instruction_instance)insn, regs, dsc);
 
 	  case 0x2:
-	    return copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
+	    return arm_copy_unmodified (insn, "mrrc/mrrc2", dsc);
 
 	  case 0x4: case 0x5: case 0x6: case 0x7:
 	    /* ldc/ldc2 lit (undefined for rn != pc).  */
-	    return rn_f ? copy_copro_load_store (gdbarch, insn, regs, dsc)
-			: copy_undef (gdbarch, insn, dsc);
+	    return rn_f ? arm_copy_copro_load_store_helper (gdbarch, (union instruction_instance)insn, regs, dsc)
+			: arm_copy_undef (insn, dsc);
 
 	  default:
-	    return copy_undef (gdbarch, insn, dsc);
+	    return arm_copy_undef (insn, dsc);
 	  }
       }
 
     case 0xa:
-      return copy_unmodified (gdbarch, insn, "stc/stc2", dsc);
+      return arm_copy_unmodified (insn, "stc/stc2", dsc);
 
     case 0xb:
       if (bits (insn, 16, 19) == 0xf)
         /* ldc/ldc2 lit.  */
-	return copy_copro_load_store (gdbarch, insn, regs, dsc);
+	return arm_copy_copro_load_store_helper (gdbarch, (union instruction_instance)insn, regs, dsc);
       else
-	return copy_undef (gdbarch, insn, dsc);
+	return arm_copy_undef (insn, dsc);
 
     case 0xc:
       if (bit (insn, 4))
-	return copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
+	return arm_copy_unmodified (insn, "mcr/mcr2", dsc);
       else
-	return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
+	return arm_copy_unmodified (insn, "cdp/cdp2", dsc);
 
     case 0xd:
       if (bit (insn, 4))
-	return copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
+	return arm_copy_unmodified (insn, "mrc/mrc2", dsc);
       else
-	return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
+	return arm_copy_unmodified (insn, "cdp/cdp2", dsc);
 
     default:
-      return copy_undef (gdbarch, insn, dsc);
+      return arm_copy_undef (insn, dsc);
     }
 }
 
@@ -6581,42 +6745,41 @@ decode_miscellaneous (struct gdbarch *gdbarch, uint32_t insn,
   switch (op2)
     {
     case 0x0:
-      return copy_unmodified (gdbarch, insn, "mrs/msr", dsc);
+      return arm_copy_unmodified (insn, "mrs/msr", dsc);
 
     case 0x1:
       if (op == 0x1)  /* bx.  */
-	return copy_bx_blx_reg (gdbarch, insn, regs, dsc);
+	return arm_copy_bx_blx_reg (gdbarch, insn, regs, dsc);
       else if (op == 0x3)
-	return copy_unmodified (gdbarch, insn, "clz", dsc);
+	return arm_copy_unmodified (insn, "clz", dsc);
       else
-	return copy_undef (gdbarch, insn, dsc);
+	return arm_copy_undef (insn, dsc);
 
     case 0x2:
       if (op == 0x1)
         /* Not really supported.  */
-	return copy_unmodified (gdbarch, insn, "bxj", dsc);
+	return arm_copy_unmodified (insn, "bxj", dsc);
       else
-	return copy_undef (gdbarch, insn, dsc);
+	return arm_copy_undef (insn, dsc);
 
     case 0x3:
-      if (op == 0x1)
-	return copy_bx_blx_reg (gdbarch, insn,
-				regs, dsc);  /* blx register.  */
+      if (op == 0x1) /* blx register.  */
+	return arm_copy_bx_blx_reg (gdbarch, insn, regs, dsc);
       else
-	return copy_undef (gdbarch, insn, dsc);
+	return arm_copy_undef (insn, dsc);
 
     case 0x5:
-      return copy_unmodified (gdbarch, insn, "saturating add/sub", dsc);
+      return arm_copy_unmodified (insn, "saturating add/sub", dsc);
 
     case 0x7:
       if (op == 0x1)
-	return copy_unmodified (gdbarch, insn, "bkpt", dsc);
+	return arm_copy_unmodified (insn, "bkpt", dsc);
       else if (op == 0x3)
         /* Not really supported.  */
-	return copy_unmodified (gdbarch, insn, "smc", dsc);
+	return arm_copy_unmodified (insn, "smc", dsc);
 
     default:
-      return copy_undef (gdbarch, insn, dsc);
+      return arm_copy_undef (insn, dsc);
     }
 }
 
@@ -6628,13 +6791,13 @@ decode_dp_misc (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
     switch (bits (insn, 20, 24))
       {
       case 0x10:
-	return copy_unmodified (gdbarch, insn, "movw", dsc);
+	return arm_copy_unmodified (insn, "movw", dsc);
 
       case 0x14:
-	return copy_unmodified (gdbarch, insn, "movt", dsc);
+	return arm_copy_unmodified (insn, "movt", dsc);
 
       case 0x12: case 0x16:
-	return copy_unmodified (gdbarch, insn, "msr imm", dsc);
+	return arm_copy_unmodified (insn, "msr imm", dsc);
 
       default:
 	return copy_alu_imm (gdbarch, insn, regs, dsc);
@@ -6644,17 +6807,17 @@ decode_dp_misc (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
       uint32_t op1 = bits (insn, 20, 24), op2 = bits (insn, 4, 7);
 
       if ((op1 & 0x19) != 0x10 && (op2 & 0x1) == 0x0)
-	return copy_alu_reg (gdbarch, insn, regs, dsc);
+	return arm_copy_alu_reg (gdbarch, insn, regs, dsc);
       else if ((op1 & 0x19) != 0x10 && (op2 & 0x9) == 0x1)
-	return copy_alu_shifted_reg (gdbarch, insn, regs, dsc);
+	return arm_copy_alu_shifted_reg (gdbarch, insn, regs, dsc);
       else if ((op1 & 0x19) == 0x10 && (op2 & 0x8) == 0x0)
 	return decode_miscellaneous (gdbarch, insn, regs, dsc);
       else if ((op1 & 0x19) == 0x10 && (op2 & 0x9) == 0x8)
-	return copy_unmodified (gdbarch, insn, "halfword mul/mla", dsc);
+	return arm_copy_unmodified (insn, "halfword mul/mla", dsc);
       else if ((op1 & 0x10) == 0x00 && op2 == 0x9)
-	return copy_unmodified (gdbarch, insn, "mul/mla", dsc);
+	return arm_copy_unmodified (insn, "mul/mla", dsc);
       else if ((op1 & 0x10) == 0x10 && op2 == 0x9)
-	return copy_unmodified (gdbarch, insn, "synch", dsc);
+	return arm_copy_unmodified (insn, "synch", dsc);
       else if (op2 == 0xb || (op2 & 0xd) == 0xd)
 	/* 2nd arg means "unpriveleged".  */
 	return copy_extra_ld_st (gdbarch, insn, (op1 & 0x12) == 0x02, regs,
@@ -6676,28 +6839,28 @@ decode_ld_st_word_ubyte (struct gdbarch *gdbarch, uint32_t insn,
 
   if ((!a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02)
       || (a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02 && !b))
-    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 0);
+    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 0);
   else if ((!a && (op1 & 0x17) == 0x02)
 	    || (a && (op1 & 0x17) == 0x02 && !b))
-    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 1);
+    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 1);
   else if ((!a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03)
 	    || (a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03 && !b))
-    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 0);
+    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 0);
   else if ((!a && (op1 & 0x17) == 0x03)
 	   || (a && (op1 & 0x17) == 0x03 && !b))
-    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 1);
+    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 1);
   else if ((!a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06)
 	    || (a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06 && !b))
-    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 0);
+    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 0);
   else if ((!a && (op1 & 0x17) == 0x06)
 	   || (a && (op1 & 0x17) == 0x06 && !b))
-    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 1);
+    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 1);
   else if ((!a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07)
 	   || (a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07 && !b))
-    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 0);
+    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 0);
   else if ((!a && (op1 & 0x17) == 0x07)
 	   || (a && (op1 & 0x17) == 0x07 && !b))
-    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 1);
+    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 1);
 
   /* Should be unreachable.  */
   return 1;
@@ -6710,49 +6873,49 @@ decode_media (struct gdbarch *gdbarch, uint32_t insn,
   switch (bits (insn, 20, 24))
     {
     case 0x00: case 0x01: case 0x02: case 0x03:
-      return copy_unmodified (gdbarch, insn, "parallel add/sub signed", dsc);
+      return arm_copy_unmodified (insn, "parallel add/sub signed", dsc);
 
     case 0x04: case 0x05: case 0x06: case 0x07:
-      return copy_unmodified (gdbarch, insn, "parallel add/sub unsigned", dsc);
+      return arm_copy_unmodified (insn, "parallel add/sub unsigned", dsc);
 
     case 0x08: case 0x09: case 0x0a: case 0x0b:
     case 0x0c: case 0x0d: case 0x0e: case 0x0f:
-      return copy_unmodified (gdbarch, insn,
+      return arm_copy_unmodified (insn,
 			      "decode/pack/unpack/saturate/reverse", dsc);
 
     case 0x18:
       if (bits (insn, 5, 7) == 0)  /* op2.  */
 	 {
 	  if (bits (insn, 12, 15) == 0xf)
-	    return copy_unmodified (gdbarch, insn, "usad8", dsc);
+	    return arm_copy_unmodified (insn, "usad8", dsc);
 	  else
-	    return copy_unmodified (gdbarch, insn, "usada8", dsc);
+	    return arm_copy_unmodified (insn, "usada8", dsc);
 	}
       else
-	 return copy_undef (gdbarch, insn, dsc);
+	 return arm_copy_undef (insn, dsc);
 
     case 0x1a: case 0x1b:
       if (bits (insn, 5, 6) == 0x2)  /* op2[1:0].  */
-	return copy_unmodified (gdbarch, insn, "sbfx", dsc);
+	return arm_copy_unmodified (insn, "sbfx", dsc);
       else
-	return copy_undef (gdbarch, insn, dsc);
+	return arm_copy_undef (insn, dsc);
 
     case 0x1c: case 0x1d:
       if (bits (insn, 5, 6) == 0x0)  /* op2[1:0].  */
 	 {
 	  if (bits (insn, 0, 3) == 0xf)
-	    return copy_unmodified (gdbarch, insn, "bfc", dsc);
+	    return arm_copy_unmodified (insn, "bfc", dsc);
 	  else
-	    return copy_unmodified (gdbarch, insn, "bfi", dsc);
+	    return arm_copy_unmodified (insn, "bfi", dsc);
 	}
       else
-	return copy_undef (gdbarch, insn, dsc);
+	return arm_copy_undef (insn, dsc);
 
     case 0x1e: case 0x1f:
       if (bits (insn, 5, 6) == 0x2)  /* op2[1:0].  */
-	return copy_unmodified (gdbarch, insn, "ubfx", dsc);
+	return arm_copy_unmodified (insn, "ubfx", dsc);
       else
-	return copy_undef (gdbarch, insn, dsc);
+	return arm_copy_undef (insn, dsc);
     }
 
   /* Should be unreachable.  */
@@ -6764,53 +6927,49 @@ decode_b_bl_ldmstm (struct gdbarch *gdbarch, int32_t insn,
 		    struct regcache *regs, struct displaced_step_closure *dsc)
 {
   if (bit (insn, 25))
-    return copy_b_bl_blx (gdbarch, insn, regs, dsc);
+    return arm_copy_b_bl_blx (gdbarch, insn, regs, dsc);
   else
-    return copy_block_xfer (gdbarch, insn, regs, dsc);
+    return arm_copy_block_xfer (gdbarch, insn, regs, dsc);
 }
 
 static int
-decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
-		      struct regcache *regs,
-		      struct displaced_step_closure *dsc)
-{
-  unsigned int opcode = bits (insn, 20, 24);
+decode_svc_copro (struct gdbarch *gdbarch, union instruction_instance insn,
+		  copy_unmodified_helper copy_unmodified,
+		  copy_copro_load_store_helper copy_copro_load_store,
+		  copy_undef_helper copy_undef,
+		  copy_svc_helper copy_svc,
+		  struct regcache *regs, struct displaced_step_closure *dsc,
+		  unsigned int ops[])
+{
+  unsigned int op1 = ops[0];
+  unsigned int op = ops[1];
+  unsigned int coproc = ops[2];
+  unsigned int opcode = ops[3];
 
-  switch (opcode)
+  if ((op1 & 0x20) == 0x00 && (op1 & 0x3a) != 0x00 && (coproc & 0xe) == 0xa)
     {
-    case 0x04: case 0x05:  /* VFP/Neon mrrc/mcrr.  */
-      return copy_unmodified (gdbarch, insn, "vfp/neon mrrc/mcrr", dsc);
+      switch (opcode)
+	{
+	case 0x04: case 0x05:  /* VFP/Neon mrrc/mcrr.  */
+	  return copy_unmodified (insn, "vfp/neon mrrc/mcrr", dsc);
 
-    case 0x08: case 0x0a: case 0x0c: case 0x0e:
-    case 0x12: case 0x16:
-      return copy_unmodified (gdbarch, insn, "vfp/neon vstm/vpush", dsc);
+	case 0x08: case 0x0a: case 0x0c: case 0x0e:
+	case 0x12: case 0x16:
+	  return copy_unmodified (insn, "vfp/neon vstm/vpush", dsc);
 
-    case 0x09: case 0x0b: case 0x0d: case 0x0f:
-    case 0x13: case 0x17:
-      return copy_unmodified (gdbarch, insn, "vfp/neon vldm/vpop", dsc);
+	case 0x09: case 0x0b: case 0x0d: case 0x0f:
+	case 0x13: case 0x17:
+	  return copy_unmodified (insn, "vfp/neon vldm/vpop", dsc);
 
-    case 0x10: case 0x14: case 0x18: case 0x1c:  /* vstr.  */
-    case 0x11: case 0x15: case 0x19: case 0x1d:  /* vldr.  */
-      /* Note: no writeback for these instructions.  Bit 25 will always be
-	 zero though (via caller), so the following works OK.  */
-      return copy_copro_load_store (gdbarch, insn, regs, dsc);
+	case 0x10: case 0x14: case 0x18: case 0x1c:  /* vstr.  */
+	case 0x11: case 0x15: case 0x19: case 0x1d:  /* vldr.  */
+	  /* Note: no writeback for these instructions.  Bit 25 will always be
+	     zero though (via caller), so the following works OK.  */
+	  return copy_copro_load_store (gdbarch, insn, regs, dsc);
+	}
+      /* Should be unreachable.  */
+      return 1;
     }
-
-  /* Should be unreachable.  */
-  return 1;
-}
-
-static int
-decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
-		  struct regcache *regs, struct displaced_step_closure *dsc)
-{
-  unsigned int op1 = bits (insn, 20, 25);
-  int op = bit (insn, 4);
-  unsigned int coproc = bits (insn, 8, 11);
-  unsigned int rn = bits (insn, 16, 19);
-
-  if ((op1 & 0x20) == 0x00 && (op1 & 0x3a) != 0x00 && (coproc & 0xe) == 0xa)
-    return decode_ext_reg_ld_st (gdbarch, insn, regs, dsc);
   else if ((op1 & 0x21) == 0x00 && (op1 & 0x3a) != 0x00
 	   && (coproc & 0xe) != 0xa)
     /* stc/stc2.  */
@@ -6819,31 +6978,54 @@ decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
 	   && (coproc & 0xe) != 0xa)
     /* ldc/ldc2 imm/lit.  */
     return copy_copro_load_store (gdbarch, insn, regs, dsc);
+  else if ((op1 & 0x30) == 0x30)
+    return copy_svc (gdbarch, insn, dsc->scratch_base, regs, dsc);
+
   else if ((op1 & 0x3e) == 0x00)
-    return copy_undef (gdbarch, insn, dsc);
+    return copy_undef (insn, dsc);
   else if ((op1 & 0x3e) == 0x04 && (coproc & 0xe) == 0xa)
-    return copy_unmodified (gdbarch, insn, "neon 64bit xfer", dsc);
+    return copy_unmodified (insn, "neon 64bit xfer", dsc);
   else if (op1 == 0x04 && (coproc & 0xe) != 0xa)
-    return copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
+    return copy_unmodified (insn, "mcrr/mcrr2", dsc);
   else if (op1 == 0x05 && (coproc & 0xe) != 0xa)
-    return copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
+    return copy_unmodified (insn, "mrrc/mrrc2", dsc);
   else if ((op1 & 0x30) == 0x20 && !op)
     {
       if ((coproc & 0xe) == 0xa)
-	return copy_unmodified (gdbarch, insn, "vfp dataproc", dsc);
+	return copy_unmodified (insn, "vfp dataproc", dsc);
       else
-	return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
+	return copy_unmodified (insn, "cdp/cdp2", dsc);
     }
   else if ((op1 & 0x30) == 0x20 && op)
-    return copy_unmodified (gdbarch, insn, "neon 8/16/32 bit xfer", dsc);
+    return copy_unmodified (insn, "neon 8/16/32 bit xfer", dsc);
   else if ((op1 & 0x31) == 0x20 && op && (coproc & 0xe) != 0xa)
-    return copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
+    return copy_unmodified (insn, "mcr/mcr2", dsc);
   else if ((op1 & 0x31) == 0x21 && op && (coproc & 0xe) != 0xa)
-    return copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
-  else if ((op1 & 0x30) == 0x30)
-    return copy_svc (gdbarch, insn, to, regs, dsc);
+    return copy_unmodified (insn, "mrc/mrc2", dsc);
   else
-    return copy_undef (gdbarch, insn, dsc);  /* Possibly unreachable.  */
+    return copy_undef (insn, dsc);  /* Possibly unreachable.  */
+}
+
+static int
+arm_decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn,
+		      struct regcache *regs, struct displaced_step_closure *dsc)
+{
+  unsigned int ops[4];
+  union instruction_instance ii;
+  unsigned int rn = bits (insn, 16, 19);
+
+  ops[0] = bits (insn, 20, 25);
+  ops[1] = bit (insn, 4);
+  ops[2] = bits (insn, 8, 11);
+  ops[3] = bits (insn, 20, 24);
+
+
+  ii._32_bit = insn;
+
+  return decode_svc_copro (gdbarch, ii, arm_copy_unmodified_helper,
+			   arm_copy_copro_load_store_helper,
+			   arm_copy_undef_helper, arm_copy_svc_helper,
+			  regs, dsc, ops);
 }
 
 static void
@@ -6903,7 +7085,7 @@ arm_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
       break;
 
     case 0xc: case 0xd: case 0xe: case 0xf:
-      err = decode_svc_copro (gdbarch, insn, to, regs, dsc);
+      err = arm_decode_svc_copro (gdbarch, insn, regs, dsc);
       break;
     }
 
-- 
1.7.0.4


^ permalink raw reply	[flat|nested] 66+ messages in thread

* Displaced stepping 0003: for 16-bit Thumb instructions
  2011-02-17 20:55   ` Ulrich Weigand
  2011-02-18  7:30     ` Yao Qi
@ 2011-02-28  2:04     ` Yao Qi
  1 sibling, 0 replies; 66+ messages in thread
From: Yao Qi @ 2011-02-28  2:04 UTC (permalink / raw)
  To: Ulrich Weigand; +Cc: gdb-patches

[-- Attachment #1: Type: text/plain, Size: 5124 bytes --]

On 02/18/2011 04:11 AM, Ulrich Weigand wrote:
>> > @@ -4338,10 +4341,15 @@ displaced_read_reg (struct regcache *regs, CORE_ADDR from, int regno)
>> >  
>> >    if (regno == 15)
>> >      {
>> > +      if (displaced_in_arm_mode (regs))
>> > +	from += 8;
>> > +      else
>> > +	from += 6;
> I think the 6 is wrong, it should be 4.  From the ARM manual:
> 
> - When executing an ARM instruction, PC reads as the address of the
>   current instruction plus 8.
> - When executing a Thumb instruction, PC reads as the address of the
>   current instruction plus 4.
> 

Oh, yes.  Fixed.


>> > +/* Clean up branch instructions (actually perform the branch, by setting
>> > +   PC).  */
>> > +static void
>> > +cleanup_branch(struct gdbarch *gdbarch, struct regcache *regs,
>> > +	       struct displaced_step_closure *dsc)
>> > +{
>> > +  ULONGEST from = dsc->insn_addr;
>> > +  uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
>> > +  int branch_taken = condition_true (dsc->u.branch.cond, status);
>> > +
>> > +  cleanup_branch_1 (gdbarch, regs, dsc, branch_taken);
>> > +}
>> > +
>> > +static void
>> > +cleanup_cbz_cbnz(struct gdbarch *gdbarch, struct regcache *regs,
>> > +	       struct displaced_step_closure *dsc)
>> > +{
>> > +  cleanup_branch_1 (gdbarch, regs, dsc, dsc->u.branch.cond);
>> > +}
> I think this is unnecessary: copy_cbz_cnbz ought to be able to use
> cleanup_branch as-is.  If the branch is taken, it should just set
> dsc->u.branch.cond to INSN_AL; if the branch is not taken, it
> should simply not use any cleanup at all since no further action
> is required.
> 

Done as you suggested.

>> > @@ -4718,6 +4752,40 @@ copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
>> >  
>> >       B<cond> similar, but don't set r14 in cleanup.  */
>> >  
>> > +
>> > +  dsc->u.branch.cond = cond;
>> > +  dsc->u.branch.link = link;
>> > +  dsc->u.branch.exchange = exchange;
>> > +
>> > +  if (arm_pc_is_thumb (gdbarch, from))
> You should never use arm_pc_is_thumb here; the heuristics it applies are
> completely unnecessary, since we know in which mode we are, and may just
> result in the wrong outcome.
> 
> In any case, as discussed above, this ought to be two separate copy
> routines, one for ARM mode and one for Thumb mode anyway.
> 

Yes, it is separated to two routines, arm_copy_b_bl_blx and
thumb2_copy_b_bl_blx.

>> > +    {
>> > +      /* Plus the size of THUMB_NOP and B/BL/BLX.  */
>> > +      dsc->u.branch.dest = from + 2 + 4 + offset;
>> > +      RECORD_MOD_16BIT_INSN (0, THUMB_NOP);
> The + 2 doesn't look right to me.   The offset is relative to the
> PC, which is -see above- "from + 8" in ARM mode and "from + 4" in
> Thumb mode.  I don't see how the size of the THUMB_NOP is involved
> at all here ...
> 

Oh, yes.  Fixed.

>> > +static int
>> > +thumb_decode_dp (struct gdbarch *gdbarch, unsigned short insn,
>> > +		 struct displaced_step_closure *dsc)
>> > +{
>> > +  /* 16-bit data-processing insns are not related to PC.  */
>> > +  return thumb_copy_unmodified_16bit (gdbarch, insn,"data-processing", dsc);
>> > +}
> This doesn't need to be a separate function, I guess ...
> 

OK, fixed.

>> > +  /* ADDS Rd, #imm8 */
>> > +  RECORD_MOD_32BIT_INSN (0, 0x3000 | (rd << 8) | imm8);
> Should be 16BIT (but see my earlier mail on the usefulness of
> those macros in the first place ...).
> 

Fixed.

>> > +static void
>> > +thumb_process_displaced_16bit_insn (struct gdbarch *gdbarch,
>> > +				    unsigned short insn1, CORE_ADDR to,
> I don't think this needs TO.
> 

Removed parameter TO.

>> > +	    unsigned short op = bits (insn1, 7, 9);
>> > +	    if (op == 6 || op == 7) /* BX or BLX */
>> > +	      err = thumb_copy_bx_blx_reg (gdbarch, insn1, regs, dsc);
>> > +	    else
>> > +	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "special data",
>> > +						 dsc);
> These include the ADD / MOV / CMP high register instructions, which
> can access the PC, so they'd need special treatment
> 

They are processed by thumb_copy_alu_reg now.

>> > +	switch (bits (insn1, 8, 11))
>> > +	  {
>> > +	  case 1: case 3:  case 9: case 11: /* CBNZ, CBZ */
>> > +	    err = thumb_copy_cbnz_cbz (gdbarch, insn1, regs, dsc);
>> > +	    break;
>> > +	  default:
>> > +	    err = thumb_copy_unmodified_16bit (gdbarch, insn1,"", dsc);
> Hmm, what about IT ?
> 

So far, I don't have a good idea on support IT for displaced stepping.
I may need more time to think of this.

>> > +    case 13: /* Conditional branch and supervisor call */
>> > +      if (bits (insn1, 9, 11) != 7) /* conditional branch */
>> > +	err = thumb_copy_b (gdbarch, insn1, dsc);
>> > +      else
>> > +	err = thumb_copy_unmodified_16bit (gdbarch, insn1,"svc", dsc);
> There is special handling in arm-linux-tdep.c for ARM SVC instructions.
> Don't we need this for Thumb SVC's as well?
> 

Yes.  We need that.  SVC and Linux related stuff is new to me.  I
suggest this patch can be in first if it is OK.  I'll think of 'svc
stuff' later.

>> > +
>> > +  if ((bits (insn1, 13, 15) == 7) && (bits (insn1, 11, 12)))
> You should just use thumb_insn_size ...
> 

Fixed.

-- 
Yao (齐尧)

[-- Attachment #2: 0003-displaced-stepping-for-thumb-16-bit-insn.patch --]
[-- Type: text/x-patch, Size: 14742 bytes --]

From 894283a4b306230db70f4df0182b6995778007ea Mon Sep 17 00:00:00 2001
From: Yao Qi <yao@codesourcery.com>
Date: Sat, 26 Feb 2011 14:57:33 +0800
Subject: [PATCH 3/4] displaced stepping for thumb 16-bit insn
 don't support IT in thumb-16bit

---
 gdb/arm-tdep.c |  436 +++++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 files changed, 435 insertions(+), 1 deletions(-)

diff --git a/gdb/arm-tdep.c b/gdb/arm-tdep.c
index 2d06d8e..269c583 100644
--- a/gdb/arm-tdep.c
+++ b/gdb/arm-tdep.c
@@ -5332,6 +5332,23 @@ arm_copy_unmodified (uint32_t insn, const char *iname,
   return 0;
 }
 
+/* Copy 16-bit Thumb (Thumb and 16-bit Thumb-2) instruction without any
+   modification.  */
+static int
+thumb_copy_unmodified_16bit (struct gdbarch *gdbarch, unsigned int insn,
+			     const char *iname,
+			     struct displaced_step_closure *dsc)
+{
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x, "
+			"opcode/class '%s' unmodified\n", insn,
+			iname);
+
+  RECORD_THUMB_MODE_INSN (0, insn);
+
+  return 0;
+}
+
 /* Preload instructions with immediate offset.  */
 
 static void
@@ -5558,6 +5575,45 @@ arm_copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
   return copy_b_bl_blx (gdbarch, cond, exchange, link, offset, regs, dsc);
 }
 
+/* Copy B Thumb instructions.  */
+static int
+thumb_copy_b (struct gdbarch *gdbarch, unsigned short insn,
+	      struct displaced_step_closure *dsc)
+{
+  unsigned int cond = 0;
+  int offset = 0;
+  unsigned short bit_12_15 = bits (insn, 12, 15);
+  CORE_ADDR from = dsc->insn_addr;
+
+  if (bit_12_15 == 0xd)
+    {
+      offset = sbits (insn, 0, 7);
+      cond = bits (insn, 8, 11);
+    }
+  else if (bit_12_15 == 0xe)
+    {
+       offset = sbits (insn, 0, 10);
+       cond = INST_AL;
+    }
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog,
+			"displaced: copying b immediate insn %.4x "
+			"with offset %d\n", insn, offset);
+
+  dsc->u.branch.cond = cond;
+  dsc->u.branch.link = 0;
+  dsc->u.branch.exchange = 0;
+  dsc->u.branch.dest = from + 4 + offset;
+
+  RECORD_THUMB_MODE_INSN (0, THUMB_NOP);
+
+
+  dsc->cleanup = &cleanup_branch;
+
+  return 0;
+}
+
 /* Copy BX/BLX with register-specified destinations.  */
 
 static int
@@ -5609,6 +5665,25 @@ arm_copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
   return copy_bx_blx_reg (gdbarch, cond, link, rm, regs, dsc);
 }
 
+static int
+thumb_copy_bx_blx_reg (struct gdbarch *gdbarch, uint16_t insn,
+		       struct regcache *regs,
+		       struct displaced_step_closure *dsc)
+{
+  int link = bit (insn, 7);
+  unsigned int rm = bits (insn, 3, 6);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x",
+			(unsigned short) insn);
+
+  /* Always true for thumb.  */
+  dsc->u.branch.cond = INST_AL;
+  RECORD_THUMB_MODE_INSN (0, THUMB_NOP);
+
+  return copy_bx_blx_reg (gdbarch, INST_AL, link, rm, regs, dsc);
+}
+
 /* Copy/cleanup arithmetic/logic instruction with immediate RHS.  */
 
 static void
@@ -5753,6 +5828,31 @@ arm_copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
   return copy_alu_reg (gdbarch, regs, dsc, reg_ids);
 }
 
+static int
+thumb_copy_alu_reg (struct gdbarch *gdbarch, unsigned short insn,
+	    struct regcache *regs,
+	    struct displaced_step_closure *dsc)
+{
+  unsigned int reg_ids[3];
+  ULONGEST rd_val, rn_val;
+  CORE_ADDR from = dsc->insn_addr;
+
+  reg_ids[1] = (bit (insn, 7) << 3) | bits (insn, 0, 2);
+  reg_ids[0] = bits (insn, 3, 6);
+  reg_ids[2] = 2;
+
+  if (reg_ids[0] != ARM_PC_REGNUM && reg_ids[1] != ARM_PC_REGNUM)
+    return thumb_copy_unmodified_16bit(gdbarch, insn, "ALU reg", dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.4x\n",
+			"ALU", (unsigned short) insn);
+
+  RECORD_THUMB_MODE_INSN (0, ((insn & 0xff00) | 0x08));
+
+  return copy_alu_reg (gdbarch, regs, dsc, reg_ids);
+}
+
 /* Cleanup/copy arithmetic/logic insns with shifted register RHS.  */
 
 static void
@@ -7028,12 +7128,346 @@ arm_decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn,
 			  regs, dsc, ops);
 }
 
+static int
+copy_pc_relative (struct regcache *regs, struct displaced_step_closure *dsc,
+		  int rd, unsigned int imm, int is_32bit)
+{
+  int val;
+
+    /* ADR Rd, #imm
+
+     Rewrite as:
+
+     Preparation: Rd <- PC
+     Insn: ADD Rd, #imm
+     Cleanup: Null.
+   */
+
+  /* Rd <- PC */
+  val = displaced_read_reg (regs, dsc->insn_addr, ARM_PC_REGNUM);
+  displaced_write_reg (regs, dsc, rd, val, CANNOT_WRITE_PC);
+
+  if (is_32bit)
+    {
+      /* Encoding T3: ADDS Rd, Rd, #imm */
+      RECORD_THUMB_MODE_INSN (0, 0xf100 | rd);
+      RECORD_THUMB_MODE_INSN (1, 0x0 | (rd << 8) | imm);
+
+      dsc->numinsns = 2;
+    }
+  else
+    /* Encoding T2: ADDS Rd, #imm */
+    RECORD_THUMB_MODE_INSN (0, 0x3000 | (rd << 8) | imm);
+
+  return 0;
+}
+
+static int
+thumb_decode_pc_relative_16bit (struct gdbarch *gdbarch, unsigned short insn,
+				struct regcache *regs,
+				struct displaced_step_closure *dsc)
+{
+  unsigned int rd = bits (insn, 8, 10);
+  unsigned int imm8 = bits (insn, 0, 7);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog,
+			"displaced: copying thumb adr r%d, #%d insn %.4x\n",
+			rd, imm8, insn);
+
+  return copy_pc_relative (regs, dsc, rd, imm8, 0);
+}
+
+static int
+thumb_copy_16bit_ldr_literal (struct gdbarch *gdbarch, unsigned short insn1,
+			      struct regcache *regs,
+			      struct displaced_step_closure *dsc)
+{
+  unsigned int rt = bits (insn1, 8, 7);
+  unsigned int pc;
+  int imm8 = sbits (insn1, 0, 7);
+  CORE_ADDR from = dsc->insn_addr;
+
+  /* LDR Rd, #imm8
+
+     Rewrite as:
+
+     Preparation: tmp2 <- R2, tmp3 <- R3, R2 <- PC, R3 <- #imm8;
+                  if (Rd is not R0) tmp0 <- R0;
+     Insn: LDR R0, [R2, R3];
+     Cleanup: R2 <- tmp2, R3 <- tmp3,
+              if (Rd is not R0) Rd <- R0, R0 <- tmp0 */
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying thumb ldr literal "
+			"insn %.4x\n", insn1);
+
+  dsc->tmp[0] = displaced_read_reg (regs, from, 0);
+  dsc->tmp[2] = displaced_read_reg (regs, from, 2);
+  dsc->tmp[3] = displaced_read_reg (regs, from, 3);
+  pc = displaced_read_reg (regs, from, ARM_PC_REGNUM);
+
+  displaced_write_reg (regs, dsc, 2, pc, CANNOT_WRITE_PC);
+  displaced_write_reg (regs, dsc, 3, imm8, CANNOT_WRITE_PC);
+
+  dsc->rd = rt;
+  dsc->u.ldst.xfersize = 4;
+  dsc->u.ldst.rn = 0;
+  dsc->u.ldst.immed = 0;
+  dsc->u.ldst.writeback = 0;
+  dsc->u.ldst.restore_r4 = 0;
+
+  RECORD_THUMB_MODE_INSN (0, 0x58d0); /* ldr r0, [r2, r3]*/
+
+  dsc->cleanup = &cleanup_load;
+
+  return 0;
+}
+
+/* Copy Thumb cbnz/cbz instruction.  */
+
+static int
+thumb_copy_cbnz_cbz (struct gdbarch *gdbarch, unsigned short insn1,
+		     struct regcache *regs,
+		     struct displaced_step_closure *dsc)
+{
+  int non_zero = bit (insn1, 11);
+  unsigned int imm5 = (bit (insn1, 9) << 6) | (bits (insn1, 3, 7) << 1);
+  CORE_ADDR from = dsc->insn_addr;
+  int rn = bits (insn1, 0, 2);
+  int rn_val = displaced_read_reg (regs, from, rn);
+
+  dsc->u.branch.cond = (rn_val && non_zero) || (!rn_val && !non_zero);
+  /* CBNZ and CBZ do not affect the condition flags.  If condition is true,
+     set it INST_AL, so cleanup_branch will know branch is taken, otherwise,
+     condition is false, let it be, cleanup_branch will do nothing.  */
+  if (dsc->u.branch.cond)
+    dsc->u.branch.cond = INST_AL;
+
+  dsc->u.branch.link = 0;
+  dsc->u.branch.exchange = 0;
+
+  dsc->u.branch.dest = from + 2 + imm5;
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s [r%d = 0x%x]"
+			" insn %.4x to %.8lx\n", non_zero ? "cbnz" : "cbz",
+			rn, rn_val, insn1, dsc->u.branch.dest);
+
+  RECORD_THUMB_MODE_INSN (0, THUMB_NOP);
+
+  dsc->cleanup = &cleanup_branch;
+  return 0;
+}
+
+static void
+cleanup_pop_pc_16bit(struct gdbarch *gdbarch, struct regcache *regs,
+			 struct displaced_step_closure *dsc)
+{
+  CORE_ADDR from = dsc->insn_addr;
+  int rx = dsc->u.block.regmask ? 8 : 0;
+  int rx_val = displaced_read_reg (regs, from, rx);
+
+  displaced_write_reg (regs, dsc, ARM_PC_REGNUM, rx_val, BX_WRITE_PC);
+  displaced_write_reg (regs, dsc, rx, dsc->tmp[0], CANNOT_WRITE_PC);
+}
+
+static int
+thumb_copy_pop_pc_16bit (struct gdbarch *gdbarch, unsigned short insn1,
+			 struct regcache *regs,
+			 struct displaced_step_closure *dsc)
+{
+  CORE_ADDR from = dsc->insn_addr;
+
+  dsc->u.block.regmask = insn1 & 0x00ff;
+
+  /* Rewrite instruction: POP {rX, rY, ...,rZ, PC}
+     to :
+
+     (1) register list is not empty,
+     Prepare: tmp[0] <- r8,
+
+     POP {rX};   PC is stored in rX
+     MOV r8, rX; finally, PC is stored in r8
+     POP {rX, rY, ...., rZ}
+
+     Cleanup: PC <-r8, r8 <- tmp[0]
+
+     (2) register list is empty,
+     Prepare: tmp[0] <- r0,
+
+     POP {r0}
+
+     Cleanup: PC <- r0, r0 <- tmp[0]
+  */
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog,
+			"displaced: copying thumb pop {%.8x, pc} insn %.4x\n",
+			dsc->u.block.regmask, insn1);
+
+  if (dsc->u.block.regmask != 0)
+    {
+      int rx = 0;
+
+       dsc->tmp[0] = displaced_read_reg (regs, from, 8);
+
+      /* Look for the first register in register list.  */
+      for (rx = 0; rx < 8; rx++)
+	if (dsc->u.block.regmask & (1 << rx))
+	  break;
+
+      RECORD_THUMB_MODE_INSN (0, 0xbc00 | (1 << rx)); /* POP {rX} */
+      RECORD_THUMB_MODE_INSN (1, 0x4680 | (rx << 3)); /* MOV r8, rX */
+      RECORD_THUMB_MODE_INSN (2, insn1 & 0xfeff);     /* POP {rX, rY, ..., rZ} */
+      /* RECORD_THUMB_MODE_INSN (3, 0x46c7); */            /* MOV PC, r8 */
+
+      dsc->numinsns = 3;
+    }
+  else
+    {
+      dsc->tmp[0] = displaced_read_reg (regs, from, 0);
+
+      RECORD_THUMB_MODE_INSN (0, 0xbc00); /* POP {r0} */
+      /* RECORD_THUMB_MODE_INSN (1, 0x4683); */ /* MOV PC, r0 */
+
+      dsc->numinsns = 1;
+    }
+
+  dsc->cleanup = &cleanup_pop_pc_16bit;
+  return 0;
+}
+
+static void
+thumb_process_displaced_16bit_insn (struct gdbarch *gdbarch,
+				    unsigned short insn1, struct regcache *regs,
+				    struct displaced_step_closure *dsc)
+{
+  unsigned short op_bit_12_15 = bits (insn1, 12, 15);
+  unsigned short op_bit_10_11 = bits (insn1, 10, 11);
+  int err = 0;
+
+  /* 16-bit thumb instructions.  */
+  switch (op_bit_12_15)
+    {
+      /* Shift (imme), add, subtract, move and compare*/
+    case 0: case 1: case 2: case 3:
+      err = thumb_copy_unmodified_16bit (gdbarch, insn1,"shift/add/sub/mov/cmp",
+					 dsc);
+      break;
+    case 4:
+      switch (op_bit_10_11)
+	{
+	case 0: /* Data-processing */
+	  err = thumb_copy_unmodified_16bit (gdbarch, insn1,"data-processing",
+					     dsc);
+	  break;
+	case 1: /* Special data instructions and branch and exchange */
+	  {
+	    unsigned short op = bits (insn1, 7, 9);
+	    if (op == 6 || op == 7) /* BX or BLX */
+	      err = thumb_copy_bx_blx_reg (gdbarch, insn1, regs, dsc);
+	    else if (bits (insn1, 6, 7) != 0) /* ADD/MOV/CMP high registers.  */
+	      err = thumb_copy_alu_reg (gdbarch, insn1, regs, dsc);
+	    else
+	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "special data",
+						 dsc);
+	  }
+	  break;
+	default: /* LDR (literal) */
+	  err = thumb_copy_16bit_ldr_literal (gdbarch, insn1, regs, dsc);
+	}
+      break;
+    case 5: case 6: case 7: case 8: case 9: /* Load/Store single data item */
+      err = thumb_copy_unmodified_16bit (gdbarch, insn1,"ldr/str", dsc);
+      break;
+    case 10:
+      if (op_bit_10_11 < 2) /* Generate PC-relative address */
+	err = thumb_decode_pc_relative_16bit (gdbarch, insn1, regs, dsc);
+      else /* Generate SP-relative address */
+	err = thumb_copy_unmodified_16bit (gdbarch, insn1,"sp-relative", dsc);
+      break;
+    case 11: /* Misc 16-bit instructions */
+      {
+	switch (bits (insn1, 8, 11))
+	  {
+	  case 1: case 3:  case 9: case 11: /* CBNZ, CBZ */
+	    err = thumb_copy_cbnz_cbz (gdbarch, insn1, regs, dsc);
+	    break;
+	  case 12: case 13: /* POP */
+	    if (bit (insn1, 8)) /* PC is in register list.  */
+	      {
+		err = thumb_copy_pop_pc_16bit (gdbarch, insn1, regs, dsc);
+	      }
+	    else
+	      err = thumb_copy_unmodified_16bit (gdbarch, insn1,"pop", dsc);
+	    break;
+	  case 15: /* If-Then, and hints */
+	    if (bits (insn1, 0, 3))
+	      err = 1; /* Not supported If-Then */
+	    else
+	      err = thumb_copy_unmodified_16bit (gdbarch, insn1,"hints", dsc);
+	    break;
+	  default:
+	    err = thumb_copy_unmodified_16bit (gdbarch, insn1,"misc", dsc);
+	  }
+      }
+      break;
+    case 12:
+      if (op_bit_10_11 < 2) /* Store multiple registers */
+	err = thumb_copy_unmodified_16bit (gdbarch, insn1,"stm", dsc);
+      else /* Load multiple registers */
+	err = thumb_copy_unmodified_16bit (gdbarch, insn1,"ldm", dsc);
+      break;
+    case 13: /* Conditional branch and supervisor call */
+      if (bits (insn1, 9, 11) != 7) /* conditional branch */
+	err = thumb_copy_b (gdbarch, insn1, dsc);
+      else
+	err = thumb_copy_unmodified_16bit (gdbarch, insn1,"svc", dsc);
+      break;
+    case 14: /* Unconditional branch */
+      err = thumb_copy_b (gdbarch, insn1, dsc);
+      break;
+    default:
+      internal_error (__FILE__, __LINE__,
+		      _("thumb_process_displaced_insn: Instruction decode error"));
+    }
+
+  if (err)
+    internal_error (__FILE__, __LINE__,
+		    _("thumb_process_displaced_insn: Instruction decode error"));
+}
+
+static void
+thumb_process_displaced_32bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
+				    uint16_t insn2, struct regcache *regs,
+				    struct displaced_step_closure *dsc)
+{
+  error (_("Displaced stepping is only supported in ARM mode and Thumb 16bit instructions"));
+}
+
 static void
 thumb_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
 			      CORE_ADDR to, struct regcache *regs,
 			      struct displaced_step_closure *dsc)
 {
-  error (_("Displaced stepping is only supported in ARM mode"));
+  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
+  unsigned short insn1
+    = read_memory_unsigned_integer (from, 2, byte_order_for_code);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: process thumb insn %.4x "
+			"at %.8lx\n", insn1, (unsigned long) from);
+
+  dsc->is_thumb = 1;
+  dsc->insn_size = thumb_insn_size (insn1);
+  if (thumb_insn_size (insn1) == 4)
+    {
+      unsigned short insn2
+	= read_memory_unsigned_integer (from + 2, 2, byte_order_for_code);
+      thumb_process_displaced_32bit_insn(gdbarch, insn1, insn2, regs, dsc);
+    }
+  else
+    thumb_process_displaced_16bit_insn(gdbarch, insn1, regs, dsc);
 }
 
 void
-- 
1.7.0.4


^ permalink raw reply	[flat|nested] 66+ messages in thread

* Displaced stepping 0004: wip: 32-bit Thumb instructions
  2010-12-25 14:17 [patch 0/3] Displaced stepping for 16-bit Thumb instructions Yao Qi
                   ` (6 preceding siblings ...)
  2011-02-26 17:50 ` Displaced stepping 0002: refactor and create some copy helpers Yao Qi
@ 2011-02-28  2:15 ` Yao Qi
  2011-03-24 13:49 ` [try 2nd 0/8] Displaced stepping for " Yao Qi
  8 siblings, 0 replies; 66+ messages in thread
From: Yao Qi @ 2011-02-28  2:15 UTC (permalink / raw)
  To: gdb-patches

[-- Attachment #1: Type: text/plain, Size: 589 bytes --]

This is the last one.  The subject tells everything; however, this patch is
*not* for review, but is meant to help you understand my design of the
first 3 patches:

0001: refactor displaced stepping to handle 32-bit and 16-bit,
http://sourceware.org/ml/gdb-patches/2011-02/msg00790.html
0002: refactor and create some copy helpers
http://sourceware.org/ml/gdb-patches/2011-02/msg00792.html
0003: for 16-bit Thumb instructions
http://sourceware.org/ml/gdb-patches/2011-02/msg00866.html

There are still some problems in this patch, and I still need some time
to polish it.

-- 
Yao (齐尧)

[-- Attachment #2: 0004-support-thumb-32-bit-insn.patch --]
[-- Type: text/x-patch, Size: 23171 bytes --]


	* arm-tdep.c (thumb_copy_unmodified_32bit): New.
	(thumb2_copy_preload, thumb2_copy_preload_reg): New.
	(thumb2_copy_b_bl_blx, thumb2_copy_alu_reg): New.
	(thumb2_copy_alu_shifted_reg): New.
	(thumb2_copy_ldr_str_ldrb_strb): New.
	(thumb_32bit_copy_undef): New.
	(thumb2_copy_unmodified_helper): Copy helpers for Thumb-2 mode.
	(thumb2_copy_undef_helper): Likewise.
	(thumb2_copy_copro_load_store_helper): Likewise.
	(thumb2_copy_ldm_with_pc_helper): Likewise.
	(thumb2_copy_svc_helper): Likewise.
	(thumb2_decode_svc_copro): New.  Call copy helpers.
	(thumb2_copy_block_xfer): Likewise.
	(thumb_decode_pc_relative_32bit): New.
	(decode_thumb_32bit_ld_mem_hints): New.
---
 gdb/arm-tdep.c |  587 +++++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 files changed, 582 insertions(+), 5 deletions(-)

diff --git a/gdb/arm-tdep.c b/gdb/arm-tdep.c
index 269c583..86bcfaa 100644
--- a/gdb/arm-tdep.c
+++ b/gdb/arm-tdep.c
@@ -5349,6 +5349,23 @@ thumb_copy_unmodified_16bit (struct gdbarch *gdbarch, unsigned int insn,
   return 0;
 }
 
+/* Copy a 32-bit Thumb (Thumb-2) instruction without any modification.  */
+static int
+thumb_copy_unmodified_32bit (unsigned int insn1, unsigned int insn2,
+			     const char *iname,
+			     struct displaced_step_closure *dsc)
+{
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x %.4x, "
+			"opcode/class '%s' unmodified\n", insn1, insn2,
+			iname);
+
+  RECORD_THUMB2_MODE_INSN (0, insn1, insn2);
+  dsc->numinsns = 2;
+
+  return 0;
+}
+
 /* Preload instructions with immediate offset.  */
 
 static void
@@ -5402,6 +5419,27 @@ arm_copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
   return copy_preload (gdbarch, rn, regs, dsc);
 }
 
+
+static int
+thumb2_copy_preload (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
+		     struct regcache *regs, struct displaced_step_closure *dsc)
+{
+  unsigned int rn = bits (insn1, 0, 3);
+
+  if (rn != ARM_PC_REGNUM)
+    return thumb_copy_unmodified_32bit (insn1, insn2, "preload", dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.4x %.4x\n",
+			(unsigned short) insn1, (unsigned short) insn2);
+
+
+  RECORD_THUMB2_MODE_INSN (0, (insn1 & 0xfff0), insn2);
+  dsc->numinsns = 2;
+
+  return copy_preload (gdbarch, rn, regs, dsc);
+}
+
 /* Preload instructions with register offset.  */
 
 static int
@@ -5450,6 +5488,29 @@ arm_copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn,
   return copy_preload_reg (gdbarch, rn, rm, regs, dsc);
 }
 
+static int
+thumb2_copy_preload_reg (struct gdbarch *gdbarch, uint16_t insn1,
+			 uint16_t insn2, struct regcache *regs,
+			 struct displaced_step_closure *dsc)
+{
+  unsigned int rn = bits (insn1, 0, 3);
+  unsigned int rm = bits (insn2, 0, 3);
+
+  if (rn != ARM_PC_REGNUM && rm != ARM_PC_REGNUM)
+    return thumb_copy_unmodified_32bit (insn1, insn2, "preload reg", dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.4x %.4x\n",
+			(unsigned short) insn1, (unsigned short) insn2);
+
+  RECORD_THUMB2_MODE_INSN (0, insn1 & 0xfff0, (insn2 & 0xfff0) | 0x1);
+  dsc->numinsns = 2;
+
+  return copy_preload_reg (gdbarch, rn, rm, regs, dsc);
+}
+
+
+
 /* Copy/cleanup coprocessor load and store instructions.  */
 
 static void
@@ -5575,6 +5636,60 @@ arm_copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
   return copy_b_bl_blx (gdbarch, cond, exchange, link, offset, regs, dsc);
 }
 
+/* Copy B/BL/BLX Thumb-2 instructions with immediate destinations.  */
+static int
+thumb2_copy_b_bl_blx (struct gdbarch *gdbarch, unsigned short insn1,
+		     unsigned short insn2, struct regcache *regs,
+		     struct displaced_step_closure *dsc)
+{
+  int link = bit (insn2, 14);
+  int exchange = link && !bit (insn2, 12);
+  int cond = INST_AL;
+  long offset =0;
+
+  if (!link && !exchange) /* B */
+    {
+      int j1 = bit (insn2, 13);
+      int j2 = bit (insn2, 11);
+      int s = sbits (insn1, 10, 10);
+
+      cond = bits (insn1, 6, 9);
+      offset = (bits (insn2, 0, 10) << 1);
+      if (bit (insn2, 12))
+	{
+	  int i1 = !(j1 ^ bit (insn1, 10));
+	  int i2 = !(j2 ^ bit (insn1, 10));
+
+	  offset |= (bits (insn1, 0, 9) << 12)
+	    | (i2 << 22)
+	    | (i1 << 23)
+	    | (s << 24);
+	}
+      else
+	offset |= (bits (insn1, 0, 5) << 12)
+	  | (j1 << 18)
+	  | (j2 << 19)
+	  | (s << 20);
+    }
+  else
+    {
+      offset = (sbits (insn1, 0, 9) << 12);
+      offset |= exchange ?
+	(bits (insn2, 1, 10) << 2) : (bits (insn2, 0, 10) << 1);
+    }
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s immediate insn "
+			"%.4x %.4x with offset %.8lx\n",
+			(exchange) ? "blx" : "bl",
+			insn1, insn2, offset);
+
+  /* Plus the size of THUMB_NOP and B/BL/BLX.  */
+  dsc->u.branch.dest = dsc->insn_addr + 4 + offset;
+  RECORD_THUMB_MODE_INSN (0, THUMB_NOP);
+
+  return copy_b_bl_blx (gdbarch, INST_AL, exchange, 1, offset, regs, dsc);
+}
 /* Copy B Thumb instructions.  */
 static int
 thumb_copy_b (struct gdbarch *gdbarch, unsigned short insn,
@@ -5853,6 +5968,23 @@ thumb_copy_alu_reg (struct gdbarch *gdbarch, unsigned short insn,
   return copy_alu_reg (gdbarch, regs, dsc, reg_ids);
 }
 
+static int
+thumb2_copy_alu_reg (struct gdbarch *gdbarch, unsigned short insn1,
+		    unsigned short insn2, struct regcache *regs,
+		    struct displaced_step_closure *dsc)
+{
+  unsigned int rn = bits (insn1, 0, 3);
+  unsigned int rm = bits (insn2, 0, 3);
+  unsigned int rd = bits (insn2, 8, 11);
+
+  /* In Thumb-2, rn, rm and rd can't be r15.  */
+  if (rn == ARM_PC_REGNUM || rm == ARM_PC_REGNUM || rd == ARM_PC_REGNUM)
+    internal_error (__FILE__, __LINE__,
+		    _("thumb_copy_alu_reg: rn, rm or rd shouldn't be r15"));
+
+  return thumb_copy_unmodified_32bit (insn1, insn2, "ALU reg", dsc);
+}
+
 /* Cleanup/copy arithmetic/logic insns with shifted register RHS.  */
 
 static void
@@ -5928,6 +6060,22 @@ arm_copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
   return 0;
 }
 
+static int
+thumb2_copy_alu_shifted_reg (struct gdbarch *gdbarch, unsigned short insn1,
+			    unsigned short insn2, struct regcache *regs,
+			    struct displaced_step_closure *dsc)
+{
+  unsigned int rm = bits (insn1, 0, 3);
+  unsigned int rd = bits (insn2, 8, 11);
+  unsigned int rs = bits (insn2, 0, 3);
+
+  if (rs == ARM_PC_REGNUM || rm == ARM_PC_REGNUM || rd == ARM_PC_REGNUM)
+    internal_error (__FILE__, __LINE__,
+		    _("thumb2_copy_alu_shifted_reg: rm, rs or rd shouldn't be r15"));
+
+  return thumb_copy_unmodified_32bit (insn1, insn2, "ALU shifted reg", dsc);
+}
+
 /* Clean up load instructions.  */
 
 static void
@@ -6114,6 +6262,69 @@ copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, struct regcache *regs,
 }
 
 static int
+thumb2_copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, unsigned short insn1,
+			      unsigned short insn2,  struct regcache *regs,
+			      struct displaced_step_closure *dsc,
+			      int load, int byte, int usermode, int writeback)
+{
+  int immed = !bit (insn1, 9);
+  unsigned int rt = bits (insn2, 12, 15);
+  unsigned int rn = bits (insn1, 0, 3);
+  unsigned int rm = bits (insn2, 0, 3);  /* Only valid if !immed.  */
+
+  if (rt != ARM_PC_REGNUM && rn != ARM_PC_REGNUM && rm != ARM_PC_REGNUM)
+    return thumb_copy_unmodified_32bit (insn1, insn2, "load/store", dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog,
+			"displaced: copying %s%s r%d [r%d] insn %.4x%.4x\n",
+			load ? (byte ? "ldrb" : "ldr")
+			     : (byte ? "strb" : "str"), usermode ? "t" : "",
+			rt, rn, insn1, insn2);
+
+  dsc->rd = rt;
+  dsc->u.ldst.rn = rn;
+  dsc->u.ldst.immed = immed;
+
+  copy_ldr_str_ldrb_strb (gdbarch, regs, dsc, load, byte, usermode,
+			  writeback, rm);
+
+  if (load || rt != ARM_PC_REGNUM)
+    {
+      dsc->u.ldst.restore_r4 = 0;
+
+      if (immed)
+	/* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
+	   ->
+	   {ldr,str}[b]<cond> r0, [r2, #imm].  */
+	{
+	  RECORD_THUMB_MODE_INSN (0, (insn1 & 0xfff0) | 0x2);
+	  RECORD_THUMB_MODE_INSN (1, insn2 & 0x0fff);
+	}
+      else
+	/* {ldr,str}[b]<cond> rt, [rn, rm], etc.
+	   ->
+	   {ldr,str}[b]<cond> r0, [r2, r3].  */
+	{
+	  RECORD_THUMB_MODE_INSN (0, (insn1 & 0xfff0) | 0x2);
+	  RECORD_THUMB_MODE_INSN (1, (insn2 & 0x0ff0) | 0x3);
+	}
+
+      dsc->numinsns = 2;
+    }
+  else
+    {
+      /* In 32-bit Thumb instructions, the behavior is unpredictable when Rt
+	 is PC, while the behavior is undefined when Rn is PC.  In short,
+	 neither Rt nor Rn can be PC.  */
+
+      gdb_assert (0);
+    }
+
+  return 0;
+}
+
+static int
 arm_copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
 			    struct regcache *regs,
 			    struct displaced_step_closure *dsc,
@@ -6411,7 +6622,7 @@ typedef int (*copy_svc_helper)(struct gdbarch *,
 			       struct regcache *,
 			       struct displaced_step_closure *);
 
-/* Define helpers for ARM.  */
+/* Define helpers for ARM and Thumb-2.  */
 static int
 arm_copy_unmodified_helper (union instruction_instance insn, const char *iname,
 			    struct displaced_step_closure *dsc)
@@ -6419,6 +6630,15 @@ arm_copy_unmodified_helper (union instruction_instance insn, const char *iname,
   return arm_copy_unmodified (insn._32_bit, iname, dsc);
 }
 
+static int
+thumb2_copy_unmodified_helper (union instruction_instance insn,
+			       const char *iname,
+			       struct displaced_step_closure *dsc)
+{
+  return thumb_copy_unmodified_32bit (insn._16_bit[0], insn._16_bit[1], iname,
+				      dsc);
+}
+
 static int arm_copy_undef (uint32_t insn, struct displaced_step_closure *dsc);
 
 static int
@@ -6428,6 +6648,15 @@ arm_copy_undef_helper (union instruction_instance ii,
   return arm_copy_undef (ii._32_bit, dsc);
 }
 
+static int thumb_32bit_copy_undef (uint16_t insn1, uint16_t insn2,
+				   struct displaced_step_closure *dsc);
+static int
+thumb2_copy_undef_helper (union instruction_instance ii,
+			  struct displaced_step_closure *dsc)
+{
+  return thumb_32bit_copy_undef (ii._16_bit[0], ii._16_bit[1], dsc);
+}
+
 static int
 arm_copy_copro_load_store_helper (struct gdbarch *gdbarch,
 				  union instruction_instance insn,
@@ -6452,6 +6681,30 @@ arm_copy_copro_load_store_helper (struct gdbarch *gdbarch,
 }
 
 static int
+thumb2_copy_copro_load_store_helper (struct gdbarch *gdbarch,
+				     union instruction_instance insn,
+				     struct regcache *regs,
+				     struct displaced_step_closure *dsc)
+{
+  unsigned int rn = bits (insn._16_bit[0], 0, 3);
+  if (rn != ARM_PC_REGNUM)
+    return arm_copy_unmodified (insn._32_bit, "copro load/store", dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
+			"load/store insn %.8lx\n",
+			(unsigned long) insn._32_bit);
+
+  dsc->u.ldst.writeback = bit (insn._16_bit[0], 9);
+  dsc->u.ldst.rn = rn;
+
+  RECORD_THUMB2_MODE_INSN(0, insn._16_bit[0] & 0xfff0,
+			  insn._16_bit[1]);
+
+  return copy_copro_load_store (gdbarch, rn, regs, dsc);
+}
+
+static int
 arm_copy_ldm_with_pc_helper(union instruction_instance insn,
 			    struct displaced_step_closure *dsc,
 			    struct regcache *regs)
@@ -6508,6 +6761,61 @@ arm_copy_ldm_with_pc_helper(union instruction_instance insn,
   return 0;
 }
 
+static int
+thumb2_copy_ldm_with_pc_helper (union instruction_instance insn,
+				struct displaced_step_closure *dsc,
+				struct regcache *regs)
+{
+  /* LDM of a list of registers which includes PC.  Implement by
+     rewriting the list of registers to be transferred into a
+     contiguous chunk r0...rX before doing the transfer, then shuffling
+     registers into the correct places in the cleanup routine.  */
+  unsigned int regmask = dsc->u.block.regmask;
+  unsigned int num_in_list = bitcount (regmask), new_regmask, bit = 1;
+  unsigned int to = 0, from = 0, i, new_rn;
+
+  for (i = 0; i < num_in_list; i++)
+    dsc->tmp[i] = displaced_read_reg (regs, from, i);
+
+  /* Writeback makes things complicated.  We need to avoid clobbering
+     the base register with one of the registers in our modified
+     register list, but just using a different register can't work in
+     all cases, e.g.:
+
+     ldm r14!, {r0-r13,pc}
+
+     which would need to be rewritten as:
+
+     ldm rN!, {r0-r14}
+
+     but that can't work, because there's no free register for N.
+
+     Solve this by turning off the writeback bit, and emulating
+     writeback manually in the cleanup routine.  */
+
+  if (dsc->u.block.writeback )
+    insn._32_bit &= ~(1 << 21);
+
+  new_regmask = (1 << num_in_list) - 1;
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
+				      "{..., pc}: original reg list %.4x, modified "
+				      "list %.4x\n"), dsc->u.block.rn,
+			dsc->u.block.writeback ? "!" : "",
+			(int) dsc->u.block.regmask,
+			new_regmask);
+
+  /* In Thumb encoding, bit 13 should be always zero.  */
+  if (displaced_in_arm_mode (regs))
+    new_regmask &= 0xffff;
+  else
+    new_regmask &= 0xdfff;
+
+  RECORD_THUMB2_MODE_INSN(0, insn._16_bit[0], new_regmask);
+  return 0;
+}
+
 static void cleanup_svc (struct gdbarch *gdbarch, struct regcache *regs,
 			 struct displaced_step_closure *dsc);
 static int
@@ -6538,7 +6846,15 @@ arm_copy_svc_helper (struct gdbarch *gdbarch, union instruction_instance insn,
 
   return 0;
 }
-
+static int
+thumb2_copy_svc_helper (struct gdbarch *gdbarch,
+			union instruction_instance insn,
+			CORE_ADDR to, struct regcache *regs,
+			struct displaced_step_closure *dsc)
+{
+  /* Not implemented.  */
+  return 0;
+}
 
 /* Helper definition is done.  */
 
@@ -6637,19 +6953,33 @@ arm_copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn,
 			  arm_copy_ldm_with_pc_helper, dsc);
 
 }
+
+static int
+thumb2_copy_block_xfer (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
+			struct regcache *regs,
+			struct displaced_step_closure *dsc)
+{
+   union instruction_instance ii;
+   ii._16_bit[0] = insn1;
+   ii._16_bit[1] = insn2;
+
+   return copy_block_xfer (gdbarch, ii, regs, thumb2_copy_unmodified_helper,
+			   thumb2_copy_ldm_with_pc_helper, dsc);
+}
+
 /* Cleanup/copy SVC (SWI) instructions.  These two functions are overridden
    for Linux, where some SVC instructions must be treated specially.  */
 
 static void
 cleanup_svc (struct gdbarch *gdbarch, struct regcache *regs,
-            struct displaced_step_closure *dsc)
+	     struct displaced_step_closure *dsc)
 {
   CORE_ADDR from = dsc->insn_addr;
   CORE_ADDR resume_addr = from + 4;
 
   if (debug_displaced)
     fprintf_unfiltered (gdb_stdlog, "displaced: cleanup for svc, resume at "
-                       "%.8lx\n", (unsigned long) resume_addr);
+			"%.8lx\n", (unsigned long) resume_addr);
 
   displaced_write_reg (regs, dsc, ARM_PC_REGNUM, resume_addr, BRANCH_WRITE_PC);
 }
@@ -6669,6 +6999,22 @@ arm_copy_undef (uint32_t insn, struct displaced_step_closure *dsc)
   return 0;
 }
 
+static int
+thumb_32bit_copy_undef (uint16_t insn1, uint16_t insn2,
+			struct displaced_step_closure *dsc)
+{
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying undefined insn "
+			"%.4x %.4x\n", (unsigned short) insn1,
+			(unsigned short) insn2);
+
+  RECORD_THUMB2_MODE_INSN (0, insn1, insn2);
+  dsc->numinsns = 2;
+
+  return 0;
+}
+
 /* Copy unpredictable instructions.  */
 
 static int
@@ -7129,6 +7475,28 @@ arm_decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn,
 }
 
 static int
+thumb2_decode_svc_copro (struct gdbarch *gdbarch, uint16_t insn1,
+			 uint16_t insn2, struct regcache *regs,
+			 struct displaced_step_closure *dsc)
+{
+  unsigned int ops[4];
+  union instruction_instance ii;
+
+  ops[0] = bits (insn1, 4, 9);
+  ops[1] = bit (insn2, 4);
+  ops[2] = bits (insn2, 8, 11);
+  ops[3] = bits (insn1, 4, 8);
+
+  ii._16_bit[0] = insn1;
+  ii._16_bit[1] = insn2;
+
+  return decode_svc_copro (gdbarch, ii, thumb2_copy_unmodified_helper,
+			   thumb2_copy_copro_load_store_helper,
+			   thumb2_copy_undef_helper, thumb2_copy_svc_helper,
+			  regs, dsc, ops);
+}
+
+static int
 copy_pc_relative (struct regcache *regs, struct displaced_step_closure *dsc,
 		  int rd, unsigned int imm, int is_32bit)
 {
@@ -7179,6 +7547,26 @@ thumb_decode_pc_relative_16bit (struct gdbarch *gdbarch, unsigned short insn,
 }
 
 static int
+thumb_decode_pc_relative_32bit (struct gdbarch *gdbarch, unsigned short insn1,
+				unsigned short insn2, struct regcache *regs,
+				struct displaced_step_closure *dsc)
+{
+  unsigned int rd = bits (insn2, 8, 11);
+  /* Since the immediate has the same encoding in both ADR and ADDS, we
+     simply extract the raw immediate encoding rather than computing the
+     immediate.  When generating the ADDS instruction, we can simply OR
+     the raw immediate into ADDS.  */
+  unsigned int imm = (insn2 & 0x70ff) | (bit (insn1, 10) << 26);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog,
+			"displaced: copying thumb adr r%d, #%d insn %.4x%.4x\n",
+			rd, imm, insn1, insn2);
+
+  return copy_pc_relative (regs, dsc, rd, imm, 1);
+}
+
+static int
 thumb_copy_16bit_ldr_literal (struct gdbarch *gdbarch, unsigned short insn1,
 			      struct regcache *regs,
 			      struct displaced_step_closure *dsc)
@@ -7437,12 +7825,201 @@ thumb_process_displaced_16bit_insn (struct gdbarch *gdbarch,
 		    _("thumb_process_displaced_insn: Instruction decode error"));
 }
 
+static int
+decode_thumb_32bit_ld_mem_hints (struct gdbarch *gdbarch,
+				 unsigned short insn1, unsigned short insn2,
+				 struct regcache *regs,
+				 struct displaced_step_closure *dsc)
+{
+  int rd = bits (insn2, 12, 15);
+  int user_mode = (bits (insn2, 8, 11) == 0xe);
+  int err = 0;
+  int writeback = 0;
+
+  switch (bits (insn1, 5, 6))
+    {
+    case 0: /* Load byte and memory hints */
+      if (rd == 0xf) /* PLD/PLI */
+	{
+	  if (bits (insn2, 6, 11))
+	    return thumb2_copy_preload (gdbarch, insn1, insn2, regs, dsc);
+	  else
+	    return thumb2_copy_preload_reg (gdbarch, insn1, insn2, regs, dsc);
+	}
+      else
+	{
+	  int op1 = bits (insn1, 7, 8);
+
+	  if ((op1 == 0 || op1 == 2) && bit (insn2, 11))
+	    writeback = bit (insn2, 8);
+
+	  return thumb2_copy_ldr_str_ldrb_strb (gdbarch, insn1, insn2, regs,
+					       dsc, 1, 1, user_mode, writeback);
+	}
+
+      break;
+    case 1: /* Load halfword and memory hints */
+      if (rd == 0xf) /* PLD{W} and Unalloc memory hint */
+	{
+	  if (bits (insn2, 6, 11))
+	    return thumb2_copy_preload (gdbarch, insn1, insn2, regs, dsc);
+	  else
+	    return thumb2_copy_preload_reg (gdbarch,insn1, insn2, regs, dsc);
+	}
+      else
+	{
+	  int op1 = bits (insn1, 7, 8);
+
+	  if ((op1 == 0 || op1 == 2) && bit (insn2, 11))
+	    writeback = bit (insn2, 8);
+	  return thumb2_copy_ldr_str_ldrb_strb (gdbarch, insn1, insn2, regs,
+					       dsc, 1, 0, user_mode, writeback);
+	}
+      break;
+    case 2: /* Load word */
+      {
+	int op1 = bits (insn1, 7, 8);
+
+	  if ((op1 == 0 || op1 == 2) && bit (insn2, 11))
+	    writeback = bit (insn2, 8);
+
+	return thumb2_copy_ldr_str_ldrb_strb (gdbarch, insn1, insn2, regs, dsc,
+					     1, 0, user_mode, writeback);
+	break;
+      }
+    default:
+      return thumb_32bit_copy_undef (insn1, insn2, dsc);
+      break;
+    }
+  return 0;
+}
+
 static void
 thumb_process_displaced_32bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
 				    uint16_t insn2, struct regcache *regs,
 				    struct displaced_step_closure *dsc)
 {
-  error (_("Displaced stepping is only supported in ARM mode and Thumb 16bit instructions"));
+  int err = 0;
+  unsigned short op;
+
+  op = bit (insn2, 15);
+
+  switch (bits (insn1, 11, 12))
+    {
+    case 1:
+      {
+	switch (bits (insn1, 9, 10))
+	  {
+	  case 0: /* load/store multiple */
+	    switch (bits (insn1, 7, 8))
+	      {
+	      case 0: case 3: /* SRS, RFE */
+		err = thumb_copy_unmodified_32bit (insn1, insn2, "srs/rfe",
+						   dsc);
+		break;
+	      case 1: case 2: /* LDM/STM/PUSH/POP */
+		/* These Thumb 32-bit insns have the same encodings as ARM
+		   counterparts.  */
+		err = thumb2_copy_block_xfer (gdbarch, insn1, insn2, regs, dsc);
+	      }
+	    break;
+	  case 1: /* Data processing (register) */
+	    err = thumb2_copy_alu_reg (gdbarch, insn1, insn2, regs, dsc);
+	    break;
+	  default: /* Coprocessor instructions */
+	    /* Thumb 32bit coprocessor instructions have the same encoding
+	       as ARM's.  */
+	    err = thumb2_decode_svc_copro (gdbarch, insn1, insn2, regs, dsc);
+	    break;
+	  }
+      break;
+      }
+    case 2:
+      if (op) /* Branch and misc control.  */
+	{
+	  if (bit (insn2, 14)) /* BLX/BL */
+	    err = thumb2_copy_b_bl_blx (gdbarch, insn1, insn2, regs, dsc);
+	  else if (!bits (insn2, 12, 14) && bits (insn1, 8, 10) != 0x7)
+	    /* Conditional Branch */
+	    err = thumb2_copy_b_bl_blx (gdbarch, insn1, insn2, regs, dsc);
+	  else
+	    err = thumb_copy_unmodified_32bit (insn1, insn2, "misc ctrl",
+					       dsc);
+	}
+      else
+	{
+	  if (bit (insn1, 9)) /* Data processing (plain binary imm) */
+	    {
+	      int op = bits (insn1, 4, 8);
+	      int rn = bits (insn1, 0, 4);
+	      if ((op == 0 || op == 0xa) && rn == 0xf)
+		err = thumb_decode_pc_relative_32bit (gdbarch, insn1, insn2,
+						      regs, dsc);
+	      else
+		err = thumb_copy_unmodified_32bit (insn1, insn2, "dp/pb", dsc);
+	    }
+	  else /* Data processing (modified immediate) */
+	    err = thumb_copy_unmodified_32bit (insn1, insn2, "dp/mi", dsc);
+	}
+      break;
+    case 3:
+      switch (bits (insn1, 9, 10))
+	{
+	case 0:
+	  if (bit (insn1, 4))
+	    err = decode_thumb_32bit_ld_mem_hints (gdbarch, insn1, insn2,
+						   regs, dsc);
+	  else
+	    {
+	      if (bit (insn1, 8)) /* NEON Load/Store */
+		err = thumb_copy_unmodified_32bit (insn1, insn2,
+						   "neon elt/struct load/store",
+						   dsc);
+	      else /* Store single data item */
+		{
+		  int user_mode = (bits (insn2, 8, 11) == 0xe);
+		  int byte = (bits (insn1, 5, 7) == 0
+			      || bits (insn1, 5, 7) == 4);
+		  int writeback = 0;
+
+		  if (bits (insn1, 5, 7) < 3 && bit (insn2, 11))
+		    writeback = bit (insn2, 8);
+
+		  err = thumb2_copy_ldr_str_ldrb_strb (gdbarch, insn1, insn2,
+						      regs, dsc, 0, byte,
+						      user_mode, writeback);
+		}
+	    }
+	  break;
+	case 1:
+	  switch (bits (insn1, 7, 8))
+	    {
+	    case 0: case 1: /* Data-processing (shift register) */
+	      err = thumb2_copy_alu_shifted_reg (gdbarch, insn1, insn2, regs,
+						dsc);
+	      break;
+	    case 2: /* Multiply and absolute difference */
+	      err = thumb_copy_unmodified_32bit (insn1, insn2, "mul/mua/diff",
+						 dsc);
+	      break;
+	    case 3: /* Long multiply and divide */
+	      err = thumb_copy_unmodified_32bit (insn1, insn2, "lmul/lmua",
+						 dsc);
+	      break;
+	    }
+	  break;
+	default: /* Coprocessor instructions */
+	  err = thumb2_decode_svc_copro (gdbarch, insn1, insn2, regs, dsc);
+	  break;
+	}
+      break;
+    default:
+      err = 1;
+    }
+
+  if (err)
+    internal_error (__FILE__, __LINE__,
+		    _("thumb_process_displaced_insn: Instruction decode error"));
 }
 
 static void
-- 
1.7.0.4


^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [patch 2/3] Displaced stepping for 16-bit Thumb instructions
  2011-02-26 14:07                 ` Yao Qi
@ 2011-02-28 17:37                   ` Ulrich Weigand
  2011-03-01  9:01                     ` Yao Qi
  0 siblings, 1 reply; 66+ messages in thread
From: Ulrich Weigand @ 2011-02-28 17:37 UTC (permalink / raw)
  To: Yao Qi; +Cc: gdb-patches

Yao Qi wrote:

> +/* Record an ARM mode instruction in one slot.  */
> +#define RECORD_ARM_MODE_INSN(INDEX, INSN) do \
> +{\
> +  dsc->modinsn[INDEX] = INSN;\
> + } while (0)
> +
> +#define RECORD_THUMB_MODE_INSN(INDEX, INSN) do \
> +{\
> +  dsc->modinsn[INDEX] = INSN;\
> + } while (0)
> +
> +/* Record the two parts of 32-bit Thumb-2 instruction. Each part occupies
> +   one array element.  */
> +#define RECORD_THUMB2_MODE_INSN(INDEX, INSN1, INSN2) do \
> +{ \
> +  dsc->modinsn[INDEX] = INSN1;\
> +  dsc->modinsn[INDEX + 1] = INSN2;\
> +} while (0)

OK, so at this point I think it's really not necessary to have those
as macros in the first place.  Instead, code should just continue to
fill in dsc->modinsn, which will shorten this patch significantly :-)

> @@ -5117,10 +5119,21 @@ displaced_read_reg (struct regcache *regs, CORE_ADDR from, int regno)
>  
>    if (regno == 15)
>      {
> +      /* Compute pipeline offset:
> +	 - When executing an ARM instruction, PC reads as the address of the
> +	 current instruction plus 8.
> +	 - When executing a Thumb instruction, PC reads as the address of the
> +	 current instruction plus 4.  */
> +
> +      if (displaced_in_arm_mode (regs))

It would be somewhat nicer here to use dsc->is_thumb instead of re-computing
its value.  However, the displaced_read_reg function doesn't have the dsc
argument, which is annoying (and asymmetrical to displaced_write_reg ...).

So if you want to make the effort to change all call sites to pass in dsc,
this would be nice, but I guess I'm also OK with doing it as above.

> @@ -6904,23 +6919,49 @@ arm_displaced_init_closure (struct gdbarch *gdbarch, CORE_ADDR from,
>  			    CORE_ADDR to, struct displaced_step_closure *dsc)
>  {
>    struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
> -  unsigned int i;
> +  unsigned int i, len, offset;
>    enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
> +  int size = dsc->insn_size;

Ah, this is wrong: it needs to be "dsc->is_thumb? 2 : 4".  Note that if the
original instruction was 32-bit Thumb2, insn_size will be 4, but we still
need to copy 2-byte chunks here.

Otherwise, this looks OK to me now.  Thanks for your continued effort to work
on this feature!

Bye,
Ulrich

-- 
  Dr. Ulrich Weigand
  GNU Toolchain for Linux on System z and Cell BE
  Ulrich.Weigand@de.ibm.com

^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: Displaced stepping 0002: refactor and create some copy helpers
  2011-02-26 17:50 ` Displaced stepping 0002: refactor and create some copy helpers Yao Qi
@ 2011-02-28 17:53   ` Ulrich Weigand
  0 siblings, 0 replies; 66+ messages in thread
From: Ulrich Weigand @ 2011-02-28 17:53 UTC (permalink / raw)
  To: Yao Qi; +Cc: gdb-patches

Yao Qi wrote:

> The patch continues to refactor code, in order to
>  1) make copy functions separated for ARM and Thumb,

I think it makes sense to split the copy functions into the part that actually
decodes the instruction and installs the target instruction (which depends on
ARM vs. Thumb), and the part that sets up registers and installs the cleanup etc. 
(which only depends on the semantics of the instruction, not its encoding).

For symmetry, I'd then prefer if *all* copy functions were split up that way
(in the end, you'll probably need all of them anyway, since just about all
ARM instructions have an alternate encoding as Thumb/Thumb-2).

>  2) define some copy helper functions for some ARM and Thumb-2 instructions.

These look really awkward to me, what with the union and all those function
pointers ...   It seems preferable to me to keep instruction decoding
between ARM and Thumb fully separate, even if this means we have a small
amount of duplication.

> -copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
> -		 const char *iname, struct displaced_step_closure *dsc)
> +arm_copy_unmodified (uint32_t insn, const char *iname,
> +		     struct displaced_step_closure *dsc)

Again for symmetry reasons, it's probably better if *all* copy_ functions
get the gdbarch, even if they currently don't need it.

Bye,
Ulrich

-- 
  Dr. Ulrich Weigand
  GNU Toolchain for Linux on System z and Cell BE
  Ulrich.Weigand@de.ibm.com

^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [patch 2/3] Displaced stepping for 16-bit Thumb instructions
  2011-02-28 17:37                   ` Ulrich Weigand
@ 2011-03-01  9:01                     ` Yao Qi
  2011-03-01 16:11                       ` Ulrich Weigand
  0 siblings, 1 reply; 66+ messages in thread
From: Yao Qi @ 2011-03-01  9:01 UTC (permalink / raw)
  To: Ulrich Weigand; +Cc: gdb-patches

[-- Attachment #1: Type: text/plain, Size: 1978 bytes --]

On 03/01/2011 01:07 AM, Ulrich Weigand wrote:
> OK, so at this point I think it's really not necessary to have those
> as macros in the first place.  Instead, code should just continue to
> fill in dsc->modinsn, which will shorten this patch significantly :-)
> 

I am hesitant to remove these macros, because my following patches are
using these macros.  Now, since most of my following patches should be
re-written, I am fine to remove these macros.

>> > @@ -5117,10 +5119,21 @@ displaced_read_reg (struct regcache *regs, CORE_ADDR from, int regno)
>> >  
>> >    if (regno == 15)
>> >      {
>> > +      /* Compute pipeline offset:
>> > +	 - When executing an ARM instruction, PC reads as the address of the
>> > +	 current instruction plus 8.
>> > +	 - When executing a Thumb instruction, PC reads as the address of the
>> > +	 current instruction plus 4.  */
>> > +
>> > +      if (displaced_in_arm_mode (regs))
> It would be somewhat nicer here to use dsc->is_thumb instead of re-computing
> its value.  However, the displaced_read_reg function doesn't have the dsc
> argument, which is annoying (and asymmetrical to displaced_write_reg ...).
> 
> So if you want to make the effort to change all call sites to pass in dsc,
> this would be nice, but I guess I'm also OK with doing it as above.
> 

Let us move this change to another patch.

>> > @@ -6904,23 +6919,49 @@ arm_displaced_init_closure (struct gdbarch *gdbarch, CORE_ADDR from,
>> >  			    CORE_ADDR to, struct displaced_step_closure *dsc)
>> >  {
>> >    struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
>> > -  unsigned int i;
>> > +  unsigned int i, len, offset;
>> >    enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
>> > +  int size = dsc->insn_size;
> Ah, this is wrong: it needs to be "dsc->is_thumb? 2 : 4".  Note that if the
> original instruction was 32-bit Thumb2, insn_size will be 4, but we still
> need to copy 2-byte chunks here.

Right.

-- 
Yao (齐尧)

[-- Attachment #2: 0000-handle-both-32-bit-and-16-bit.patch --]
[-- Type: text/x-patch, Size: 4954 bytes --]

gdb/
	* arm-tdep.h (struct displaced_step_closure): Add two new fields
	is_thumb and insn_size.
	* arm-tdep.c (displaced_read_reg): Adjust correct pipeline offset
	on both ARM and Thumb mode.
	(arm_process_displaced_insn): Set is_thumb and insn_size.
	(arm_displaced_init_closure): Handle both 16-bit and 32-bit.
	(arm_displaced_step_fixup): Likewise.

diff --git a/gdb/arm-tdep.c b/gdb/arm-tdep.c
index f0e9435..555a6eb 100644
--- a/gdb/arm-tdep.c
+++ b/gdb/arm-tdep.c
@@ -5106,6 +5106,8 @@ arm_adjust_breakpoint_address (struct gdbarch *gdbarch, CORE_ADDR bpaddr)
 /* NOP instruction (mov r0, r0).  */
 #define ARM_NOP				0xe1a00000
 
+static int displaced_in_arm_mode (struct regcache *regs);
+
 /* Helper for register reads for displaced stepping.  In particular, this
    returns the PC as it would be seen by the instruction at its original
    location.  */
@@ -5117,10 +5119,21 @@ displaced_read_reg (struct regcache *regs, CORE_ADDR from, int regno)
 
   if (regno == 15)
     {
+      /* Compute pipeline offset:
+	 - When executing an ARM instruction, PC reads as the address of the
+	 current instruction plus 8.
+	 - When executing a Thumb instruction, PC reads as the address of the
+	 current instruction plus 4.  */
+
+      if (displaced_in_arm_mode (regs))
+	from += 8;
+      else
+	from += 4;
+
       if (debug_displaced)
 	fprintf_unfiltered (gdb_stdlog, "displaced: read pc value %.8lx\n",
-			    (unsigned long) from + 8);
-      return (ULONGEST) from + 8;  /* Pipeline offset.  */
+			    (unsigned long) from);
+      return (ULONGEST) from;
     }
   else
     {
@@ -6861,6 +6874,8 @@ arm_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
   if (!displaced_in_arm_mode (regs))
     return thumb_process_displaced_insn (gdbarch, from, to, regs, dsc);
 
+  dsc->is_thumb = 0;
+  dsc->insn_size = 4;
   insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
   if (debug_displaced)
     fprintf_unfiltered (gdb_stdlog, "displaced: stepping insn %.8lx "
@@ -6904,23 +6919,49 @@ arm_displaced_init_closure (struct gdbarch *gdbarch, CORE_ADDR from,
 			    CORE_ADDR to, struct displaced_step_closure *dsc)
 {
   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
-  unsigned int i;
+  unsigned int i, len, offset;
   enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
+  int size = dsc->is_thumb? 2 : 4;
+  const unsigned char *bkp_insn;
 
+  offset = 0;
   /* Poke modified instruction(s).  */
   for (i = 0; i < dsc->numinsns; i++)
     {
       if (debug_displaced)
-	fprintf_unfiltered (gdb_stdlog, "displaced: writing insn %.8lx at "
-			    "%.8lx\n", (unsigned long) dsc->modinsn[i],
-			    (unsigned long) to + i * 4);
-      write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
+	{
+	  fprintf_unfiltered (gdb_stdlog, "displaced: writing insn ");
+	  if (size == 4)
+	    fprintf_unfiltered (gdb_stdlog, "%.8lx",
+				dsc->modinsn[i]);
+	  else if (size == 2)
+	    fprintf_unfiltered (gdb_stdlog, "%.4x",
+				(unsigned short)dsc->modinsn[i]);
+
+	  fprintf_unfiltered (gdb_stdlog, " at %.8lx\n",
+			      (unsigned long) to + offset);
+
+	}
+      write_memory_unsigned_integer (to + offset, size,
+				     byte_order_for_code,
 				     dsc->modinsn[i]);
+      offset += size;
+    }
+
+  /* Choose the correct breakpoint instruction.  */
+  if (dsc->is_thumb)
+    {
+      bkp_insn = tdep->thumb_breakpoint;
+      len = tdep->thumb_breakpoint_size;
+    }
+  else
+    {
+      bkp_insn = tdep->arm_breakpoint;
+      len = tdep->arm_breakpoint_size;
     }
 
   /* Put breakpoint afterwards.  */
-  write_memory (to + dsc->numinsns * 4, tdep->arm_breakpoint,
-		tdep->arm_breakpoint_size);
+  write_memory (to + offset, bkp_insn, len);
 
   if (debug_displaced)
     fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
@@ -6956,7 +6997,9 @@ arm_displaced_step_fixup (struct gdbarch *gdbarch,
     dsc->cleanup (gdbarch, regs, dsc);
 
   if (!dsc->wrote_to_pc)
-    regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, dsc->insn_addr + 4);
+    regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
+				    dsc->insn_addr + dsc->insn_size);
+
 }
 
 #include "bfd-in2.h"
diff --git a/gdb/arm-tdep.h b/gdb/arm-tdep.h
index ef02002..a2293ba 100644
--- a/gdb/arm-tdep.h
+++ b/gdb/arm-tdep.h
@@ -262,6 +262,17 @@ struct displaced_step_closure
 			  struct displaced_step_closure *dsc);
     } svc;
   } u;
+
+  /* The size of original instruction, 2 or 4.  */
+  unsigned int insn_size;
+  /* True if the original insn (and thus all replacement insns) are Thumb
+     instead of ARM.   */
+  unsigned int is_thumb;
+
+  /* The slots in the array is used in this way below,
+     - ARM instruction occupies one slot,
+     - Thumb 16 bit instruction occupies one slot,
+     - Thumb 32-bit instruction occupies *two* slots, one part for each.  */
   unsigned long modinsn[DISPLACED_MODIFIED_INSNS];
   int numinsns;
   CORE_ADDR insn_addr;

^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [patch 2/3] Displaced stepping for 16-bit Thumb instructions
  2011-03-01  9:01                     ` Yao Qi
@ 2011-03-01 16:11                       ` Ulrich Weigand
  0 siblings, 0 replies; 66+ messages in thread
From: Ulrich Weigand @ 2011-03-01 16:11 UTC (permalink / raw)
  To: Yao Qi; +Cc: gdb-patches

Yao Qi wrote:

> 	* arm-tdep.h (struct displaced_step_closure): Add two new fields
> 	is_thumb and insn_size.
> 	* arm-tdep.c (displaced_read_reg): Adjust correct pipeline offset
> 	on both ARM and Thumb mode.
> 	(arm_process_displaced_insn): Set is_thumb and insn_size.
> 	(arm_displaced_init_closure): Handle both 16-bit and 32-bit.
> 	(arm_displaced_step_fixup): Likewise.

This version is OK, thanks!

Bye,
Ulrich

-- 
  Dr. Ulrich Weigand
  GNU Toolchain for Linux on System z and Cell BE
  Ulrich.Weigand@de.ibm.com

^ permalink raw reply	[flat|nested] 66+ messages in thread

* [try 2nd 0/8] Displaced stepping for Thumb instructions
  2010-12-25 14:17 [patch 0/3] Displaced stepping for 16-bit Thumb instructions Yao Qi
                   ` (7 preceding siblings ...)
  2011-02-28  2:15 ` Displaced stepping 0004: wip: 32-bit Thumb instructions Yao Qi
@ 2011-03-24 13:49 ` Yao Qi
  2011-03-24 13:56   ` [try 2nd 1/8] Fix cleanup_branch to take Thumb into account Yao Qi
                     ` (7 more replies)
  8 siblings, 8 replies; 66+ messages in thread
From: Yao Qi @ 2011-03-24 13:49 UTC (permalink / raw)
  To: gdb-patches

This is the 2nd try for displaced stepping for Thumb instructions.
Ulrich's comments in last thread[*] are addressed.

[*] "[patch 0/3] Displaced stepping for 16-bit Thumb instructions"
http://sourceware.org/ml/gdb-patches/2010-12/msg00457.html

-- 
Yao (齐尧)

^ permalink raw reply	[flat|nested] 66+ messages in thread

* [try 2nd 1/8] Fix cleanup_branch to take Thumb into account
  2011-03-24 13:49 ` [try 2nd 0/8] Displaced stepping for " Yao Qi
@ 2011-03-24 13:56   ` Yao Qi
  2011-04-06 20:46     ` Ulrich Weigand
  2011-03-24 13:58   ` [try 2nd 2/8] Rename copy_* functions to arm_copy_* Yao Qi
                     ` (6 subsequent siblings)
  7 siblings, 1 reply; 66+ messages in thread
From: Yao Qi @ 2011-03-24 13:56 UTC (permalink / raw)
  To: gdb-patches

[-- Attachment #1: Type: text/plain, Size: 214 bytes --]

When writing the LR register in cleanup_branch, Thumb mode is not
considered, so `pc - 4' is not the value of LR.  Since insn_size and
insn_addr are already available in `dsc', it is easier to calculate LR.

-- 
Yao (齐尧)

[-- Attachment #2: 0001-generic-fix-to-cleanup_branch.patch --]
[-- Type: text/x-patch, Size: 1150 bytes --]

2011-03-24  Yao Qi  <yao@codesourcery.com>

	* arm-tdep.c (cleanup_branch): Set a correct return address in
	LR for ARM and Thumb.

---
 gdb/arm-tdep.c |   12 ++++++++++--
 1 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/gdb/arm-tdep.c b/gdb/arm-tdep.c
index 6e5f2ab..2ebafad 100644
--- a/gdb/arm-tdep.c
+++ b/gdb/arm-tdep.c
@@ -5485,8 +5485,16 @@ cleanup_branch (struct gdbarch *gdbarch, struct regcache *regs,
 
   if (dsc->u.branch.link)
     {
-      ULONGEST pc = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
-      displaced_write_reg (regs, dsc, ARM_LR_REGNUM, pc - 4, CANNOT_WRITE_PC);
+      /* The value of LR should be the next insn of current one.  In order
+       not to confuse logic hanlding later insn `bx lr', if current insn mode
+       is Thumb, the bit 0 of LR value should be set to 1.  */
+      ULONGEST next_insn_addr = dsc->insn_addr + dsc->insn_size;
+
+      if (dsc->is_thumb)
+	next_insn_addr |= 0x1;
+
+      displaced_write_reg (regs, dsc, ARM_LR_REGNUM, next_insn_addr,
+			   CANNOT_WRITE_PC);
     }
 
   displaced_write_reg (regs, dsc, ARM_PC_REGNUM, dsc->u.branch.dest, write_pc);
-- 
1.7.0.4


^ permalink raw reply	[flat|nested] 66+ messages in thread

* [try 2nd 2/8] Rename copy_* functions to arm_copy_*
  2011-03-24 13:49 ` [try 2nd 0/8] Displaced stepping for " Yao Qi
  2011-03-24 13:56   ` [try 2nd 1/8] Fix cleanup_branch to take Thumb into account Yao Qi
@ 2011-03-24 13:58   ` Yao Qi
  2011-04-06 20:51     ` Ulrich Weigand
  2011-03-24 14:01   ` [try 2nd 3/8] Refactor copy_svc_os Yao Qi
                     ` (5 subsequent siblings)
  7 siblings, 1 reply; 66+ messages in thread
From: Yao Qi @ 2011-03-24 13:58 UTC (permalink / raw)
  To: gdb-patches

[-- Attachment #1: Type: text/plain, Size: 322 bytes --]

The copy functions for ARM and Thumb instructions should be different,
so some copy_* functions are renamed to arm_copy_* functions.  Each
copy function contains some ARM/Thumb-independent parts, such as
installing the cleanup helper, storing registers, etc.  These parts
are moved into install_* functions.

-- 
Yao (齐尧)

[-- Attachment #2: 0002-refactor-rename-functions.patch --]
[-- Type: text/x-patch, Size: 39517 bytes --]

2011-03-24  Yao Qi  <yao@codesourcery.com>

	* gdb/arm-tdep.c (copy_unmodified): Rename to ...
	(arm_copy_unmodified): .. this.  New.
	(copy_preload): Move common part to ...
	(install_preload): .. this.  New.
	(arm_copy_preload): New.
	(copy_preload_reg): Move common part to ...
	(install_preload_reg): ... this.  New.
	(arm_copy_preload_reg): New.
	(copy_b_bl_blx): Move common part to ...
	(install_b_bl_blx): .. this.  New.
	(arm_copy_b_bl_blx): New.
	(copy_bx_blx_reg): Move common part to ...
	(install_bx_blx_reg): ... this. New.
	(arm_copy_bx_blx_reg): New.
	(copy_alu_reg): Move common part to ...
	(install_alu_reg): ... this.  New.
	(arm_copy_alu_reg): New.
	(copy_alu_shifted_reg): Move common part to ...
	(install_alu_shifted_reg): ... this.  New.
	(copy_ldr_str_ldrb_strb): Move common part to ...
	(install_ldr_str_ldrb_strb): ... this.  New.
	(arm_copy_ldr_str_ldrb_strb): New.
	(copy_svc): Delete.
	(arm_copy_svc): Renamed from copy_svc.
	(copy_copro_load_store, copy_alu_imm): update callers.
	(copy_extra_ld_st, copy_block_xfer): Likewise.
	(decode_misc_memhint_neon, decode_unconditional): Likewise.
	(decode_miscellaneous, decode_dp_misc): Likewise.
	(decode_ld_st_word_ubyte, decode_media): Likewise.
	(decode_b_bl_ldmstm, decode_ext_reg_ld_st): Likewise.
	(decode_svc_copro): Likewise.
	* gdb/arm-tdep.h (struct displaced_step_closure): Add two structures
	`alu_reg' and `alu_shifted_reg'.

---
 gdb/arm-tdep.c |  495 +++++++++++++++++++++++++++++++------------------------
 gdb/arm-tdep.h |   14 ++
 2 files changed, 293 insertions(+), 216 deletions(-)

diff --git a/gdb/arm-tdep.c b/gdb/arm-tdep.c
index 2ebafad..af81b1e 100644
--- a/gdb/arm-tdep.c
+++ b/gdb/arm-tdep.c
@@ -5319,7 +5319,7 @@ insn_references_pc (uint32_t insn, uint32_t bitmask)
    matter what address they are executed at: in those cases, use this.  */
 
 static int
-copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
+arm_copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
 		 const char *iname, struct displaced_step_closure *dsc)
 {
   if (debug_displaced)
@@ -5343,20 +5343,11 @@ cleanup_preload (struct gdbarch *gdbarch,
     displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
 }
 
-static int
-copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
-	      struct displaced_step_closure *dsc)
+static void
+install_preload (struct gdbarch *gdbarch, struct regcache *regs,
+		 struct displaced_step_closure *dsc, unsigned int rn)
 {
-  unsigned int rn = bits (insn, 16, 19);
   ULONGEST rn_val;
-
-  if (!insn_references_pc (insn, 0x000f0000ul))
-    return copy_unmodified (gdbarch, insn, "preload", dsc);
-
-  if (debug_displaced)
-    fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
-			(unsigned long) insn);
-
   /* Preload instructions:
 
      {pli/pld} [rn, #+/-imm]
@@ -5366,34 +5357,40 @@ copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
   dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
   rn_val = displaced_read_reg (regs, dsc, rn);
   displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
-
   dsc->u.preload.immed = 1;
 
-  dsc->modinsn[0] = insn & 0xfff0ffff;
-
   dsc->cleanup = &cleanup_preload;
-
-  return 0;
 }
 
-/* Preload instructions with register offset.  */
-
 static int
-copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn,
-		  struct regcache *regs,
+arm_copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
 		  struct displaced_step_closure *dsc)
 {
   unsigned int rn = bits (insn, 16, 19);
-  unsigned int rm = bits (insn, 0, 3);
-  ULONGEST rn_val, rm_val;
 
-  if (!insn_references_pc (insn, 0x000f000ful))
-    return copy_unmodified (gdbarch, insn, "preload reg", dsc);
+  if (!insn_references_pc (insn, 0x000f0000ul))
+    return arm_copy_unmodified (gdbarch, insn, "preload", dsc);
 
   if (debug_displaced)
     fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
 			(unsigned long) insn);
 
+  dsc->modinsn[0] = insn & 0xfff0ffff;
+
+  install_preload (gdbarch, regs, dsc, rn);
+
+  return 0;
+}
+
+/* Preload instructions with register offset.  */
+
+static void
+install_preload_reg(struct gdbarch *gdbarch, struct regcache *regs,
+		    struct displaced_step_closure *dsc, unsigned int rn,
+		    unsigned int rm)
+{
+  ULONGEST rn_val, rm_val;
+
   /* Preload register-offset instructions:
 
      {pli/pld} [rn, rm {, shift}]
@@ -5406,13 +5403,30 @@ copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn,
   rm_val = displaced_read_reg (regs, dsc, rm);
   displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
   displaced_write_reg (regs, dsc, 1, rm_val, CANNOT_WRITE_PC);
-
   dsc->u.preload.immed = 0;
 
-  dsc->modinsn[0] = (insn & 0xfff0fff0) | 0x1;
-
   dsc->cleanup = &cleanup_preload;
+}
+
+static int
+arm_copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn,
+		      struct regcache *regs,
+		      struct displaced_step_closure *dsc)
+{
+  unsigned int rn = bits (insn, 16, 19);
+  unsigned int rm = bits (insn, 0, 3);
+
 
+  if (!insn_references_pc (insn, 0x000f000ful))
+    return arm_copy_unmodified (gdbarch, insn, "preload reg", dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
+			(unsigned long) insn);
+
+  dsc->modinsn[0] = (insn & 0xfff0fff0) | 0x1;
+
+  install_preload_reg (gdbarch, regs, dsc, rn, rm);
   return 0;
 }
 
@@ -5440,7 +5454,7 @@ copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
   ULONGEST rn_val;
 
   if (!insn_references_pc (insn, 0x000f0000ul))
-    return copy_unmodified (gdbarch, insn, "copro load/store", dsc);
+    return arm_copy_unmodified (gdbarch, insn, "copro load/store", dsc);
 
   if (debug_displaced)
     fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
@@ -5503,28 +5517,39 @@ cleanup_branch (struct gdbarch *gdbarch, struct regcache *regs,
 /* Copy B/BL/BLX instructions with immediate destinations.  */
 
 static int
-copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
-	       struct regcache *regs, struct displaced_step_closure *dsc)
+install_b_bl_blx (struct gdbarch *gdbarch, unsigned int cond, int exchange,
+		  int link, long offset, struct regcache *regs,
+		  struct displaced_step_closure *dsc)
+{
+  /* Implement "BL<cond> <label>" as:
+
+     Preparation: cond <- instruction condition
+     Insn: mov r0, r0  (nop)
+     Cleanup: if (condition true) { r14 <- pc; pc <- label }.
+
+     B<cond> similar, but don't set r14 in cleanup.  */
+
+  dsc->u.branch.cond = cond;
+  dsc->u.branch.link = link;
+  dsc->u.branch.exchange = exchange;
+
+  dsc->cleanup = &cleanup_branch;
+
+  return 0;
+}
+static int
+arm_copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
+		   struct regcache *regs, struct displaced_step_closure *dsc)
 {
   unsigned int cond = bits (insn, 28, 31);
   int exchange = (cond == 0xf);
   int link = exchange || bit (insn, 24);
-  CORE_ADDR from = dsc->insn_addr;
   long offset;
 
   if (debug_displaced)
     fprintf_unfiltered (gdb_stdlog, "displaced: copying %s immediate insn "
 			"%.8lx\n", (exchange) ? "blx" : (link) ? "bl" : "b",
 			(unsigned long) insn);
-
-  /* Implement "BL<cond> <label>" as:
-
-     Preparation: cond <- instruction condition
-     Insn: mov r0, r0  (nop)
-     Cleanup: if (condition true) { r14 <- pc; pc <- label }.
-
-     B<cond> similar, but don't set r14 in cleanup.  */
-
   if (exchange)
     /* For BLX, set bit 0 of the destination.  The cleanup_branch function will
        then arrange the switch into Thumb mode.  */
@@ -5535,35 +5560,18 @@ copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
   if (bit (offset, 25))
     offset = offset | ~0x3ffffff;
 
-  dsc->u.branch.cond = cond;
-  dsc->u.branch.link = link;
-  dsc->u.branch.exchange = exchange;
-  dsc->u.branch.dest = from + 8 + offset;
-
+  dsc->u.branch.dest = dsc->insn_addr + 8 + offset;
   dsc->modinsn[0] = ARM_NOP;
 
-  dsc->cleanup = &cleanup_branch;
-
-  return 0;
+  return install_b_bl_blx (gdbarch, cond, exchange, link, offset, regs, dsc);
 }
 
 /* Copy BX/BLX with register-specified destinations.  */
 
 static int
-copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
-		 struct regcache *regs, struct displaced_step_closure *dsc)
+install_bx_blx_reg (struct gdbarch *gdbarch, unsigned int rm,
+		    struct regcache *regs, struct displaced_step_closure *dsc)
 {
-  unsigned int cond = bits (insn, 28, 31);
-  /* BX:  x12xxx1x
-     BLX: x12xxx3x.  */
-  int link = bit (insn, 5);
-  unsigned int rm = bits (insn, 0, 3);
-
-  if (debug_displaced)
-    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s register insn "
-			"%.8lx\n", (link) ? "blx" : "bx",
-			(unsigned long) insn);
-
   /* Implement {BX,BLX}<cond> <reg>" as:
 
      Preparation: cond <- instruction condition
@@ -5573,18 +5581,34 @@ copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
      Don't set r14 in cleanup for BX.  */
 
   dsc->u.branch.dest = displaced_read_reg (regs, dsc, rm);
-
-  dsc->u.branch.cond = cond;
-  dsc->u.branch.link = link;
   dsc->u.branch.exchange = 1;
 
-  dsc->modinsn[0] = ARM_NOP;
-
   dsc->cleanup = &cleanup_branch;
 
   return 0;
 }
 
+static int
+arm_copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
+		     struct regcache *regs, struct displaced_step_closure *dsc)
+{
+  unsigned int cond = bits (insn, 28, 31);
+  /* BX:  x12xxx1x
+     BLX: x12xxx3x.  */
+  int link = bit (insn, 5);
+  unsigned int rm = bits (insn, 0, 3);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx",
+			(unsigned long) insn);
+
+  dsc->u.branch.link = link;
+  dsc->u.branch.cond = cond;
+  dsc->modinsn[0] = ARM_NOP;
+
+  return install_bx_blx_reg (gdbarch, rm, regs, dsc);
+}
+
 /* Copy/cleanup arithmetic/logic instruction with immediate RHS.  */
 
 static void
@@ -5608,7 +5632,7 @@ copy_alu_imm (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
   ULONGEST rd_val, rn_val;
 
   if (!insn_references_pc (insn, 0x000ff000ul))
-    return copy_unmodified (gdbarch, insn, "ALU immediate", dsc);
+    return arm_copy_unmodified (gdbarch, insn, "ALU immediate", dsc);
 
   if (debug_displaced)
     fprintf_unfiltered (gdb_stdlog, "displaced: copying immediate %s insn "
@@ -5663,23 +5687,11 @@ cleanup_alu_reg (struct gdbarch *gdbarch,
 }
 
 static int
-copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
-	      struct displaced_step_closure *dsc)
+install_alu_reg (struct gdbarch *gdbarch, struct regcache *regs,
+		 struct displaced_step_closure *dsc)
 {
-  unsigned int rn = bits (insn, 16, 19);
-  unsigned int rm = bits (insn, 0, 3);
-  unsigned int rd = bits (insn, 12, 15);
-  unsigned int op = bits (insn, 21, 24);
-  int is_mov = (op == 0xd);
   ULONGEST rd_val, rn_val, rm_val;
 
-  if (!insn_references_pc (insn, 0x000ff00ful))
-    return copy_unmodified (gdbarch, insn, "ALU reg", dsc);
-
-  if (debug_displaced)
-    fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.8lx\n",
-			is_mov ? "move" : "ALU", (unsigned long) insn);
-
   /* Instruction is of form:
 
      <op><cond> rd, [rn,] rm [, <shift>]
@@ -5695,24 +5707,45 @@ copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
   dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
   dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
   dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
-  rd_val = displaced_read_reg (regs, dsc, rd);
-  rn_val = displaced_read_reg (regs, dsc, rn);
-  rm_val = displaced_read_reg (regs, dsc, rm);
+  rd_val = displaced_read_reg (regs, dsc, dsc->rd);
+  rn_val = displaced_read_reg (regs, dsc, dsc->u.alu_reg.rn);
+  rm_val = displaced_read_reg (regs, dsc, dsc->u.alu_reg.rm);
+
   displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
   displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
   displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
-  dsc->rd = rd;
-
-  if (is_mov)
-    dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x2;
-  else
-    dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x10002;
 
   dsc->cleanup = &cleanup_alu_reg;
 
   return 0;
 }
 
+static int
+arm_copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
+		  struct displaced_step_closure *dsc)
+{
+  unsigned int op = bits (insn, 21, 24);
+  int is_mov = (op == 0xd);
+
+  dsc->u.alu_reg.rn = bits (insn, 16, 19); /* Rn */
+  dsc->u.alu_reg.rm = bits (insn, 0, 3); /* Rm */
+  dsc->rd = bits (insn, 12, 15); /* Rd */
+
+  if (!insn_references_pc (insn, 0x000ff00ful))
+    return arm_copy_unmodified (gdbarch, insn, "ALU reg", dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.8lx\n",
+			is_mov ? "move" : "ALU", (unsigned long) insn);
+
+   if (is_mov)
+     dsc->modinsn[0] = ((insn & 0xfff00ff0) | 0x2);
+   else
+     dsc->modinsn[0] = ((insn & 0xfff00ff0) | 0x10002);
+
+   return install_alu_reg (gdbarch, regs, dsc);
+}
+
 /* Cleanup/copy arithmetic/logic insns with shifted register RHS.  */
 
 static void
@@ -5729,27 +5762,13 @@ cleanup_alu_shifted_reg (struct gdbarch *gdbarch,
   displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
 }
 
-static int
-copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
-		      struct regcache *regs,
-		      struct displaced_step_closure *dsc)
+static void
+install_alu_shifted_reg (struct gdbarch *gdbarch, struct regcache *regs,
+			 struct displaced_step_closure *dsc)
 {
-  unsigned int rn = bits (insn, 16, 19);
-  unsigned int rm = bits (insn, 0, 3);
-  unsigned int rd = bits (insn, 12, 15);
-  unsigned int rs = bits (insn, 8, 11);
-  unsigned int op = bits (insn, 21, 24);
-  int is_mov = (op == 0xd), i;
+  int i;
   ULONGEST rd_val, rn_val, rm_val, rs_val;
 
-  if (!insn_references_pc (insn, 0x000fff0ful))
-    return copy_unmodified (gdbarch, insn, "ALU shifted reg", dsc);
-
-  if (debug_displaced)
-    fprintf_unfiltered (gdb_stdlog, "displaced: copying shifted reg %s insn "
-			"%.8lx\n", is_mov ? "move" : "ALU",
-			(unsigned long) insn);
-
   /* Instruction is of form:
 
      <op><cond> rd, [rn,] rm, <shift> rs
@@ -5767,22 +5786,45 @@ copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
   for (i = 0; i < 4; i++)
     dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
 
-  rd_val = displaced_read_reg (regs, dsc, rd);
-  rn_val = displaced_read_reg (regs, dsc, rn);
-  rm_val = displaced_read_reg (regs, dsc, rm);
-  rs_val = displaced_read_reg (regs, dsc, rs);
+  rd_val = displaced_read_reg (regs, dsc, dsc->rd);
+  rn_val = displaced_read_reg (regs, dsc, dsc->u.alu_shifted_reg.rn);
+  rm_val = displaced_read_reg (regs, dsc, dsc->u.alu_shifted_reg.rm);
+  rs_val = displaced_read_reg (regs, dsc, dsc->u.alu_shifted_reg.rs);
   displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
   displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
   displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
   displaced_write_reg (regs, dsc, 3, rs_val, CANNOT_WRITE_PC);
-  dsc->rd = rd;
+
+  dsc->cleanup = &cleanup_alu_shifted_reg;
+}
+
+static int
+copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
+		      struct regcache *regs,
+		      struct displaced_step_closure *dsc)
+{
+  unsigned int op = bits (insn, 21, 24);
+  int is_mov = (op == 0xd);
+
+  if (!insn_references_pc (insn, 0x000fff0ful))
+    return arm_copy_unmodified (gdbarch, insn, "ALU shifted reg", dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying shifted reg %s insn "
+			"%.8lx\n", is_mov ? "move" : "ALU",
+			(unsigned long) insn);
+
+  dsc->u.alu_shifted_reg.rn = bits (insn, 16, 19);
+  dsc->u.alu_shifted_reg.rm = bits (insn, 0, 3);
+  dsc->u.alu_shifted_reg.rs = bits (insn, 8, 11);
+  dsc->rd = bits (insn, 12, 15);
 
   if (is_mov)
     dsc->modinsn[0] = (insn & 0xfff000f0) | 0x302;
   else
     dsc->modinsn[0] = (insn & 0xfff000f0) | 0x10302;
 
-  dsc->cleanup = &cleanup_alu_shifted_reg;
+  install_alu_shifted_reg (gdbarch, regs, dsc);
 
   return 0;
 }
@@ -5857,7 +5899,7 @@ copy_extra_ld_st (struct gdbarch *gdbarch, uint32_t insn, int unpriveleged,
   ULONGEST rt_val, rt_val2 = 0, rn_val, rm_val = 0;
 
   if (!insn_references_pc (insn, 0x000ff00ful))
-    return copy_unmodified (gdbarch, insn, "extra load/store", dsc);
+    return arm_copy_unmodified (gdbarch, insn, "extra load/store", dsc);
 
   if (debug_displaced)
     fprintf_unfiltered (gdb_stdlog, "displaced: copying %sextra load/store "
@@ -5916,26 +5958,13 @@ copy_extra_ld_st (struct gdbarch *gdbarch, uint32_t insn, int unpriveleged,
 /* Copy byte/word loads and stores.  */
 
 static int
-copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
-			struct regcache *regs,
-			struct displaced_step_closure *dsc, int load, int byte,
-			int usermode)
+install_ldr_str_ldrb_strb (struct gdbarch *gdbarch, struct regcache *regs,
+			   struct displaced_step_closure *dsc, int load,
+			   int byte, int usermode, int writeback, int rm,
+			   int immed)
 {
-  int immed = !bit (insn, 25);
-  unsigned int rt = bits (insn, 12, 15);
-  unsigned int rn = bits (insn, 16, 19);
-  unsigned int rm = bits (insn, 0, 3);  /* Only valid if !immed.  */
   ULONGEST rt_val, rn_val, rm_val = 0;
 
-  if (!insn_references_pc (insn, 0x000ff00ful))
-    return copy_unmodified (gdbarch, insn, "load/store", dsc);
-
-  if (debug_displaced)
-    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s%s insn %.8lx\n",
-			load ? (byte ? "ldrb" : "ldr")
-			     : (byte ? "strb" : "str"), usermode ? "t" : "",
-			(unsigned long) insn);
-
   dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
   dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
   if (!immed)
@@ -5943,8 +5972,8 @@ copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
   if (!load)
     dsc->tmp[4] = displaced_read_reg (regs, dsc, 4);
 
-  rt_val = displaced_read_reg (regs, dsc, rt);
-  rn_val = displaced_read_reg (regs, dsc, rn);
+  rt_val = displaced_read_reg (regs, dsc, dsc->rd);
+  rn_val = displaced_read_reg (regs, dsc, dsc->u.ldst.rn);
   if (!immed)
     rm_val = displaced_read_reg (regs, dsc, rm);
 
@@ -5953,11 +5982,10 @@ copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
   if (!immed)
     displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
 
-  dsc->rd = rt;
   dsc->u.ldst.xfersize = byte ? 1 : 4;
-  dsc->u.ldst.rn = rn;
+
   dsc->u.ldst.immed = immed;
-  dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
+  dsc->u.ldst.writeback = writeback;
 
   /* To write PC we can do:
 
@@ -5980,6 +6008,40 @@ copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
      of this can be found in Section "Saving from r15" in
      http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0204g/Cihbjifh.html */
 
+  dsc->cleanup = load ? &cleanup_load : &cleanup_store;
+
+  return 0;
+}
+
+static int
+arm_copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
+			    struct regcache *regs,
+			    struct displaced_step_closure *dsc,
+			    int load, int byte, int usermode)
+{
+  int immed = !bit (insn, 25);
+  unsigned int rt = bits (insn, 12, 15);
+  unsigned int rn = bits (insn, 16, 19);
+  unsigned int rm = bits (insn, 0, 3);  /* Only valid if !immed.  */
+
+  if (!insn_references_pc (insn, 0x000ff00ful))
+    return arm_copy_unmodified (gdbarch, insn, "load/store", dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog,
+			"displaced: copying %s%s r%d [r%d] insn %.8lx\n",
+			load ? (byte ? "ldrb" : "ldr")
+			     : (byte ? "strb" : "str"), usermode ? "t" : "",
+			rt, rn,
+			(unsigned long) insn);
+
+  dsc->rd = rt;
+  dsc->u.ldst.rn = rn;
+
+  install_ldr_str_ldrb_strb (gdbarch, regs, dsc, load, byte, usermode,
+			     (bit (insn, 24) == 0 || bit (insn, 21) != 0),
+			     rm, immed);
+
   if (load || rt != ARM_PC_REGNUM)
     {
       dsc->u.ldst.restore_r4 = 0;
@@ -6244,13 +6306,13 @@ copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
   /* Block transfers which don't mention PC can be run directly
      out-of-line.  */
   if (rn != ARM_PC_REGNUM && (insn & 0x8000) == 0)
-    return copy_unmodified (gdbarch, insn, "ldm/stm", dsc);
+    return arm_copy_unmodified (gdbarch, insn, "ldm/stm", dsc);
 
   if (rn == ARM_PC_REGNUM)
     {
       warning (_("displaced: Unpredictable LDM or STM with "
 		 "base register r15"));
-      return copy_unmodified (gdbarch, insn, "unpredictable ldm/stm", dsc);
+      return arm_copy_unmodified (gdbarch, insn, "unpredictable ldm/stm", dsc);
     }
 
   if (debug_displaced)
@@ -6271,7 +6333,7 @@ copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
 
   if (load)
     {
-      if ((insn & 0xffff) == 0xffff)
+      if (dsc->u.block.regmask == 0xffff)
 	{
 	  /* LDM with a fully-populated register list.  This case is
 	     particularly tricky.  Implement for now by fully emulating the
@@ -6288,7 +6350,7 @@ copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
 	     rewriting the list of registers to be transferred into a
 	     contiguous chunk r0...rX before doing the transfer, then shuffling
 	     registers into the correct places in the cleanup routine.  */
-	  unsigned int regmask = insn & 0xffff;
+	  unsigned int regmask = dsc->u.block.regmask;
 	  unsigned int num_in_list = bitcount (regmask), new_regmask, bit = 1;
 	  unsigned int to = 0, from = 0, i, new_rn;
 
@@ -6320,7 +6382,7 @@ copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
 	    fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
 				"{..., pc}: original reg list %.4x, modified "
 				"list %.4x\n"), rn, writeback ? "!" : "",
-				(int) insn & 0xffff, new_regmask);
+				(int) dsc->u.block.regmask, new_regmask);
 
 	  dsc->modinsn[0] = (insn & ~0xffff) | (new_regmask & 0xffff);
 
@@ -6360,8 +6422,8 @@ cleanup_svc (struct gdbarch *gdbarch, struct regcache *regs,
 }
 
 static int
-copy_svc (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
-	  struct regcache *regs, struct displaced_step_closure *dsc)
+arm_copy_svc (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
+	      struct regcache *regs, struct displaced_step_closure *dsc)
 {
   /* Allow OS-specific code to override SVC handling.  */
   if (dsc->u.svc.copy_svc_os)
@@ -6428,33 +6490,34 @@ decode_misc_memhint_neon (struct gdbarch *gdbarch, uint32_t insn,
   unsigned int rn = bits (insn, 16, 19);
 
   if (op1 == 0x10 && (op2 & 0x2) == 0x0 && (rn & 0xe) == 0x0)
-    return copy_unmodified (gdbarch, insn, "cps", dsc);
+    return arm_copy_unmodified (gdbarch, insn, "cps", dsc);
   else if (op1 == 0x10 && op2 == 0x0 && (rn & 0xe) == 0x1)
-    return copy_unmodified (gdbarch, insn, "setend", dsc);
+    return arm_copy_unmodified (gdbarch, insn, "setend", dsc);
   else if ((op1 & 0x60) == 0x20)
-    return copy_unmodified (gdbarch, insn, "neon dataproc", dsc);
+    return arm_copy_unmodified (gdbarch, insn, "neon dataproc", dsc);
   else if ((op1 & 0x71) == 0x40)
-    return copy_unmodified (gdbarch, insn, "neon elt/struct load/store", dsc);
+    return arm_copy_unmodified (gdbarch, insn, "neon elt/struct load/store",
+				dsc);
   else if ((op1 & 0x77) == 0x41)
-    return copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
+    return arm_copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
   else if ((op1 & 0x77) == 0x45)
-    return copy_preload (gdbarch, insn, regs, dsc);  /* pli.  */
+    return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pli.  */
   else if ((op1 & 0x77) == 0x51)
     {
       if (rn != 0xf)
-	return copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
+	return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
       else
 	return copy_unpred (gdbarch, insn, dsc);
     }
   else if ((op1 & 0x77) == 0x55)
-    return copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
+    return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
   else if (op1 == 0x57)
     switch (op2)
       {
-      case 0x1: return copy_unmodified (gdbarch, insn, "clrex", dsc);
-      case 0x4: return copy_unmodified (gdbarch, insn, "dsb", dsc);
-      case 0x5: return copy_unmodified (gdbarch, insn, "dmb", dsc);
-      case 0x6: return copy_unmodified (gdbarch, insn, "isb", dsc);
+      case 0x1: return arm_copy_unmodified (gdbarch, insn, "clrex", dsc);
+      case 0x4: return arm_copy_unmodified (gdbarch, insn, "dsb", dsc);
+      case 0x5: return arm_copy_unmodified (gdbarch, insn, "dmb", dsc);
+      case 0x6: return arm_copy_unmodified (gdbarch, insn, "isb", dsc);
       default: return copy_unpred (gdbarch, insn, dsc);
       }
   else if ((op1 & 0x63) == 0x43)
@@ -6463,12 +6526,12 @@ decode_misc_memhint_neon (struct gdbarch *gdbarch, uint32_t insn,
     switch (op1 & ~0x80)
       {
       case 0x61:
-	return copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
       case 0x65:
-	return copy_preload_reg (gdbarch, insn, regs, dsc);  /* pli reg.  */
+	return arm_copy_preload_reg (gdbarch, insn, regs, dsc);  /* pli reg.  */
       case 0x71: case 0x75:
         /* pld/pldw reg.  */
-	return copy_preload_reg (gdbarch, insn, regs, dsc);
+	return arm_copy_preload_reg (gdbarch, insn, regs, dsc);
       case 0x63: case 0x67: case 0x73: case 0x77:
 	return copy_unpred (gdbarch, insn, dsc);
       default:
@@ -6489,13 +6552,13 @@ decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
   else switch (((insn & 0x7000000) >> 23) | ((insn & 0x100000) >> 20))
     {
     case 0x0: case 0x2:
-      return copy_unmodified (gdbarch, insn, "srs", dsc);
+      return arm_copy_unmodified (gdbarch, insn, "srs", dsc);
 
     case 0x1: case 0x3:
-      return copy_unmodified (gdbarch, insn, "rfe", dsc);
+      return arm_copy_unmodified (gdbarch, insn, "rfe", dsc);
 
     case 0x4: case 0x5: case 0x6: case 0x7:
-      return copy_b_bl_blx (gdbarch, insn, regs, dsc);
+      return arm_copy_b_bl_blx (gdbarch, insn, regs, dsc);
 
     case 0x8:
       switch ((insn & 0xe00000) >> 21)
@@ -6505,7 +6568,7 @@ decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
 	  return copy_copro_load_store (gdbarch, insn, regs, dsc);
 
 	case 0x2:
-	  return copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
+	  return arm_copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
 
 	default:
 	  return copy_undef (gdbarch, insn, dsc);
@@ -6522,7 +6585,7 @@ decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
 			: copy_copro_load_store (gdbarch, insn, regs, dsc);
 
 	  case 0x2:
-	    return copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
+	    return arm_copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
 
 	  case 0x4: case 0x5: case 0x6: case 0x7:
 	    /* ldc/ldc2 lit (undefined for rn != pc).  */
@@ -6535,7 +6598,7 @@ decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
       }
 
     case 0xa:
-      return copy_unmodified (gdbarch, insn, "stc/stc2", dsc);
+      return arm_copy_unmodified (gdbarch, insn, "stc/stc2", dsc);
 
     case 0xb:
       if (bits (insn, 16, 19) == 0xf)
@@ -6546,15 +6609,15 @@ decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
 
     case 0xc:
       if (bit (insn, 4))
-	return copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
       else
-	return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
 
     case 0xd:
       if (bit (insn, 4))
-	return copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
       else
-	return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
 
     default:
       return copy_undef (gdbarch, insn, dsc);
@@ -6575,39 +6638,39 @@ decode_miscellaneous (struct gdbarch *gdbarch, uint32_t insn,
   switch (op2)
     {
     case 0x0:
-      return copy_unmodified (gdbarch, insn, "mrs/msr", dsc);
+      return arm_copy_unmodified (gdbarch, insn, "mrs/msr", dsc);
 
     case 0x1:
       if (op == 0x1)  /* bx.  */
-	return copy_bx_blx_reg (gdbarch, insn, regs, dsc);
+	return arm_copy_bx_blx_reg (gdbarch, insn, regs, dsc);
       else if (op == 0x3)
-	return copy_unmodified (gdbarch, insn, "clz", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "clz", dsc);
       else
 	return copy_undef (gdbarch, insn, dsc);
 
     case 0x2:
       if (op == 0x1)
         /* Not really supported.  */
-	return copy_unmodified (gdbarch, insn, "bxj", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "bxj", dsc);
       else
 	return copy_undef (gdbarch, insn, dsc);
 
     case 0x3:
       if (op == 0x1)
-	return copy_bx_blx_reg (gdbarch, insn,
+	return arm_copy_bx_blx_reg (gdbarch, insn,
 				regs, dsc);  /* blx register.  */
       else
 	return copy_undef (gdbarch, insn, dsc);
 
     case 0x5:
-      return copy_unmodified (gdbarch, insn, "saturating add/sub", dsc);
+      return arm_copy_unmodified (gdbarch, insn, "saturating add/sub", dsc);
 
     case 0x7:
       if (op == 0x1)
-	return copy_unmodified (gdbarch, insn, "bkpt", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "bkpt", dsc);
       else if (op == 0x3)
         /* Not really supported.  */
-	return copy_unmodified (gdbarch, insn, "smc", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "smc", dsc);
 
     default:
       return copy_undef (gdbarch, insn, dsc);
@@ -6622,13 +6685,13 @@ decode_dp_misc (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
     switch (bits (insn, 20, 24))
       {
       case 0x10:
-	return copy_unmodified (gdbarch, insn, "movw", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "movw", dsc);
 
       case 0x14:
-	return copy_unmodified (gdbarch, insn, "movt", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "movt", dsc);
 
       case 0x12: case 0x16:
-	return copy_unmodified (gdbarch, insn, "msr imm", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "msr imm", dsc);
 
       default:
 	return copy_alu_imm (gdbarch, insn, regs, dsc);
@@ -6638,17 +6701,17 @@ decode_dp_misc (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
       uint32_t op1 = bits (insn, 20, 24), op2 = bits (insn, 4, 7);
 
       if ((op1 & 0x19) != 0x10 && (op2 & 0x1) == 0x0)
-	return copy_alu_reg (gdbarch, insn, regs, dsc);
+	return arm_copy_alu_reg (gdbarch, insn, regs, dsc);
       else if ((op1 & 0x19) != 0x10 && (op2 & 0x9) == 0x1)
 	return copy_alu_shifted_reg (gdbarch, insn, regs, dsc);
       else if ((op1 & 0x19) == 0x10 && (op2 & 0x8) == 0x0)
 	return decode_miscellaneous (gdbarch, insn, regs, dsc);
       else if ((op1 & 0x19) == 0x10 && (op2 & 0x9) == 0x8)
-	return copy_unmodified (gdbarch, insn, "halfword mul/mla", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "halfword mul/mla", dsc);
       else if ((op1 & 0x10) == 0x00 && op2 == 0x9)
-	return copy_unmodified (gdbarch, insn, "mul/mla", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "mul/mla", dsc);
       else if ((op1 & 0x10) == 0x10 && op2 == 0x9)
-	return copy_unmodified (gdbarch, insn, "synch", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "synch", dsc);
       else if (op2 == 0xb || (op2 & 0xd) == 0xd)
 	/* 2nd arg means "unpriveleged".  */
 	return copy_extra_ld_st (gdbarch, insn, (op1 & 0x12) == 0x02, regs,
@@ -6670,28 +6733,28 @@ decode_ld_st_word_ubyte (struct gdbarch *gdbarch, uint32_t insn,
 
   if ((!a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02)
       || (a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02 && !b))
-    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 0);
+    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 0);
   else if ((!a && (op1 & 0x17) == 0x02)
 	    || (a && (op1 & 0x17) == 0x02 && !b))
-    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 1);
+    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 1);
   else if ((!a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03)
 	    || (a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03 && !b))
-    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 0);
+    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 0);
   else if ((!a && (op1 & 0x17) == 0x03)
 	   || (a && (op1 & 0x17) == 0x03 && !b))
-    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 1);
+    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 1);
   else if ((!a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06)
 	    || (a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06 && !b))
-    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 0);
+    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 0);
   else if ((!a && (op1 & 0x17) == 0x06)
 	   || (a && (op1 & 0x17) == 0x06 && !b))
-    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 1);
+    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 1);
   else if ((!a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07)
 	   || (a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07 && !b))
-    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 0);
+    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 0);
   else if ((!a && (op1 & 0x17) == 0x07)
 	   || (a && (op1 & 0x17) == 0x07 && !b))
-    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 1);
+    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 1);
 
   /* Should be unreachable.  */
   return 1;
@@ -6704,30 +6767,30 @@ decode_media (struct gdbarch *gdbarch, uint32_t insn,
   switch (bits (insn, 20, 24))
     {
     case 0x00: case 0x01: case 0x02: case 0x03:
-      return copy_unmodified (gdbarch, insn, "parallel add/sub signed", dsc);
+      return arm_copy_unmodified (gdbarch, insn, "parallel add/sub signed", dsc);
 
     case 0x04: case 0x05: case 0x06: case 0x07:
-      return copy_unmodified (gdbarch, insn, "parallel add/sub unsigned", dsc);
+      return arm_copy_unmodified (gdbarch, insn, "parallel add/sub unsigned", dsc);
 
     case 0x08: case 0x09: case 0x0a: case 0x0b:
     case 0x0c: case 0x0d: case 0x0e: case 0x0f:
-      return copy_unmodified (gdbarch, insn,
+      return arm_copy_unmodified (gdbarch, insn,
 			      "decode/pack/unpack/saturate/reverse", dsc);
 
     case 0x18:
       if (bits (insn, 5, 7) == 0)  /* op2.  */
 	 {
 	  if (bits (insn, 12, 15) == 0xf)
-	    return copy_unmodified (gdbarch, insn, "usad8", dsc);
+	    return arm_copy_unmodified (gdbarch, insn, "usad8", dsc);
 	  else
-	    return copy_unmodified (gdbarch, insn, "usada8", dsc);
+	    return arm_copy_unmodified (gdbarch, insn, "usada8", dsc);
 	}
       else
 	 return copy_undef (gdbarch, insn, dsc);
 
     case 0x1a: case 0x1b:
       if (bits (insn, 5, 6) == 0x2)  /* op2[1:0].  */
-	return copy_unmodified (gdbarch, insn, "sbfx", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "sbfx", dsc);
       else
 	return copy_undef (gdbarch, insn, dsc);
 
@@ -6735,16 +6798,16 @@ decode_media (struct gdbarch *gdbarch, uint32_t insn,
       if (bits (insn, 5, 6) == 0x0)  /* op2[1:0].  */
 	 {
 	  if (bits (insn, 0, 3) == 0xf)
-	    return copy_unmodified (gdbarch, insn, "bfc", dsc);
+	    return arm_copy_unmodified (gdbarch, insn, "bfc", dsc);
 	  else
-	    return copy_unmodified (gdbarch, insn, "bfi", dsc);
+	    return arm_copy_unmodified (gdbarch, insn, "bfi", dsc);
 	}
       else
 	return copy_undef (gdbarch, insn, dsc);
 
     case 0x1e: case 0x1f:
       if (bits (insn, 5, 6) == 0x2)  /* op2[1:0].  */
-	return copy_unmodified (gdbarch, insn, "ubfx", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "ubfx", dsc);
       else
 	return copy_undef (gdbarch, insn, dsc);
     }
@@ -6758,7 +6821,7 @@ decode_b_bl_ldmstm (struct gdbarch *gdbarch, int32_t insn,
 		    struct regcache *regs, struct displaced_step_closure *dsc)
 {
   if (bit (insn, 25))
-    return copy_b_bl_blx (gdbarch, insn, regs, dsc);
+    return arm_copy_b_bl_blx (gdbarch, insn, regs, dsc);
   else
     return copy_block_xfer (gdbarch, insn, regs, dsc);
 }
@@ -6773,15 +6836,15 @@ decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
   switch (opcode)
     {
     case 0x04: case 0x05:  /* VFP/Neon mrrc/mcrr.  */
-      return copy_unmodified (gdbarch, insn, "vfp/neon mrrc/mcrr", dsc);
+      return arm_copy_unmodified (gdbarch, insn, "vfp/neon mrrc/mcrr", dsc);
 
     case 0x08: case 0x0a: case 0x0c: case 0x0e:
     case 0x12: case 0x16:
-      return copy_unmodified (gdbarch, insn, "vfp/neon vstm/vpush", dsc);
+      return arm_copy_unmodified (gdbarch, insn, "vfp/neon vstm/vpush", dsc);
 
     case 0x09: case 0x0b: case 0x0d: case 0x0f:
     case 0x13: case 0x17:
-      return copy_unmodified (gdbarch, insn, "vfp/neon vldm/vpop", dsc);
+      return arm_copy_unmodified (gdbarch, insn, "vfp/neon vldm/vpop", dsc);
 
     case 0x10: case 0x14: case 0x18: case 0x1c:  /* vstr.  */
     case 0x11: case 0x15: case 0x19: case 0x1d:  /* vldr.  */
@@ -6816,26 +6879,26 @@ decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
   else if ((op1 & 0x3e) == 0x00)
     return copy_undef (gdbarch, insn, dsc);
   else if ((op1 & 0x3e) == 0x04 && (coproc & 0xe) == 0xa)
-    return copy_unmodified (gdbarch, insn, "neon 64bit xfer", dsc);
+    return arm_copy_unmodified (gdbarch, insn, "neon 64bit xfer", dsc);
   else if (op1 == 0x04 && (coproc & 0xe) != 0xa)
-    return copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
+    return arm_copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
   else if (op1 == 0x05 && (coproc & 0xe) != 0xa)
-    return copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
+    return arm_copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
   else if ((op1 & 0x30) == 0x20 && !op)
     {
       if ((coproc & 0xe) == 0xa)
-	return copy_unmodified (gdbarch, insn, "vfp dataproc", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "vfp dataproc", dsc);
       else
-	return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
     }
   else if ((op1 & 0x30) == 0x20 && op)
-    return copy_unmodified (gdbarch, insn, "neon 8/16/32 bit xfer", dsc);
+    return arm_copy_unmodified (gdbarch, insn, "neon 8/16/32 bit xfer", dsc);
   else if ((op1 & 0x31) == 0x20 && op && (coproc & 0xe) != 0xa)
-    return copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
+    return arm_copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
   else if ((op1 & 0x31) == 0x21 && op && (coproc & 0xe) != 0xa)
-    return copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
+    return arm_copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
   else if ((op1 & 0x30) == 0x30)
-    return copy_svc (gdbarch, insn, to, regs, dsc);
+    return arm_copy_svc (gdbarch, insn, to, regs, dsc);
   else
     return copy_undef (gdbarch, insn, dsc);  /* Possibly unreachable.  */
 }
diff --git a/gdb/arm-tdep.h b/gdb/arm-tdep.h
index ebd5e6e..3b1fce9 100644
--- a/gdb/arm-tdep.h
+++ b/gdb/arm-tdep.h
@@ -250,6 +250,20 @@ struct displaced_step_closure
 
     struct
     {
+      unsigned int rn;
+      unsigned int rm;
+      unsigned int rd;
+    } alu_reg;
+
+    struct
+    {
+      unsigned int rn;
+      unsigned int rm;
+      unsigned int rs;
+    } alu_shifted_reg;
+
+    struct
+    {
       unsigned int immed : 1;
     } preload;
 
-- 
1.7.0.4


^ permalink raw reply	[flat|nested] 66+ messages in thread

* [try 2nd 3/8] Refactor copy_svc_os
  2011-03-24 13:49 ` [try 2nd 0/8] Displaced stepping for " Yao Qi
  2011-03-24 13:56   ` [try 2nd 1/8] Fix cleanup_branch to take Thumb into account Yao Qi
  2011-03-24 13:58   ` [try 2nd 2/8] Rename copy_* functions to arm_copy_* Yao Qi
@ 2011-03-24 14:01   ` Yao Qi
  2011-04-06 20:55     ` Ulrich Weigand
  2011-03-24 14:05   ` [try 2nd 4/8] Displaced stepping for Thumb 16-bit insn Yao Qi
                     ` (4 subsequent siblings)
  7 siblings, 1 reply; 66+ messages in thread
From: Yao Qi @ 2011-03-24 14:01 UTC (permalink / raw)
  To: gdb-patches

[-- Attachment #1: Type: text/plain, Size: 467 bytes --]

copy_svc is a little bit different from the other copy helpers, because
OS-specific handling is involved.  arm_linux_copy_svc itself has nothing
to do with ARM/Thumb mode; it should work equally well under both modes.
However, one of its parameters, INSN, makes it hard to call from both
modes.  Fortunately, INSN is *not* used inside arm_linux_copy_svc except
for printing a debug message.  We can remove the parameters INSN and TO,
so that the function can be called from either mode.

-- 
Yao (齐尧)

[-- Attachment #2: 0003-refactor-copy_svc_os-to-handle-multiple-modes.patch --]
[-- Type: text/x-patch, Size: 4343 bytes --]

2011-03-24  Yao Qi  <yao@codesourcery.com>

	* gdb/arm-linux-tdep.c (arm_linux_copy_svc): Remove parameters INSN
	and TO.
	* gdb/arm-tdep.c (cleanup_svc): Handle variable instruction size.
	(arm_copy_svc): Remove parameters INSN and TO.
	(decode_svc_copro): Update caller.
	* gdb/arm-tdep.h (struct displaced_step_closure): Remove parameters
	from function pointer `copy_svc_os'.

---
 gdb/arm-linux-tdep.c |   10 ++--------
 gdb/arm-tdep.c       |   20 ++++++++++++--------
 gdb/arm-tdep.h       |    3 +--
 3 files changed, 15 insertions(+), 18 deletions(-)

diff --git a/gdb/arm-linux-tdep.c b/gdb/arm-linux-tdep.c
index 2f3109c..e44ba25 100644
--- a/gdb/arm-linux-tdep.c
+++ b/gdb/arm-linux-tdep.c
@@ -798,19 +798,14 @@ arm_linux_cleanup_svc (struct gdbarch *gdbarch,
 }
 
 static int
-arm_linux_copy_svc (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
-		    struct regcache *regs, struct displaced_step_closure *dsc)
+arm_linux_copy_svc (struct gdbarch *gdbarch, struct regcache *regs,
+		    struct displaced_step_closure *dsc)
 {
   CORE_ADDR return_to = 0;
 
   struct frame_info *frame;
   unsigned int svc_number = displaced_read_reg (regs, dsc, 7);
   int is_sigreturn = 0;
-
-  if (debug_displaced)
-    fprintf_unfiltered (gdb_stdlog, "displaced: copying Linux svc insn %.8lx\n",
-			(unsigned long) insn);
-
   frame = get_current_frame ();
 
   is_sigreturn = arm_linux_sigreturn_return_addr(frame, svc_number,
@@ -864,7 +859,6 @@ arm_linux_copy_svc (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
      Cleanup: if pc lands in scratch space, pc <- insn_addr + 4
               else leave pc alone.  */
 
-  dsc->modinsn[0] = insn;
 
   dsc->cleanup = &arm_linux_cleanup_svc;
   /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
diff --git a/gdb/arm-tdep.c b/gdb/arm-tdep.c
index af81b1e..3348dcb 100644
--- a/gdb/arm-tdep.c
+++ b/gdb/arm-tdep.c
@@ -6412,7 +6412,7 @@ static void
 cleanup_svc (struct gdbarch *gdbarch, struct regcache *regs,
 	     struct displaced_step_closure *dsc)
 {
-  CORE_ADDR resume_addr = dsc->insn_addr + 4;
+  CORE_ADDR resume_addr = dsc->insn_addr + dsc->insn_size;
 
   if (debug_displaced)
     fprintf_unfiltered (gdb_stdlog, "displaced: cleanup for svc, resume at "
@@ -6422,12 +6422,9 @@ cleanup_svc (struct gdbarch *gdbarch, struct regcache *regs,
 }
 
 static int
-arm_copy_svc (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
+arm_copy_svc (struct gdbarch *gdbarch, uint32_t insn,
 	      struct regcache *regs, struct displaced_step_closure *dsc)
 {
-  /* Allow OS-specific code to override SVC handling.  */
-  if (dsc->u.svc.copy_svc_os)
-    return dsc->u.svc.copy_svc_os (gdbarch, insn, to, regs, dsc);
 
   if (debug_displaced)
     fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.8lx\n",
@@ -6439,12 +6436,19 @@ arm_copy_svc (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
 
   dsc->modinsn[0] = insn;
 
-  dsc->cleanup = &cleanup_svc;
   /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
      instruction.  */
   dsc->wrote_to_pc = 1;
 
-  return 0;
+  /* Allow OS-specific code to override SVC handling.  */
+  if (dsc->u.svc.copy_svc_os)
+    return dsc->u.svc.copy_svc_os (gdbarch, regs, dsc);
+  else
+    {
+      dsc->cleanup = &cleanup_svc;
+      return 0;
+    }
+
 }
 
 /* Copy undefined instructions.  */
@@ -6898,7 +6902,7 @@ decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
   else if ((op1 & 0x31) == 0x21 && op && (coproc & 0xe) != 0xa)
     return arm_copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
   else if ((op1 & 0x30) == 0x30)
-    return arm_copy_svc (gdbarch, insn, to, regs, dsc);
+    return arm_copy_svc (gdbarch, insn, regs, dsc);
   else
     return copy_undef (gdbarch, insn, dsc);  /* Possibly unreachable.  */
 }
diff --git a/gdb/arm-tdep.h b/gdb/arm-tdep.h
index 3b1fce9..3f5e7d6 100644
--- a/gdb/arm-tdep.h
+++ b/gdb/arm-tdep.h
@@ -271,8 +271,7 @@ struct displaced_step_closure
     {
       /* If non-NULL, override generic SVC handling (e.g. for a particular
          OS).  */
-      int (*copy_svc_os) (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
-			  struct regcache *regs,
+      int (*copy_svc_os) (struct gdbarch *gdbarch, struct regcache *regs,
 			  struct displaced_step_closure *dsc);
     } svc;
   } u;
-- 
1.7.0.4


^ permalink raw reply	[flat|nested] 66+ messages in thread

* [try 2nd 5/8] Displaced stepping for Thumb 32-bit insns
  2011-03-24 13:49 ` [try 2nd 0/8] Displaced stepping for " Yao Qi
                     ` (3 preceding siblings ...)
  2011-03-24 14:05   ` [try 2nd 4/8] Displaced stepping for Thumb 16-bit insn Yao Qi
@ 2011-03-24 14:05   ` Yao Qi
  2011-05-05 13:25     ` Yao Qi
  2011-03-24 14:06   ` [try 2nd 6/8] Rename some functions to arm_* Yao Qi
                     ` (2 subsequent siblings)
  7 siblings, 1 reply; 66+ messages in thread
From: Yao Qi @ 2011-03-24 14:05 UTC (permalink / raw)
  To: gdb-patches

[-- Attachment #1: Type: text/plain, Size: 74 bytes --]

Displaced stepping for 32-bit Thumb instructions.

-- 
Yao (齐尧)

[-- Attachment #2: 0005-thumb-32bit.patch --]
[-- Type: text/x-patch, Size: 24236 bytes --]

2011-03-24  Yao Qi  <yao@codesourcery.com>

	* gdb/arm-tdep.c (thumb_copy_unmodified_32bit): New.
	(thumb2_copy_preload): New.
	(thumb2_copy_preload_reg): New.
	(thumb2_copy_copro_load_store): New.
	(thumb2_copy_b_bl_blx): New.
	(thumb2_copy_alu_reg): New.
	(thumb2_copy_ldr_str_ldrb_strb): New.
	(thumb2_copy_block_xfer): New.
	(thumb_32bit_copy_undef): New.
	(thumb2_decode_ext_reg_ld_st): New.
	(thumb2_decode_svc_copro): New.
	(thumb_decode_pc_relative_32bit): New.
	(decode_thumb_32bit_ld_mem_hints): New.
	(thumb_process_displaced_32bit_insn): Process 32-bit Thumb insn.
---
 gdb/arm-tdep.c |  701 +++++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 files changed, 695 insertions(+), 6 deletions(-)

diff --git a/gdb/arm-tdep.c b/gdb/arm-tdep.c
index a356451..6ba7b5b 100644
--- a/gdb/arm-tdep.c
+++ b/gdb/arm-tdep.c
@@ -5333,6 +5333,23 @@ arm_copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
   return 0;
 }
 
+static int
+thumb_copy_unmodified_32bit (struct gdbarch *gdbarch, unsigned int insn1,
+			     unsigned int insn2, const char *iname,
+			     struct displaced_step_closure *dsc)
+{
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x %.4x, "
+			"opcode/class '%s' unmodified\n", insn1, insn2,
+			iname);
+
+  dsc->modinsn[0] = insn1;
+  dsc->modinsn[1] = insn2;
+  dsc->numinsns = 2;
+
+  return 0;
+}
+
 /* Copy 16-bit Thumb(Thumb and 16-bit Thumb-2) instruction without any
    modification.  */
 static int
@@ -5400,6 +5417,27 @@ arm_copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
   return 0;
 }
 
+static int
+thumb2_copy_preload (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
+		     struct regcache *regs, struct displaced_step_closure *dsc)
+{
+  unsigned int rn = bits (insn1, 0, 3);
+  if (rn == ARM_PC_REGNUM)
+    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "preload", dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.4x%.4x\n",
+			insn1, insn2);
+
+  dsc->modinsn[0] = insn1 & 0xfff0;
+  dsc->modinsn[1] = insn2;
+  dsc->numinsns = 2;
+
+  install_preload (gdbarch, regs, dsc, rn);
+
+  return 0;
+}
+
 /* Preload instructions with register offset.  */
 
 static void
@@ -5448,6 +5486,31 @@ arm_copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn,
   return 0;
 }
 
+static int
+thumb2_copy_preload_reg (struct gdbarch *gdbarch, uint16_t insn1,
+			 uint16_t insn2, struct regcache *regs,
+			 struct displaced_step_closure *dsc)
+{
+  unsigned int rn = bits (insn1, 0, 3);
+  unsigned int rm = bits (insn2, 0, 3);
+
+
+  if (rn != ARM_PC_REGNUM && rm != ARM_PC_REGNUM)
+    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "preload reg",
+					dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.4x%.4x\n",
+			insn1, insn1);
+
+  dsc->modinsn[0] = insn1 & 0xfff0;
+  dsc->modinsn[1] = (insn2 & 0xfff0) | 0x1;
+  dsc->numinsns = 2;
+
+  install_preload_reg (gdbarch, regs, dsc, rn, rm);
+  return 0;
+}
+
 /* Copy/cleanup coprocessor load and store instructions.  */
 
 static void
@@ -5500,6 +5563,33 @@ copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
   return 0;
 }
 
+static int
+thumb2_copy_copro_load_store (struct gdbarch *gdbarch, uint16_t insn1,
+			      uint16_t insn2, struct regcache *regs,
+			      struct displaced_step_closure *dsc)
+{
+  unsigned int rn = bits (insn1, 0, 3);
+
+  if (rn == ARM_PC_REGNUM)
+    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+					"copro load/store", dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
+			"load/store insn %.4x%.4x\n", insn1, insn2);
+
+  dsc->u.ldst.writeback = bit (insn1, 9);
+  dsc->u.ldst.rn = rn;
+
+  dsc->modinsn[0] = insn1 & 0xfff0;
+  dsc->modinsn[1] = insn2;
+  dsc->numinsns = 2;
+
+  install_copy_copro_load_store (gdbarch, regs, dsc);
+
+  return 0;
+}
+
 /* Clean up branch instructions (actually perform the branch, by setting
    PC).  */
 
@@ -5584,6 +5674,58 @@ arm_copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
   return install_b_bl_blx (gdbarch, cond, exchange, link, offset, regs, dsc);
 }
 
+static int
+thumb2_copy_b_bl_blx (struct gdbarch *gdbarch, unsigned short insn1,
+		      unsigned short insn2, struct regcache *regs,
+		      struct displaced_step_closure *dsc)
+{
+  int link = bit (insn2, 14);
+  int exchange = link && !bit (insn2, 12);
+  int cond = INST_AL;
+  long offset =0;
+  int j1 = bit (insn2, 13);
+  int j2 = bit (insn2, 11);
+  int s = sbits (insn1, 10, 10);
+  int i1 = !(j1 ^ bit (insn1, 10));
+  int i2 = !(j2 ^ bit (insn1, 10));
+
+  if (!link && !exchange) /* B */
+    {
+      cond = bits (insn1, 6, 9);
+      offset = (bits (insn2, 0, 10) << 1);
+      if (bit (insn2, 12)) /* Encoding T4 */
+	{
+	  offset |= (bits (insn1, 0, 9) << 12)
+	    | (i2 << 22)
+	    | (i1 << 23)
+	    | (s << 24);
+	}
+      else /* Encoding T3 */
+	offset |= (bits (insn1, 0, 5) << 12)
+	  | (j1 << 18)
+	  | (j2 << 19)
+	  | (s << 20);
+    }
+  else
+    {
+      offset = (bits (insn1, 0, 9) << 12);
+      offset |= ((i2 << 22) | (i1 << 23) | (s << 24));
+      offset |= exchange ?
+	(bits (insn2, 1, 10) << 2) : (bits (insn2, 0, 10) << 1);
+    }
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s immediate insn "
+			"%.4x %.4x with offset %.8lx\n",
+			(exchange) ? "blx" : "bl",
+			insn1, insn2, offset);
+
+  dsc->u.branch.dest = dsc->insn_addr + 4 + offset;
+  dsc->modinsn[0] = THUMB_NOP;
+
+  return install_b_bl_blx (gdbarch, cond, exchange, 1, offset, regs, dsc);
+}
+
 /* Copy B Thumb instructions.  */
 static int
 thumb_copy_b (struct gdbarch *gdbarch, unsigned short insn,
@@ -5849,6 +5991,40 @@ thumb_copy_alu_reg (struct gdbarch *gdbarch, unsigned short insn,
   return install_alu_reg (gdbarch, regs, dsc);
 }
 
+static int
+thumb2_copy_alu_reg (struct gdbarch *gdbarch, unsigned short insn1,
+		     unsigned short insn2, struct regcache *regs,
+		     struct displaced_step_closure *dsc)
+{
+  unsigned int op2 = bits (insn2, 4, 7);
+  int is_mov = (op2 == 0x0);
+
+  dsc->u.alu_reg.rn = bits (insn1, 0, 3); /* Rn */
+  dsc->u.alu_reg.rm = bits (insn2, 0, 3); /* Rm */
+  dsc->rd = bits (insn2, 8, 11); /* Rd */
+
+  /* In Thumb-2, rn, rm and rd can't be r15.  */
+  if (dsc->u.alu_reg.rn != ARM_PC_REGNUM
+      && dsc->u.alu_reg.rm != ARM_PC_REGNUM
+      && dsc->rd != ARM_PC_REGNUM)
+    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "ALU reg", dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.4x%.4x\n",
+			"ALU", insn1, insn2);
+
+  if (is_mov)
+    dsc->modinsn[0] = insn1;
+  else
+    dsc->modinsn[0] = ((insn1 & 0xfff0) | 0x1);
+
+  dsc->modinsn[1] = ((insn2 & 0xf0f0) | 0x2);
+  dsc->numinsns = 2;
+
+  return install_alu_reg (gdbarch, regs, dsc);
+
+}
+
 /* Cleanup/copy arithmetic/logic insns with shifted register RHS.  */
 
 static void
@@ -6117,6 +6293,69 @@ install_ldr_str_ldrb_strb (struct gdbarch *gdbarch, struct regcache *regs,
 }
 
 static int
+thumb2_copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, unsigned short insn1,
+			       unsigned short insn2,  struct regcache *regs,
+			       struct displaced_step_closure *dsc,
+			       int load, int byte, int usermode, int writeback)
+{
+  int immed = !bit (insn1, 9);
+  unsigned int rt = bits (insn2, 12, 15);
+  unsigned int rn = bits (insn1, 0, 3);
+  unsigned int rm = bits (insn2, 0, 3);  /* Only valid if !immed.  */
+
+  if (rt != ARM_PC_REGNUM && rn != ARM_PC_REGNUM && rm != ARM_PC_REGNUM)
+    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "load/store",
+					dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog,
+			"displaced: copying %s%s r%d [r%d] insn %.4x%.4x\n",
+			load ? (byte ? "ldrb" : "ldr")
+			     : (byte ? "strb" : "str"), usermode ? "t" : "",
+			rt, rn, insn1, insn2);
+
+  dsc->rd = rt;
+  dsc->u.ldst.rn = rn;
+
+  install_ldr_str_ldrb_strb (gdbarch, regs, dsc, load, byte, usermode,
+			  writeback, rm, immed);
+
+  if (load || rt != ARM_PC_REGNUM)
+    {
+      dsc->u.ldst.restore_r4 = 0;
+
+      if (immed)
+	/* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
+	   ->
+	   {ldr,str}[b]<cond> r0, [r2, #imm].  */
+	{
+	  dsc->modinsn[0] = (insn1 & 0xfff0) | 0x2;
+	  dsc->modinsn[1] = insn2 & 0x0fff;
+	}
+      else
+	/* {ldr,str}[b]<cond> rt, [rn, rm], etc.
+	   ->
+	   {ldr,str}[b]<cond> r0, [r2, r3].  */
+	{
+	  dsc->modinsn[0] = (insn1 & 0xfff0) | 0x2;
+	  dsc->modinsn[1] = (insn2 & 0x0ff0) | 0x3;
+	}
+
+      dsc->numinsns = 2;
+    }
+  else
+    {
+      /* In Thumb-32 instructions, the behavior is unpredictable when Rt is
+	 PC, while the behavior is undefined when Rn is PC.  Shortly, neither
+	 Rt nor Rn can be PC.  */
+
+      gdb_assert (0);
+    }
+
+  return 0;
+}
+
+static int
 arm_copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
 			    struct regcache *regs,
 			    struct displaced_step_closure *dsc,
@@ -6508,6 +6747,87 @@ copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
   return 0;
 }
 
+static int
+thumb2_copy_block_xfer (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
+			struct regcache *regs,
+			struct displaced_step_closure *dsc)
+{
+  int rn = bits (insn1, 0, 3);
+  int load = bit (insn1, 4);
+  int writeback = bit (insn1, 5);
+
+  /* Block transfers which don't mention PC can be run directly
+     out-of-line.  */
+  if (rn != ARM_PC_REGNUM && (insn2 & 0x8000) == 0)
+    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "ldm/stm", dsc);
+
+  if (rn == ARM_PC_REGNUM)
+    {
+      warning (_("displaced: Unpredictable LDM or STM with "
+		 "base register r15"));
+      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+					  "unpredictable ldm/stm", dsc);
+    }
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying block transfer insn "
+			"%.4x%.4x\n", insn1, insn2);
+
+  /* Clear bit 13, since it should be always zero.  */
+  dsc->u.block.regmask = (insn2 & 0xdfff);
+  dsc->u.block.rn = rn;
+
+  dsc->u.block.load = bit (insn1, 4);
+  dsc->u.block.user = bit (insn1, 6);
+  dsc->u.block.increment = bit (insn1, 7);
+  dsc->u.block.before = bit (insn1, 8);
+  dsc->u.block.writeback = writeback;
+  dsc->u.block.cond = INST_AL;
+
+  if (load)
+    {
+      if (dsc->u.block.regmask == 0xffff)
+	{
+	  /* This branch cannot be reached.  */
+	  gdb_assert (0);
+	}
+      else
+	{
+	  unsigned int regmask = dsc->u.block.regmask;
+	  unsigned int num_in_list = bitcount (regmask), new_regmask, bit = 1;
+	  unsigned int to = 0, from = 0, i, new_rn;
+
+	  for (i = 0; i < num_in_list; i++)
+	    dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
+
+	  if (writeback)
+	    insn1 &= ~(1 << 5);
+
+	  new_regmask = (1 << num_in_list) - 1;
+
+	  if (debug_displaced)
+	    fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
+				"{..., pc}: original reg list %.4x, modified "
+				"list %.4x\n"), rn, writeback ? "!" : "",
+				(int) dsc->u.block.regmask, new_regmask);
+
+	  dsc->modinsn[0] = insn1;
+	  dsc->modinsn[1] = (new_regmask & 0xffff);
+	  dsc->numinsns = 2;
+
+	  dsc->cleanup = &cleanup_block_load_pc;
+	}
+    }
+  else
+    {
+      dsc->modinsn[0] = insn1;
+      dsc->modinsn[1] = insn2;
+      dsc->numinsns = 2;
+      dsc->cleanup = &cleanup_block_store_pc;
+    }
+  return 0;
+}
+
 /* Cleanup/copy SVC (SWI) instructions.  These two functions are overridden
    for Linux, where some SVC instructions must be treated specially.  */
 
@@ -6599,6 +6919,23 @@ copy_undef (struct gdbarch *gdbarch, uint32_t insn,
   return 0;
 }
 
+static int
+thumb_32bit_copy_undef (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
+                       struct displaced_step_closure *dsc)
+{
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying undefined insn "
+                       "%.4x %.4x\n", (unsigned short) insn1,
+                       (unsigned short) insn2);
+
+  dsc->modinsn[0] = insn1;
+  dsc->modinsn[1] = insn2;
+  dsc->numinsns = 2;
+
+  return 0;
+}
+
 /* Copy unpredictable instructions.  */
 
 static int
@@ -6993,6 +7330,43 @@ decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
   return 1;
 }
 
+/* Decode extension register load/store.  Exactly the same as
+   arm_decode_ext_reg_ld_st.  */
+
+static int
+thumb2_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint16_t insn1,
+			     uint16_t insn2,  struct regcache *regs,
+			     struct displaced_step_closure *dsc)
+{
+  unsigned int opcode = bits (insn1, 4, 8);
+
+  switch (opcode)
+    {
+    case 0x04: case 0x05:
+      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+					  "vfp/neon vmov", dsc);
+
+    case 0x08: case 0x0c: /* 01x00 */
+    case 0x0a: case 0x0e: /* 01x10 */
+    case 0x12: case 0x16: /* 10x10 */
+      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+					  "vfp/neon vstm/vpush", dsc);
+
+    case 0x09: case 0x0d: /* 01x01 */
+    case 0x0b: case 0x0f: /* 01x11 */
+    case 0x13: case 0x17: /* 10x11 */
+      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+					  "vfp/neon vldm/vpop", dsc);
+
+    case 0x10: case 0x14: case 0x18: case 0x1c:  /* vstr.  */
+    case 0x11: case 0x15: case 0x19: case 0x1d:  /* vldr.  */
+      return thumb2_copy_copro_load_store (gdbarch, insn1, insn2, regs, dsc);
+    }
+
+  /* Should be unreachable.  */
+  return 1;
+}
+
 static int
 decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
 		  struct regcache *regs, struct displaced_step_closure *dsc)
@@ -7040,7 +7414,105 @@ decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
 }
 
 static int
-copy_pc_relative (struct regcache *regs, struct displaced_step_closure *dsc,
+thumb2_decode_svc_copro (struct gdbarch *gdbarch, uint16_t insn1,
+			 uint16_t insn2, struct regcache *regs,
+			 struct displaced_step_closure *dsc)
+{
+  unsigned int coproc = bits (insn2, 8, 11);
+  unsigned int op1 = bits (insn1, 4, 9);
+  unsigned int bit_5_8 = bits (insn1, 5, 8);
+  unsigned int bit_9 = bit (insn1, 9);
+  unsigned int bit_4 = bit (insn1, 4);
+  unsigned int rn = bits (insn1, 0, 3);
+
+  if (bit_9 == 0)
+    {
+      if (bit_5_8 == 2)
+	{
+	  if ((coproc & 0xe) == 0xa) /* 64-bit xfer.  */
+	    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						"neon 64bit xfer", dsc);
+	  else
+	    {
+	      if (bit_4) /* MRRC/MRRC2 */
+		return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						    "mrrc/mrrc2", dsc);
+	      else /* MCRR/MCRR2 */
+		return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						    "mcrr/mcrr2", dsc);
+	    }
+	}
+      else if (bit_5_8 == 0) /* UNDEFINED.  */
+	return thumb_32bit_copy_undef (gdbarch, insn1, insn2, dsc);
+      else
+	{
+	   /* coproc is 101x.  SIMD/VFP, ext registers load/store.  */
+	  if ((coproc & 0xe) == 0xa)
+	    return thumb2_decode_ext_reg_ld_st (gdbarch, insn1, insn2, regs,
+						dsc);
+	  else /* coproc is not 101x.  */
+	    {
+	      if (bit_4 == 0) /* STC/STC2.  */
+		return thumb2_copy_copro_load_store (gdbarch, insn1, insn2,
+						     regs, dsc);
+	      else
+		{
+		  if (rn == 0xf) /* LDC/LDC2 literal.  */
+		    return thumb2_copy_copro_load_store (gdbarch, insn1, insn2,
+							 regs, dsc);
+		  else /* LDC/LDC2 immediate.  */
+		    return thumb2_copy_copro_load_store (gdbarch, insn1, insn2,
+							 regs, dsc);
+		}
+	    }
+	}
+    }
+  else
+    {
+      unsigned int op = bit (insn2, 4);
+      unsigned int bit_8 = bit (insn1, 8);
+
+      if (bit_8) /* Advanced SIMD */
+	return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+					    "neon", dsc);
+      else
+	{
+	  /* coproc is 101x.  */
+	  if ((coproc & 0xe) == 0xa)
+	    {
+	      if (op) /* 8,16,32-bit xfer.  */
+		return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						    "neon 8/16/32 bit xfer",
+						    dsc);
+	      else /* VFP data processing.  */
+		return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						    "vfp dataproc", dsc);
+	    }
+	  else
+	    {
+	      if (op)
+		{
+		  if (bit_4) /* MRC/MRC2 */
+		    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+							"mrc/mrc2", dsc);
+		  else /* MCR/MCR2 */
+		     return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+							"mcr/mcr2", dsc);
+		}
+	      else /* CDP/CDP 2 */
+		return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						    "cdp/cdp2", dsc);
+	    }
+	}
+    }
+
+
+
+  return 0;
+}
+
+static int
+decode_pc_relative (struct regcache *regs, struct displaced_step_closure *dsc,
 		  int rd, unsigned int imm, int is_32bit)
 {
   int val;
@@ -7086,7 +7558,27 @@ thumb_decode_pc_relative_16bit (struct gdbarch *gdbarch, unsigned short insn,
 			"displaced: copying thumb adr r%d, #%d insn %.4x\n",
 			rd, imm8, insn);
 
-  return copy_pc_relative (regs, dsc, rd, imm8, 0);
+  return decode_pc_relative (regs, dsc, rd, imm8, 0);
+}
+
+static int
+thumb_decode_pc_relative_32bit (struct gdbarch *gdbarch, unsigned short insn1,
+				unsigned short insn2, struct regcache *regs,
+				struct displaced_step_closure *dsc)
+{
+  unsigned int rd = bits (insn2, 8, 11);
+  /* Since the immediate has the same encoding in both ADR and ADDS, we
+     simply extract the raw immediate encoding rather than computing the
+     immediate value.  When generating the ADDS instruction, we can simply
+     OR the immediate into ADDS.  */
+  unsigned int imm = (insn2 & 0x70ff) | (bit (insn1, 10) << 26);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog,
+			"displaced: copying thumb adr r%d, #%d insn %.4x%.4x\n",
+			rd, imm, insn1, insn2);
+
+  return decode_pc_relative (regs, dsc, rd, imm, 1);
 }
 
 static int
@@ -7348,12 +7840,209 @@ thumb_process_displaced_16bit_insn (struct gdbarch *gdbarch,
 		    _("thumb_process_displaced_insn: Instruction decode error"));
 }
 
+static int
+decode_thumb_32bit_ld_mem_hints (struct gdbarch *gdbarch,
+				 unsigned short insn1, unsigned short insn2,
+				 struct regcache *regs,
+				 struct displaced_step_closure *dsc)
+{
+  int rd = bits (insn2, 12, 15);
+  int user_mode = (bits (insn2, 8, 11) == 0xe);
+  int err = 0;
+  int writeback = 0;
+
+  switch (bits (insn1, 5, 6))
+    {
+    case 0: /* Load byte and memory hints */
+      if (rd == 0xf) /* PLD/PLI */
+	{
+	  if (bits (insn2, 6, 11))
+	    return thumb2_copy_preload (gdbarch, insn1, insn2, regs, dsc);
+	  else
+	    return thumb2_copy_preload_reg (gdbarch, insn1, insn2, regs, dsc);
+	}
+      else
+	{
+	  int op1 = bits (insn1, 7, 8);
+
+	  if ((op1 == 0 || op1 == 2) && bit (insn2, 11))
+	    writeback = bit (insn2, 8);
+
+	  return thumb2_copy_ldr_str_ldrb_strb (gdbarch, insn1, insn2, regs,
+					       dsc, 1, 1, user_mode, writeback);
+	}
+
+      break;
+    case 1: /* Load halfword and memory hints */
+      if (rd == 0xf) /* PLD{W} and Unalloc memory hint */
+	{
+	  if (bits (insn2, 6, 11))
+	    return thumb2_copy_preload (gdbarch, insn1, insn2, regs, dsc);
+	  else
+	    return thumb2_copy_preload_reg (gdbarch, insn1, insn2, regs, dsc);
+	}
+      else
+	{
+	  int op1 = bits (insn1, 7, 8);
+
+	  if ((op1 == 0 || op1 == 2) && bit (insn2, 11))
+	    writeback = bit (insn2, 8);
+	  return thumb2_copy_ldr_str_ldrb_strb (gdbarch, insn1, insn2, regs,
+					       dsc, 1, 0, user_mode, writeback);
+	}
+      break;
+    case 2: /* Load word */
+      {
+	int op1 = bits (insn1, 7, 8);
+
+	  if ((op1 == 0 || op1 == 2) && bit (insn2, 11))
+	    writeback = bit (insn2, 8);
+
+	return thumb2_copy_ldr_str_ldrb_strb (gdbarch, insn1, insn2, regs, dsc,
+					     1, 0, user_mode, writeback);
+	break;
+      }
+    default:
+      return thumb_32bit_copy_undef (gdbarch, insn1, insn2, dsc);
+      break;
+    }
+  return 0;
+}
+
 static void
 thumb_process_displaced_32bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
 				    uint16_t insn2, struct regcache *regs,
 				    struct displaced_step_closure *dsc)
 {
-  error (_("Displaced stepping is only supported in ARM mode and Thumb 16bit instructions"));
+  int err = 0;
+  unsigned short op = bit (insn2, 15);
+  unsigned int op1 = bits (insn1, 11, 12);
+
+  switch (op1)
+    {
+    case 1:
+      {
+	switch (bits (insn1, 9, 10))
+	  {
+	  case 0: /* load/store multiple */
+	    switch (bits (insn1, 7, 8))
+	      {
+	      case 0: case 3: /* SRS, RFE */
+		err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						   "srs/rfe", dsc);
+		break;
+	      case 1: case 2: /* LDM/STM/PUSH/POP */
+		/* These Thumb 32-bit insns have the same encodings as ARM
+		   counterparts.  */
+		err = thumb2_copy_block_xfer (gdbarch, insn1, insn2, regs, dsc);
+	      }
+	    break;
+	  case 1:
+	    /* Data-processing (shift register).  In the ARM architecture
+	       reference manual, this entry is
+	       "Data-processing (shifted register) on page A6-31".  However,
+	       the instructions in table A6-31 show that they are `alu_reg'
+	       instructions.  There are no alu_shifted_reg instructions in
+	       Thumb-2.  */
+	    err = thumb2_copy_alu_reg (gdbarch, insn1, insn2, regs,
+					       dsc);
+	    break;
+	  default: /* Coprocessor instructions */
+	    /* Thumb 32bit coprocessor instructions have the same encoding
+	       as ARM's.  */
+	    err = thumb2_decode_svc_copro (gdbarch, insn1, insn2, regs, dsc);
+	    break;
+	  }
+      break;
+      }
+    case 2: /* op1 = 2 */
+      if (op) /* Branch and misc control.  */
+	{
+	  if (bit (insn2, 14)) /* BLX/BL */
+	    err = thumb2_copy_b_bl_blx (gdbarch, insn1, insn2, regs, dsc);
+	  else if (!bits (insn2, 12, 14) && bits (insn1, 8, 10) != 0x7)
+	    /* Conditional Branch */
+	    err = thumb2_copy_b_bl_blx (gdbarch, insn1, insn2, regs, dsc);
+	  else
+	    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+					       "misc ctrl", dsc);
+	}
+      else
+	{
+	  if (bit (insn1, 9)) /* Data processing (plain binary imm) */
+	    {
+	      int op = bits (insn1, 4, 8);
+	      int rn = bits (insn1, 0, 4);
+	      if ((op == 0 || op == 0xa) && rn == 0xf)
+		err = thumb_decode_pc_relative_32bit (gdbarch, insn1, insn2,
+						      regs, dsc);
+	      else
+		err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						   "dp/pb", dsc);
+	    }
+	  else /* Data processing (modified immediate) */
+	    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+					       "dp/mi", dsc);
+	}
+      break;
+    case 3: /* op1 = 3 */
+      switch (bits (insn1, 9, 10))
+	{
+	case 0:
+	  if (bit (insn1, 4))
+	    err = decode_thumb_32bit_ld_mem_hints (gdbarch, insn1, insn2,
+						   regs, dsc);
+	  else
+	    {
+	      if (bit (insn1, 8)) /* NEON Load/Store */
+		err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						   "neon elt/struct load/store",
+						   dsc);
+	      else /* Store single data item */
+		{
+		  int user_mode = (bits (insn2, 8, 11) == 0xe);
+		  int byte = (bits (insn1, 5, 7) == 0
+			      || bits (insn1, 5, 7) == 4);
+		  int writeback = 0;
+
+		  if (bits (insn1, 5, 7) < 3 && bit (insn2, 11))
+		    writeback = bit (insn2, 8);
+
+		  err = thumb2_copy_ldr_str_ldrb_strb (gdbarch, insn1, insn2,
+						       regs, dsc, 0, byte,
+						       user_mode, writeback);
+		}
+	    }
+	  break;
+	case 1: /* op1 = 3, bits (9, 10) == 1 */
+	  switch (bits (insn1, 7, 8))
+	    {
+	    case 0: case 1: /* Data processing (register) */
+	      err = thumb2_copy_alu_reg (gdbarch, insn1, insn2, regs, dsc);
+	      break;
+	    case 2: /* Multiply and absolute difference */
+	      err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						 "mul/mua/diff", dsc);
+	      break;
+	    case 3: /* Long multiply and divide */
+	      err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						 "lmul/lmua", dsc);
+	      break;
+	    }
+	  break;
+	default: /* Coprocessor instructions */
+	  err = thumb2_decode_svc_copro (gdbarch, insn1, insn2, regs, dsc);
+	  break;
+	}
+      break;
+    default:
+      err = 1;
+    }
+
+  if (err)
+    internal_error (__FILE__, __LINE__,
+		    _("thumb_process_displaced_insn: Instruction decode error"));
+
 }
 
 static void
-- 
1.7.0.4


^ permalink raw reply	[flat|nested] 66+ messages in thread

* [try 2nd 4/8] Displaced stepping for Thumb 16-bit insn
  2011-03-24 13:49 ` [try 2nd 0/8] Displaced stepping for " Yao Qi
                     ` (2 preceding siblings ...)
  2011-03-24 14:01   ` [try 2nd 3/8] Refactor copy_svc_os Yao Qi
@ 2011-03-24 14:05   ` Yao Qi
  2011-05-05 13:24     ` Yao Qi
  2011-03-24 14:05   ` [try 2nd 5/8] Displaced stepping for Thumb 32-bit insns Yao Qi
                     ` (3 subsequent siblings)
  7 siblings, 1 reply; 66+ messages in thread
From: Yao Qi @ 2011-03-24 14:05 UTC (permalink / raw)
  To: gdb-patches

[-- Attachment #1: Type: text/plain, Size: 67 bytes --]

Subject tells everything about this patch.

-- 
Yao (齐尧)

[-- Attachment #2: 0004-displaced-stepping-for-16-bit-thumb-instructions.patch --]
[-- Type: text/x-patch, Size: 16379 bytes --]

2011-03-24  Yao Qi  <yao@codesourcery.com>

	* gdb/arm-tdep.c (THUMB_NOP): New macro.
	(thumb_copy_unmodified_16bit): New.
	(thumb_copy_b): New.
	(thumb_copy_bx_blx_reg): New.
	(thumb_copy_alu_reg): New.
	(thumb_copy_svc): New.
	(copy_pc_relative): New.
	(thumb_decode_pc_relative_16bit): New.
	(thumb_copy_16bit_ldr_literal): New.
	(thumb_copy_cbnz_cbz): New.
	(cleanup_pop_pc_16bit): New.
	(thumb_copy_pop_pc_16bit): New.
	(thumb_process_displaced_16bit_insn): New.
	(thumb_process_displaced_32bit_insn): New.

From baed8f464a827a5266b21148c3e674c92d65659f Mon Sep 17 00:00:00 2001
From: Yao Qi <yao@codesourcery.com>
Date: Wed, 16 Mar 2011 10:23:37 +0800
Subject: [PATCH 4/9] displaced stepping for 16-bit thumb instructions
 add thumb_copy_alu_reg
 thumb 16bit copy svc
 update thumb_alu_reg

---
 gdb/arm-tdep.c |  468 +++++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 files changed, 467 insertions(+), 1 deletions(-)

diff --git a/gdb/arm-tdep.c b/gdb/arm-tdep.c
index 3348dcb..a356451 100644
--- a/gdb/arm-tdep.c
+++ b/gdb/arm-tdep.c
@@ -5110,6 +5110,7 @@ arm_adjust_breakpoint_address (struct gdbarch *gdbarch, CORE_ADDR bpaddr)
 
 /* NOP instruction (mov r0, r0).  */
 #define ARM_NOP				0xe1a00000
+#define THUMB_NOP 0x4600
 
 /* Helper for register reads for displaced stepping.  In particular, this
    returns the PC as it would be seen by the instruction at its original
@@ -5332,6 +5333,23 @@ arm_copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
   return 0;
 }
 
+/* Copy a 16-bit Thumb (Thumb and 16-bit Thumb-2) instruction without any
+   modification.  */
+static int
+thumb_copy_unmodified_16bit (struct gdbarch *gdbarch, unsigned int insn,
+			     const char *iname,
+			     struct displaced_step_closure *dsc)
+{
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x, "
+			"opcode/class '%s' unmodified\n", insn,
+			iname);
+
+  dsc->modinsn[0] = insn;
+
+  return 0;
+}
+
 /* Preload instructions with immediate offset.  */
 
 static void
@@ -5566,6 +5584,44 @@ arm_copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
   return install_b_bl_blx (gdbarch, cond, exchange, link, offset, regs, dsc);
 }
 
+/* Copy B Thumb instructions.  */
+static int
+thumb_copy_b (struct gdbarch *gdbarch, unsigned short insn,
+	      struct displaced_step_closure *dsc)
+{
+  unsigned int cond = 0;
+  int offset = 0;
+  unsigned short bit_12_15 = bits (insn, 12, 15);
+  CORE_ADDR from = dsc->insn_addr;
+
+  if (bit_12_15 == 0xd)
+    {
+      offset = sbits (insn, 0, 7);
+      cond = bits (insn, 8, 11);
+    }
+  else if (bit_12_15 == 0xe)
+    {
+       offset = sbits (insn, 0, 10);
+       cond = INST_AL;
+    }
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog,
+			"displaced: copying b immediate insn %.4x "
+			"with offset %d\n", insn, offset);
+
+  dsc->u.branch.cond = cond;
+  dsc->u.branch.link = 0;
+  dsc->u.branch.exchange = 0;
+  dsc->u.branch.dest = from + 4 + offset;
+
+  dsc->modinsn[0] = THUMB_NOP;
+
+  dsc->cleanup = &cleanup_branch;
+
+  return 0;
+}
+
 /* Copy BX/BLX with register-specified destinations.  */
 
 static int
@@ -5609,6 +5665,28 @@ arm_copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
   return install_bx_blx_reg (gdbarch, rm, regs, dsc);
 }
 
+static int
+thumb_copy_bx_blx_reg (struct gdbarch *gdbarch, uint16_t insn,
+		       struct regcache *regs,
+		       struct displaced_step_closure *dsc)
+{
+  int link = bit (insn, 7);
+  unsigned int rm = bits (insn, 3, 6);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x",
+			(unsigned short) insn);
+
+  dsc->u.branch.link = link;
+  /* Always true for thumb.  */
+  dsc->u.branch.cond = INST_AL;
+
+  dsc->modinsn[0] = THUMB_NOP;
+
+  return install_bx_blx_reg (gdbarch, rm, regs, dsc);
+}
+
+
 /* Copy/cleanup arithmetic/logic instruction with immediate RHS.  */
 
 static void
@@ -5746,6 +5824,31 @@ arm_copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
    return install_alu_reg (gdbarch, regs, dsc);
 }
 
+static int
+thumb_copy_alu_reg (struct gdbarch *gdbarch, unsigned short insn,
+		    struct regcache *regs,
+		    struct displaced_step_closure *dsc)
+{
+  CORE_ADDR from = dsc->insn_addr;
+
+  dsc->u.alu_reg.rn = (bit (insn, 7) << 3) | bits (insn, 0, 2);
+  dsc->rd = bits (insn, 3, 6);
+  dsc->u.alu_reg.rm = 2;
+
+  if (dsc->rd != ARM_PC_REGNUM
+      && dsc->u.alu_reg.rm != ARM_PC_REGNUM)
+    return thumb_copy_unmodified_16bit(gdbarch, insn, "ALU reg", dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.4x\n",
+			"ALU", (unsigned short) insn);
+
+  dsc->modinsn[0] = ((insn & 0xff00) | 0x08);
+
+
+  return install_alu_reg (gdbarch, regs, dsc);
+}
+
 /* Cleanup/copy arithmetic/logic insns with shifted register RHS.  */
 
 static void
@@ -6451,6 +6554,35 @@ arm_copy_svc (struct gdbarch *gdbarch, uint32_t insn,
 
 }
 
+static int
+thumb_copy_svc (struct gdbarch *gdbarch, uint16_t insn,
+		struct regcache *regs, struct displaced_step_closure *dsc)
+{
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.4x\n",
+			insn);
+
+  /* Preparation: none.
+     Insn: unmodified svc.
+     Cleanup: pc <- insn_addr + insn_size.  */
+
+  dsc->modinsn[0] = insn;
+
+  /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
+     instruction.  */
+  dsc->wrote_to_pc = 1;
+
+  /* Allow OS-specific code to override SVC handling.  */
+  if (dsc->u.svc.copy_svc_os)
+    return dsc->u.svc.copy_svc_os (gdbarch, regs, dsc);
+  else
+    {
+      dsc->cleanup = &cleanup_svc;
+      return 0;
+    }
+}
+
 /* Copy undefined instructions.  */
 
 static int
@@ -6907,12 +7039,346 @@ decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
     return copy_undef (gdbarch, insn, dsc);  /* Possibly unreachable.  */
 }
 
+static int
+copy_pc_relative (struct regcache *regs, struct displaced_step_closure *dsc,
+		  int rd, unsigned int imm, int is_32bit)
+{
+  int val;
+
+    /* ADR Rd, #imm
+
+     Rewrite as:
+
+     Preparation: Rd <- PC
+     Insn: ADD Rd, #imm
+     Cleanup: Null.
+   */
+
+  /* Rd <- PC */
+  val = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
+  displaced_write_reg (regs, dsc, rd, val, CANNOT_WRITE_PC);
+
+  if (is_32bit)
+    {
+      /* Encoding T3: ADDS Rd, Rd, #imm */
+      dsc->modinsn[0] = (0xf100 | rd);
+      dsc->modinsn[1] = (0x0 | (rd << 8) | imm);
+
+      dsc->numinsns = 2;
+    }
+  else
+    /* Encoding T2: ADDS Rd, #imm */
+    dsc->modinsn[0] = (0x3000 | (rd << 8) | imm);
+
+  return 0;
+}
+
+static int
+thumb_decode_pc_relative_16bit (struct gdbarch *gdbarch, unsigned short insn,
+				struct regcache *regs,
+				struct displaced_step_closure *dsc)
+{
+  unsigned int rd = bits (insn, 8, 10);
+  unsigned int imm8 = bits (insn, 0, 7);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog,
+			"displaced: copying thumb adr r%d, #%d insn %.4x\n",
+			rd, imm8, insn);
+
+  return copy_pc_relative (regs, dsc, rd, imm8, 0);
+}
+
+static int
+thumb_copy_16bit_ldr_literal (struct gdbarch *gdbarch, unsigned short insn1,
+			      struct regcache *regs,
+			      struct displaced_step_closure *dsc)
+{
+  unsigned int rt = bits (insn1, 8, 7);
+  unsigned int pc;
+  int imm8 = sbits (insn1, 0, 7);
+  CORE_ADDR from = dsc->insn_addr;
+
+  /* LDR Rd, #imm8
+
+     Rewrite as:
+
+     Preparation: tmp2 <- R2, tmp3 <- R3, R2 <- PC, R3 <- #imm8;
+                  if (Rd is not R0) tmp0 <- R0;
+     Insn: LDR R0, [R2, R3];
+     Cleanup: R2 <- tmp2, R3 <- tmp3,
+              if (Rd is not R0) Rd <- R0, R0 <- tmp0 */
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying thumb ldr literal "
+			"insn %.4x\n", insn1);
+
+  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
+  dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
+  dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
+  pc = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
+
+  displaced_write_reg (regs, dsc, 2, pc, CANNOT_WRITE_PC);
+  displaced_write_reg (regs, dsc, 3, imm8, CANNOT_WRITE_PC);
+
+  dsc->rd = rt;
+  dsc->u.ldst.xfersize = 4;
+  dsc->u.ldst.rn = 0;
+  dsc->u.ldst.immed = 0;
+  dsc->u.ldst.writeback = 0;
+  dsc->u.ldst.restore_r4 = 0;
+
+  dsc->modinsn[0] = 0x58d0; /* ldr r0, [r2, r3]*/
+
+  dsc->cleanup = &cleanup_load;
+
+  return 0;
+}
+
+/* Copy Thumb cbnz/cbz instruction.  */
+
+static int
+thumb_copy_cbnz_cbz (struct gdbarch *gdbarch, unsigned short insn1,
+		     struct regcache *regs,
+		     struct displaced_step_closure *dsc)
+{
+  int non_zero = bit (insn1, 11);
+  unsigned int imm5 = (bit (insn1, 9) << 6) | (bits (insn1, 3, 7) << 1);
+  CORE_ADDR from = dsc->insn_addr;
+  int rn = bits (insn1, 0, 2);
+  int rn_val = displaced_read_reg (regs, dsc, rn);
+
+  dsc->u.branch.cond = (rn_val && non_zero) || (!rn_val && !non_zero);
+  /* CBNZ and CBZ do not affect the condition flags.  If condition is true,
+     set it INST_AL, so cleanup_branch will know branch is taken, otherwise,
+     condition is false, let it be, cleanup_branch will do nothing.  */
+  if (dsc->u.branch.cond)
+    dsc->u.branch.cond = INST_AL;
+
+  dsc->u.branch.link = 0;
+  dsc->u.branch.exchange = 0;
+
+  dsc->u.branch.dest = from + 2 + imm5;
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s [r%d = 0x%x]"
+			" insn %.4x to %.8lx\n", non_zero ? "cbnz" : "cbz",
+			rn, rn_val, insn1, dsc->u.branch.dest);
+
+  dsc->modinsn[0] = THUMB_NOP;
+
+  dsc->cleanup = &cleanup_branch;
+  return 0;
+}
+
+static void
+cleanup_pop_pc_16bit(struct gdbarch *gdbarch, struct regcache *regs,
+			 struct displaced_step_closure *dsc)
+{
+  CORE_ADDR from = dsc->insn_addr;
+  int rx = dsc->u.block.regmask ? 8 : 0;
+  int rx_val = displaced_read_reg (regs, dsc, rx);
+
+  displaced_write_reg (regs, dsc, ARM_PC_REGNUM, rx_val, BX_WRITE_PC);
+  displaced_write_reg (regs, dsc, rx, dsc->tmp[0], CANNOT_WRITE_PC);
+}
+
+static int
+thumb_copy_pop_pc_16bit (struct gdbarch *gdbarch, unsigned short insn1,
+			 struct regcache *regs,
+			 struct displaced_step_closure *dsc)
+{
+  CORE_ADDR from = dsc->insn_addr;
+
+  dsc->u.block.regmask = insn1 & 0x00ff;
+
+  /* Rewrite instruction: POP {rX, rY, ...,rZ, PC}
+     to :
+
+     (1) register list is not empty,
+     Prepare: tmp[0] <- r8,
+
+     POP {rX};   PC is stored in rX
+     MOV r8, rX; finally, PC is stored in r8
+     POP {rX, rY, ...., rZ}
+
+     Cleanup: PC <-r8, r8 <- tmp[0]
+
+     (2) register list is empty,
+     Prepare: tmp[0] <- r0,
+
+     POP {r0}
+
+     Cleanup: PC <- r0, r0 <- tmp[0]
+  */
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog,
+			"displaced: copying thumb pop {%.8x, pc} insn %.4x\n",
+			dsc->u.block.regmask, insn1);
+
+  if (dsc->u.block.regmask != 0)
+    {
+      int rx = 0;
+
+       dsc->tmp[0] = displaced_read_reg (regs, dsc, 8);
+
+      /* Look for the first register in register list.  */
+      for (rx = 0; rx < 8; rx++)
+	if (dsc->u.block.regmask & (1 << rx))
+	  break;
+
+      dsc->modinsn[0] = (0xbc00 | (1 << rx)); /* POP {rX} */
+      dsc->modinsn[1] = (0x4680 | (rx << 3)); /* MOV r8, rX */
+      dsc->modinsn[2] = (insn1 & 0xfeff);     /* POP {rX, rY, ..., rZ} */
+      /* dsc->modinsn[ (3, 0x46c7); */            /* MOV PC, r8 */
+
+      dsc->numinsns = 3;
+    }
+  else
+    {
+      dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
+
+      dsc->modinsn[0] = 0xbc00; /* POP {r0} */
+      /* dsc->modinsn[ (1, 0x4683); */ /* MOV PC, r0 */
+
+      dsc->numinsns = 1;
+    }
+
+  dsc->cleanup = &cleanup_pop_pc_16bit;
+  return 0;
+}
+
+static void
+thumb_process_displaced_16bit_insn (struct gdbarch *gdbarch,
+				    unsigned short insn1, struct regcache *regs,
+				    struct displaced_step_closure *dsc)
+{
+  unsigned short op_bit_12_15 = bits (insn1, 12, 15);
+  unsigned short op_bit_10_11 = bits (insn1, 10, 11);
+  int err = 0;
+
+  /* 16-bit thumb instructions.  */
+  switch (op_bit_12_15)
+    {
+      /* Shift (immediate), add, subtract, move and compare.  */
+    case 0: case 1: case 2: case 3:
+      err = thumb_copy_unmodified_16bit (gdbarch, insn1,"shift/add/sub/mov/cmp",
+					 dsc);
+      break;
+    case 4:
+      switch (op_bit_10_11)
+	{
+	case 0: /* Data-processing */
+	  err = thumb_copy_unmodified_16bit (gdbarch, insn1,"data-processing",
+					     dsc);
+	  break;
+	case 1: /* Special data instructions and branch and exchange */
+	  {
+	    unsigned short op = bits (insn1, 7, 9);
+	    if (op == 6 || op == 7) /* BX or BLX */
+	      err = thumb_copy_bx_blx_reg (gdbarch, insn1, regs, dsc);
+	    else if (bits (insn1, 6, 7) != 0) /* ADD/MOV/CMP high registers.  */
+	      err = thumb_copy_alu_reg (gdbarch, insn1, regs, dsc);
+	    else
+	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "special data",
+						 dsc);
+	  }
+	  break;
+	default: /* LDR (literal) */
+	  err = thumb_copy_16bit_ldr_literal (gdbarch, insn1, regs, dsc);
+	}
+      break;
+    case 5: case 6: case 7: case 8: case 9: /* Load/Store single data item */
+      err = thumb_copy_unmodified_16bit (gdbarch, insn1,"ldr/str", dsc);
+      break;
+    case 10:
+      if (op_bit_10_11 < 2) /* Generate PC-relative address */
+	err = thumb_decode_pc_relative_16bit (gdbarch, insn1, regs, dsc);
+      else /* Generate SP-relative address */
+	err = thumb_copy_unmodified_16bit (gdbarch, insn1,"sp-relative", dsc);
+      break;
+    case 11: /* Misc 16-bit instructions */
+      {
+	switch (bits (insn1, 8, 11))
+	  {
+	  case 1: case 3:  case 9: case 11: /* CBNZ, CBZ */
+	    err = thumb_copy_cbnz_cbz (gdbarch, insn1, regs, dsc);
+	    break;
+	  case 12: case 13: /* POP */
+	    if (bit (insn1, 8)) /* PC is in register list.  */
+	      {
+		err = thumb_copy_pop_pc_16bit (gdbarch, insn1, regs, dsc);
+	      }
+	    else
+	      err = thumb_copy_unmodified_16bit (gdbarch, insn1,"pop", dsc);
+	    break;
+	  case 15: /* If-Then, and hints */
+	    if (bits (insn1, 0, 3))
+	      err = 1; /* Not supported If-Then */
+	    else
+	      err = thumb_copy_unmodified_16bit (gdbarch, insn1,"hints", dsc);
+	    break;
+	  default:
+	    err = thumb_copy_unmodified_16bit (gdbarch, insn1,"misc", dsc);
+	  }
+      }
+      break;
+    case 12:
+      if (op_bit_10_11 < 2) /* Store multiple registers */
+	err = thumb_copy_unmodified_16bit (gdbarch, insn1,"stm", dsc);
+      else /* Load multiple registers */
+	err = thumb_copy_unmodified_16bit (gdbarch, insn1,"ldm", dsc);
+      break;
+    case 13: /* Conditional branch and supervisor call */
+      if (bits (insn1, 9, 11) != 7) /* conditional branch */
+	err = thumb_copy_b (gdbarch, insn1, dsc);
+      else
+	err = thumb_copy_svc (gdbarch, insn1, regs, dsc);
+      break;
+    case 14: /* Unconditional branch */
+      err = thumb_copy_b (gdbarch, insn1, dsc);
+      break;
+    default:
+      internal_error (__FILE__, __LINE__,
+		      _("thumb_process_displaced_insn: Instruction decode error"));
+    }
+
+  if (err)
+    internal_error (__FILE__, __LINE__,
+		    _("thumb_process_displaced_insn: Instruction decode error"));
+}
+
+static void
+thumb_process_displaced_32bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
+				    uint16_t insn2, struct regcache *regs,
+				    struct displaced_step_closure *dsc)
+{
+  error (_("Displaced stepping is only supported in ARM mode and Thumb 16bit instructions"));
+}
+
 static void
 thumb_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
 			      CORE_ADDR to, struct regcache *regs,
 			      struct displaced_step_closure *dsc)
 {
-  error (_("Displaced stepping is only supported in ARM mode"));
+  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
+  unsigned short insn1
+    = read_memory_unsigned_integer (from, 2, byte_order_for_code);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: process thumb insn %.4x "
+			"at %.8lx\n", insn1, (unsigned long) from);
+
+  dsc->is_thumb = 1;
+  dsc->insn_size = thumb_insn_size (insn1);
+  if (thumb_insn_size (insn1) == 4)
+    {
+      unsigned short insn2
+	= read_memory_unsigned_integer (from + 2, 2, byte_order_for_code);
+      thumb_process_displaced_32bit_insn(gdbarch, insn1, insn2, regs, dsc);
+    }
+  else
+    thumb_process_displaced_16bit_insn(gdbarch, insn1, regs, dsc);
 }
 
 void
-- 
1.7.0.4


^ permalink raw reply	[flat|nested] 66+ messages in thread

* [try 2nd 6/8] Rename some functions to arm_*
  2011-03-24 13:49 ` [try 2nd 0/8] Displaced stepping for " Yao Qi
                     ` (4 preceding siblings ...)
  2011-03-24 14:05   ` [try 2nd 5/8] Displaced stepping for Thumb 32-bit insns Yao Qi
@ 2011-03-24 14:06   ` Yao Qi
  2011-04-06 20:52     ` Ulrich Weigand
  2011-03-24 14:11   ` [try 2nd 7/8] Test case Yao Qi
  2011-03-24 15:14   ` [try 2nd 8/8] NEWS Yao Qi
  7 siblings, 1 reply; 66+ messages in thread
From: Yao Qi @ 2011-03-24 14:06 UTC (permalink / raw)
  To: gdb-patches

[-- Attachment #1: Type: text/plain, Size: 152 bytes --]

This patch should have been merged into patch 2/8, but `git rebase' failed to
combine the two into a single commit, so I am sending it separately.

-- 
Yao (齐尧)

[-- Attachment #2: 0006-rename-some-arm-functions.patch --]
[-- Type: text/x-patch, Size: 11313 bytes --]

2011-03-24  Yao Qi  <yao@codesourcery.com>

	* gdb/arm-tdep.c (copy_copro_load_store): Move some common part to ...
	(install_copy_copro_load_store): ... this.  New.
	(arm_copy_copro_load_store): New.
	(copy_undef): Delete.
	(arm_copy_undef): Renamed from copy_undef.
	(decode_misc_memhint_neon): Update caller.
	(decode_unconditional): Likewise.
	(decode_miscellaneous): Likewise.
	(decode_media): Likewise.
	(decode_b_bl_ldmstm): Likewise.
	(decode_ext_reg_ld_st): Delete.
	(arm_decode_ext_reg_ld_st): Renamed from decode_ext_reg_ld_st.
	(decode_svc_copro): Delete.
	(arm_decode_svc_copro): Renamed from decode_svc_copro.
	(arm_process_displaced_insn): Update caller.

---
 gdb/arm-tdep.c |  106 ++++++++++++++++++++++++++++++-------------------------
 1 files changed, 58 insertions(+), 48 deletions(-)

diff --git a/gdb/arm-tdep.c b/gdb/arm-tdep.c
index 6ba7b5b..89a6cc4 100644
--- a/gdb/arm-tdep.c
+++ b/gdb/arm-tdep.c
@@ -5526,21 +5526,12 @@ cleanup_copro_load_store (struct gdbarch *gdbarch,
     displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, LOAD_WRITE_PC);
 }
 
-static int
-copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
-		       struct regcache *regs,
-		       struct displaced_step_closure *dsc)
+static void
+install_copy_copro_load_store (struct gdbarch *gdbarch, struct regcache *regs,
+			       struct displaced_step_closure *dsc)
 {
-  unsigned int rn = bits (insn, 16, 19);
   ULONGEST rn_val;
 
-  if (!insn_references_pc (insn, 0x000f0000ul))
-    return arm_copy_unmodified (gdbarch, insn, "copro load/store", dsc);
-
-  if (debug_displaced)
-    fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
-			"load/store insn %.8lx\n", (unsigned long) insn);
-
   /* Coprocessor load/store instructions:
 
      {stc/stc2} [<Rn>, #+/-imm]  (and other immediate addressing modes)
@@ -5550,15 +5541,34 @@ copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
      ldc/ldc2 are handled identically.  */
 
   dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
-  rn_val = displaced_read_reg (regs, dsc, rn);
+  rn_val = displaced_read_reg (regs, dsc, dsc->u.ldst.rn);
   displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
 
+  dsc->cleanup = &cleanup_copro_load_store;
+}
+
+static int
+arm_copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
+			   struct regcache *regs,
+			   struct displaced_step_closure *dsc)
+{
+  unsigned int rn = bits (insn, 16, 19);
+
+  if (!insn_references_pc (insn, 0x000f0000ul))
+    return arm_copy_unmodified (gdbarch, insn, "copro load/store", dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
+			"load/store insn %.8lx\n", (unsigned long) insn);
+
+
+
   dsc->u.ldst.writeback = bit (insn, 25);
   dsc->u.ldst.rn = rn;
 
   dsc->modinsn[0] = insn & 0xfff0ffff;
 
-  dsc->cleanup = &cleanup_copro_load_store;
+  install_copy_copro_load_store (gdbarch, regs, dsc);
 
   return 0;
 }
@@ -6906,8 +6916,8 @@ thumb_copy_svc (struct gdbarch *gdbarch, uint16_t insn,
 /* Copy undefined instructions.  */
 
 static int
-copy_undef (struct gdbarch *gdbarch, uint32_t insn,
-	    struct displaced_step_closure *dsc)
+arm_copy_undef (struct gdbarch *gdbarch, uint32_t insn,
+		struct displaced_step_closure *dsc)
 {
   if (debug_displaced)
     fprintf_unfiltered (gdb_stdlog,
@@ -7008,10 +7018,10 @@ decode_misc_memhint_neon (struct gdbarch *gdbarch, uint32_t insn,
       case 0x63: case 0x67: case 0x73: case 0x77:
 	return copy_unpred (gdbarch, insn, dsc);
       default:
-	return copy_undef (gdbarch, insn, dsc);
+	return arm_copy_undef (gdbarch, insn, dsc);
       }
   else
-    return copy_undef (gdbarch, insn, dsc);  /* Probably unreachable.  */
+    return arm_copy_undef (gdbarch, insn, dsc);  /* Probably unreachable.  */
 }
 
 static int
@@ -7038,13 +7048,13 @@ decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
 	{
 	case 0x1: case 0x3: case 0x4: case 0x5: case 0x6: case 0x7:
 	  /* stc/stc2.  */
-	  return copy_copro_load_store (gdbarch, insn, regs, dsc);
+	  return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
 
 	case 0x2:
 	  return arm_copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
 
 	default:
-	  return copy_undef (gdbarch, insn, dsc);
+	  return arm_copy_undef (gdbarch, insn, dsc);
 	}
 
     case 0x9:
@@ -7054,19 +7064,19 @@ decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
 	  {
 	  case 0x1: case 0x3:
 	    /* ldc/ldc2 imm (undefined for rn == pc).  */
-	    return rn_f ? copy_undef (gdbarch, insn, dsc)
-			: copy_copro_load_store (gdbarch, insn, regs, dsc);
+	    return rn_f ? arm_copy_undef (gdbarch, insn, dsc)
+			: arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
 
 	  case 0x2:
 	    return arm_copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
 
 	  case 0x4: case 0x5: case 0x6: case 0x7:
 	    /* ldc/ldc2 lit (undefined for rn != pc).  */
-	    return rn_f ? copy_copro_load_store (gdbarch, insn, regs, dsc)
-			: copy_undef (gdbarch, insn, dsc);
+	    return rn_f ? arm_copy_copro_load_store (gdbarch, insn, regs, dsc)
+			: arm_copy_undef (gdbarch, insn, dsc);
 
 	  default:
-	    return copy_undef (gdbarch, insn, dsc);
+	    return arm_copy_undef (gdbarch, insn, dsc);
 	  }
       }
 
@@ -7076,9 +7086,9 @@ decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
     case 0xb:
       if (bits (insn, 16, 19) == 0xf)
         /* ldc/ldc2 lit.  */
-	return copy_copro_load_store (gdbarch, insn, regs, dsc);
+	return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
       else
-	return copy_undef (gdbarch, insn, dsc);
+	return arm_copy_undef (gdbarch, insn, dsc);
 
     case 0xc:
       if (bit (insn, 4))
@@ -7093,7 +7103,7 @@ decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
 	return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
 
     default:
-      return copy_undef (gdbarch, insn, dsc);
+      return arm_copy_undef (gdbarch, insn, dsc);
     }
 }
 
@@ -7119,21 +7129,21 @@ decode_miscellaneous (struct gdbarch *gdbarch, uint32_t insn,
       else if (op == 0x3)
 	return arm_copy_unmodified (gdbarch, insn, "clz", dsc);
       else
-	return copy_undef (gdbarch, insn, dsc);
+	return arm_copy_undef (gdbarch, insn, dsc);
 
     case 0x2:
       if (op == 0x1)
         /* Not really supported.  */
 	return arm_copy_unmodified (gdbarch, insn, "bxj", dsc);
       else
-	return copy_undef (gdbarch, insn, dsc);
+	return arm_copy_undef (gdbarch, insn, dsc);
 
     case 0x3:
       if (op == 0x1)
 	return arm_copy_bx_blx_reg (gdbarch, insn,
 				regs, dsc);  /* blx register.  */
       else
-	return copy_undef (gdbarch, insn, dsc);
+	return arm_copy_undef (gdbarch, insn, dsc);
 
     case 0x5:
       return arm_copy_unmodified (gdbarch, insn, "saturating add/sub", dsc);
@@ -7146,7 +7156,7 @@ decode_miscellaneous (struct gdbarch *gdbarch, uint32_t insn,
 	return arm_copy_unmodified (gdbarch, insn, "smc", dsc);
 
     default:
-      return copy_undef (gdbarch, insn, dsc);
+      return arm_copy_undef (gdbarch, insn, dsc);
     }
 }
 
@@ -7259,13 +7269,13 @@ decode_media (struct gdbarch *gdbarch, uint32_t insn,
 	    return arm_copy_unmodified (gdbarch, insn, "usada8", dsc);
 	}
       else
-	 return copy_undef (gdbarch, insn, dsc);
+	 return arm_copy_undef (gdbarch, insn, dsc);
 
     case 0x1a: case 0x1b:
       if (bits (insn, 5, 6) == 0x2)  /* op2[1:0].  */
 	return arm_copy_unmodified (gdbarch, insn, "sbfx", dsc);
       else
-	return copy_undef (gdbarch, insn, dsc);
+	return arm_copy_undef (gdbarch, insn, dsc);
 
     case 0x1c: case 0x1d:
       if (bits (insn, 5, 6) == 0x0)  /* op2[1:0].  */
@@ -7276,13 +7286,13 @@ decode_media (struct gdbarch *gdbarch, uint32_t insn,
 	    return arm_copy_unmodified (gdbarch, insn, "bfi", dsc);
 	}
       else
-	return copy_undef (gdbarch, insn, dsc);
+	return arm_copy_undef (gdbarch, insn, dsc);
 
     case 0x1e: case 0x1f:
       if (bits (insn, 5, 6) == 0x2)  /* op2[1:0].  */
 	return arm_copy_unmodified (gdbarch, insn, "ubfx", dsc);
       else
-	return copy_undef (gdbarch, insn, dsc);
+	return arm_copy_undef (gdbarch, insn, dsc);
     }
 
   /* Should be unreachable.  */
@@ -7300,9 +7310,9 @@ decode_b_bl_ldmstm (struct gdbarch *gdbarch, int32_t insn,
 }
 
 static int
-decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
-		      struct regcache *regs,
-		      struct displaced_step_closure *dsc)
+arm_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
+			  struct regcache *regs,
+			  struct displaced_step_closure *dsc)
 {
   unsigned int opcode = bits (insn, 20, 24);
 
@@ -7323,7 +7333,7 @@ decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
     case 0x11: case 0x15: case 0x19: case 0x1d:  /* vldr.  */
       /* Note: no writeback for these instructions.  Bit 25 will always be
 	 zero though (via caller), so the following works OK.  */
-      return copy_copro_load_store (gdbarch, insn, regs, dsc);
+      return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
     }
 
   /* Should be unreachable.  */
@@ -7368,8 +7378,8 @@ thumb2_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint16_t insn1,
 }
 
 static int
-decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
-		  struct regcache *regs, struct displaced_step_closure *dsc)
+arm_decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
+		      struct regcache *regs, struct displaced_step_closure *dsc)
 {
   unsigned int op1 = bits (insn, 20, 25);
   int op = bit (insn, 4);
@@ -7377,17 +7387,17 @@ decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
   unsigned int rn = bits (insn, 16, 19);
 
   if ((op1 & 0x20) == 0x00 && (op1 & 0x3a) != 0x00 && (coproc & 0xe) == 0xa)
-    return decode_ext_reg_ld_st (gdbarch, insn, regs, dsc);
+    return arm_decode_ext_reg_ld_st (gdbarch, insn, regs, dsc);
   else if ((op1 & 0x21) == 0x00 && (op1 & 0x3a) != 0x00
 	   && (coproc & 0xe) != 0xa)
     /* stc/stc2.  */
-    return copy_copro_load_store (gdbarch, insn, regs, dsc);
+    return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
   else if ((op1 & 0x21) == 0x01 && (op1 & 0x3a) != 0x00
 	   && (coproc & 0xe) != 0xa)
     /* ldc/ldc2 imm/lit.  */
-    return copy_copro_load_store (gdbarch, insn, regs, dsc);
+    return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
   else if ((op1 & 0x3e) == 0x00)
-    return copy_undef (gdbarch, insn, dsc);
+    return arm_copy_undef (gdbarch, insn, dsc);
   else if ((op1 & 0x3e) == 0x04 && (coproc & 0xe) == 0xa)
     return arm_copy_unmodified (gdbarch, insn, "neon 64bit xfer", dsc);
   else if (op1 == 0x04 && (coproc & 0xe) != 0xa)
@@ -7410,7 +7420,7 @@ decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
   else if ((op1 & 0x30) == 0x30)
     return arm_copy_svc (gdbarch, insn, regs, dsc);
   else
-    return copy_undef (gdbarch, insn, dsc);  /* Possibly unreachable.  */
+    return arm_copy_undef (gdbarch, insn, dsc);  /* Possibly unreachable.  */
 }
 
 static int
@@ -8119,7 +8129,7 @@ arm_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
       break;
 
     case 0xc: case 0xd: case 0xe: case 0xf:
-      err = decode_svc_copro (gdbarch, insn, to, regs, dsc);
+      err = arm_decode_svc_copro (gdbarch, insn, to, regs, dsc);
       break;
     }
 
-- 
1.7.0.4


^ permalink raw reply	[flat|nested] 66+ messages in thread

* [try 2nd 7/8] Test case
  2011-03-24 13:49 ` [try 2nd 0/8] Displaced stepping for " Yao Qi
                     ` (5 preceding siblings ...)
  2011-03-24 14:06   ` [try 2nd 6/8] Rename some functions to arm_* Yao Qi
@ 2011-03-24 14:11   ` Yao Qi
  2011-05-05 13:26     ` Yao Qi
  2011-03-24 15:14   ` [try 2nd 8/8] NEWS Yao Qi
  7 siblings, 1 reply; 66+ messages in thread
From: Yao Qi @ 2011-03-24 14:11 UTC (permalink / raw)
  To: gdb-patches

[-- Attachment #1: Type: text/plain, Size: 65 bytes --]

Test cases for Thumb displaced stepping.

-- 
Yao (齐尧)

[-- Attachment #2: 0007-test-cases.patch --]
[-- Type: text/x-patch, Size: 12328 bytes --]

2011-03-24  Yao Qi  <yao@codesourcery.com>

	* gdb.arch/arm-disp-step.S: Test Thumb instructions.
	* gdb.arch/arm-disp-step.exp: Likewise.

---
 gdb/testsuite/gdb.arch/arm-disp-step.S   |  164 ++++++++++++++++++++++-
 gdb/testsuite/gdb.arch/arm-disp-step.exp |  215 +++++++++++++++++++++++++++---
 2 files changed, 354 insertions(+), 25 deletions(-)

diff --git a/gdb/testsuite/gdb.arch/arm-disp-step.S b/gdb/testsuite/gdb.arch/arm-disp-step.S
index fb76974..276047d 100644
--- a/gdb/testsuite/gdb.arch/arm-disp-step.S
+++ b/gdb/testsuite/gdb.arch/arm-disp-step.S
@@ -18,7 +18,7 @@
 	.syntax unified
 	.text
 	.type main,%function
-#if defined (__thumb__) || defined (__thumb2__)
+#if defined (__thumb__)
 	.code   16
 	.thumb_func
 #endif
@@ -43,11 +43,37 @@ test_ret_end:
 	/* Test ldr from pc */
 	bl test_ldr_pc
 
-	/* Test ldm/stm only in ARM mode */
-#if !defined (__thumb__) && !defined (__thumb2__)
+	/* Test ldm/stm in ARM */
+#if !defined (__thumb__)
 	bl test_ldm_stm_pc
 #endif
 
+	/* Test ldrX literal in ARM and Thumb-2 */
+#if !defined (__thumb__) || defined(__thumb2__)
+	bl test_ldr_literal
+#endif
+
+	/* Test ldr literal in Thumb */
+#if defined(__thumb__)
+	bl test_ldr_literal_16
+#endif
+
+	/* Test cbnz/cbz in Thumb-2 */
+#if defined(__thumb2__)
+	bl test_cbz_cbnz
+#endif
+
+	/* Test adr in Thumb and Thumb-2 */
+#if defined(__thumb__)
+	bl test_adr
+#endif
+	/* Test 32-bit adr in ARM and Thumb-2 */
+#if defined(__thumb2__) || !defined(__thumb__)
+	bl test_adr_32bit
+#endif
+
+	bl test_pop_pc
+	
 	/* Test str in ARM mode and Thumb-2 */
 #if !defined(__thumb__)
 	bl test_str_pc
@@ -60,7 +86,7 @@ test_ret_end:
 	.size main, .-main
 	
 	.global test_call_subr
-#if defined (__thumb__) || defined (__thumb2__)
+#if defined (__thumb__)
 	.code   16
 	.thumb_func
 #endif
@@ -77,7 +103,7 @@ test_ret:
 
 	
 	.global test_branch
-#if defined (__thumb__) || defined (__thumb2__)
+#if defined (__thumb__)
 	.code   16
 	.thumb_func
 #endif
@@ -90,7 +116,7 @@ L_branch:
 	.size test_branch, .-test_branch
 
 	.global test_ldr_pc
-#if defined (__thumb__) || defined (__thumb2__)
+#if defined (__thumb__)
 	.code   16
 	.thumb_func
 #endif
@@ -103,7 +129,7 @@ test_ldr_pc_ret:
 	bx lr
 	.size test_ldr_pc, .-test_ldr_pc
 
-#if !defined (__thumb__) && !defined (__thumb2__)
+#if !defined (__thumb__) 
 	.global test_ldm_stm_pc
 	.type test_ldm_stm_pc, %function
 test_ldm_stm_pc:
@@ -122,6 +148,130 @@ test_ldm_stm_pc_ret:
 	.word	test_ldm_stm_pc_ret
 	.size test_ldm_stm_pc, .-test_ldm_stm_pc
 #endif
+	
+#if !defined (__thumb__) || defined(__thumb2__)
+	.global test_ldr_literal
+	.type test_ldr_literal, %function
+test_ldr_literal:
+	ldrh	r0, [pc]
+	.global test_ldrsb_literal
+test_ldrsb_literal:
+	ldrsb	r0, [pc]
+	.global test_ldrsh_literal
+test_ldrsh_literal:
+	ldrsh	r0, [pc]
+	.global test_ldr_literal_end
+test_ldr_literal_end:
+	bx lr
+	.size test_ldr_literal, .-test_ldr_literal
+#endif
+
+#if defined(__thumb__)
+	.global test_ldr_literal_16
+	.code   16
+	.thumb_func
+test_ldr_literal_16:
+	ldr	r0, .L2
+	.global test_ldr_literal_16_end
+test_ldr_literal_16_end:
+	bx lr
+	.align	2
+.L2:
+	.word	test_ldr_literal_16
+	.size test_ldr_literal_16, .-test_ldr_literal_16
+#endif
+
+#if defined(__thumb2__)
+	.global test_cbz_cbnz
+	.code   16
+	.thumb_func
+test_cbz_cbnz:
+	movs 	r0, #0
+	.global test_zero_cbnz
+test_zero_cbnz:
+	cbnz	r0, .L3
+	.global test_zero_cbz
+test_zero_cbz:
+	cbz	r0, .L3
+.L3:
+	movs	r0, #1
+	.global test_non_zero_cbz
+test_non_zero_cbz:
+	cbz	r0, .L4
+	.global test_non_zero_cbnz
+test_non_zero_cbnz:
+	cbnz	r0, .L4
+	nop
+.L4:
+	.global test_cbz_cbnz_end
+test_cbz_cbnz_end:
+	bx lr
+	.size test_cbz_cbnz, .-test_cbz_cbnz
+#endif
+
+#if defined(__thumb__)
+	.global test_adr
+	.code   16
+	.thumb_func
+test_adr:
+	adr	r0, .L8
+	nop
+	nop
+	nop
+.L8:
+	.global test_adr_end
+test_adr_end:
+	bx lr
+	.size test_adr, .-test_adr
+#endif
+
+#if defined(__thumb2__) || !defined(__thumb__)
+	.global test_adr_32bit
+#if defined(__thumb2__)
+	.code   16
+	.thumb_func
+#endif
+test_adr_32bit:
+	adr	r0, .L6
+	nop
+.L6:
+	nop
+	.global test_adr_32bit_after
+test_adr_32bit_after:
+	adr	r0, .L6
+
+	.global test_adr_32bit_end
+test_adr_32bit_end:
+	bx lr
+	.size test_adr_32bit, .-test_adr_32bit
+#endif
+	
+	.global test_pop_pc
+	.type test_pop_pc, %function
+#if defined(__thumb__)
+	.code   16
+	.thumb_func
+#endif
+
+test_pop_pc:
+	ldr	r1, .L5
+#if defined(__thumb__)
+	movs	r0, #1
+	orrs	r1, r0
+#endif
+	push	{r1}
+	push	{r1}
+	.global test_pop_pc_1
+test_pop_pc_1:
+	pop	{r1, pc}
+	.global test_pop_pc_ret
+test_pop_pc_ret:
+	bx lr
+	.align	2
+.L5:
+	.word	test_pop_pc_ret
+	.size test_pop_pc, .-test_pop_pc
+
 
 #if !defined(__thumb__)
 #if defined (__thumb2__)
diff --git a/gdb/testsuite/gdb.arch/arm-disp-step.exp b/gdb/testsuite/gdb.arch/arm-disp-step.exp
index 3bea521..b337335 100644
--- a/gdb/testsuite/gdb.arch/arm-disp-step.exp
+++ b/gdb/testsuite/gdb.arch/arm-disp-step.exp
@@ -50,7 +50,11 @@ proc test_ldm_stm_pc {} {
 	}
 	-re "Function \"test_ldm_stm_pc\" not defined\..*Make breakpoint pending on future shared library load.*y or .n.. $" {
 	    gdb_test "n" "" "Test case is compiled in Thumb mode"
-	    return
+	    return 0
+	}
+	-re "No symbol.*" {
+	    pass "break test_ldm_stm_pc"
+	    return 0
 	}
     }
 
@@ -68,10 +72,81 @@ proc test_ldm_stm_pc {} {
     gdb_continue_to_breakpoint "continue to test_ldm_stm_pc_ret" \
 	".*bx lr.*"
 }
+
+#########################################
+# Test ldrX literal
+proc test_ldr_literal {} {
+    global srcfile
+    # Try to set breakpoint on test_ldm_stm_pc.  If symbol 'test_ldm_stm_pc'
+    # can't be resolved, test case is compiled in Thumb mode, skip it.
+    gdb_test_multiple "break *test_ldr_literal" "break test_ldr_literal" {
+	-re "Breakpoint.*at.* file .*$srcfile, line.*" {
+	    pass "break test_ldr_literal"
+	}
+	-re "Function \"test_ldr_literal\" not defined\..*Make breakpoint pending on future shared library load.*y or .n.. $" {
+	    gdb_test "n" "" "Test case is compiled in Thumb mode"
+	    return 0
+	}
+	-re "No symbol.*" {
+	    return 0
+	}
+    }
+
+    gdb_test "break *test_ldrsb_literal" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_ldrsb_literal"
+    gdb_test "break *test_ldrsh_literal" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_ldrsh_literal"
+    gdb_test "break *test_ldr_literal_end" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_test_ldr_literal_end"
+
+    gdb_continue_to_breakpoint "continue to test_ldr_literal" \
+	".*ldrh.*r0\,.*\[pc\].*"
+    gdb_continue_to_breakpoint "continue to test_ldrsb_literal" \
+	".*ldrsb.*r0\,.*\[pc\].*"
+    gdb_continue_to_breakpoint "continue to test_ldrsh_literal" \
+	".*ldrsh.*r0\,.*\[pc\].*"
+    gdb_continue_to_breakpoint "continue to test_ldr_literal_ret" \
+	".*bx lr.*"
+}
+
+proc test_ldr_literal_16 {} {
+    global srcfile
+    gdb_test_multiple "break *test_ldr_literal_16" "break test_ldr_literal_16" {
+	-re "Breakpoint.*at.* file .*$srcfile, line.*" {
+	    pass "break test_ldr_literal"
+	}
+	-re "Function \"test_ldr_literal_16\" not defined\..*Make breakpoint pending on future shared library load.*y or .n.. $" {
+	    gdb_test "n" "" "skip"
+	    return 0
+	}
+	-re "No symbol.*" {
+	    return 0
+	}
+    }
+    
+    gdb_test "break *test_ldr_literal_16_end" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_ldr_literal_16_end"
+
+    gdb_continue_to_breakpoint "continue to test_ldr_literal_16" \
+	".*ldr.*r0\,.*L2.*"
+    gdb_continue_to_breakpoint "continue to test_ldr_literal_16_end" \
+	".*bx lr.*"
+}
+
 ##########################################
 # Test call/ret.
 proc test_call_ret {} {
     global srcfile
+    global testfile
+
+    gdb_test "break *test_call" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_call"
+
     gdb_test "break *test_call_end" \
 	"Breakpoint.*at.* file .*$srcfile, line.*" \
 	"break test_call_end"
@@ -82,9 +157,10 @@ proc test_call_ret {} {
 	"Breakpoint.*at.* file .*$srcfile, line.*" \
 	"break test_ret_end"
 
-    gdb_continue_to_breakpoint "continue to test_call_end" \
+    gdb_continue_to_breakpoint "test_call" ".*bl test_call_subr.*"
+    gdb_continue_to_breakpoint "test_call_end" \
 	".*@ Location test_call_end.*"
-    gdb_continue_to_breakpoint "continue to test_ret" \
+    gdb_continue_to_breakpoint "test_ret" \
 	".*bx lr.*"
     gdb_continue_to_breakpoint "continue to test_ret_end" \
 	".*@ Location test_ret_end.*"
@@ -122,7 +198,111 @@ proc test_ldr_from_pc {} {
 
     gdb_continue_to_breakpoint "continue to test_ldr_pc" \
 	".*ldr.*r1\,.*\[pc, #0\].*"
-    gdb_continue_to_breakpoint "continue to Lbranch" \
+    gdb_continue_to_breakpoint "continue to test_ldr_pc_ret" \
+	".*bx lr.*"
+}
+
+#########################################
+
+# Test cbz and cbnz
+proc test_cbz_cbnz {} {
+    global srcfile
+
+    gdb_test_multiple "break *test_zero_cbnz" "break test_zero_cbnz" {
+	-re "Breakpoint.*at.* file .*$srcfile, line.*" {
+	    pass "break test_ldr_literal"
+	}
+	-re "No symbol.*" {
+	    return 0
+	}
+    }
+
+    gdb_test "break *test_zero_cbz" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_zero_cbz"
+    gdb_test "break *test_non_zero_cbnz" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_non_zero_cbnz"
+    gdb_test "break *test_non_zero_cbz" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_non_zero_cbz"
+
+    gdb_continue_to_breakpoint "continue to test_zero_cbnz" \
+	".*cbnz.*r0\,.*\.L3.*"
+    gdb_continue_to_breakpoint "continue to test_zero_cbz" \
+	".*cbz.*r0\,.*\.L3.*"
+    gdb_continue_to_breakpoint "continue to test_non_zero_cbz" \
+	".*cbz.*r0\,.*\.L4.*"
+    gdb_continue_to_breakpoint "continue to test_non_zero_cbnz" \
+	".*cbnz.*r0\,.*\.L4.*"
+}
+
+# Test adr
+
+proc test_adr {} {
+    global srcfile
+
+    gdb_test_multiple "break *test_adr" "break test_adr" {
+	-re "Breakpoint.*at.* file .*$srcfile, line.*" {
+	    pass "break test_adr"
+	}
+	-re "No symbol.*" {
+	    return 0
+	}
+    }
+
+    gdb_test "break *test_adr_end" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_adr_end"
+
+    gdb_continue_to_breakpoint "test_adr" \
+	".*adr.*r0\,.*\.L8.*"
+    gdb_continue_to_breakpoint "test_adr_end" \
+	".*bx lr.*"
+}
+
+proc test_adr_32bit {} {
+    global srcfile
+
+    gdb_test_multiple "break *test_adr_32bit" "break test_adr_32bit" {
+	-re "Breakpoint.*at.* file .*$srcfile, line.*" {
+	    pass "break test_adr"
+	}
+	-re "No symbol.*" {
+	    return 0
+	}
+    }
+
+    gdb_test "break *test_adr_32bit_after" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_adr_32bit_after"
+
+    gdb_test "break *test_adr_32bit_end" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_adr_32bit_end"
+
+    gdb_continue_to_breakpoint "test_adr_32bit" \
+	".*adr.*r0\,.*\.L6.*"
+    gdb_continue_to_breakpoint "test_adr_32bit_after" \
+	".*adr.*r0\,.*\.L6.*"
+    gdb_continue_to_breakpoint "test_adr_32bit_end" \
+	".*bx lr.*"
+}
+
+#########################################
+# Test pop to PC
+proc test_pop_pc {} {
+    global srcfile
+    gdb_test "break *test_pop_pc_1" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_pop_pc"
+    gdb_test "break *test_pop_pc_ret" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_pop_pc_ret"
+
+    gdb_continue_to_breakpoint "continue to test_pop_pc" \
+	".*b.*\{r1\, pc\}.*"
+    gdb_continue_to_breakpoint "continue to test_pop_pc_ret" \
 	".*bx lr.*"
 }
 
@@ -179,20 +359,6 @@ if ![runto_main] then {
 gdb_test_no_output "set displaced-stepping on"
 gdb_test "show displaced-stepping" ".* displaced stepping .* is on.*"
 
-gdb_test "break *test_call" \
-	"Breakpoint.*at.* file .*$srcfile, line.*" \
-	"break test_call"
-
-gdb_test_multiple "continue" "continue to test_call" {
-	-re ".*bl test_call_subr.*" {
-	    pass "continue to test_call"
-	}
-	-re "Displaced stepping is only supported in" {
-	    kfail "gdb/NNNN" $testfile
-	    return
-	}
-    }
-
 test_call_ret
 
 test_branch
@@ -201,7 +367,20 @@ test_ldr_from_pc
 
 test_ldm_stm_pc
 
+test_ldr_literal
+
+test_ldr_literal_16
+
+test_cbz_cbnz
+
+test_adr
+
+test_adr_32bit
+
+test_pop_pc
+
 test_str_pc
+
 ##########################################
 
 # Done, run program to exit.
-- 
1.7.0.4


^ permalink raw reply	[flat|nested] 66+ messages in thread

* [try 2nd 8/8] NEWS
  2011-03-24 13:49 ` [try 2nd 0/8] Displaced stepping for " Yao Qi
                     ` (6 preceding siblings ...)
  2011-03-24 14:11   ` [try 2nd 7/8] Test case Yao Qi
@ 2011-03-24 15:14   ` Yao Qi
  7 siblings, 0 replies; 66+ messages in thread
From: Yao Qi @ 2011-03-24 15:14 UTC (permalink / raw)
  To: gdb-patches

[-- Attachment #1: Type: text/plain, Size: 58 bytes --]

Add one news entry in this patch.

-- 
Yao (齐尧)

[-- Attachment #2: 0008-News.patch --]
[-- Type: text/x-patch, Size: 623 bytes --]

2011-03-24  Yao Qi  <yao@codesourcery.com>

	* NEWS: Displaced stepping for Thumb instructions.

---
 gdb/NEWS |    2 ++
 1 files changed, 2 insertions(+), 0 deletions(-)

diff --git a/gdb/NEWS b/gdb/NEWS
index c602fbe..340f51c 100644
--- a/gdb/NEWS
+++ b/gdb/NEWS
@@ -143,6 +143,8 @@
   libthread_db library with the "set libthread-db-search-path"
   command.  See the user manual for more details on this command.
 
+* GDB now supports displaced stepping for Thumb instructions.
+
 * New features in the GDB remote stub, GDBserver
 
   ** GDBserver is now supported on PowerPC LynxOS (versions 4.x and 5.x),
-- 
1.7.0.4


^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [try 2nd 1/8] Fix cleanup_branch to take Thumb into account
  2011-03-24 13:56   ` [try 2nd 1/8] Fix cleanup_branch to take Thumb into account Yao Qi
@ 2011-04-06 20:46     ` Ulrich Weigand
  2011-04-07  3:45       ` Yao Qi
  0 siblings, 1 reply; 66+ messages in thread
From: Ulrich Weigand @ 2011-04-06 20:46 UTC (permalink / raw)
  To: Yao Qi; +Cc: gdb-patches

Yao Qi wrote:

> 2011-03-24  Yao Qi  <yao@codesourcery.com>
> 
> 	* arm-tdep.c (cleanup_branch): Set a correct return address in
> 	LR for ARM and Thumb.

This looks OK to me.

Thanks,
Ulrich

-- 
  Dr. Ulrich Weigand
  GNU Toolchain for Linux on System z and Cell BE
  Ulrich.Weigand@de.ibm.com

^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [try 2nd 2/8] Rename copy_* functions to arm_copy_*
  2011-03-24 13:58   ` [try 2nd 2/8] Rename copy_* functions to arm_copy_* Yao Qi
@ 2011-04-06 20:51     ` Ulrich Weigand
  2011-04-07  8:02       ` Yao Qi
  0 siblings, 1 reply; 66+ messages in thread
From: Ulrich Weigand @ 2011-04-06 20:51 UTC (permalink / raw)
  To: Yao Qi; +Cc: gdb-patches

Yao Qi wrote:

> The copy functions for arm and thumb instructions should be different.
> So some copy_* functions are renamed to arm_copy_* functions.  In each
> copy functions, there are some arm-thumb-independent part, such as
> install cleanup helper, store register, etc.  This part is moved to
> install_* functions.

The interface between the copy_ routines and the install_ routines
seems a bit odd in some cases: mostly, values are just passed as
arguments, but in some cases, they are passed via displaced_step_closure
fields.

I'd prefer if this were handled in a regular manner: the copy_
routines parse the insn text, extract all required information
and pass it all as arguments to the install_ routine.  The
install_ routine then stores all information that is needed
by the cleanup_ routine into the displaced_step_closure struct.

Otherwise, the split seems OK to me.

Bye,
Ulrich

-- 
  Dr. Ulrich Weigand
  GNU Toolchain for Linux on System z and Cell BE
  Ulrich.Weigand@de.ibm.com

^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [try 2nd 6/8] Rename some functions to arm_*
  2011-03-24 14:06   ` [try 2nd 6/8] Rename some functions to arm_* Yao Qi
@ 2011-04-06 20:52     ` Ulrich Weigand
  2011-04-07  4:26       ` Yao Qi
  0 siblings, 1 reply; 66+ messages in thread
From: Ulrich Weigand @ 2011-04-06 20:52 UTC (permalink / raw)
  To: Yao Qi; +Cc: gdb-patches

Yao Qi wrote:

> This patch should be merged to patch 2/8, but `git rebase' failed to
> combine them together as a whole.  I sent this separately.

I agree that this really should be a single patch, that would make it
easier to review ...

Bye,
Ulrich

-- 
  Dr. Ulrich Weigand
  GNU Toolchain for Linux on System z and Cell BE
  Ulrich.Weigand@de.ibm.com

^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [try 2nd 3/8] Refactor copy_svc_os
  2011-03-24 14:01   ` [try 2nd 3/8] Refactor copy_svc_os Yao Qi
@ 2011-04-06 20:55     ` Ulrich Weigand
  2011-04-07  4:19       ` Yao Qi
  0 siblings, 1 reply; 66+ messages in thread
From: Ulrich Weigand @ 2011-04-06 20:55 UTC (permalink / raw)
  To: Yao Qi; +Cc: gdb-patches

Yao Qi wrote:

> 2011-03-24  Yao Qi  <yao@codesourcery.com>
> 
> 	* gdb/arm-linux-tdep.c (arm_linux_copy_svc): Remove parameters INSN
> 	and TO.
> 	* gdb/arm-tdep.c (cleanup_svc): Handle variable instruction size.
> 	(arm_copy_svc): Remove parameters INSN and TO.
> 	(decode_svc_copro): Update caller.
> 	* gdb/arm-tdep.h (struct displaced_step_closure): Remove parameters
> 	from function pointer `copy_svc_os'.

This looks OK to me.

Thanks,
Ulrich

-- 
  Dr. Ulrich Weigand
  GNU Toolchain for Linux on System z and Cell BE
  Ulrich.Weigand@de.ibm.com

^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [try 2nd 1/8] Fix cleanup_branch to take Thumb into account
  2011-04-06 20:46     ` Ulrich Weigand
@ 2011-04-07  3:45       ` Yao Qi
  0 siblings, 0 replies; 66+ messages in thread
From: Yao Qi @ 2011-04-07  3:45 UTC (permalink / raw)
  To: gdb-patches

On 04/07/2011 04:46 AM, Ulrich Weigand wrote:
> Yao Qi wrote:
> 
>> 2011-03-24  Yao Qi  <yao@codesourcery.com>
>>
>> 	* arm-tdep.c (cleanup_branch): Set a correct return address in
>> 	LR for ARM and Thumb.
> 
> This looks OK to me.
> 

Thanks.  Applied to trunk.
http://sourceware.org/ml/gdb-cvs/2011-04/msg00046.html

-- 
Yao (齐尧)

^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [try 2nd 3/8] Refactor copy_svc_os
  2011-04-06 20:55     ` Ulrich Weigand
@ 2011-04-07  4:19       ` Yao Qi
  0 siblings, 0 replies; 66+ messages in thread
From: Yao Qi @ 2011-04-07  4:19 UTC (permalink / raw)
  To: gdb-patches

On 04/07/2011 04:55 AM, Ulrich Weigand wrote:
> Yao Qi wrote:
> 
>> 2011-03-24  Yao Qi  <yao@codesourcery.com>
>>
>> 	* gdb/arm-linux-tdep.c (arm_linux_copy_svc): Remove parameters INSN
>> 	and TO.
>> 	* gdb/arm-tdep.c (cleanup_svc): Handle variable instruction size.
>> 	(arm_copy_svc): Remove parameters INSN and TO.
>> 	(decode_svc_copro): Update caller.
>> 	* gdb/arm-tdep.h (struct displaced_step_closure): Remove parameters
>> 	from function pointer `copy_svc_os'.
> 
> This looks OK to me.
> 

Thanks for the review.  Applied to trunk.
http://sourceware.org/ml/gdb-cvs/2011-04/msg00047.html

-- 
Yao (齐尧)

^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [try 2nd 6/8] Rename some functions to arm_*
  2011-04-06 20:52     ` Ulrich Weigand
@ 2011-04-07  4:26       ` Yao Qi
  0 siblings, 0 replies; 66+ messages in thread
From: Yao Qi @ 2011-04-07  4:26 UTC (permalink / raw)
  To: Ulrich Weigand; +Cc: gdb-patches

On 04/07/2011 04:52 AM, Ulrich Weigand wrote:
> Yao Qi wrote:
> 
>> This patch should be merged to patch 2/8, but `git rebase' failed to
>> combine them together as a whole.  I sent this separately.
> 
> I agree that this really should be a single patch, that would make it
> easier to review ...
> 

OK, I'll merge this patch to patch 2/8.

-- 
Yao (齐尧)

^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [try 2nd 2/8] Rename copy_* functions to arm_copy_*
  2011-04-06 20:51     ` Ulrich Weigand
@ 2011-04-07  8:02       ` Yao Qi
  2011-04-19  9:07         ` Yao Qi
  2011-04-26 17:09         ` Ulrich Weigand
  0 siblings, 2 replies; 66+ messages in thread
From: Yao Qi @ 2011-04-07  8:02 UTC (permalink / raw)
  To: Ulrich Weigand; +Cc: gdb-patches

[-- Attachment #1: Type: text/plain, Size: 1124 bytes --]

On 04/07/2011 04:51 AM, Ulrich Weigand wrote:
> The interface between the copy_ routines and the install_ routines
> seems a bit odd in some cases: mostly, values are just passed as
> arguments, but in some cases, they are passed via displaced_step_closure
> fields.
> 

The parameter list of some of the install_* routines is a little bit
long if we pass values as arguments.

> I'd prefer if this were handled in a regular manner: the copy_
> routines parse the insn text, extract all required information
> and pass it all as arguments to the install_ routine.  The
> install_ routine then stores all information that is needed
> by the cleanup_ routine into the displaced_step_closure struct.

From a consistency point of view, I am fine with your approach.  In
this new patch, the following things are changed:
1.  Merge 6/8 patch,
2.  Pass all values as parameters to install_* routines,
3.  Fix install_* routines' arguments order,
4.  Change all install_* routines to void method.

This patch makes patches 4/8 and 5/8 obsolete.  I'll re-send an updated
version once this patch is approved.

-- 
Yao (齐尧)

[-- Attachment #2: 0003-refactor-rename-functions.patch --]
[-- Type: text/x-patch, Size: 45384 bytes --]

	* gdb/arm-tdep.c (copy_unmodified): Rename to ...
	(arm_copy_unmodified): ... this.  New.
	(copy_preload): Move common part to ...
	(install_preload): .. this.  New.
	(arm_copy_preload): New.
	(copy_preload_reg): Move common part to ...
	(install_preload_reg): ... this.  New.
	(arm_copy_preload_reg): New.
	(copy_b_bl_blx): Move common part to ...
	(install_b_bl_blx): ... this.  New.
	(arm_copy_b_bl_blx): New.
	(copy_bx_blx_reg): Move common part to ...
	(install_bx_blx_reg): ... this.  New.
	(arm_copy_bx_blx_reg): New.
	(copy_alu_reg): Move common part to ...
	(install_alu_reg): ... this.  New.
	(arm_copy_alu_reg): New.
	(copy_alu_shifted_reg): Move common part to ...
	(install_alu_shifted_reg): ... this.  New.
	(copy_ldr_str_ldrb_strb): Move common part to ...
	(install_ldr_str_ldrb_strb): ... this.  New.
	(arm_copy_ldr_str_ldrb_strb): New.
	(copy_copro_load_store): Move some common part to ...
	(install_copy_copro_load_store): ... this.  New.
	(arm_copy_copro_load_store): New.
	(copy_svc): Delete.
	(arm_copy_svc): Renamed from copy_svc.
	(copy_undef): Delete.
	(arm_copy_undef): Renamed from copy_undef.
	(decode_ext_reg_ld_st): Delete.
	(arm_decode_ext_reg_ld_st): Renamed from decode_ext_reg_ld_st.
	(decode_svc_copro): Delete.
	(arm_decode_svc_copro): Renamed from decode_svc_copro.
	(copy_copro_load_store, copy_alu_imm): Update callers.
	(copy_extra_ld_st, copy_block_xfer): Likewise.
	(decode_misc_memhint_neon, decode_unconditional): Likewise.
	(decode_miscellaneous, decode_dp_misc): Likewise.
	(decode_ld_st_word_ubyte, decode_media): Likewise.
	(decode_b_bl_ldmstm, decode_ext_reg_ld_st): Likewise.
	(decode_svc_copro, decode_misc_memhint_neon): Likewise.
	(decode_unconditional, decode_miscellaneous): Likewise.
	(decode_media, decode_b_bl_ldmstm): Likewise.
	(arm_process_displaced_insn): Likewise.

---
 gdb/arm-tdep.c |  606 +++++++++++++++++++++++++++++++-------------------------
 1 files changed, 340 insertions(+), 266 deletions(-)

diff --git a/gdb/arm-tdep.c b/gdb/arm-tdep.c
index e284b39..ff1f10f 100644
--- a/gdb/arm-tdep.c
+++ b/gdb/arm-tdep.c
@@ -5327,8 +5327,8 @@ insn_references_pc (uint32_t insn, uint32_t bitmask)
    matter what address they are executed at: in those cases, use this.  */
 
 static int
-copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
-		 const char *iname, struct displaced_step_closure *dsc)
+arm_copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
+		     const char *iname, struct displaced_step_closure *dsc)
 {
   if (debug_displaced)
     fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx, "
@@ -5351,20 +5351,11 @@ cleanup_preload (struct gdbarch *gdbarch,
     displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
 }
 
-static int
-copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
-	      struct displaced_step_closure *dsc)
+static void
+install_preload (struct gdbarch *gdbarch, struct regcache *regs,
+		 struct displaced_step_closure *dsc, unsigned int rn)
 {
-  unsigned int rn = bits (insn, 16, 19);
   ULONGEST rn_val;
-
-  if (!insn_references_pc (insn, 0x000f0000ul))
-    return copy_unmodified (gdbarch, insn, "preload", dsc);
-
-  if (debug_displaced)
-    fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
-			(unsigned long) insn);
-
   /* Preload instructions:
 
      {pli/pld} [rn, #+/-imm]
@@ -5374,34 +5365,40 @@ copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
   dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
   rn_val = displaced_read_reg (regs, dsc, rn);
   displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
-
   dsc->u.preload.immed = 1;
 
-  dsc->modinsn[0] = insn & 0xfff0ffff;
-
   dsc->cleanup = &cleanup_preload;
-
-  return 0;
 }
 
-/* Preload instructions with register offset.  */
-
 static int
-copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn,
-		  struct regcache *regs,
+arm_copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
 		  struct displaced_step_closure *dsc)
 {
   unsigned int rn = bits (insn, 16, 19);
-  unsigned int rm = bits (insn, 0, 3);
-  ULONGEST rn_val, rm_val;
 
-  if (!insn_references_pc (insn, 0x000f000ful))
-    return copy_unmodified (gdbarch, insn, "preload reg", dsc);
+  if (!insn_references_pc (insn, 0x000f0000ul))
+    return arm_copy_unmodified (gdbarch, insn, "preload", dsc);
 
   if (debug_displaced)
     fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
 			(unsigned long) insn);
 
+  dsc->modinsn[0] = insn & 0xfff0ffff;
+
+  install_preload (gdbarch, regs, dsc, rn);
+
+  return 0;
+}
+
+/* Preload instructions with register offset.  */
+
+static void
+install_preload_reg(struct gdbarch *gdbarch, struct regcache *regs,
+		    struct displaced_step_closure *dsc, unsigned int rn,
+		    unsigned int rm)
+{
+  ULONGEST rn_val, rm_val;
+
   /* Preload register-offset instructions:
 
      {pli/pld} [rn, rm {, shift}]
@@ -5414,13 +5411,30 @@ copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn,
   rm_val = displaced_read_reg (regs, dsc, rm);
   displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
   displaced_write_reg (regs, dsc, 1, rm_val, CANNOT_WRITE_PC);
-
   dsc->u.preload.immed = 0;
 
-  dsc->modinsn[0] = (insn & 0xfff0fff0) | 0x1;
-
   dsc->cleanup = &cleanup_preload;
+}
+
+static int
+arm_copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn,
+		      struct regcache *regs,
+		      struct displaced_step_closure *dsc)
+{
+  unsigned int rn = bits (insn, 16, 19);
+  unsigned int rm = bits (insn, 0, 3);
+
+
+  if (!insn_references_pc (insn, 0x000f000ful))
+    return arm_copy_unmodified (gdbarch, insn, "preload reg", dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
+			(unsigned long) insn);
+
+  dsc->modinsn[0] = (insn & 0xfff0fff0) | 0x1;
 
+  install_preload_reg (gdbarch, regs, dsc, rn, rm);
   return 0;
 }
 
@@ -5439,21 +5453,13 @@ cleanup_copro_load_store (struct gdbarch *gdbarch,
     displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, LOAD_WRITE_PC);
 }
 
-static int
-copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
-		       struct regcache *regs,
-		       struct displaced_step_closure *dsc)
+static void
+install_copy_copro_load_store (struct gdbarch *gdbarch, struct regcache *regs,
+			       struct displaced_step_closure *dsc,
+			       int writeback, unsigned int rn)
 {
-  unsigned int rn = bits (insn, 16, 19);
   ULONGEST rn_val;
 
-  if (!insn_references_pc (insn, 0x000f0000ul))
-    return copy_unmodified (gdbarch, insn, "copro load/store", dsc);
-
-  if (debug_displaced)
-    fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
-			"load/store insn %.8lx\n", (unsigned long) insn);
-
   /* Coprocessor load/store instructions:
 
      {stc/stc2} [<Rn>, #+/-imm]  (and other immediate addressing modes)
@@ -5462,16 +5468,33 @@ copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
 
      ldc/ldc2 are handled identically.  */
 
+  dsc->u.ldst.writeback = writeback;
+  dsc->u.ldst.rn = rn;
+
   dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
-  rn_val = displaced_read_reg (regs, dsc, rn);
+  rn_val = displaced_read_reg (regs, dsc, dsc->u.ldst.rn);
   displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
 
-  dsc->u.ldst.writeback = bit (insn, 25);
-  dsc->u.ldst.rn = rn;
+  dsc->cleanup = &cleanup_copro_load_store;
+}
+
+static int
+arm_copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
+			   struct regcache *regs,
+			   struct displaced_step_closure *dsc)
+{
+  unsigned int rn = bits (insn, 16, 19);
+
+  if (!insn_references_pc (insn, 0x000f0000ul))
+    return arm_copy_unmodified (gdbarch, insn, "copro load/store", dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
+			"load/store insn %.8lx\n", (unsigned long) insn);
 
   dsc->modinsn[0] = insn & 0xfff0ffff;
 
-  dsc->cleanup = &cleanup_copro_load_store;
+  install_copy_copro_load_store (gdbarch, regs, dsc, bit (insn, 25), rn);
 
   return 0;
 }
@@ -5510,29 +5533,43 @@ cleanup_branch (struct gdbarch *gdbarch, struct regcache *regs,
 
 /* Copy B/BL/BLX instructions with immediate destinations.  */
 
+static void
+install_b_bl_blx (struct gdbarch *gdbarch, struct regcache *regs,
+		  struct displaced_step_closure *dsc,
+		  unsigned int cond, int exchange, int link, long offset)
+{
+  /* Implement "BL<cond> <label>" as:
+
+     Preparation: cond <- instruction condition
+     Insn: mov r0, r0  (nop)
+     Cleanup: if (condition true) { r14 <- pc; pc <- label }.
+
+     B<cond> similar, but don't set r14 in cleanup.  */
+
+  dsc->u.branch.cond = cond;
+  dsc->u.branch.link = link;
+  dsc->u.branch.exchange = exchange;
+
+  if (dsc->is_thumb)
+    dsc->u.branch.dest = dsc->insn_addr + 4 + offset;
+  else
+    dsc->u.branch.dest = dsc->insn_addr + 8 + offset;
+
+  dsc->cleanup = &cleanup_branch;
+}
 static int
-copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
-	       struct regcache *regs, struct displaced_step_closure *dsc)
+arm_copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
+		   struct regcache *regs, struct displaced_step_closure *dsc)
 {
   unsigned int cond = bits (insn, 28, 31);
   int exchange = (cond == 0xf);
   int link = exchange || bit (insn, 24);
-  CORE_ADDR from = dsc->insn_addr;
   long offset;
 
   if (debug_displaced)
     fprintf_unfiltered (gdb_stdlog, "displaced: copying %s immediate insn "
 			"%.8lx\n", (exchange) ? "blx" : (link) ? "bl" : "b",
 			(unsigned long) insn);
-
-  /* Implement "BL<cond> <label>" as:
-
-     Preparation: cond <- instruction condition
-     Insn: mov r0, r0  (nop)
-     Cleanup: if (condition true) { r14 <- pc; pc <- label }.
-
-     B<cond> similar, but don't set r14 in cleanup.  */
-
   if (exchange)
     /* For BLX, set bit 0 of the destination.  The cleanup_branch function will
        then arrange the switch into Thumb mode.  */
@@ -5543,35 +5580,19 @@ copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
   if (bit (offset, 25))
     offset = offset | ~0x3ffffff;
 
-  dsc->u.branch.cond = cond;
-  dsc->u.branch.link = link;
-  dsc->u.branch.exchange = exchange;
-  dsc->u.branch.dest = from + 8 + offset;
-
   dsc->modinsn[0] = ARM_NOP;
 
-  dsc->cleanup = &cleanup_branch;
-
+  install_b_bl_blx (gdbarch, regs, dsc, cond, exchange, link, offset);
   return 0;
 }
 
 /* Copy BX/BLX with register-specified destinations.  */
 
-static int
-copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
-		 struct regcache *regs, struct displaced_step_closure *dsc)
+static void
+install_bx_blx_reg (struct gdbarch *gdbarch, struct regcache *regs,
+		    struct displaced_step_closure *dsc, int link,
+		    unsigned int cond, unsigned int rm)
 {
-  unsigned int cond = bits (insn, 28, 31);
-  /* BX:  x12xxx1x
-     BLX: x12xxx3x.  */
-  int link = bit (insn, 5);
-  unsigned int rm = bits (insn, 0, 3);
-
-  if (debug_displaced)
-    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s register insn "
-			"%.8lx\n", (link) ? "blx" : "bx",
-			(unsigned long) insn);
-
   /* Implement {BX,BLX}<cond> <reg>" as:
 
      Preparation: cond <- instruction condition
@@ -5580,16 +5601,31 @@ copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
 
      Don't set r14 in cleanup for BX.  */
 
-  dsc->u.branch.dest = displaced_read_reg (regs, dsc, rm);
-
-  dsc->u.branch.cond = cond;
   dsc->u.branch.link = link;
+  dsc->u.branch.cond = cond;
+  dsc->u.branch.dest = displaced_read_reg (regs, dsc, rm);
   dsc->u.branch.exchange = 1;
 
-  dsc->modinsn[0] = ARM_NOP;
-
   dsc->cleanup = &cleanup_branch;
+}
 
+static int
+arm_copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
+		     struct regcache *regs, struct displaced_step_closure *dsc)
+{
+  unsigned int cond = bits (insn, 28, 31);
+  /* BX:  x12xxx1x
+     BLX: x12xxx3x.  */
+  int link = bit (insn, 5);
+  unsigned int rm = bits (insn, 0, 3);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx",
+			(unsigned long) insn);
+
+  dsc->modinsn[0] = ARM_NOP;
+
+  install_bx_blx_reg (gdbarch, regs, dsc, link, cond, rm);
   return 0;
 }
 
@@ -5616,7 +5652,7 @@ copy_alu_imm (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
   ULONGEST rd_val, rn_val;
 
   if (!insn_references_pc (insn, 0x000ff000ul))
-    return copy_unmodified (gdbarch, insn, "ALU immediate", dsc);
+    return arm_copy_unmodified (gdbarch, insn, "ALU immediate", dsc);
 
   if (debug_displaced)
     fprintf_unfiltered (gdb_stdlog, "displaced: copying immediate %s insn "
@@ -5670,24 +5706,13 @@ cleanup_alu_reg (struct gdbarch *gdbarch,
   displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
 }
 
-static int
-copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
-	      struct displaced_step_closure *dsc)
+static void
+install_alu_reg (struct gdbarch *gdbarch, struct regcache *regs,
+		 struct displaced_step_closure *dsc,
+		 unsigned int rd, unsigned int rn, unsigned int rm)
 {
-  unsigned int rn = bits (insn, 16, 19);
-  unsigned int rm = bits (insn, 0, 3);
-  unsigned int rd = bits (insn, 12, 15);
-  unsigned int op = bits (insn, 21, 24);
-  int is_mov = (op == 0xd);
   ULONGEST rd_val, rn_val, rm_val;
 
-  if (!insn_references_pc (insn, 0x000ff00ful))
-    return copy_unmodified (gdbarch, insn, "ALU reg", dsc);
-
-  if (debug_displaced)
-    fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.8lx\n",
-			is_mov ? "move" : "ALU", (unsigned long) insn);
-
   /* Instruction is of form:
 
      <op><cond> rd, [rn,] rm [, <shift>]
@@ -5699,25 +5724,43 @@ copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
      Insn: <op><cond> r0, r1, r2 [, <shift>]
      Cleanup: rd <- r0; r0, r1, r2 <- tmp1, tmp2, tmp3
   */
+  dsc->rd = rd;
 
   dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
   dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
   dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
-  rd_val = displaced_read_reg (regs, dsc, rd);
+  rd_val = displaced_read_reg (regs, dsc, dsc->rd);
   rn_val = displaced_read_reg (regs, dsc, rn);
   rm_val = displaced_read_reg (regs, dsc, rm);
+
   displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
   displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
   displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
-  dsc->rd = rd;
+
+  dsc->cleanup = &cleanup_alu_reg;
+}
+
+static int
+arm_copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
+		  struct displaced_step_closure *dsc)
+{
+  unsigned int op = bits (insn, 21, 24);
+  int is_mov = (op == 0xd);
+
+  if (!insn_references_pc (insn, 0x000ff00ful))
+    return arm_copy_unmodified (gdbarch, insn, "ALU reg", dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.8lx\n",
+			is_mov ? "move" : "ALU", (unsigned long) insn);
 
   if (is_mov)
-    dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x2;
+    dsc->modinsn[0] = ((insn & 0xfff00ff0) | 0x2);
   else
-    dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x10002;
-
-  dsc->cleanup = &cleanup_alu_reg;
+    dsc->modinsn[0] = ((insn & 0xfff00ff0) | 0x10002);
 
+  install_alu_reg (gdbarch, regs, dsc, bits (insn, 12, 15), bits (insn, 16, 19),
+		   bits (insn, 0, 3));
   return 0;
 }
 
@@ -5737,27 +5780,15 @@ cleanup_alu_shifted_reg (struct gdbarch *gdbarch,
   displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
 }
 
-static int
-copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
-		      struct regcache *regs,
-		      struct displaced_step_closure *dsc)
+static void
+install_alu_shifted_reg (struct gdbarch *gdbarch, struct regcache *regs,
+			 struct displaced_step_closure *dsc,
+			 unsigned int rd, unsigned int rn, unsigned int rm,
+			 unsigned rs)
 {
-  unsigned int rn = bits (insn, 16, 19);
-  unsigned int rm = bits (insn, 0, 3);
-  unsigned int rd = bits (insn, 12, 15);
-  unsigned int rs = bits (insn, 8, 11);
-  unsigned int op = bits (insn, 21, 24);
-  int is_mov = (op == 0xd), i;
+  int i;
   ULONGEST rd_val, rn_val, rm_val, rs_val;
 
-  if (!insn_references_pc (insn, 0x000fff0ful))
-    return copy_unmodified (gdbarch, insn, "ALU shifted reg", dsc);
-
-  if (debug_displaced)
-    fprintf_unfiltered (gdb_stdlog, "displaced: copying shifted reg %s insn "
-			"%.8lx\n", is_mov ? "move" : "ALU",
-			(unsigned long) insn);
-
   /* Instruction is of form:
 
      <op><cond> rd, [rn,] rm, <shift> rs
@@ -5775,7 +5806,8 @@ copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
   for (i = 0; i < 4; i++)
     dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
 
-  rd_val = displaced_read_reg (regs, dsc, rd);
+  rd_val = displaced_read_reg (regs, dsc, dsc->rd);
+
   rn_val = displaced_read_reg (regs, dsc, rn);
   rm_val = displaced_read_reg (regs, dsc, rm);
   rs_val = displaced_read_reg (regs, dsc, rs);
@@ -5783,14 +5815,38 @@ copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
   displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
   displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
   displaced_write_reg (regs, dsc, 3, rs_val, CANNOT_WRITE_PC);
-  dsc->rd = rd;
+
+  dsc->cleanup = &cleanup_alu_shifted_reg;
+}
+
+static int
+copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
+		      struct regcache *regs,
+		      struct displaced_step_closure *dsc)
+{
+  unsigned int op = bits (insn, 21, 24);
+  int is_mov = (op == 0xd);
+  unsigned int rd, rn, rm, rs;
+
+  if (!insn_references_pc (insn, 0x000fff0ful))
+    return arm_copy_unmodified (gdbarch, insn, "ALU shifted reg", dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying shifted reg %s insn "
+			"%.8lx\n", is_mov ? "move" : "ALU",
+			(unsigned long) insn);
+
+  rn = bits (insn, 16, 19);
+  rm = bits (insn, 0, 3);
+  rs = bits (insn, 8, 11);
+  rd = bits (insn, 12, 15);
 
   if (is_mov)
     dsc->modinsn[0] = (insn & 0xfff000f0) | 0x302;
   else
     dsc->modinsn[0] = (insn & 0xfff000f0) | 0x10302;
 
-  dsc->cleanup = &cleanup_alu_shifted_reg;
+  install_alu_shifted_reg (gdbarch, regs, dsc, rd, rn, rm, rs);
 
   return 0;
 }
@@ -5865,7 +5921,7 @@ copy_extra_ld_st (struct gdbarch *gdbarch, uint32_t insn, int unpriveleged,
   ULONGEST rt_val, rt_val2 = 0, rn_val, rm_val = 0;
 
   if (!insn_references_pc (insn, 0x000ff00ful))
-    return copy_unmodified (gdbarch, insn, "extra load/store", dsc);
+    return arm_copy_unmodified (gdbarch, insn, "extra load/store", dsc);
 
   if (debug_displaced)
     fprintf_unfiltered (gdb_stdlog, "displaced: copying %sextra load/store "
@@ -5923,50 +5979,37 @@ copy_extra_ld_st (struct gdbarch *gdbarch, uint32_t insn, int unpriveleged,
 
 /* Copy byte/word loads and stores.  */
 
-static int
-copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
-			struct regcache *regs,
-			struct displaced_step_closure *dsc, int load, int byte,
-			int usermode)
+static void
+install_ldr_str_ldrb_strb (struct gdbarch *gdbarch, struct regcache *regs,
+			   struct displaced_step_closure *dsc, int load,
+			   int immed, int writeback, int byte, int usermode,
+			   int rt, int rm, int rn)
 {
-  int immed = !bit (insn, 25);
-  unsigned int rt = bits (insn, 12, 15);
-  unsigned int rn = bits (insn, 16, 19);
-  unsigned int rm = bits (insn, 0, 3);  /* Only valid if !immed.  */
   ULONGEST rt_val, rn_val, rm_val = 0;
 
-  if (!insn_references_pc (insn, 0x000ff00ful))
-    return copy_unmodified (gdbarch, insn, "load/store", dsc);
-
-  if (debug_displaced)
-    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s%s insn %.8lx\n",
-			load ? (byte ? "ldrb" : "ldr")
-			     : (byte ? "strb" : "str"), usermode ? "t" : "",
-			(unsigned long) insn);
+  dsc->rd = rt;
+  dsc->u.ldst.rn = rn;
+  dsc->u.ldst.immed = immed;
+  dsc->u.ldst.writeback = writeback;
+  dsc->u.ldst.xfersize = byte ? 1 : 4;
 
   dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
   dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
-  if (!immed)
+  if (!dsc->u.ldst.immed)
     dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
   if (!load)
     dsc->tmp[4] = displaced_read_reg (regs, dsc, 4);
 
-  rt_val = displaced_read_reg (regs, dsc, rt);
-  rn_val = displaced_read_reg (regs, dsc, rn);
-  if (!immed)
+  rt_val = displaced_read_reg (regs, dsc, dsc->rd);
+  rn_val = displaced_read_reg (regs, dsc, dsc->u.ldst.rn);
+  if (!dsc->u.ldst.immed)
     rm_val = displaced_read_reg (regs, dsc, rm);
 
   displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
   displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
-  if (!immed)
+  if (!dsc->u.ldst.immed)
     displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
 
-  dsc->rd = rt;
-  dsc->u.ldst.xfersize = byte ? 1 : 4;
-  dsc->u.ldst.rn = rn;
-  dsc->u.ldst.immed = immed;
-  dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
-
   /* To write PC we can do:
 
      Before this sequence of instructions:
@@ -5988,6 +6031,35 @@ copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
      of this can be found in Section "Saving from r15" in
      http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0204g/Cihbjifh.html */
 
+  dsc->cleanup = load ? &cleanup_load : &cleanup_store;
+}
+
+static int
+arm_copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
+			    struct regcache *regs,
+			    struct displaced_step_closure *dsc,
+			    int load, int byte, int usermode)
+{
+  int immed = !bit (insn, 25);
+  int writeback = (bit (insn, 24) == 0 || bit (insn, 21) != 0);
+  unsigned int rt = bits (insn, 12, 15);
+  unsigned int rn = bits (insn, 16, 19);
+  unsigned int rm = bits (insn, 0, 3);  /* Only valid if !immed.  */
+
+  if (!insn_references_pc (insn, 0x000ff00ful))
+    return arm_copy_unmodified (gdbarch, insn, "load/store", dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog,
+			"displaced: copying %s%s r%d [r%d] insn %.8lx\n",
+			load ? (byte ? "ldrb" : "ldr")
+			     : (byte ? "strb" : "str"), usermode ? "t" : "",
+			rt, rn,
+			(unsigned long) insn);
+
+  install_ldr_str_ldrb_strb (gdbarch, regs, dsc, load, immed, writeback, byte,
+			     usermode, rt, rm, rn);
+
   if (load || rt != ARM_PC_REGNUM)
     {
       dsc->u.ldst.restore_r4 = 0;
@@ -6252,13 +6324,13 @@ copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
   /* Block transfers which don't mention PC can be run directly
      out-of-line.  */
   if (rn != ARM_PC_REGNUM && (insn & 0x8000) == 0)
-    return copy_unmodified (gdbarch, insn, "ldm/stm", dsc);
+    return arm_copy_unmodified (gdbarch, insn, "ldm/stm", dsc);
 
   if (rn == ARM_PC_REGNUM)
     {
       warning (_("displaced: Unpredictable LDM or STM with "
 		 "base register r15"));
-      return copy_unmodified (gdbarch, insn, "unpredictable ldm/stm", dsc);
+      return arm_copy_unmodified (gdbarch, insn, "unpredictable ldm/stm", dsc);
     }
 
   if (debug_displaced)
@@ -6279,7 +6351,7 @@ copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
 
   if (load)
     {
-      if ((insn & 0xffff) == 0xffff)
+      if (dsc->u.block.regmask == 0xffff)
 	{
 	  /* LDM with a fully-populated register list.  This case is
 	     particularly tricky.  Implement for now by fully emulating the
@@ -6296,7 +6368,7 @@ copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
 	     rewriting the list of registers to be transferred into a
 	     contiguous chunk r0...rX before doing the transfer, then shuffling
 	     registers into the correct places in the cleanup routine.  */
-	  unsigned int regmask = insn & 0xffff;
+	  unsigned int regmask = dsc->u.block.regmask;
 	  unsigned int num_in_list = bitcount (regmask), new_regmask, bit = 1;
 	  unsigned int to = 0, from = 0, i, new_rn;
 
@@ -6328,7 +6400,7 @@ copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
 	    fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
 				"{..., pc}: original reg list %.4x, modified "
 				"list %.4x\n"), rn, writeback ? "!" : "",
-				(int) insn & 0xffff, new_regmask);
+				(int) dsc->u.block.regmask, new_regmask);
 
 	  dsc->modinsn[0] = (insn & ~0xffff) | (new_regmask & 0xffff);
 
@@ -6368,7 +6440,8 @@ cleanup_svc (struct gdbarch *gdbarch, struct regcache *regs,
 }
 
 static int
-copy_svc (struct gdbarch *gdbarch, uint32_t insn,
+
+arm_copy_svc (struct gdbarch *gdbarch, uint32_t insn,
 	      struct regcache *regs, struct displaced_step_closure *dsc)
 {
 
@@ -6400,8 +6473,8 @@ copy_svc (struct gdbarch *gdbarch, uint32_t insn,
 /* Copy undefined instructions.  */
 
 static int
-copy_undef (struct gdbarch *gdbarch, uint32_t insn,
-	    struct displaced_step_closure *dsc)
+arm_copy_undef (struct gdbarch *gdbarch, uint32_t insn,
+		struct displaced_step_closure *dsc)
 {
   if (debug_displaced)
     fprintf_unfiltered (gdb_stdlog,
@@ -6440,33 +6513,34 @@ decode_misc_memhint_neon (struct gdbarch *gdbarch, uint32_t insn,
   unsigned int rn = bits (insn, 16, 19);
 
   if (op1 == 0x10 && (op2 & 0x2) == 0x0 && (rn & 0xe) == 0x0)
-    return copy_unmodified (gdbarch, insn, "cps", dsc);
+    return arm_copy_unmodified (gdbarch, insn, "cps", dsc);
   else if (op1 == 0x10 && op2 == 0x0 && (rn & 0xe) == 0x1)
-    return copy_unmodified (gdbarch, insn, "setend", dsc);
+    return arm_copy_unmodified (gdbarch, insn, "setend", dsc);
   else if ((op1 & 0x60) == 0x20)
-    return copy_unmodified (gdbarch, insn, "neon dataproc", dsc);
+    return arm_copy_unmodified (gdbarch, insn, "neon dataproc", dsc);
   else if ((op1 & 0x71) == 0x40)
-    return copy_unmodified (gdbarch, insn, "neon elt/struct load/store", dsc);
+    return arm_copy_unmodified (gdbarch, insn, "neon elt/struct load/store",
+				dsc);
   else if ((op1 & 0x77) == 0x41)
-    return copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
+    return arm_copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
   else if ((op1 & 0x77) == 0x45)
-    return copy_preload (gdbarch, insn, regs, dsc);  /* pli.  */
+    return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pli.  */
   else if ((op1 & 0x77) == 0x51)
     {
       if (rn != 0xf)
-	return copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
+	return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
       else
 	return copy_unpred (gdbarch, insn, dsc);
     }
   else if ((op1 & 0x77) == 0x55)
-    return copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
+    return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
   else if (op1 == 0x57)
     switch (op2)
       {
-      case 0x1: return copy_unmodified (gdbarch, insn, "clrex", dsc);
-      case 0x4: return copy_unmodified (gdbarch, insn, "dsb", dsc);
-      case 0x5: return copy_unmodified (gdbarch, insn, "dmb", dsc);
-      case 0x6: return copy_unmodified (gdbarch, insn, "isb", dsc);
+      case 0x1: return arm_copy_unmodified (gdbarch, insn, "clrex", dsc);
+      case 0x4: return arm_copy_unmodified (gdbarch, insn, "dsb", dsc);
+      case 0x5: return arm_copy_unmodified (gdbarch, insn, "dmb", dsc);
+      case 0x6: return arm_copy_unmodified (gdbarch, insn, "isb", dsc);
       default: return copy_unpred (gdbarch, insn, dsc);
       }
   else if ((op1 & 0x63) == 0x43)
@@ -6475,19 +6549,19 @@ decode_misc_memhint_neon (struct gdbarch *gdbarch, uint32_t insn,
     switch (op1 & ~0x80)
       {
       case 0x61:
-	return copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
       case 0x65:
-	return copy_preload_reg (gdbarch, insn, regs, dsc);  /* pli reg.  */
+	return arm_copy_preload_reg (gdbarch, insn, regs, dsc);  /* pli reg.  */
       case 0x71: case 0x75:
         /* pld/pldw reg.  */
-	return copy_preload_reg (gdbarch, insn, regs, dsc);
+	return arm_copy_preload_reg (gdbarch, insn, regs, dsc);
       case 0x63: case 0x67: case 0x73: case 0x77:
 	return copy_unpred (gdbarch, insn, dsc);
       default:
-	return copy_undef (gdbarch, insn, dsc);
+	return arm_copy_undef (gdbarch, insn, dsc);
       }
   else
-    return copy_undef (gdbarch, insn, dsc);  /* Probably unreachable.  */
+    return arm_copy_undef (gdbarch, insn, dsc);  /* Probably unreachable.  */
 }
 
 static int
@@ -6501,26 +6575,26 @@ decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
   else switch (((insn & 0x7000000) >> 23) | ((insn & 0x100000) >> 20))
     {
     case 0x0: case 0x2:
-      return copy_unmodified (gdbarch, insn, "srs", dsc);
+      return arm_copy_unmodified (gdbarch, insn, "srs", dsc);
 
     case 0x1: case 0x3:
-      return copy_unmodified (gdbarch, insn, "rfe", dsc);
+      return arm_copy_unmodified (gdbarch, insn, "rfe", dsc);
 
     case 0x4: case 0x5: case 0x6: case 0x7:
-      return copy_b_bl_blx (gdbarch, insn, regs, dsc);
+      return arm_copy_b_bl_blx (gdbarch, insn, regs, dsc);
 
     case 0x8:
       switch ((insn & 0xe00000) >> 21)
 	{
 	case 0x1: case 0x3: case 0x4: case 0x5: case 0x6: case 0x7:
 	  /* stc/stc2.  */
-	  return copy_copro_load_store (gdbarch, insn, regs, dsc);
+	  return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
 
 	case 0x2:
-	  return copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
+	  return arm_copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
 
 	default:
-	  return copy_undef (gdbarch, insn, dsc);
+	  return arm_copy_undef (gdbarch, insn, dsc);
 	}
 
     case 0x9:
@@ -6530,46 +6604,46 @@ decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
 	  {
 	  case 0x1: case 0x3:
 	    /* ldc/ldc2 imm (undefined for rn == pc).  */
-	    return rn_f ? copy_undef (gdbarch, insn, dsc)
-			: copy_copro_load_store (gdbarch, insn, regs, dsc);
+	    return rn_f ? arm_copy_undef (gdbarch, insn, dsc)
+			: arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
 
 	  case 0x2:
-	    return copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
+	    return arm_copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
 
 	  case 0x4: case 0x5: case 0x6: case 0x7:
 	    /* ldc/ldc2 lit (undefined for rn != pc).  */
-	    return rn_f ? copy_copro_load_store (gdbarch, insn, regs, dsc)
-			: copy_undef (gdbarch, insn, dsc);
+	    return rn_f ? arm_copy_copro_load_store (gdbarch, insn, regs, dsc)
+			: arm_copy_undef (gdbarch, insn, dsc);
 
 	  default:
-	    return copy_undef (gdbarch, insn, dsc);
+	    return arm_copy_undef (gdbarch, insn, dsc);
 	  }
       }
 
     case 0xa:
-      return copy_unmodified (gdbarch, insn, "stc/stc2", dsc);
+      return arm_copy_unmodified (gdbarch, insn, "stc/stc2", dsc);
 
     case 0xb:
       if (bits (insn, 16, 19) == 0xf)
         /* ldc/ldc2 lit.  */
-	return copy_copro_load_store (gdbarch, insn, regs, dsc);
+	return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
       else
-	return copy_undef (gdbarch, insn, dsc);
+	return arm_copy_undef (gdbarch, insn, dsc);
 
     case 0xc:
       if (bit (insn, 4))
-	return copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
       else
-	return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
 
     case 0xd:
       if (bit (insn, 4))
-	return copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
       else
-	return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
 
     default:
-      return copy_undef (gdbarch, insn, dsc);
+      return arm_copy_undef (gdbarch, insn, dsc);
     }
 }
 
@@ -6587,42 +6661,42 @@ decode_miscellaneous (struct gdbarch *gdbarch, uint32_t insn,
   switch (op2)
     {
     case 0x0:
-      return copy_unmodified (gdbarch, insn, "mrs/msr", dsc);
+      return arm_copy_unmodified (gdbarch, insn, "mrs/msr", dsc);
 
     case 0x1:
       if (op == 0x1)  /* bx.  */
-	return copy_bx_blx_reg (gdbarch, insn, regs, dsc);
+	return arm_copy_bx_blx_reg (gdbarch, insn, regs, dsc);
       else if (op == 0x3)
-	return copy_unmodified (gdbarch, insn, "clz", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "clz", dsc);
       else
-	return copy_undef (gdbarch, insn, dsc);
+	return arm_copy_undef (gdbarch, insn, dsc);
 
     case 0x2:
       if (op == 0x1)
         /* Not really supported.  */
-	return copy_unmodified (gdbarch, insn, "bxj", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "bxj", dsc);
       else
-	return copy_undef (gdbarch, insn, dsc);
+	return arm_copy_undef (gdbarch, insn, dsc);
 
     case 0x3:
       if (op == 0x1)
-	return copy_bx_blx_reg (gdbarch, insn,
+	return arm_copy_bx_blx_reg (gdbarch, insn,
 				regs, dsc);  /* blx register.  */
       else
-	return copy_undef (gdbarch, insn, dsc);
+	return arm_copy_undef (gdbarch, insn, dsc);
 
     case 0x5:
-      return copy_unmodified (gdbarch, insn, "saturating add/sub", dsc);
+      return arm_copy_unmodified (gdbarch, insn, "saturating add/sub", dsc);
 
     case 0x7:
       if (op == 0x1)
-	return copy_unmodified (gdbarch, insn, "bkpt", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "bkpt", dsc);
       else if (op == 0x3)
         /* Not really supported.  */
-	return copy_unmodified (gdbarch, insn, "smc", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "smc", dsc);
 
     default:
-      return copy_undef (gdbarch, insn, dsc);
+      return arm_copy_undef (gdbarch, insn, dsc);
     }
 }
 
@@ -6634,13 +6708,13 @@ decode_dp_misc (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
     switch (bits (insn, 20, 24))
       {
       case 0x10:
-	return copy_unmodified (gdbarch, insn, "movw", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "movw", dsc);
 
       case 0x14:
-	return copy_unmodified (gdbarch, insn, "movt", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "movt", dsc);
 
       case 0x12: case 0x16:
-	return copy_unmodified (gdbarch, insn, "msr imm", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "msr imm", dsc);
 
       default:
 	return copy_alu_imm (gdbarch, insn, regs, dsc);
@@ -6650,17 +6724,17 @@ decode_dp_misc (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
       uint32_t op1 = bits (insn, 20, 24), op2 = bits (insn, 4, 7);
 
       if ((op1 & 0x19) != 0x10 && (op2 & 0x1) == 0x0)
-	return copy_alu_reg (gdbarch, insn, regs, dsc);
+	return arm_copy_alu_reg (gdbarch, insn, regs, dsc);
       else if ((op1 & 0x19) != 0x10 && (op2 & 0x9) == 0x1)
 	return copy_alu_shifted_reg (gdbarch, insn, regs, dsc);
       else if ((op1 & 0x19) == 0x10 && (op2 & 0x8) == 0x0)
 	return decode_miscellaneous (gdbarch, insn, regs, dsc);
       else if ((op1 & 0x19) == 0x10 && (op2 & 0x9) == 0x8)
-	return copy_unmodified (gdbarch, insn, "halfword mul/mla", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "halfword mul/mla", dsc);
       else if ((op1 & 0x10) == 0x00 && op2 == 0x9)
-	return copy_unmodified (gdbarch, insn, "mul/mla", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "mul/mla", dsc);
       else if ((op1 & 0x10) == 0x10 && op2 == 0x9)
-	return copy_unmodified (gdbarch, insn, "synch", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "synch", dsc);
       else if (op2 == 0xb || (op2 & 0xd) == 0xd)
 	/* 2nd arg means "unpriveleged".  */
 	return copy_extra_ld_st (gdbarch, insn, (op1 & 0x12) == 0x02, regs,
@@ -6682,28 +6756,28 @@ decode_ld_st_word_ubyte (struct gdbarch *gdbarch, uint32_t insn,
 
   if ((!a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02)
       || (a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02 && !b))
-    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 0);
+    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 0);
   else if ((!a && (op1 & 0x17) == 0x02)
 	    || (a && (op1 & 0x17) == 0x02 && !b))
-    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 1);
+    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 1);
   else if ((!a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03)
 	    || (a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03 && !b))
-    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 0);
+    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 0);
   else if ((!a && (op1 & 0x17) == 0x03)
 	   || (a && (op1 & 0x17) == 0x03 && !b))
-    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 1);
+    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 1);
   else if ((!a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06)
 	    || (a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06 && !b))
-    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 0);
+    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 0);
   else if ((!a && (op1 & 0x17) == 0x06)
 	   || (a && (op1 & 0x17) == 0x06 && !b))
-    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 1);
+    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 1);
   else if ((!a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07)
 	   || (a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07 && !b))
-    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 0);
+    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 0);
   else if ((!a && (op1 & 0x17) == 0x07)
 	   || (a && (op1 & 0x17) == 0x07 && !b))
-    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 1);
+    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 1);
 
   /* Should be unreachable.  */
   return 1;
@@ -6716,49 +6790,49 @@ decode_media (struct gdbarch *gdbarch, uint32_t insn,
   switch (bits (insn, 20, 24))
     {
     case 0x00: case 0x01: case 0x02: case 0x03:
-      return copy_unmodified (gdbarch, insn, "parallel add/sub signed", dsc);
+      return arm_copy_unmodified (gdbarch, insn, "parallel add/sub signed", dsc);
 
     case 0x04: case 0x05: case 0x06: case 0x07:
-      return copy_unmodified (gdbarch, insn, "parallel add/sub unsigned", dsc);
+      return arm_copy_unmodified (gdbarch, insn, "parallel add/sub unsigned", dsc);
 
     case 0x08: case 0x09: case 0x0a: case 0x0b:
     case 0x0c: case 0x0d: case 0x0e: case 0x0f:
-      return copy_unmodified (gdbarch, insn,
+      return arm_copy_unmodified (gdbarch, insn,
 			      "decode/pack/unpack/saturate/reverse", dsc);
 
     case 0x18:
       if (bits (insn, 5, 7) == 0)  /* op2.  */
 	 {
 	  if (bits (insn, 12, 15) == 0xf)
-	    return copy_unmodified (gdbarch, insn, "usad8", dsc);
+	    return arm_copy_unmodified (gdbarch, insn, "usad8", dsc);
 	  else
-	    return copy_unmodified (gdbarch, insn, "usada8", dsc);
+	    return arm_copy_unmodified (gdbarch, insn, "usada8", dsc);
 	}
       else
-	 return copy_undef (gdbarch, insn, dsc);
+	 return arm_copy_undef (gdbarch, insn, dsc);
 
     case 0x1a: case 0x1b:
       if (bits (insn, 5, 6) == 0x2)  /* op2[1:0].  */
-	return copy_unmodified (gdbarch, insn, "sbfx", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "sbfx", dsc);
       else
-	return copy_undef (gdbarch, insn, dsc);
+	return arm_copy_undef (gdbarch, insn, dsc);
 
     case 0x1c: case 0x1d:
       if (bits (insn, 5, 6) == 0x0)  /* op2[1:0].  */
 	 {
 	  if (bits (insn, 0, 3) == 0xf)
-	    return copy_unmodified (gdbarch, insn, "bfc", dsc);
+	    return arm_copy_unmodified (gdbarch, insn, "bfc", dsc);
 	  else
-	    return copy_unmodified (gdbarch, insn, "bfi", dsc);
+	    return arm_copy_unmodified (gdbarch, insn, "bfi", dsc);
 	}
       else
-	return copy_undef (gdbarch, insn, dsc);
+	return arm_copy_undef (gdbarch, insn, dsc);
 
     case 0x1e: case 0x1f:
       if (bits (insn, 5, 6) == 0x2)  /* op2[1:0].  */
-	return copy_unmodified (gdbarch, insn, "ubfx", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "ubfx", dsc);
       else
-	return copy_undef (gdbarch, insn, dsc);
+	return arm_copy_undef (gdbarch, insn, dsc);
     }
 
   /* Should be unreachable.  */
@@ -6770,36 +6844,36 @@ decode_b_bl_ldmstm (struct gdbarch *gdbarch, int32_t insn,
 		    struct regcache *regs, struct displaced_step_closure *dsc)
 {
   if (bit (insn, 25))
-    return copy_b_bl_blx (gdbarch, insn, regs, dsc);
+    return arm_copy_b_bl_blx (gdbarch, insn, regs, dsc);
   else
     return copy_block_xfer (gdbarch, insn, regs, dsc);
 }
 
 static int
-decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
-		      struct regcache *regs,
-		      struct displaced_step_closure *dsc)
+arm_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
+			  struct regcache *regs,
+			  struct displaced_step_closure *dsc)
 {
   unsigned int opcode = bits (insn, 20, 24);
 
   switch (opcode)
     {
     case 0x04: case 0x05:  /* VFP/Neon mrrc/mcrr.  */
-      return copy_unmodified (gdbarch, insn, "vfp/neon mrrc/mcrr", dsc);
+      return arm_copy_unmodified (gdbarch, insn, "vfp/neon mrrc/mcrr", dsc);
 
     case 0x08: case 0x0a: case 0x0c: case 0x0e:
     case 0x12: case 0x16:
-      return copy_unmodified (gdbarch, insn, "vfp/neon vstm/vpush", dsc);
+      return arm_copy_unmodified (gdbarch, insn, "vfp/neon vstm/vpush", dsc);
 
     case 0x09: case 0x0b: case 0x0d: case 0x0f:
     case 0x13: case 0x17:
-      return copy_unmodified (gdbarch, insn, "vfp/neon vldm/vpop", dsc);
+      return arm_copy_unmodified (gdbarch, insn, "vfp/neon vldm/vpop", dsc);
 
     case 0x10: case 0x14: case 0x18: case 0x1c:  /* vstr.  */
     case 0x11: case 0x15: case 0x19: case 0x1d:  /* vldr.  */
       /* Note: no writeback for these instructions.  Bit 25 will always be
 	 zero though (via caller), so the following works OK.  */
-      return copy_copro_load_store (gdbarch, insn, regs, dsc);
+      return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
     }
 
   /* Should be unreachable.  */
@@ -6807,8 +6881,8 @@ decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
 }
 
 static int
-decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
-		  struct regcache *regs, struct displaced_step_closure *dsc)
+arm_decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
+		      struct regcache *regs, struct displaced_step_closure *dsc)
 {
   unsigned int op1 = bits (insn, 20, 25);
   int op = bit (insn, 4);
@@ -6816,40 +6890,40 @@ decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
   unsigned int rn = bits (insn, 16, 19);
 
   if ((op1 & 0x20) == 0x00 && (op1 & 0x3a) != 0x00 && (coproc & 0xe) == 0xa)
-    return decode_ext_reg_ld_st (gdbarch, insn, regs, dsc);
+    return arm_decode_ext_reg_ld_st (gdbarch, insn, regs, dsc);
   else if ((op1 & 0x21) == 0x00 && (op1 & 0x3a) != 0x00
 	   && (coproc & 0xe) != 0xa)
     /* stc/stc2.  */
-    return copy_copro_load_store (gdbarch, insn, regs, dsc);
+    return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
   else if ((op1 & 0x21) == 0x01 && (op1 & 0x3a) != 0x00
 	   && (coproc & 0xe) != 0xa)
     /* ldc/ldc2 imm/lit.  */
-    return copy_copro_load_store (gdbarch, insn, regs, dsc);
+    return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
   else if ((op1 & 0x3e) == 0x00)
-    return copy_undef (gdbarch, insn, dsc);
+    return arm_copy_undef (gdbarch, insn, dsc);
   else if ((op1 & 0x3e) == 0x04 && (coproc & 0xe) == 0xa)
-    return copy_unmodified (gdbarch, insn, "neon 64bit xfer", dsc);
+    return arm_copy_unmodified (gdbarch, insn, "neon 64bit xfer", dsc);
   else if (op1 == 0x04 && (coproc & 0xe) != 0xa)
-    return copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
+    return arm_copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
   else if (op1 == 0x05 && (coproc & 0xe) != 0xa)
-    return copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
+    return arm_copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
   else if ((op1 & 0x30) == 0x20 && !op)
     {
       if ((coproc & 0xe) == 0xa)
-	return copy_unmodified (gdbarch, insn, "vfp dataproc", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "vfp dataproc", dsc);
       else
-	return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
     }
   else if ((op1 & 0x30) == 0x20 && op)
-    return copy_unmodified (gdbarch, insn, "neon 8/16/32 bit xfer", dsc);
+    return arm_copy_unmodified (gdbarch, insn, "neon 8/16/32 bit xfer", dsc);
   else if ((op1 & 0x31) == 0x20 && op && (coproc & 0xe) != 0xa)
-    return copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
+    return arm_copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
   else if ((op1 & 0x31) == 0x21 && op && (coproc & 0xe) != 0xa)
-    return copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
+    return arm_copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
   else if ((op1 & 0x30) == 0x30)
-    return copy_svc (gdbarch, insn, regs, dsc);
+    return arm_copy_svc (gdbarch, insn, regs, dsc);
   else
-    return copy_undef (gdbarch, insn, dsc);  /* Possibly unreachable.  */
+    return arm_copy_undef (gdbarch, insn, dsc);  /* Possibly unreachable.  */
 }
 
 static void
@@ -6909,7 +6983,7 @@ arm_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
       break;
 
     case 0xc: case 0xd: case 0xe: case 0xf:
-      err = decode_svc_copro (gdbarch, insn, to, regs, dsc);
+      err = arm_decode_svc_copro (gdbarch, insn, to, regs, dsc);
       break;
     }
 
-- 
1.7.0.4


^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [try 2nd 2/8] Rename copy_* functions to arm_copy_*
  2011-04-07  8:02       ` Yao Qi
@ 2011-04-19  9:07         ` Yao Qi
  2011-04-26 17:09         ` Ulrich Weigand
  1 sibling, 0 replies; 66+ messages in thread
From: Yao Qi @ 2011-04-19  9:07 UTC (permalink / raw)
  To: Ulrich Weigand; +Cc: gdb-patches

On 04/07/2011 04:02 PM, Yao Qi wrote:
> From the consistency's point of view, I am fine with your approach.  In
> this new patch, the following things are changed,
> 1.  Merge 6/8 patch,
> 2.  Pass all values as parameters to install_* routines,
> 3.  Fix install_* routines' arguments order,
> 4.  Change all install_* routines to void method.
> 
> This patches makes patch 4/8 and 5/8 obsolete.  I'll re-send an updated
> version once this patch is approved.

Ping.
http://sourceware.org/ml/gdb-patches/2011-04/msg00107.html

-- 
Yao (齐尧)

^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [try 2nd 2/8] Rename copy_* functions to arm_copy_*
  2011-04-07  8:02       ` Yao Qi
  2011-04-19  9:07         ` Yao Qi
@ 2011-04-26 17:09         ` Ulrich Weigand
  2011-04-27 10:27           ` Yao Qi
  1 sibling, 1 reply; 66+ messages in thread
From: Ulrich Weigand @ 2011-04-26 17:09 UTC (permalink / raw)
  To: Yao Qi; +Cc: gdb-patches

Yao Qi wrote:
> On 04/07/2011 04:51 AM, Ulrich Weigand wrote:
> > I'd prefer if this were handled in a regular manner: the copy_
> > routines parse the insn text, extract all required information
> > and pass it all as arguments to the install_ routine.  The
> > install_ routine then stores all information that is needed
> > by the cleanup_ routine into the displaced_step_closure struct.
> 
> From the consistency's point of view, I am fine with your approach.  In
> this new patch, the following things are changed,
> 1.  Merge 6/8 patch,
> 2.  Pass all values as parameters to install_* routines,
> 3.  Fix install_* routines' arguments order,
> 4.  Change all install_* routines to void method.
> 
> This patches makes patch 4/8 and 5/8 obsolete.  I'll re-send an updated
> version once this patch is approved.

Sorry for the delay; I've been out of the office for a couple of weeks.

Thanks for making those changes, this is looking very good now.  There's
just a couple of minor issues:


> +  dsc->u.ldst.writeback = writeback;
> +  dsc->u.ldst.rn = rn;
> +
>    dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
> -  rn_val = displaced_read_reg (regs, dsc, rn);
> +  rn_val = displaced_read_reg (regs, dsc, dsc->u.ldst.rn);
>    displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
>  
> -  dsc->u.ldst.writeback = bit (insn, 25);
> -  dsc->u.ldst.rn = rn;

These changes are really unnecessary now, except for replacing
"bit (insn, 25)" by "writeback" at the end.  Using the local
arguments "rn" instead of struct accesses like "dsc->u.ldst.rn"
keeps the code more readable (and the patch smaller), so it
would be somewhat preferable.  Similar unnecessary changes
are in a couple of other install_ routines.


For some reason, just three of the copy_ routines are not renamed
to arm_copy_ (even though they are of course ARM specific):
 copy_extra_ld_st, copy_block_xfer, copy_unpred
Please rename those as well (and any others I may have missed).


Finally, the patch renames two of the "decode_" routines to "arm_decode_",
but not the others.  Shouldn't they all be renamed?  (Of course this
is really independent of the rest of the patch, so maybe all those
renames ought to be done in a separate patch.)


Otherwise, this looks OK now.

Thanks,
Ulrich

-- 
  Dr. Ulrich Weigand
  GNU Toolchain for Linux on System z and Cell BE
  Ulrich.Weigand@de.ibm.com

^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [try 2nd 2/8] Rename copy_* functions to arm_copy_*
  2011-04-26 17:09         ` Ulrich Weigand
@ 2011-04-27 10:27           ` Yao Qi
  2011-04-27 13:32             ` Ulrich Weigand
  0 siblings, 1 reply; 66+ messages in thread
From: Yao Qi @ 2011-04-27 10:27 UTC (permalink / raw)
  To: Ulrich Weigand; +Cc: gdb-patches

[-- Attachment #1: Type: text/plain, Size: 1605 bytes --]

On 04/27/2011 01:09 AM, Ulrich Weigand wrote:
>> > +  dsc->u.ldst.writeback = writeback;
>> > +  dsc->u.ldst.rn = rn;
>> > +
>> >    dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
>> > -  rn_val = displaced_read_reg (regs, dsc, rn);
>> > +  rn_val = displaced_read_reg (regs, dsc, dsc->u.ldst.rn);
>> >    displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
>> >  
>> > -  dsc->u.ldst.writeback = bit (insn, 25);
>> > -  dsc->u.ldst.rn = rn;
> These changes are really unnecessary now, except for replacing
> "bit (insn, 25)" by "writeback" at the end.  Using the local
> arguments "rn" instead of struct accesses like "dsc->u.ldst.rn"
> keeps the code more readable (and the patch smaller), so it
> would be somewhat preferable.  Similar unnecessary changes
> are in a couple of other install_ routines.

These unnecessary changes in install_copy_copro_load_store,
install_bx_blx_reg, install_alu_reg, arm_copy_alu_reg,
install_alu_shifted_reg, and install_ldr_str_ldrb_strb are fixed.

> For some reason, just three of the copy_ routines are not renamed
> to arm_copy_ (even though they are of course ARM specific):
>  copy_extra_ld_st, copy_block_xfer, copy_unpred
> Please rename those as well (and any others I may have missed).

Renamed them to arm_copy_*.

> 
> 
> Finally, the patch renames two of the "decode_" routines to "arm_decode_",
> but not the others.  Shouldn't they all be renamed?  (Of course this
> is really independent of the rest of the patch, so maybe all those
> renamed ought to be done in a separate patch.)
> 

Renamed them to arm_decode_*.

-- 
Yao (齐尧)

[-- Attachment #2: 0001-refactor-rename-functions.patch --]
[-- Type: text/x-patch, Size: 48346 bytes --]

gdb/
	* arm-tdep.c (copy_unmodified): Rename to ...
	(arm_copy_unmodified): .. this.  New.
	(copy_preload): Move common part to ...
	(install_preload): .. this.  New.
	(arm_copy_preload): New.
	(copy_preload_reg): Move common part to ...
	(install_preload_reg): ... this.  New.
	(arm_copy_preload_reg): New.
	(copy_b_bl_blx): Move common part to ...
	(install_b_bl_blx): .. this.  New.
	(arm_copy_b_bl_blx): New.
	(copy_bx_blx_reg): Move common part to ...
	(install_bx_blx_reg): ... this. New.
	(arm_copy_bx_blx_reg): New.
	(copy_alu_reg): Move common part to ...
	(install_alu_reg): ... this.  New.
	(arm_copy_alu_reg): New.
	(copy_alu_shifted_reg): Move common part to ...
	(install_alu_shifted_reg): ... this.  New.
	(copy_ldr_str_ldrb_strb): Move common part to ...
	(install_ldr_str_ldrb_strb): ... this.  New.
	(arm_copy_ldr_str_ldrb_strb): New.
	(copy_copro_load_store): Move some common part to ...
	(install_copy_copro_load_store): ... this.  New.
	(arm_copy_copro_load_store): New.
	(copy_svc): Delete.
	(arm_copy_svc): Renamed from copy_svc.
	(copy_undef): Delete.
	(arm_copy_undef): Renamed from copy_undef.
	(decode_ext_reg_ld_st): Delete.
	(arm_decode_ext_reg_ld_st): Renamed from decode_ext_reg_ld_st.
	(decode_svc_copro): Delete.
	(arm_decode_svc_copro): Renamed from decode_svc_copro.
	(copy_copro_load_store, copy_alu_imm): update callers.
	(copy_extra_ld_st, copy_block_xfer): Likewise.
	(decode_misc_memhint_neon, decode_unconditional): Likewise.
	(decode_miscellaneous, decode_dp_misc): Likewise.
	(decode_ld_st_word_ubyte, decode_media): Likewise.
	(decode_b_bl_ldmstm, decode_ext_reg_ld_st): Likewise.
	(decode_svc_copro, decode_misc_memhint_neon): Likewise.
	(decode_unconditional, decode_miscellaneous): Likewise.
	(decode_media, decode_b_bl_ldmstm): Likewise.
	(arm_process_displaced_insn): Likewise..
	(decode_misc_memhint_neon): Delete.
	(arm_decode_misc_memhint_neon): Renamed from decode_misc_memhint_neon.
	(decode_miscellaneous): Delete.
	(arm_decode_miscellaneous): Renamed from decode_miscellaneous.
	(decode_dp_misc): Delete.
	(arm_decode_dp_misc): Renamed from decode_dp_misc.
	(decode_ld_st_word_ubyte): Delete.
	(arm_decode_ld_st_word_ubyte): Renamed from decode_ld_st_word_ubyte. 
	(decode_media): Delete.
	(arm_decode_media): Renamed from decode_media.
	(decode_b_bl_ldmstm): Delete.
	(arm_decode_b_bl_ldmstm): Renamed from decode_b_bl_ldmstm.
	(decode_ext_reg_ld_st): Delete.
	(arm_decode_ext_reg_ld_st): Renamed from decode_ext_reg_ld_st.
	(decode_unconditional): Delete.
	(arm_decode_unconditional): Renamed from decode_unconditional.

---
 gdb/arm-tdep.c |  650 +++++++++++++++++++++++++++++++-------------------------
 1 files changed, 363 insertions(+), 287 deletions(-)

diff --git a/gdb/arm-tdep.c b/gdb/arm-tdep.c
index 9153ac4..2dd8c9e 100644
--- a/gdb/arm-tdep.c
+++ b/gdb/arm-tdep.c
@@ -5327,8 +5327,8 @@ insn_references_pc (uint32_t insn, uint32_t bitmask)
    matter what address they are executed at: in those cases, use this.  */
 
 static int
-copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
-		 const char *iname, struct displaced_step_closure *dsc)
+arm_copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
+		     const char *iname, struct displaced_step_closure *dsc)
 {
   if (debug_displaced)
     fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx, "
@@ -5351,20 +5351,11 @@ cleanup_preload (struct gdbarch *gdbarch,
     displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
 }
 
-static int
-copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
-	      struct displaced_step_closure *dsc)
+static void
+install_preload (struct gdbarch *gdbarch, struct regcache *regs,
+		 struct displaced_step_closure *dsc, unsigned int rn)
 {
-  unsigned int rn = bits (insn, 16, 19);
   ULONGEST rn_val;
-
-  if (!insn_references_pc (insn, 0x000f0000ul))
-    return copy_unmodified (gdbarch, insn, "preload", dsc);
-
-  if (debug_displaced)
-    fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
-			(unsigned long) insn);
-
   /* Preload instructions:
 
      {pli/pld} [rn, #+/-imm]
@@ -5374,34 +5365,40 @@ copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
   dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
   rn_val = displaced_read_reg (regs, dsc, rn);
   displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
-
   dsc->u.preload.immed = 1;
 
-  dsc->modinsn[0] = insn & 0xfff0ffff;
-
   dsc->cleanup = &cleanup_preload;
-
-  return 0;
 }
 
-/* Preload instructions with register offset.  */
-
 static int
-copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn,
-		  struct regcache *regs,
+arm_copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
 		  struct displaced_step_closure *dsc)
 {
   unsigned int rn = bits (insn, 16, 19);
-  unsigned int rm = bits (insn, 0, 3);
-  ULONGEST rn_val, rm_val;
 
-  if (!insn_references_pc (insn, 0x000f000ful))
-    return copy_unmodified (gdbarch, insn, "preload reg", dsc);
+  if (!insn_references_pc (insn, 0x000f0000ul))
+    return arm_copy_unmodified (gdbarch, insn, "preload", dsc);
 
   if (debug_displaced)
     fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
 			(unsigned long) insn);
 
+  dsc->modinsn[0] = insn & 0xfff0ffff;
+
+  install_preload (gdbarch, regs, dsc, rn);
+
+  return 0;
+}
+
+/* Preload instructions with register offset.  */
+
+static void
+install_preload_reg(struct gdbarch *gdbarch, struct regcache *regs,
+		    struct displaced_step_closure *dsc, unsigned int rn,
+		    unsigned int rm)
+{
+  ULONGEST rn_val, rm_val;
+
   /* Preload register-offset instructions:
 
      {pli/pld} [rn, rm {, shift}]
@@ -5414,13 +5411,30 @@ copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn,
   rm_val = displaced_read_reg (regs, dsc, rm);
   displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
   displaced_write_reg (regs, dsc, 1, rm_val, CANNOT_WRITE_PC);
-
   dsc->u.preload.immed = 0;
 
-  dsc->modinsn[0] = (insn & 0xfff0fff0) | 0x1;
-
   dsc->cleanup = &cleanup_preload;
+}
+
+static int
+arm_copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn,
+		      struct regcache *regs,
+		      struct displaced_step_closure *dsc)
+{
+  unsigned int rn = bits (insn, 16, 19);
+  unsigned int rm = bits (insn, 0, 3);
+
+
+  if (!insn_references_pc (insn, 0x000f000ful))
+    return arm_copy_unmodified (gdbarch, insn, "preload reg", dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
+			(unsigned long) insn);
 
+  dsc->modinsn[0] = (insn & 0xfff0fff0) | 0x1;
+
+  install_preload_reg (gdbarch, regs, dsc, rn, rm);
   return 0;
 }
 
@@ -5439,21 +5453,13 @@ cleanup_copro_load_store (struct gdbarch *gdbarch,
     displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, LOAD_WRITE_PC);
 }
 
-static int
-copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
-		       struct regcache *regs,
-		       struct displaced_step_closure *dsc)
+static void
+install_copro_load_store (struct gdbarch *gdbarch, struct regcache *regs,
+			  struct displaced_step_closure *dsc,
+			  int writeback, unsigned int rn)
 {
-  unsigned int rn = bits (insn, 16, 19);
   ULONGEST rn_val;
 
-  if (!insn_references_pc (insn, 0x000f0000ul))
-    return copy_unmodified (gdbarch, insn, "copro load/store", dsc);
-
-  if (debug_displaced)
-    fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
-			"load/store insn %.8lx\n", (unsigned long) insn);
-
   /* Coprocessor load/store instructions:
 
      {stc/stc2} [<Rn>, #+/-imm]  (and other immediate addressing modes)
@@ -5466,12 +5472,29 @@ copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
   rn_val = displaced_read_reg (regs, dsc, rn);
   displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
 
-  dsc->u.ldst.writeback = bit (insn, 25);
+  dsc->u.ldst.writeback = writeback;
   dsc->u.ldst.rn = rn;
 
+  dsc->cleanup = &cleanup_copro_load_store;
+}
+
+static int
+arm_copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
+			   struct regcache *regs,
+			   struct displaced_step_closure *dsc)
+{
+  unsigned int rn = bits (insn, 16, 19);
+
+  if (!insn_references_pc (insn, 0x000f0000ul))
+    return arm_copy_unmodified (gdbarch, insn, "copro load/store", dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
+			"load/store insn %.8lx\n", (unsigned long) insn);
+
   dsc->modinsn[0] = insn & 0xfff0ffff;
 
-  dsc->cleanup = &cleanup_copro_load_store;
+  install_copro_load_store (gdbarch, regs, dsc, bit (insn, 25), rn);
 
   return 0;
 }
@@ -5510,29 +5533,43 @@ cleanup_branch (struct gdbarch *gdbarch, struct regcache *regs,
 
 /* Copy B/BL/BLX instructions with immediate destinations.  */
 
+static void
+install_b_bl_blx (struct gdbarch *gdbarch, struct regcache *regs,
+		  struct displaced_step_closure *dsc,
+		  unsigned int cond, int exchange, int link, long offset)
+{
+  /* Implement "BL<cond> <label>" as:
+
+     Preparation: cond <- instruction condition
+     Insn: mov r0, r0  (nop)
+     Cleanup: if (condition true) { r14 <- pc; pc <- label }.
+
+     B<cond> similar, but don't set r14 in cleanup.  */
+
+  dsc->u.branch.cond = cond;
+  dsc->u.branch.link = link;
+  dsc->u.branch.exchange = exchange;
+
+  if (dsc->is_thumb)
+    dsc->u.branch.dest = dsc->insn_addr + 4 + offset;
+  else
+    dsc->u.branch.dest = dsc->insn_addr + 8 + offset;
+
+  dsc->cleanup = &cleanup_branch;
+}
 static int
-copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
-	       struct regcache *regs, struct displaced_step_closure *dsc)
+arm_copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
+		   struct regcache *regs, struct displaced_step_closure *dsc)
 {
   unsigned int cond = bits (insn, 28, 31);
   int exchange = (cond == 0xf);
   int link = exchange || bit (insn, 24);
-  CORE_ADDR from = dsc->insn_addr;
   long offset;
 
   if (debug_displaced)
     fprintf_unfiltered (gdb_stdlog, "displaced: copying %s immediate insn "
 			"%.8lx\n", (exchange) ? "blx" : (link) ? "bl" : "b",
 			(unsigned long) insn);
-
-  /* Implement "BL<cond> <label>" as:
-
-     Preparation: cond <- instruction condition
-     Insn: mov r0, r0  (nop)
-     Cleanup: if (condition true) { r14 <- pc; pc <- label }.
-
-     B<cond> similar, but don't set r14 in cleanup.  */
-
   if (exchange)
     /* For BLX, set bit 0 of the destination.  The cleanup_branch function will
        then arrange the switch into Thumb mode.  */
@@ -5543,35 +5580,19 @@ copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
   if (bit (offset, 25))
     offset = offset | ~0x3ffffff;
 
-  dsc->u.branch.cond = cond;
-  dsc->u.branch.link = link;
-  dsc->u.branch.exchange = exchange;
-  dsc->u.branch.dest = from + 8 + offset;
-
   dsc->modinsn[0] = ARM_NOP;
 
-  dsc->cleanup = &cleanup_branch;
-
+  install_b_bl_blx (gdbarch, regs, dsc, cond, exchange, link, offset);
   return 0;
 }
 
 /* Copy BX/BLX with register-specified destinations.  */
 
-static int
-copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
-		 struct regcache *regs, struct displaced_step_closure *dsc)
+static void
+install_bx_blx_reg (struct gdbarch *gdbarch, struct regcache *regs,
+		    struct displaced_step_closure *dsc, int link,
+		    unsigned int cond, unsigned int rm)
 {
-  unsigned int cond = bits (insn, 28, 31);
-  /* BX:  x12xxx1x
-     BLX: x12xxx3x.  */
-  int link = bit (insn, 5);
-  unsigned int rm = bits (insn, 0, 3);
-
-  if (debug_displaced)
-    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s register insn "
-			"%.8lx\n", (link) ? "blx" : "bx",
-			(unsigned long) insn);
-
   /* Implement {BX,BLX}<cond> <reg>" as:
 
      Preparation: cond <- instruction condition
@@ -5584,12 +5605,29 @@ copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
 
   dsc->u.branch.cond = cond;
   dsc->u.branch.link = link;
-  dsc->u.branch.exchange = 1;
 
-  dsc->modinsn[0] = ARM_NOP;
+  dsc->u.branch.exchange = 1;
 
   dsc->cleanup = &cleanup_branch;
+}
 
+static int
+arm_copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
+		     struct regcache *regs, struct displaced_step_closure *dsc)
+{
+  unsigned int cond = bits (insn, 28, 31);
+  /* BX:  x12xxx1x
+     BLX: x12xxx3x.  */
+  int link = bit (insn, 5);
+  unsigned int rm = bits (insn, 0, 3);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx\n",
+			(unsigned long) insn);
+
+  dsc->modinsn[0] = ARM_NOP;
+
+  install_bx_blx_reg (gdbarch, regs, dsc, link, cond, rm);
   return 0;
 }
 
@@ -5606,8 +5644,8 @@ cleanup_alu_imm (struct gdbarch *gdbarch,
 }
 
 static int
-copy_alu_imm (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
-	      struct displaced_step_closure *dsc)
+arm_copy_alu_imm (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
+		  struct displaced_step_closure *dsc)
 {
   unsigned int rn = bits (insn, 16, 19);
   unsigned int rd = bits (insn, 12, 15);
@@ -5616,7 +5654,7 @@ copy_alu_imm (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
   ULONGEST rd_val, rn_val;
 
   if (!insn_references_pc (insn, 0x000ff000ul))
-    return copy_unmodified (gdbarch, insn, "ALU immediate", dsc);
+    return arm_copy_unmodified (gdbarch, insn, "ALU immediate", dsc);
 
   if (debug_displaced)
     fprintf_unfiltered (gdb_stdlog, "displaced: copying immediate %s insn "
@@ -5670,24 +5708,13 @@ cleanup_alu_reg (struct gdbarch *gdbarch,
   displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
 }
 
-static int
-copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
-	      struct displaced_step_closure *dsc)
+static void
+install_alu_reg (struct gdbarch *gdbarch, struct regcache *regs,
+		 struct displaced_step_closure *dsc,
+		 unsigned int rd, unsigned int rn, unsigned int rm)
 {
-  unsigned int rn = bits (insn, 16, 19);
-  unsigned int rm = bits (insn, 0, 3);
-  unsigned int rd = bits (insn, 12, 15);
-  unsigned int op = bits (insn, 21, 24);
-  int is_mov = (op == 0xd);
   ULONGEST rd_val, rn_val, rm_val;
 
-  if (!insn_references_pc (insn, 0x000ff00ful))
-    return copy_unmodified (gdbarch, insn, "ALU reg", dsc);
-
-  if (debug_displaced)
-    fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.8lx\n",
-			is_mov ? "move" : "ALU", (unsigned long) insn);
-
   /* Instruction is of form:
 
      <op><cond> rd, [rn,] rm [, <shift>]
@@ -5711,13 +5738,30 @@ copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
   displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
   dsc->rd = rd;
 
+  dsc->cleanup = &cleanup_alu_reg;
+}
+
+static int
+arm_copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
+		  struct displaced_step_closure *dsc)
+{
+  unsigned int op = bits (insn, 21, 24);
+  int is_mov = (op == 0xd);
+
+  if (!insn_references_pc (insn, 0x000ff00ful))
+    return arm_copy_unmodified (gdbarch, insn, "ALU reg", dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.8lx\n",
+			is_mov ? "move" : "ALU", (unsigned long) insn);
+
   if (is_mov)
     dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x2;
   else
     dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x10002;
 
-  dsc->cleanup = &cleanup_alu_reg;
-
+  install_alu_reg (gdbarch, regs, dsc, bits (insn, 12, 15), bits (insn, 16, 19),
+		   bits (insn, 0, 3));
   return 0;
 }
 
@@ -5737,27 +5781,15 @@ cleanup_alu_shifted_reg (struct gdbarch *gdbarch,
   displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
 }
 
-static int
-copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
-		      struct regcache *regs,
-		      struct displaced_step_closure *dsc)
+static void
+install_alu_shifted_reg (struct gdbarch *gdbarch, struct regcache *regs,
+			 struct displaced_step_closure *dsc,
+			 unsigned int rd, unsigned int rn, unsigned int rm,
+			 unsigned rs)
 {
-  unsigned int rn = bits (insn, 16, 19);
-  unsigned int rm = bits (insn, 0, 3);
-  unsigned int rd = bits (insn, 12, 15);
-  unsigned int rs = bits (insn, 8, 11);
-  unsigned int op = bits (insn, 21, 24);
-  int is_mov = (op == 0xd), i;
+  int i;
   ULONGEST rd_val, rn_val, rm_val, rs_val;
 
-  if (!insn_references_pc (insn, 0x000fff0ful))
-    return copy_unmodified (gdbarch, insn, "ALU shifted reg", dsc);
-
-  if (debug_displaced)
-    fprintf_unfiltered (gdb_stdlog, "displaced: copying shifted reg %s insn "
-			"%.8lx\n", is_mov ? "move" : "ALU",
-			(unsigned long) insn);
-
   /* Instruction is of form:
 
      <op><cond> rd, [rn,] rm, <shift> rs
@@ -5784,13 +5816,37 @@ copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
   displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
   displaced_write_reg (regs, dsc, 3, rs_val, CANNOT_WRITE_PC);
   dsc->rd = rd;
+  dsc->cleanup = &cleanup_alu_shifted_reg;
+}
+
+static int
+arm_copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
+			  struct regcache *regs,
+			  struct displaced_step_closure *dsc)
+{
+  unsigned int op = bits (insn, 21, 24);
+  int is_mov = (op == 0xd);
+  unsigned int rd, rn, rm, rs;
+
+  if (!insn_references_pc (insn, 0x000fff0ful))
+    return arm_copy_unmodified (gdbarch, insn, "ALU shifted reg", dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying shifted reg %s insn "
+			"%.8lx\n", is_mov ? "move" : "ALU",
+			(unsigned long) insn);
+
+  rn = bits (insn, 16, 19);
+  rm = bits (insn, 0, 3);
+  rs = bits (insn, 8, 11);
+  rd = bits (insn, 12, 15);
 
   if (is_mov)
     dsc->modinsn[0] = (insn & 0xfff000f0) | 0x302;
   else
     dsc->modinsn[0] = (insn & 0xfff000f0) | 0x10302;
 
-  dsc->cleanup = &cleanup_alu_shifted_reg;
+  install_alu_shifted_reg (gdbarch, regs, dsc, rd, rn, rm, rs);
 
   return 0;
 }
@@ -5850,8 +5906,8 @@ cleanup_store (struct gdbarch *gdbarch, struct regcache *regs,
    transfers, which have a different encoding to byte/word transfers.  */
 
 static int
-copy_extra_ld_st (struct gdbarch *gdbarch, uint32_t insn, int unpriveleged,
-		  struct regcache *regs, struct displaced_step_closure *dsc)
+arm_copy_extra_ld_st (struct gdbarch *gdbarch, uint32_t insn, int unpriveleged,
+		      struct regcache *regs, struct displaced_step_closure *dsc)
 {
   unsigned int op1 = bits (insn, 20, 24);
   unsigned int op2 = bits (insn, 5, 6);
@@ -5865,7 +5921,7 @@ copy_extra_ld_st (struct gdbarch *gdbarch, uint32_t insn, int unpriveleged,
   ULONGEST rt_val, rt_val2 = 0, rn_val, rm_val = 0;
 
   if (!insn_references_pc (insn, 0x000ff00ful))
-    return copy_unmodified (gdbarch, insn, "extra load/store", dsc);
+    return arm_copy_unmodified (gdbarch, insn, "extra load/store", dsc);
 
   if (debug_displaced)
     fprintf_unfiltered (gdb_stdlog, "displaced: copying %sextra load/store "
@@ -5923,27 +5979,14 @@ copy_extra_ld_st (struct gdbarch *gdbarch, uint32_t insn, int unpriveleged,
 
 /* Copy byte/word loads and stores.  */
 
-static int
-copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
-			struct regcache *regs,
-			struct displaced_step_closure *dsc, int load, int byte,
-			int usermode)
+static void
+install_ldr_str_ldrb_strb (struct gdbarch *gdbarch, struct regcache *regs,
+			   struct displaced_step_closure *dsc, int load,
+			   int immed, int writeback, int byte, int usermode,
+			   int rt, int rm, int rn)
 {
-  int immed = !bit (insn, 25);
-  unsigned int rt = bits (insn, 12, 15);
-  unsigned int rn = bits (insn, 16, 19);
-  unsigned int rm = bits (insn, 0, 3);  /* Only valid if !immed.  */
   ULONGEST rt_val, rn_val, rm_val = 0;
 
-  if (!insn_references_pc (insn, 0x000ff00ful))
-    return copy_unmodified (gdbarch, insn, "load/store", dsc);
-
-  if (debug_displaced)
-    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s%s insn %.8lx\n",
-			load ? (byte ? "ldrb" : "ldr")
-			     : (byte ? "strb" : "str"), usermode ? "t" : "",
-			(unsigned long) insn);
-
   dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
   dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
   if (!immed)
@@ -5960,12 +6003,11 @@ copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
   displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
   if (!immed)
     displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
-
   dsc->rd = rt;
   dsc->u.ldst.xfersize = byte ? 1 : 4;
   dsc->u.ldst.rn = rn;
   dsc->u.ldst.immed = immed;
-  dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
+  dsc->u.ldst.writeback = writeback;
 
   /* To write PC we can do:
 
@@ -5988,6 +6030,35 @@ copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
      of this can be found in Section "Saving from r15" in
      http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0204g/Cihbjifh.html */
 
+  dsc->cleanup = load ? &cleanup_load : &cleanup_store;
+}
+
+static int
+arm_copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
+			    struct regcache *regs,
+			    struct displaced_step_closure *dsc,
+			    int load, int byte, int usermode)
+{
+  int immed = !bit (insn, 25);
+  int writeback = (bit (insn, 24) == 0 || bit (insn, 21) != 0);
+  unsigned int rt = bits (insn, 12, 15);
+  unsigned int rn = bits (insn, 16, 19);
+  unsigned int rm = bits (insn, 0, 3);  /* Only valid if !immed.  */
+
+  if (!insn_references_pc (insn, 0x000ff00ful))
+    return arm_copy_unmodified (gdbarch, insn, "load/store", dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog,
+			"displaced: copying %s%s r%d [r%d] insn %.8lx\n",
+			load ? (byte ? "ldrb" : "ldr")
+			     : (byte ? "strb" : "str"), usermode ? "t" : "",
+			rt, rn,
+			(unsigned long) insn);
+
+  install_ldr_str_ldrb_strb (gdbarch, regs, dsc, load, immed, writeback, byte,
+			     usermode, rt, rm, rn);
+
   if (load || rt != ARM_PC_REGNUM)
     {
       dsc->u.ldst.restore_r4 = 0;
@@ -6239,8 +6310,9 @@ cleanup_block_load_pc (struct gdbarch *gdbarch,
    in user-level code (in particular exception return, ldm rn, {...pc}^).  */
 
 static int
-copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
-		 struct displaced_step_closure *dsc)
+arm_copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn,
+		     struct regcache *regs,
+		     struct displaced_step_closure *dsc)
 {
   int load = bit (insn, 20);
   int user = bit (insn, 22);
@@ -6252,13 +6324,13 @@ copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
   /* Block transfers which don't mention PC can be run directly
      out-of-line.  */
   if (rn != ARM_PC_REGNUM && (insn & 0x8000) == 0)
-    return copy_unmodified (gdbarch, insn, "ldm/stm", dsc);
+    return arm_copy_unmodified (gdbarch, insn, "ldm/stm", dsc);
 
   if (rn == ARM_PC_REGNUM)
     {
       warning (_("displaced: Unpredictable LDM or STM with "
 		 "base register r15"));
-      return copy_unmodified (gdbarch, insn, "unpredictable ldm/stm", dsc);
+      return arm_copy_unmodified (gdbarch, insn, "unpredictable ldm/stm", dsc);
     }
 
   if (debug_displaced)
@@ -6368,8 +6440,9 @@ cleanup_svc (struct gdbarch *gdbarch, struct regcache *regs,
 }
 
 static int
-copy_svc (struct gdbarch *gdbarch, uint32_t insn,
-	  struct regcache *regs, struct displaced_step_closure *dsc)
+arm_copy_svc (struct gdbarch *gdbarch, uint32_t insn,
+	      struct regcache *regs, struct displaced_step_closure *dsc)
 {
 
   if (debug_displaced)
@@ -6400,8 +6473,8 @@ copy_svc (struct gdbarch *gdbarch, uint32_t insn,
 /* Copy undefined instructions.  */
 
 static int
-copy_undef (struct gdbarch *gdbarch, uint32_t insn,
-	    struct displaced_step_closure *dsc)
+arm_copy_undef (struct gdbarch *gdbarch, uint32_t insn,
+		struct displaced_step_closure *dsc)
 {
   if (debug_displaced)
     fprintf_unfiltered (gdb_stdlog,
@@ -6416,8 +6489,8 @@ copy_undef (struct gdbarch *gdbarch, uint32_t insn,
 /* Copy unpredictable instructions.  */
 
 static int
-copy_unpred (struct gdbarch *gdbarch, uint32_t insn,
-	     struct displaced_step_closure *dsc)
+arm_copy_unpred (struct gdbarch *gdbarch, uint32_t insn,
+		 struct displaced_step_closure *dsc)
 {
   if (debug_displaced)
     fprintf_unfiltered (gdb_stdlog, "displaced: copying unpredictable insn "
@@ -6432,95 +6505,96 @@ copy_unpred (struct gdbarch *gdbarch, uint32_t insn,
    the presentation in the ARM ARM.  */
 
 static int
-decode_misc_memhint_neon (struct gdbarch *gdbarch, uint32_t insn,
-			  struct regcache *regs,
-			  struct displaced_step_closure *dsc)
+arm_decode_misc_memhint_neon (struct gdbarch *gdbarch, uint32_t insn,
+			      struct regcache *regs,
+			      struct displaced_step_closure *dsc)
 {
   unsigned int op1 = bits (insn, 20, 26), op2 = bits (insn, 4, 7);
   unsigned int rn = bits (insn, 16, 19);
 
   if (op1 == 0x10 && (op2 & 0x2) == 0x0 && (rn & 0xe) == 0x0)
-    return copy_unmodified (gdbarch, insn, "cps", dsc);
+    return arm_copy_unmodified (gdbarch, insn, "cps", dsc);
   else if (op1 == 0x10 && op2 == 0x0 && (rn & 0xe) == 0x1)
-    return copy_unmodified (gdbarch, insn, "setend", dsc);
+    return arm_copy_unmodified (gdbarch, insn, "setend", dsc);
   else if ((op1 & 0x60) == 0x20)
-    return copy_unmodified (gdbarch, insn, "neon dataproc", dsc);
+    return arm_copy_unmodified (gdbarch, insn, "neon dataproc", dsc);
   else if ((op1 & 0x71) == 0x40)
-    return copy_unmodified (gdbarch, insn, "neon elt/struct load/store", dsc);
+    return arm_copy_unmodified (gdbarch, insn, "neon elt/struct load/store",
+				dsc);
   else if ((op1 & 0x77) == 0x41)
-    return copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
+    return arm_copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
   else if ((op1 & 0x77) == 0x45)
-    return copy_preload (gdbarch, insn, regs, dsc);  /* pli.  */
+    return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pli.  */
   else if ((op1 & 0x77) == 0x51)
     {
       if (rn != 0xf)
-	return copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
+	return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
       else
-	return copy_unpred (gdbarch, insn, dsc);
+	return arm_copy_unpred (gdbarch, insn, dsc);
     }
   else if ((op1 & 0x77) == 0x55)
-    return copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
+    return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
   else if (op1 == 0x57)
     switch (op2)
       {
-      case 0x1: return copy_unmodified (gdbarch, insn, "clrex", dsc);
-      case 0x4: return copy_unmodified (gdbarch, insn, "dsb", dsc);
-      case 0x5: return copy_unmodified (gdbarch, insn, "dmb", dsc);
-      case 0x6: return copy_unmodified (gdbarch, insn, "isb", dsc);
-      default: return copy_unpred (gdbarch, insn, dsc);
+      case 0x1: return arm_copy_unmodified (gdbarch, insn, "clrex", dsc);
+      case 0x4: return arm_copy_unmodified (gdbarch, insn, "dsb", dsc);
+      case 0x5: return arm_copy_unmodified (gdbarch, insn, "dmb", dsc);
+      case 0x6: return arm_copy_unmodified (gdbarch, insn, "isb", dsc);
+      default: return arm_copy_unpred (gdbarch, insn, dsc);
       }
   else if ((op1 & 0x63) == 0x43)
-    return copy_unpred (gdbarch, insn, dsc);
+    return arm_copy_unpred (gdbarch, insn, dsc);
   else if ((op2 & 0x1) == 0x0)
     switch (op1 & ~0x80)
       {
       case 0x61:
-	return copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
       case 0x65:
-	return copy_preload_reg (gdbarch, insn, regs, dsc);  /* pli reg.  */
+	return arm_copy_preload_reg (gdbarch, insn, regs, dsc);  /* pli reg.  */
       case 0x71: case 0x75:
         /* pld/pldw reg.  */
-	return copy_preload_reg (gdbarch, insn, regs, dsc);
+	return arm_copy_preload_reg (gdbarch, insn, regs, dsc);
       case 0x63: case 0x67: case 0x73: case 0x77:
-	return copy_unpred (gdbarch, insn, dsc);
+	return arm_copy_unpred (gdbarch, insn, dsc);
       default:
-	return copy_undef (gdbarch, insn, dsc);
+	return arm_copy_undef (gdbarch, insn, dsc);
       }
   else
-    return copy_undef (gdbarch, insn, dsc);  /* Probably unreachable.  */
+    return arm_copy_undef (gdbarch, insn, dsc);  /* Probably unreachable.  */
 }
 
 static int
-decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
-		      struct regcache *regs,
-		      struct displaced_step_closure *dsc)
+arm_decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
+			  struct regcache *regs,
+			  struct displaced_step_closure *dsc)
 {
   if (bit (insn, 27) == 0)
-    return decode_misc_memhint_neon (gdbarch, insn, regs, dsc);
+    return arm_decode_misc_memhint_neon (gdbarch, insn, regs, dsc);
   /* Switch on bits: 0bxxxxx321xxx0xxxxxxxxxxxxxxxxxxxx.  */
   else switch (((insn & 0x7000000) >> 23) | ((insn & 0x100000) >> 20))
     {
     case 0x0: case 0x2:
-      return copy_unmodified (gdbarch, insn, "srs", dsc);
+      return arm_copy_unmodified (gdbarch, insn, "srs", dsc);
 
     case 0x1: case 0x3:
-      return copy_unmodified (gdbarch, insn, "rfe", dsc);
+      return arm_copy_unmodified (gdbarch, insn, "rfe", dsc);
 
     case 0x4: case 0x5: case 0x6: case 0x7:
-      return copy_b_bl_blx (gdbarch, insn, regs, dsc);
+      return arm_copy_b_bl_blx (gdbarch, insn, regs, dsc);
 
     case 0x8:
       switch ((insn & 0xe00000) >> 21)
 	{
 	case 0x1: case 0x3: case 0x4: case 0x5: case 0x6: case 0x7:
 	  /* stc/stc2.  */
-	  return copy_copro_load_store (gdbarch, insn, regs, dsc);
+	  return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
 
 	case 0x2:
-	  return copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
+	  return arm_copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
 
 	default:
-	  return copy_undef (gdbarch, insn, dsc);
+	  return arm_copy_undef (gdbarch, insn, dsc);
 	}
 
     case 0x9:
@@ -6530,55 +6604,55 @@ decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
 	  {
 	  case 0x1: case 0x3:
 	    /* ldc/ldc2 imm (undefined for rn == pc).  */
-	    return rn_f ? copy_undef (gdbarch, insn, dsc)
-			: copy_copro_load_store (gdbarch, insn, regs, dsc);
+	    return rn_f ? arm_copy_undef (gdbarch, insn, dsc)
+			: arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
 
 	  case 0x2:
-	    return copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
+	    return arm_copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
 
 	  case 0x4: case 0x5: case 0x6: case 0x7:
 	    /* ldc/ldc2 lit (undefined for rn != pc).  */
-	    return rn_f ? copy_copro_load_store (gdbarch, insn, regs, dsc)
-			: copy_undef (gdbarch, insn, dsc);
+	    return rn_f ? arm_copy_copro_load_store (gdbarch, insn, regs, dsc)
+			: arm_copy_undef (gdbarch, insn, dsc);
 
 	  default:
-	    return copy_undef (gdbarch, insn, dsc);
+	    return arm_copy_undef (gdbarch, insn, dsc);
 	  }
       }
 
     case 0xa:
-      return copy_unmodified (gdbarch, insn, "stc/stc2", dsc);
+      return arm_copy_unmodified (gdbarch, insn, "stc/stc2", dsc);
 
     case 0xb:
       if (bits (insn, 16, 19) == 0xf)
         /* ldc/ldc2 lit.  */
-	return copy_copro_load_store (gdbarch, insn, regs, dsc);
+	return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
       else
-	return copy_undef (gdbarch, insn, dsc);
+	return arm_copy_undef (gdbarch, insn, dsc);
 
     case 0xc:
       if (bit (insn, 4))
-	return copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
       else
-	return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
 
     case 0xd:
       if (bit (insn, 4))
-	return copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
       else
-	return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
 
     default:
-      return copy_undef (gdbarch, insn, dsc);
+      return arm_copy_undef (gdbarch, insn, dsc);
     }
 }
 
 /* Decode miscellaneous instructions in dp/misc encoding space.  */
 
 static int
-decode_miscellaneous (struct gdbarch *gdbarch, uint32_t insn,
-		      struct regcache *regs,
-		      struct displaced_step_closure *dsc)
+arm_decode_miscellaneous (struct gdbarch *gdbarch, uint32_t insn,
+			  struct regcache *regs,
+			  struct displaced_step_closure *dsc)
 {
   unsigned int op2 = bits (insn, 4, 6);
   unsigned int op = bits (insn, 21, 22);
@@ -6587,84 +6661,85 @@ decode_miscellaneous (struct gdbarch *gdbarch, uint32_t insn,
   switch (op2)
     {
     case 0x0:
-      return copy_unmodified (gdbarch, insn, "mrs/msr", dsc);
+      return arm_copy_unmodified (gdbarch, insn, "mrs/msr", dsc);
 
     case 0x1:
       if (op == 0x1)  /* bx.  */
-	return copy_bx_blx_reg (gdbarch, insn, regs, dsc);
+	return arm_copy_bx_blx_reg (gdbarch, insn, regs, dsc);
       else if (op == 0x3)
-	return copy_unmodified (gdbarch, insn, "clz", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "clz", dsc);
       else
-	return copy_undef (gdbarch, insn, dsc);
+	return arm_copy_undef (gdbarch, insn, dsc);
 
     case 0x2:
       if (op == 0x1)
         /* Not really supported.  */
-	return copy_unmodified (gdbarch, insn, "bxj", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "bxj", dsc);
       else
-	return copy_undef (gdbarch, insn, dsc);
+	return arm_copy_undef (gdbarch, insn, dsc);
 
     case 0x3:
       if (op == 0x1)
-	return copy_bx_blx_reg (gdbarch, insn,
+	return arm_copy_bx_blx_reg (gdbarch, insn,
 				regs, dsc);  /* blx register.  */
       else
-	return copy_undef (gdbarch, insn, dsc);
+	return arm_copy_undef (gdbarch, insn, dsc);
 
     case 0x5:
-      return copy_unmodified (gdbarch, insn, "saturating add/sub", dsc);
+      return arm_copy_unmodified (gdbarch, insn, "saturating add/sub", dsc);
 
     case 0x7:
       if (op == 0x1)
-	return copy_unmodified (gdbarch, insn, "bkpt", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "bkpt", dsc);
       else if (op == 0x3)
         /* Not really supported.  */
-	return copy_unmodified (gdbarch, insn, "smc", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "smc", dsc);
 
     default:
-      return copy_undef (gdbarch, insn, dsc);
+      return arm_copy_undef (gdbarch, insn, dsc);
     }
 }
 
 static int
-decode_dp_misc (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
-		struct displaced_step_closure *dsc)
+arm_decode_dp_misc (struct gdbarch *gdbarch, uint32_t insn,
+		    struct regcache *regs,
+		    struct displaced_step_closure *dsc)
 {
   if (bit (insn, 25))
     switch (bits (insn, 20, 24))
       {
       case 0x10:
-	return copy_unmodified (gdbarch, insn, "movw", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "movw", dsc);
 
       case 0x14:
-	return copy_unmodified (gdbarch, insn, "movt", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "movt", dsc);
 
       case 0x12: case 0x16:
-	return copy_unmodified (gdbarch, insn, "msr imm", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "msr imm", dsc);
 
       default:
-	return copy_alu_imm (gdbarch, insn, regs, dsc);
+	return arm_copy_alu_imm (gdbarch, insn, regs, dsc);
       }
   else
     {
       uint32_t op1 = bits (insn, 20, 24), op2 = bits (insn, 4, 7);
 
       if ((op1 & 0x19) != 0x10 && (op2 & 0x1) == 0x0)
-	return copy_alu_reg (gdbarch, insn, regs, dsc);
+	return arm_copy_alu_reg (gdbarch, insn, regs, dsc);
       else if ((op1 & 0x19) != 0x10 && (op2 & 0x9) == 0x1)
-	return copy_alu_shifted_reg (gdbarch, insn, regs, dsc);
+	return arm_copy_alu_shifted_reg (gdbarch, insn, regs, dsc);
       else if ((op1 & 0x19) == 0x10 && (op2 & 0x8) == 0x0)
-	return decode_miscellaneous (gdbarch, insn, regs, dsc);
+	return arm_decode_miscellaneous (gdbarch, insn, regs, dsc);
       else if ((op1 & 0x19) == 0x10 && (op2 & 0x9) == 0x8)
-	return copy_unmodified (gdbarch, insn, "halfword mul/mla", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "halfword mul/mla", dsc);
       else if ((op1 & 0x10) == 0x00 && op2 == 0x9)
-	return copy_unmodified (gdbarch, insn, "mul/mla", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "mul/mla", dsc);
       else if ((op1 & 0x10) == 0x10 && op2 == 0x9)
-	return copy_unmodified (gdbarch, insn, "synch", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "synch", dsc);
       else if (op2 == 0xb || (op2 & 0xd) == 0xd)
 	/* 2nd arg means "unpriveleged".  */
-	return copy_extra_ld_st (gdbarch, insn, (op1 & 0x12) == 0x02, regs,
-				 dsc);
+	return arm_copy_extra_ld_st (gdbarch, insn, (op1 & 0x12) == 0x02, regs,
+				     dsc);
     }
 
   /* Should be unreachable.  */
@@ -6672,9 +6747,9 @@ decode_dp_misc (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
 }
 
 static int
-decode_ld_st_word_ubyte (struct gdbarch *gdbarch, uint32_t insn,
-			 struct regcache *regs,
-			 struct displaced_step_closure *dsc)
+arm_decode_ld_st_word_ubyte (struct gdbarch *gdbarch, uint32_t insn,
+			     struct regcache *regs,
+			     struct displaced_step_closure *dsc)
 {
   int a = bit (insn, 25), b = bit (insn, 4);
   uint32_t op1 = bits (insn, 20, 24);
@@ -6682,83 +6757,83 @@ decode_ld_st_word_ubyte (struct gdbarch *gdbarch, uint32_t insn,
 
   if ((!a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02)
       || (a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02 && !b))
-    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 0);
+    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 0);
   else if ((!a && (op1 & 0x17) == 0x02)
 	    || (a && (op1 & 0x17) == 0x02 && !b))
-    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 1);
+    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 1);
   else if ((!a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03)
 	    || (a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03 && !b))
-    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 0);
+    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 0);
   else if ((!a && (op1 & 0x17) == 0x03)
 	   || (a && (op1 & 0x17) == 0x03 && !b))
-    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 1);
+    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 1);
   else if ((!a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06)
 	    || (a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06 && !b))
-    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 0);
+    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 0);
   else if ((!a && (op1 & 0x17) == 0x06)
 	   || (a && (op1 & 0x17) == 0x06 && !b))
-    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 1);
+    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 1);
   else if ((!a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07)
 	   || (a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07 && !b))
-    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 0);
+    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 0);
   else if ((!a && (op1 & 0x17) == 0x07)
 	   || (a && (op1 & 0x17) == 0x07 && !b))
-    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 1);
+    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 1);
 
   /* Should be unreachable.  */
   return 1;
 }
 
 static int
-decode_media (struct gdbarch *gdbarch, uint32_t insn,
-	      struct displaced_step_closure *dsc)
+arm_decode_media (struct gdbarch *gdbarch, uint32_t insn,
+		  struct displaced_step_closure *dsc)
 {
   switch (bits (insn, 20, 24))
     {
     case 0x00: case 0x01: case 0x02: case 0x03:
-      return copy_unmodified (gdbarch, insn, "parallel add/sub signed", dsc);
+      return arm_copy_unmodified (gdbarch, insn, "parallel add/sub signed", dsc);
 
     case 0x04: case 0x05: case 0x06: case 0x07:
-      return copy_unmodified (gdbarch, insn, "parallel add/sub unsigned", dsc);
+      return arm_copy_unmodified (gdbarch, insn, "parallel add/sub unsigned", dsc);
 
     case 0x08: case 0x09: case 0x0a: case 0x0b:
     case 0x0c: case 0x0d: case 0x0e: case 0x0f:
-      return copy_unmodified (gdbarch, insn,
+      return arm_copy_unmodified (gdbarch, insn,
 			      "decode/pack/unpack/saturate/reverse", dsc);
 
     case 0x18:
       if (bits (insn, 5, 7) == 0)  /* op2.  */
 	 {
 	  if (bits (insn, 12, 15) == 0xf)
-	    return copy_unmodified (gdbarch, insn, "usad8", dsc);
+	    return arm_copy_unmodified (gdbarch, insn, "usad8", dsc);
 	  else
-	    return copy_unmodified (gdbarch, insn, "usada8", dsc);
+	    return arm_copy_unmodified (gdbarch, insn, "usada8", dsc);
 	}
       else
-	 return copy_undef (gdbarch, insn, dsc);
+	 return arm_copy_undef (gdbarch, insn, dsc);
 
     case 0x1a: case 0x1b:
       if (bits (insn, 5, 6) == 0x2)  /* op2[1:0].  */
-	return copy_unmodified (gdbarch, insn, "sbfx", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "sbfx", dsc);
       else
-	return copy_undef (gdbarch, insn, dsc);
+	return arm_copy_undef (gdbarch, insn, dsc);
 
     case 0x1c: case 0x1d:
       if (bits (insn, 5, 6) == 0x0)  /* op2[1:0].  */
 	 {
 	  if (bits (insn, 0, 3) == 0xf)
-	    return copy_unmodified (gdbarch, insn, "bfc", dsc);
+	    return arm_copy_unmodified (gdbarch, insn, "bfc", dsc);
 	  else
-	    return copy_unmodified (gdbarch, insn, "bfi", dsc);
+	    return arm_copy_unmodified (gdbarch, insn, "bfi", dsc);
 	}
       else
-	return copy_undef (gdbarch, insn, dsc);
+	return arm_copy_undef (gdbarch, insn, dsc);
 
     case 0x1e: case 0x1f:
       if (bits (insn, 5, 6) == 0x2)  /* op2[1:0].  */
-	return copy_unmodified (gdbarch, insn, "ubfx", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "ubfx", dsc);
       else
-	return copy_undef (gdbarch, insn, dsc);
+	return arm_copy_undef (gdbarch, insn, dsc);
     }
 
   /* Should be unreachable.  */
@@ -6766,40 +6841,41 @@ decode_media (struct gdbarch *gdbarch, uint32_t insn,
 }
 
 static int
-decode_b_bl_ldmstm (struct gdbarch *gdbarch, int32_t insn,
-		    struct regcache *regs, struct displaced_step_closure *dsc)
+arm_decode_b_bl_ldmstm (struct gdbarch *gdbarch, int32_t insn,
+			struct regcache *regs,
+			struct displaced_step_closure *dsc)
 {
   if (bit (insn, 25))
-    return copy_b_bl_blx (gdbarch, insn, regs, dsc);
+    return arm_copy_b_bl_blx (gdbarch, insn, regs, dsc);
   else
-    return copy_block_xfer (gdbarch, insn, regs, dsc);
+    return arm_copy_block_xfer (gdbarch, insn, regs, dsc);
 }
 
 static int
-decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
-		      struct regcache *regs,
-		      struct displaced_step_closure *dsc)
+arm_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
+			  struct regcache *regs,
+			  struct displaced_step_closure *dsc)
 {
   unsigned int opcode = bits (insn, 20, 24);
 
   switch (opcode)
     {
     case 0x04: case 0x05:  /* VFP/Neon mrrc/mcrr.  */
-      return copy_unmodified (gdbarch, insn, "vfp/neon mrrc/mcrr", dsc);
+      return arm_copy_unmodified (gdbarch, insn, "vfp/neon mrrc/mcrr", dsc);
 
     case 0x08: case 0x0a: case 0x0c: case 0x0e:
     case 0x12: case 0x16:
-      return copy_unmodified (gdbarch, insn, "vfp/neon vstm/vpush", dsc);
+      return arm_copy_unmodified (gdbarch, insn, "vfp/neon vstm/vpush", dsc);
 
     case 0x09: case 0x0b: case 0x0d: case 0x0f:
     case 0x13: case 0x17:
-      return copy_unmodified (gdbarch, insn, "vfp/neon vldm/vpop", dsc);
+      return arm_copy_unmodified (gdbarch, insn, "vfp/neon vldm/vpop", dsc);
 
     case 0x10: case 0x14: case 0x18: case 0x1c:  /* vstr.  */
     case 0x11: case 0x15: case 0x19: case 0x1d:  /* vldr.  */
       /* Note: no writeback for these instructions.  Bit 25 will always be
 	 zero though (via caller), so the following works OK.  */
-      return copy_copro_load_store (gdbarch, insn, regs, dsc);
+      return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
     }
 
   /* Should be unreachable.  */
@@ -6807,8 +6883,8 @@ decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
 }
 
 static int
-decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
-		  struct regcache *regs, struct displaced_step_closure *dsc)
+arm_decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
+		      struct regcache *regs, struct displaced_step_closure *dsc)
 {
   unsigned int op1 = bits (insn, 20, 25);
   int op = bit (insn, 4);
@@ -6816,40 +6892,40 @@ decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
   unsigned int rn = bits (insn, 16, 19);
 
   if ((op1 & 0x20) == 0x00 && (op1 & 0x3a) != 0x00 && (coproc & 0xe) == 0xa)
-    return decode_ext_reg_ld_st (gdbarch, insn, regs, dsc);
+    return arm_decode_ext_reg_ld_st (gdbarch, insn, regs, dsc);
   else if ((op1 & 0x21) == 0x00 && (op1 & 0x3a) != 0x00
 	   && (coproc & 0xe) != 0xa)
     /* stc/stc2.  */
-    return copy_copro_load_store (gdbarch, insn, regs, dsc);
+    return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
   else if ((op1 & 0x21) == 0x01 && (op1 & 0x3a) != 0x00
 	   && (coproc & 0xe) != 0xa)
     /* ldc/ldc2 imm/lit.  */
-    return copy_copro_load_store (gdbarch, insn, regs, dsc);
+    return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
   else if ((op1 & 0x3e) == 0x00)
-    return copy_undef (gdbarch, insn, dsc);
+    return arm_copy_undef (gdbarch, insn, dsc);
   else if ((op1 & 0x3e) == 0x04 && (coproc & 0xe) == 0xa)
-    return copy_unmodified (gdbarch, insn, "neon 64bit xfer", dsc);
+    return arm_copy_unmodified (gdbarch, insn, "neon 64bit xfer", dsc);
   else if (op1 == 0x04 && (coproc & 0xe) != 0xa)
-    return copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
+    return arm_copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
   else if (op1 == 0x05 && (coproc & 0xe) != 0xa)
-    return copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
+    return arm_copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
   else if ((op1 & 0x30) == 0x20 && !op)
     {
       if ((coproc & 0xe) == 0xa)
-	return copy_unmodified (gdbarch, insn, "vfp dataproc", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "vfp dataproc", dsc);
       else
-	return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
+	return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
     }
   else if ((op1 & 0x30) == 0x20 && op)
-    return copy_unmodified (gdbarch, insn, "neon 8/16/32 bit xfer", dsc);
+    return arm_copy_unmodified (gdbarch, insn, "neon 8/16/32 bit xfer", dsc);
   else if ((op1 & 0x31) == 0x20 && op && (coproc & 0xe) != 0xa)
-    return copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
+    return arm_copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
   else if ((op1 & 0x31) == 0x21 && op && (coproc & 0xe) != 0xa)
-    return copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
+    return arm_copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
   else if ((op1 & 0x30) == 0x30)
-    return copy_svc (gdbarch, insn, regs, dsc);
+    return arm_copy_svc (gdbarch, insn, regs, dsc);
   else
-    return copy_undef (gdbarch, insn, dsc);  /* Possibly unreachable.  */
+    return arm_copy_undef (gdbarch, insn, dsc);  /* Possibly unreachable.  */
 }
 
 static void
@@ -6889,27 +6965,27 @@ arm_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
 			(unsigned long) from);
 
   if ((insn & 0xf0000000) == 0xf0000000)
-    err = decode_unconditional (gdbarch, insn, regs, dsc);
+    err = arm_decode_unconditional (gdbarch, insn, regs, dsc);
   else switch (((insn & 0x10) >> 4) | ((insn & 0xe000000) >> 24))
     {
     case 0x0: case 0x1: case 0x2: case 0x3:
-      err = decode_dp_misc (gdbarch, insn, regs, dsc);
+      err = arm_decode_dp_misc (gdbarch, insn, regs, dsc);
       break;
 
     case 0x4: case 0x5: case 0x6:
-      err = decode_ld_st_word_ubyte (gdbarch, insn, regs, dsc);
+      err = arm_decode_ld_st_word_ubyte (gdbarch, insn, regs, dsc);
       break;
 
     case 0x7:
-      err = decode_media (gdbarch, insn, dsc);
+      err = arm_decode_media (gdbarch, insn, dsc);
       break;
 
     case 0x8: case 0x9: case 0xa: case 0xb:
-      err = decode_b_bl_ldmstm (gdbarch, insn, regs, dsc);
+      err = arm_decode_b_bl_ldmstm (gdbarch, insn, regs, dsc);
       break;
 
     case 0xc: case 0xd: case 0xe: case 0xf:
-      err = decode_svc_copro (gdbarch, insn, to, regs, dsc);
+      err = arm_decode_svc_copro (gdbarch, insn, to, regs, dsc);
       break;
     }
 
-- 
1.7.0.4


^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [try 2nd 2/8] Rename copy_* functions to arm_copy_*
  2011-04-27 10:27           ` Yao Qi
@ 2011-04-27 13:32             ` Ulrich Weigand
  2011-04-28  5:05               ` Yao Qi
  0 siblings, 1 reply; 66+ messages in thread
From: Ulrich Weigand @ 2011-04-27 13:32 UTC (permalink / raw)
  To: Yao Qi; +Cc: gdb-patches

Yao Qi wrote:

> gdb/
> 	* arm-tdep.c (copy_unmodified): Rename to ...
> 	(arm_copy_unmodified): .. this.  New.
> 	(copy_preload): Move common part to ...
> 	(install_preload): .. this.  New.
> 	(arm_copy_preload): New.
> 	(copy_preload_reg): Move common part to ...
> 	(install_preload_reg): ... this.  New.
> 	(arm_copy_preload_reg): New.
> 	(copy_b_bl_blx): Move common part to ...
> 	(install_b_bl_blx): .. this.  New.
> 	(arm_copy_b_bl_blx): New.
> 	(copy_bx_blx_reg): Move common part to ...
> 	(install_bx_blx_reg): ... this. New.
> 	(arm_copy_bx_blx_reg): New.
> 	(copy_alu_reg): Move common part to ...
> 	(install_alu_reg): ... this.  New.
> 	(arm_copy_alu_reg): New.
> 	(copy_alu_shifted_reg): Move common part to ...
> 	(install_alu_shifted_reg): ... this.  New.
> 	(copy_ldr_str_ldrb_strb): Move common part to ...
> 	(install_ldr_str_ldrb_strb): ... this.  New.
> 	(arm_copy_ldr_str_ldrb_strb): New.
> 	(copy_copro_load_store): Move some common part to ...
> 	(install_copy_copro_load_store): ... this.  New.
> 	(arm_copy_copro_load_store): New.
> 	(copy_svc): Delete.
> 	(arm_copy_svc): Renamed from copy_svc.
> 	(copy_undef): Delete.
> 	(arm_copy_undef): Renamed from copy_undef.
> 	(decode_ext_reg_ld_st): Delete.
> 	(arm_decode_ext_reg_ld_st): Renamed from decode_ext_reg_ld_st.
> 	(decode_svc_copro): Delete.
> 	(arm_decode_svc_copro): Renamed from decode_svc_copro.
> 	(copy_copro_load_store, copy_alu_imm): update callers.
> 	(copy_extra_ld_st, copy_block_xfer): Likewise.
> 	(decode_misc_memhint_neon, decode_unconditional): Likewise.
> 	(decode_miscellaneous, decode_dp_misc): Likewise.
> 	(decode_ld_st_word_ubyte, decode_media): Likewise.
> 	(decode_b_bl_ldmstm, decode_ext_reg_ld_st): Likewise.
> 	(decode_svc_copro, decode_misc_memhint_neon): Likewise.
> 	(decode_unconditional, decode_miscellaneous): Likewise.
> 	(decode_media, decode_b_bl_ldmstm): Likewise.
> 	(arm_process_displaced_insn): Likewise..
> 	(decode_misc_memhint_neon): Delete.
> 	(arm_decode_misc_memhint_neon): Renamed from decode_misc_memhint_neon.
> 	(decode_miscellaneous): Delete.
> 	(arm_decode_miscellaneous): Renamed from decode_miscellaneous.
> 	(decode_dp_misc): Delete.
> 	(arm_decode_dp_misc): Renamed from decode_dp_misc.
> 	(decode_ld_st_word_ubyte): Delete.
> 	(arm_decode_ld_st_word_ubyte): Renamed from decode_ld_st_word_ubyte. 
> 	(decode_media): Delete.
> 	(arm_decode_media): Renamed from decode_media.
> 	(decode_b_bl_ldmstm): Delete.
> 	(arm_decode_b_bl_ldmstm): Renamed from decode_b_bl_ldmstm.
> 	(decode_ext_reg_ld_st): Delete.
> 	(arm_decode_ext_reg_ld_st): Renamed from decode_ext_reg_ld_st.
> 	(decode_unconditional): Delete.
> 	(arm_decode_unconditional): Renamed from decode_unconditional.

This is OK.

Thanks,
Ulrich

-- 
  Dr. Ulrich Weigand
  GNU Toolchain for Linux on System z and Cell BE
  Ulrich.Weigand@de.ibm.com

^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [try 2nd 2/8] Rename copy_* functions to arm_copy_*
  2011-04-27 13:32             ` Ulrich Weigand
@ 2011-04-28  5:05               ` Yao Qi
  0 siblings, 0 replies; 66+ messages in thread
From: Yao Qi @ 2011-04-28  5:05 UTC (permalink / raw)
  To: gdb-patches

On 04/27/2011 09:32 PM, Ulrich Weigand wrote:
> Yao Qi wrote:
> 
>> gdb/
>> 	* arm-tdep.c (copy_unmodified): Rename to ...
>> 	(arm_copy_unmodified): .. this.  New.
>> 	(copy_preload): Move common part to ...
>> 	(install_preload): .. this.  New.
>> 	(arm_copy_preload): New.
>> 	(copy_preload_reg): Move common part to ...
>> 	(install_preload_reg): ... this.  New.
>> 	(arm_copy_preload_reg): New.
>> 	(copy_b_bl_blx): Move common part to ...
>> 	(install_b_bl_blx): .. this.  New.
>> 	(arm_copy_b_bl_blx): New.
>> 	(copy_bx_blx_reg): Move common part to ...
>> 	(install_bx_blx_reg): ... this. New.
>> 	(arm_copy_bx_blx_reg): New.
>> 	(copy_alu_reg): Move common part to ...
>> 	(install_alu_reg): ... this.  New.
>> 	(arm_copy_alu_reg): New.
>> 	(copy_alu_shifted_reg): Move common part to ...
>> 	(install_alu_shifted_reg): ... this.  New.
>> 	(copy_ldr_str_ldrb_strb): Move common part to ...
>> 	(install_ldr_str_ldrb_strb): ... this.  New.
>> 	(arm_copy_ldr_str_ldrb_strb): New.
>> 	(copy_copro_load_store): Move some common part to ...
>> 	(install_copy_copro_load_store): ... this.  New.
>> 	(arm_copy_copro_load_store): New.
>> 	(copy_svc): Delete.
>> 	(arm_copy_svc): Renamed from copy_svc.
>> 	(copy_undef): Delete.
>> 	(arm_copy_undef): Renamed from copy_undef.
>> 	(decode_ext_reg_ld_st): Delete.
>> 	(arm_decode_ext_reg_ld_st): Renamed from decode_ext_reg_ld_st.
>> 	(decode_svc_copro): Delete.
>> 	(arm_decode_svc_copro): Renamed from decode_svc_copro.
>> 	(copy_copro_load_store, copy_alu_imm): update callers.
>> 	(copy_extra_ld_st, copy_block_xfer): Likewise.
>> 	(decode_misc_memhint_neon, decode_unconditional): Likewise.
>> 	(decode_miscellaneous, decode_dp_misc): Likewise.
>> 	(decode_ld_st_word_ubyte, decode_media): Likewise.
>> 	(decode_b_bl_ldmstm, decode_ext_reg_ld_st): Likewise.
>> 	(decode_svc_copro, decode_misc_memhint_neon): Likewise.
>> 	(decode_unconditional, decode_miscellaneous): Likewise.
>> 	(decode_media, decode_b_bl_ldmstm): Likewise.
>> 	(arm_process_displaced_insn): Likewise..
>> 	(decode_misc_memhint_neon): Delete.
>> 	(arm_decode_misc_memhint_neon): Renamed from decode_misc_memhint_neon.
>> 	(decode_miscellaneous): Delete.
>> 	(arm_decode_miscellaneous): Renamed from decode_miscellaneous.
>> 	(decode_dp_misc): Delete.
>> 	(arm_decode_dp_misc): Renamed from decode_dp_misc.
>> 	(decode_ld_st_word_ubyte): Delete.
>> 	(arm_decode_ld_st_word_ubyte): Renamed from decode_ld_st_word_ubyte. 
>> 	(decode_media): Delete.
>> 	(arm_decode_media): Renamed from decode_media.
>> 	(decode_b_bl_ldmstm): Delete.
>> 	(arm_decode_b_bl_ldmstm): Renamed from decode_b_bl_ldmstm.
>> 	(decode_ext_reg_ld_st): Delete.
>> 	(arm_decode_ext_reg_ld_st): Renamed from decode_ext_reg_ld_st.
>> 	(decode_unconditional): Delete.
>> 	(arm_decode_unconditional): Renamed from decode_unconditional.
> 
> This is OK.
> 

Tested cvs trunk on armv7-unknown-linux-gnueabi.  No regression.
Checked in.

http://sourceware.org/ml/gdb-cvs/2011-04/msg00183.html

-- 
Yao (齐尧)

^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [try 2nd 4/8] Displaced stepping for Thumb 16-bit insn
  2011-03-24 14:05   ` [try 2nd 4/8] Displaced stepping for Thumb 16-bit insn Yao Qi
@ 2011-05-05 13:24     ` Yao Qi
  2011-05-10 13:58       ` Ulrich Weigand
  0 siblings, 1 reply; 66+ messages in thread
From: Yao Qi @ 2011-05-05 13:24 UTC (permalink / raw)
  To: gdb-patches

[-- Attachment #1: Type: text/plain, Size: 95 bytes --]

Some refactoring code makes this patch obsolete.  Here is the new one.

-- 
Yao (齐尧)

[-- Attachment #2: 0001-displaced-stepping-for-16-bit-thumb-instructions.patch --]
[-- Type: text/x-patch, Size: 16775 bytes --]

2011-05-05  Yao Qi  <yao@codesourcery.com>

	Support displaced stepping for Thumb 16-bit insns.
	* arm-tdep.c (THUMB_NOP) Define.
	(thumb_copy_unmodified_16bit): New.
	(thumb_copy_b, thumb_copy_bx_blx_reg): New.
	(thumb_copy_alu_reg): New.
	(arm_copy_svc): Move some common code to ...
	(copy_svc): ... here.  New.
	(thumb_copy_svc): New.
	(install_pc_relative): New.
	(thumb_copy_pc_relative_16bit): New.
	(thumb_decode_pc_relative_16bit): New.
	(thumb_copy_16bit_ldr_literal): New.
	(thumb_copy_cbnz_cbz): New.
	(cleanup_pop_pc_16bit): New.
	(thumb_copy_pop_pc_16bit): New.
	(thumb_process_displaced_16bit_insn): New.
	(thumb_process_displaced_32bit_insn): New.
	(thumb_process_displaced_insn): process thumb instruction.

---
 gdb/arm-tdep.c |  475 ++++++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 files changed, 463 insertions(+), 12 deletions(-)

diff --git a/gdb/arm-tdep.c b/gdb/arm-tdep.c
index 2dd8c9e..83ac297 100644
--- a/gdb/arm-tdep.c
+++ b/gdb/arm-tdep.c
@@ -5118,6 +5118,7 @@ arm_adjust_breakpoint_address (struct gdbarch *gdbarch, CORE_ADDR bpaddr)
 
 /* NOP instruction (mov r0, r0).  */
 #define ARM_NOP				0xe1a00000
+#define THUMB_NOP 0x4600
 
 /* Helper for register reads for displaced stepping.  In particular, this
    returns the PC as it would be seen by the instruction at its original
@@ -5340,6 +5341,23 @@ arm_copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
   return 0;
 }
 
+/* Copy 16-bit Thumb(Thumb and 16-bit Thumb-2) instruction without any
+   modification.  */
+static int
+thumb_copy_unmodified_16bit (struct gdbarch *gdbarch, unsigned int insn,
+			     const char *iname,
+			     struct displaced_step_closure *dsc)
+{
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x, "
+			"opcode/class '%s' unmodified\n", insn,
+			iname);
+
+  dsc->modinsn[0] = insn;
+
+  return 0;
+}
+
 /* Preload instructions with immediate offset.  */
 
 static void
@@ -5586,6 +5604,44 @@ arm_copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
   return 0;
 }
 
+/* Copy B Thumb instructions.  */
+static int
+thumb_copy_b (struct gdbarch *gdbarch, unsigned short insn,
+	      struct displaced_step_closure *dsc)
+{
+  unsigned int cond = 0;
+  int offset = 0;
+  unsigned short bit_12_15 = bits (insn, 12, 15);
+  CORE_ADDR from = dsc->insn_addr;
+
+  if (bit_12_15 == 0xd)
+    {
+      offset = sbits (insn, 0, 7);
+      cond = bits (insn, 8, 11);
+    }
+  else if (bit_12_15 == 0xe)
+    {
+       offset = sbits (insn, 0, 10);
+       cond = INST_AL;
+    }
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog,
+			"displaced: copying b immediate insn %.4x "
+			"with offset %d\n", insn, offset);
+
+  dsc->u.branch.cond = cond;
+  dsc->u.branch.link = 0;
+  dsc->u.branch.exchange = 0;
+  dsc->u.branch.dest = from + 4 + offset;
+
+  dsc->modinsn[0] = THUMB_NOP;
+
+  dsc->cleanup = &cleanup_branch;
+
+  return 0;
+}
+
 /* Copy BX/BLX with register-specified destinations.  */
 
 static void
@@ -5631,6 +5687,26 @@ arm_copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
   return 0;
 }
 
+static int
+thumb_copy_bx_blx_reg (struct gdbarch *gdbarch, uint16_t insn,
+		       struct regcache *regs,
+		       struct displaced_step_closure *dsc)
+{
+  int link = bit (insn, 7);
+  unsigned int rm = bits (insn, 3, 6);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x",
+			(unsigned short) insn);
+
+  dsc->modinsn[0] = THUMB_NOP;
+
+  install_bx_blx_reg (gdbarch, regs, dsc, link, INST_AL, rm);
+
+  return 0;
+}
+
+
 /* Copy/cleanup arithmetic/logic instruction with immediate RHS.  */
 
 static void
@@ -5765,6 +5841,31 @@ arm_copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
   return 0;
 }
 
+static int
+thumb_copy_alu_reg (struct gdbarch *gdbarch, uint16_t insn,
+		    struct regcache *regs,
+		    struct displaced_step_closure *dsc)
+{
+  unsigned rn, rm, rd;
+
+  rd = bits (insn, 3, 6);
+  rn = (bit (insn, 7) << 3) | bits (insn, 0, 2);
+  rm = 2;
+
+  if (rd != ARM_PC_REGNUM && rn != ARM_PC_REGNUM)
+    return thumb_copy_unmodified_16bit(gdbarch, insn, "ALU reg", dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.4x\n",
+			"ALU", (unsigned short) insn);
+
+  dsc->modinsn[0] = ((insn & 0xff00) | 0x08);
+
+  install_alu_reg (gdbarch, regs, dsc, rd, rn, rm);
+
+  return 0;
+}
+
 /* Cleanup/copy arithmetic/logic insns with shifted register RHS.  */
 
 static void
@@ -6439,21 +6540,16 @@ cleanup_svc (struct gdbarch *gdbarch, struct regcache *regs,
   displaced_write_reg (regs, dsc, ARM_PC_REGNUM, resume_addr, BRANCH_WRITE_PC);
 }
 
-static int
-
-arm_copy_svc (struct gdbarch *gdbarch, uint32_t insn,
-	      struct regcache *regs, struct displaced_step_closure *dsc)
-{
 
-  if (debug_displaced)
-    fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.8lx\n",
-			(unsigned long) insn);
+/* Common copy routine for svc instruction.  */
 
+static int
+copy_svc (struct gdbarch *gdbarch, struct regcache *regs,
+	  struct displaced_step_closure *dsc)
+{
   /* Preparation: none.
      Insn: unmodified svc.
-     Cleanup: pc <- insn_addr + 4.  */
-
-  dsc->modinsn[0] = insn;
+     Cleanup: pc <- insn_addr + insn_size.  */
 
   /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
      instruction.  */
@@ -6467,7 +6563,34 @@ arm_copy_svc (struct gdbarch *gdbarch, uint32_t insn,
       dsc->cleanup = &cleanup_svc;
       return 0;
     }
+}
+
+static int
+arm_copy_svc (struct gdbarch *gdbarch, uint32_t insn,
+	      struct regcache *regs, struct displaced_step_closure *dsc)
+{
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.8lx\n",
+			(unsigned long) insn);
+
+  dsc->modinsn[0] = insn;
+
+  return copy_svc (gdbarch, regs, dsc);
+}
+
+static int
+thumb_copy_svc (struct gdbarch *gdbarch, uint16_t insn,
+		struct regcache *regs, struct displaced_step_closure *dsc)
+{
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.4x\n",
+			insn);
+
+  dsc->modinsn[0] = insn;
 
+  return copy_svc (gdbarch, regs, dsc);
 }
 
 /* Copy undefined instructions.  */
@@ -6929,11 +7052,339 @@ arm_decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
 }
 
 static void
+install_pc_relative (struct gdbarch *gdbarch, struct regcache *regs,
+		     struct displaced_step_closure *dsc, int rd)
+{
+  /* ADR Rd, #imm
+
+     Rewrite as:
+
+     Preparation: Rd <- PC
+     Insn: ADD Rd, #imm
+     Cleanup: Null.
+  */
+
+  /* Rd <- PC */
+  int val = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
+  displaced_write_reg (regs, dsc, rd, val, CANNOT_WRITE_PC);
+}
+
+static int
+thumb_copy_pc_relative_16bit (struct gdbarch *gdbarch, struct regcache *regs,
+			      struct displaced_step_closure *dsc,
+			      int rd, unsigned int imm)
+{
+
+  /* Encoding T2: ADDS Rd, #imm */
+  dsc->modinsn[0] = (0x3000 | (rd << 8) | imm);
+
+  install_pc_relative (gdbarch, regs, dsc, rd);
+
+  return 0;
+}
+
+static int
+thumb_decode_pc_relative_16bit (struct gdbarch *gdbarch, uint16_t insn,
+				struct regcache *regs,
+				struct displaced_step_closure *dsc)
+{
+  unsigned int rd = bits (insn, 8, 10);
+  unsigned int imm8 = bits (insn, 0, 7);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog,
+			"displaced: copying thumb adr r%d, #%d insn %.4x\n",
+			rd, imm8, insn);
+
+  return thumb_copy_pc_relative_16bit (gdbarch, regs, dsc, rd, imm8);
+}
+
+static int
+thumb_copy_16bit_ldr_literal (struct gdbarch *gdbarch, unsigned short insn1,
+			      struct regcache *regs,
+			      struct displaced_step_closure *dsc)
+{
+  unsigned int rt = bits (insn1, 8, 7);
+  unsigned int pc;
+  int imm8 = sbits (insn1, 0, 7);
+  CORE_ADDR from = dsc->insn_addr;
+
+  /* LDR Rd, #imm8
+
+     Rewrite as:
+
+     Preparation: tmp2 <- R2, tmp3 <- R3, R2 <- PC, R3 <- #imm8;
+                  if (Rd is not R0) tmp0 <- R0;
+     Insn: LDR R0, [R2, R3];
+     Cleanup: R2 <- tmp2, R3 <- tmp3,
+              if (Rd is not R0) Rd <- R0, R0 <- tmp0 */
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying thumb ldr literal "
+			"insn %.4x\n", insn1);
+
+  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
+  dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
+  dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
+  pc = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
+
+  displaced_write_reg (regs, dsc, 2, pc, CANNOT_WRITE_PC);
+  displaced_write_reg (regs, dsc, 3, imm8, CANNOT_WRITE_PC);
+
+  dsc->rd = rt;
+  dsc->u.ldst.xfersize = 4;
+  dsc->u.ldst.rn = 0;
+  dsc->u.ldst.immed = 0;
+  dsc->u.ldst.writeback = 0;
+  dsc->u.ldst.restore_r4 = 0;
+
+  dsc->modinsn[0] = 0x58d0; /* ldr r0, [r2, r3]*/
+
+  dsc->cleanup = &cleanup_load;
+
+  return 0;
+}
+
+/* Copy Thumb cbnz/cbz instruction.  */
+
+static int
+thumb_copy_cbnz_cbz (struct gdbarch *gdbarch, uint16_t insn1,
+		     struct regcache *regs,
+		     struct displaced_step_closure *dsc)
+{
+  int non_zero = bit (insn1, 11);
+  unsigned int imm5 = (bit (insn1, 9) << 6) | (bits (insn1, 3, 7) << 1);
+  CORE_ADDR from = dsc->insn_addr;
+  int rn = bits (insn1, 0, 2);
+  int rn_val = displaced_read_reg (regs, dsc, rn);
+
+  dsc->u.branch.cond = (rn_val && non_zero) || (!rn_val && !non_zero);
+  /* CBNZ and CBZ do not affect the condition flags.  If condition is true,
+     set it INST_AL, so cleanup_branch will know branch is taken, otherwise,
+     condition is false, let it be, cleanup_branch will do nothing.  */
+  if (dsc->u.branch.cond)
+    dsc->u.branch.cond = INST_AL;
+
+  dsc->u.branch.link = 0;
+  dsc->u.branch.exchange = 0;
+
+  dsc->u.branch.dest = from + 2 + imm5;
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s [r%d = 0x%x]"
+			" insn %.4x to %.8lx\n", non_zero ? "cbnz" : "cbz",
+			rn, rn_val, insn1, dsc->u.branch.dest);
+
+  dsc->modinsn[0] = THUMB_NOP;
+
+  dsc->cleanup = &cleanup_branch;
+  return 0;
+}
+
+static void
+cleanup_pop_pc_16bit(struct gdbarch *gdbarch, struct regcache *regs,
+		     struct displaced_step_closure *dsc)
+{
+  int rx = dsc->u.block.regmask ? 8 : 0;
+  int rx_val = displaced_read_reg (regs, dsc, rx);
+
+  displaced_write_reg (regs, dsc, ARM_PC_REGNUM, rx_val, BX_WRITE_PC);
+  displaced_write_reg (regs, dsc, rx, dsc->tmp[0], CANNOT_WRITE_PC);
+}
+
+static int
+thumb_copy_pop_pc_16bit (struct gdbarch *gdbarch, unsigned short insn1,
+			 struct regcache *regs,
+			 struct displaced_step_closure *dsc)
+{
+  dsc->u.block.regmask = insn1 & 0x00ff;
+
+  /* Rewrite instruction: POP {rX, rY, ...,rZ, PC}
+     to :
+
+     (1) register list is not empty,
+     Prepare: tmp[0] <- r8,
+
+     POP {rX};   PC is stored in rX
+     MOV r8, rX; finally, PC is stored in r8
+     POP {rX, rY, ...., rZ}
+
+     Cleanup: PC <-r8, r8 <- tmp[0]
+
+     (2) register list is empty,
+     Prepare: tmp[0] <- r0,
+
+     POP {r0}
+
+     Cleanup: PC <- r0, r0 <- tmp[0]
+  */
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog,
+			"displaced: copying thumb pop {%.8x, pc} insn %.4x\n",
+			dsc->u.block.regmask, insn1);
+
+  if (dsc->u.block.regmask != 0)
+    {
+      int rx = 0;
+
+       dsc->tmp[0] = displaced_read_reg (regs, dsc, 8);
+
+      /* Look for the first register in register list.  */
+      for (rx = 0; rx < 8; rx++)
+	if (dsc->u.block.regmask & (1 << rx))
+	  break;
+
+      dsc->modinsn[0] = (0xbc00 | (1 << rx)); /* POP {rX} */
+      dsc->modinsn[1] = (0x4680 | (rx << 3)); /* MOV r8, rX */
+      dsc->modinsn[2] = (insn1 & 0xfeff);     /* POP {rX, rY, ..., rZ} */
+
+      dsc->numinsns = 3;
+    }
+  else
+    {
+      dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
+
+      dsc->modinsn[0] = 0xbc00; /* POP {r0} */
+      /* dsc->modinsn[ (1, 0x4683); */ /* MOV PC, r0 */
+
+      dsc->numinsns = 1;
+    }
+
+  dsc->cleanup = &cleanup_pop_pc_16bit;
+  return 0;
+}
+
+static void
+thumb_process_displaced_16bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
+				    struct regcache *regs,
+				    struct displaced_step_closure *dsc)
+{
+  unsigned short op_bit_12_15 = bits (insn1, 12, 15);
+  unsigned short op_bit_10_11 = bits (insn1, 10, 11);
+  int err = 0;
+
+  /* 16-bit thumb instructions.  */
+  switch (op_bit_12_15)
+    {
+      /* Shift (imme), add, subtract, move and compare.  */
+    case 0: case 1: case 2: case 3:
+      err = thumb_copy_unmodified_16bit (gdbarch, insn1,"shift/add/sub/mov/cmp",
+					 dsc);
+      break;
+    case 4:
+      switch (op_bit_10_11)
+	{
+	case 0: /* Data-processing */
+	  err = thumb_copy_unmodified_16bit (gdbarch, insn1,"data-processing",
+					     dsc);
+	  break;
+	case 1: /* Special data instructions and branch and exchange.  */
+	  {
+	    unsigned short op = bits (insn1, 7, 9);
+	    if (op == 6 || op == 7) /* BX or BLX */
+	      err = thumb_copy_bx_blx_reg (gdbarch, insn1, regs, dsc);
+	    else if (bits (insn1, 6, 7) != 0) /* ADD/MOV/CMP high registers.  */
+	      err = thumb_copy_alu_reg (gdbarch, insn1, regs, dsc);
+	    else
+	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "special data",
+						 dsc);
+	  }
+	  break;
+	default: /* LDR (literal) */
+	  err = thumb_copy_16bit_ldr_literal (gdbarch, insn1, regs, dsc);
+	}
+      break;
+    case 5: case 6: case 7: case 8: case 9: /* Load/Store single data item */
+      err = thumb_copy_unmodified_16bit (gdbarch, insn1,"ldr/str", dsc);
+      break;
+    case 10:
+      if (op_bit_10_11 < 2) /* Generate PC-relative address */
+	err = thumb_decode_pc_relative_16bit (gdbarch, insn1, regs, dsc);
+      else /* Generate SP-relative address */
+	err = thumb_copy_unmodified_16bit (gdbarch, insn1,"sp-relative", dsc);
+      break;
+    case 11: /* Misc 16-bit instructions */
+      {
+	switch (bits (insn1, 8, 11))
+	  {
+	  case 1: case 3:  case 9: case 11: /* CBNZ, CBZ */
+	    err = thumb_copy_cbnz_cbz (gdbarch, insn1, regs, dsc);
+	    break;
+	  case 12: case 13: /* POP */
+	    if (bit (insn1, 8)) /* PC is in register list.  */
+	      err = thumb_copy_pop_pc_16bit (gdbarch, insn1, regs, dsc);
+	    else
+	      err = thumb_copy_unmodified_16bit (gdbarch, insn1,"pop", dsc);
+	    break;
+	  case 15: /* If-Then, and hints */
+	    if (bits (insn1, 0, 3))
+	      /* If-Then makes up to four following instructions conditional.
+		 IT instruction itself is not conditional, so handle it as a
+		 common unmodified instruction.  */
+	      err = thumb_copy_unmodified_16bit (gdbarch, insn1,"If-Then", dsc);
+	    else
+	      err = thumb_copy_unmodified_16bit (gdbarch, insn1,"hints", dsc);
+	    break;
+	  default:
+	    err = thumb_copy_unmodified_16bit (gdbarch, insn1,"misc", dsc);
+	  }
+      }
+      break;
+    case 12:
+      if (op_bit_10_11 < 2) /* Store multiple registers */
+	err = thumb_copy_unmodified_16bit (gdbarch, insn1,"stm", dsc);
+      else /* Load multiple registers */
+	err = thumb_copy_unmodified_16bit (gdbarch, insn1,"ldm", dsc);
+      break;
+    case 13: /* Conditional branch and supervisor call */
+      if (bits (insn1, 9, 11) != 7) /* conditional branch */
+	err = thumb_copy_b (gdbarch, insn1, dsc);
+      else
+	err = thumb_copy_svc (gdbarch, insn1, regs, dsc);
+      break;
+    case 14: /* Unconditional branch */
+      err = thumb_copy_b (gdbarch, insn1, dsc);
+      break;
+    default:
+      err = 1;
+    }
+
+  if (err)
+    internal_error (__FILE__, __LINE__,
+		    _("thumb_process_displaced_16bit_insn: Instruction decode error"));
+}
+
+static void
+thumb_process_displaced_32bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
+				    uint16_t insn2, struct regcache *regs,
+				    struct displaced_step_closure *dsc)
+{
+  error (_("Displaced stepping is only supported in ARM mode and Thumb 16bit instructions"));
+}
+
+static void
 thumb_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
 			      CORE_ADDR to, struct regcache *regs,
 			      struct displaced_step_closure *dsc)
 {
-  error (_("Displaced stepping is only supported in ARM mode"));
+  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
+  uint16_t insn1
+    = read_memory_unsigned_integer (from, 2, byte_order_for_code);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: process thumb insn %.4x "
+			"at %.8lx\n", insn1, (unsigned long) from);
+
+  dsc->is_thumb = 1;
+  dsc->insn_size = thumb_insn_size (insn1);
+  if (thumb_insn_size (insn1) == 4)
+    {
+      uint16_t insn2
+	= read_memory_unsigned_integer (from + 2, 2, byte_order_for_code);
+      thumb_process_displaced_32bit_insn(gdbarch, insn1, insn2, regs, dsc);
+    }
+  else
+    thumb_process_displaced_16bit_insn(gdbarch, insn1, regs, dsc);
 }
 
 void
-- 
1.7.0.4


^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [try 2nd 5/8] Displaced stepping for Thumb 32-bit insns
  2011-03-24 14:05   ` [try 2nd 5/8] Displaced stepping for Thumb 32-bit insns Yao Qi
@ 2011-05-05 13:25     ` Yao Qi
  2011-05-17 17:14       ` Ulrich Weigand
  0 siblings, 1 reply; 66+ messages in thread
From: Yao Qi @ 2011-05-05 13:25 UTC (permalink / raw)
  To: gdb-patches

[-- Attachment #1: Type: text/plain, Size: 53 bytes --]

Here is the updated version.

-- 
Yao (齐尧)

[-- Attachment #2: 0002-thumb-32bit.patch --]
[-- Type: text/x-patch, Size: 24385 bytes --]

2011-05-05  Yao Qi  <yao@codesourcery.com>

	Support displaced stepping for Thumb 32-bit insns.
	* gdb/arm-tdep.c (thumb_copy_unmodified_32bit): New.
	(thumb2_copy_preload): New.
	(thumb2_copy_preload_reg): New.
	(thumb2_copy_copro_load_store): New.
	(thumb2_copy_b_bl_blx): New.
	(thumb2_copy_alu_reg): New.
	(thumb2_copy_ldr_str_ldrb_strb): New.
	(thumb2_copy_block_xfer): New.
	(thumb_32bit_copy_undef): New.
	(thumb2_decode_ext_reg_ld_st): New.
	(thumb2_decode_svc_copro): New.
	(thumb_copy_pc_relative_32bit): New.
	(thumb_decode_pc_relative_32bit): New.
	(decode_thumb_32bit_ld_mem_hints): New.
	(thumb_process_displaced_32bit_insn): Process Thumb 32-bit
	instructions.

---
 gdb/arm-tdep.c |  702 +++++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 files changed, 701 insertions(+), 1 deletions(-)

diff --git a/gdb/arm-tdep.c b/gdb/arm-tdep.c
index 83ac297..6fb1eaa 100644
--- a/gdb/arm-tdep.c
+++ b/gdb/arm-tdep.c
@@ -5341,6 +5341,23 @@ arm_copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
   return 0;
 }
 
+static int
+thumb_copy_unmodified_32bit (struct gdbarch *gdbarch, uint16_t insn1,
+			     uint16_t insn2, const char *iname,
+			     struct displaced_step_closure *dsc)
+{
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x %.4x, "
+			"opcode/class '%s' unmodified\n", insn1, insn2,
+			iname);
+
+  dsc->modinsn[0] = insn1;
+  dsc->modinsn[1] = insn2;
+  dsc->numinsns = 2;
+
+  return 0;
+}
+
 /* Copy 16-bit Thumb(Thumb and 16-bit Thumb-2) instruction without any
    modification.  */
 static int
@@ -5408,6 +5425,27 @@ arm_copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
   return 0;
 }
 
+static int
+thumb2_copy_preload (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
+		     struct regcache *regs, struct displaced_step_closure *dsc)
+{
+  unsigned int rn = bits (insn1, 0, 3);
+  if (rn == ARM_PC_REGNUM)
+    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "preload", dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.4x%.4x\n",
+			insn1, insn2);
+
+  dsc->modinsn[0] = insn1 & 0xfff0;
+  dsc->modinsn[1] = insn2;
+  dsc->numinsns = 2;
+
+  install_preload (gdbarch, regs, dsc, rn);
+
+  return 0;
+}
+
 /* Preload instructions with register offset.  */
 
 static void
@@ -5456,6 +5494,30 @@ arm_copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn,
   return 0;
 }
 
+static int
+thumb2_copy_preload_reg (struct gdbarch *gdbarch, uint16_t insn1,
+			 uint16_t insn2, struct regcache *regs,
+			 struct displaced_step_closure *dsc)
+{
+  unsigned int rn = bits (insn1, 0, 3);
+  unsigned int rm = bits (insn2, 0, 3);
+
+  if (rn != ARM_PC_REGNUM && rm != ARM_PC_REGNUM)
+    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "preload reg",
+					dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.4x%.4x\n",
+			insn1, insn1);
+
+  dsc->modinsn[0] = insn1 & 0xfff0;
+  dsc->modinsn[1] = (insn2 & 0xfff0) | 0x1;
+  dsc->numinsns = 2;
+
+  install_preload_reg (gdbarch, regs, dsc, rn, rm);
+  return 0;
+}
+
 /* Copy/cleanup coprocessor load and store instructions.  */
 
 static void
@@ -5517,6 +5579,30 @@ arm_copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
   return 0;
 }
 
+static int
+thumb2_copy_copro_load_store (struct gdbarch *gdbarch, uint16_t insn1,
+			      uint16_t insn2, struct regcache *regs,
+			      struct displaced_step_closure *dsc)
+{
+  unsigned int rn = bits (insn1, 0, 3);
+
+  if (rn == ARM_PC_REGNUM)
+    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+					"copro load/store", dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
+			"load/store insn %.4x%.4x\n", insn1, insn2);
+
+  dsc->modinsn[0] = insn1 & 0xfff0;
+  dsc->modinsn[1] = insn2;
+  dsc->numinsns = 2;
+
+  install_copro_load_store (gdbarch, regs, dsc, bit (insn1, 9), rn);
+
+  return 0;
+}
+
 /* Clean up branch instructions (actually perform the branch, by setting
    PC).  */
 
@@ -5604,6 +5690,58 @@ arm_copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
   return 0;
 }
 
+static int
+thumb2_copy_b_bl_blx (struct gdbarch *gdbarch, uint16_t insn1,
+		      uint16_t insn2, struct regcache *regs,
+		      struct displaced_step_closure *dsc)
+{
+  int link = bit (insn2, 14);
+  int exchange = link && !bit (insn2, 12);
+  int cond = INST_AL;
+  long offset =0;
+  int j1 = bit (insn2, 13);
+  int j2 = bit (insn2, 11);
+  int s = sbits (insn1, 10, 10);
+  int i1 = !(j1 ^ bit (insn1, 10));
+  int i2 = !(j2 ^ bit (insn1, 10));
+
+  if (!link && !exchange) /* B */
+    {
+      cond = bits (insn1, 6, 9);
+      offset = (bits (insn2, 0, 10) << 1);
+      if (bit (insn2, 12)) /* Encoding T4 */
+	{
+	  offset |= (bits (insn1, 0, 9) << 12)
+	    | (i2 << 22)
+	    | (i1 << 23)
+	    | (s << 24);
+	}
+      else /* Encoding T3 */
+	offset |= (bits (insn1, 0, 5) << 12)
+	  | (j1 << 18)
+	  | (j2 << 19)
+	  | (s << 20);
+    }
+  else
+    {
+      offset = (bits (insn1, 0, 9) << 12);
+      offset |= ((i2 << 22) | (i1 << 23) | (s << 24));
+      offset |= exchange ?
+	(bits (insn2, 1, 10) << 2) : (bits (insn2, 0, 10) << 1);
+    }
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s immediate insn "
+			"%.4x %.4x with offset %.8lx\n",
+			(exchange) ? "blx" : "bl",
+			insn1, insn2, offset);
+
+  dsc->modinsn[0] = THUMB_NOP;
+
+  install_b_bl_blx (gdbarch, regs, dsc, cond, exchange, 1, offset);
+  return 0;
+}
+
 /* Copy B Thumb instructions.  */
 static int
 thumb_copy_b (struct gdbarch *gdbarch, unsigned short insn,
@@ -5866,6 +6004,41 @@ thumb_copy_alu_reg (struct gdbarch *gdbarch, uint16_t insn,
   return 0;
 }
 
+static int
+thumb2_copy_alu_reg (struct gdbarch *gdbarch, uint16_t insn1,
+		     uint16_t insn2, struct regcache *regs,
+		     struct displaced_step_closure *dsc)
+{
+  unsigned int op2 = bits (insn2, 4, 7);
+  int is_mov = (op2 == 0x0);
+  unsigned int rn, rm, rd;
+
+  rn = bits (insn1, 0, 3); /* Rn */
+  rm = bits (insn2, 0, 3); /* Rm */
+  rd = bits (insn2, 8, 11); /* Rd */
+
+  /* In Thumb-2, rn, rm and rd can't be r15.  */
+  if (rn != ARM_PC_REGNUM && rm != ARM_PC_REGNUM
+      && rd != ARM_PC_REGNUM)
+    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "ALU reg", dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.4x%.4x\n",
+			"ALU", insn1, insn2);
+
+  if (is_mov)
+    dsc->modinsn[0] = insn1;
+  else
+    dsc->modinsn[0] = ((insn1 & 0xfff0) | 0x1);
+
+  dsc->modinsn[1] = ((insn2 & 0xf0f0) | 0x2);
+  dsc->numinsns = 2;
+
+  install_alu_reg (gdbarch, regs, dsc, rd, rn, rm);
+
+  return 0;
+}
+
 /* Cleanup/copy arithmetic/logic insns with shifted register RHS.  */
 
 static void
@@ -6135,6 +6308,67 @@ install_ldr_str_ldrb_strb (struct gdbarch *gdbarch, struct regcache *regs,
 }
 
 static int
+thumb2_copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint16_t insn1,
+			       uint16_t insn2,  struct regcache *regs,
+			       struct displaced_step_closure *dsc,
+			       int load, int byte, int usermode, int writeback)
+{
+  int immed = !bit (insn1, 9);
+  unsigned int rt = bits (insn2, 12, 15);
+  unsigned int rn = bits (insn1, 0, 3);
+  unsigned int rm = bits (insn2, 0, 3);  /* Only valid if !immed.  */
+
+  if (rt != ARM_PC_REGNUM && rn != ARM_PC_REGNUM && rm != ARM_PC_REGNUM)
+    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "load/store",
+					dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog,
+			"displaced: copying %s%s r%d [r%d] insn %.4x%.4x\n",
+			load ? (byte ? "ldrb" : "ldr")
+			     : (byte ? "strb" : "str"), usermode ? "t" : "",
+			rt, rn, insn1, insn2);
+
+  install_ldr_str_ldrb_strb (gdbarch, regs, dsc, load, immed, writeback, byte,
+			     usermode, rt, rm, rn);
+
+  if (load || rt != ARM_PC_REGNUM)
+    {
+      dsc->u.ldst.restore_r4 = 0;
+
+      if (immed)
+	/* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
+	   ->
+	   {ldr,str}[b]<cond> r0, [r2, #imm].  */
+	{
+	  dsc->modinsn[0] = (insn1 & 0xfff0) | 0x2;
+	  dsc->modinsn[1] = insn2 & 0x0fff;
+	}
+      else
+	/* {ldr,str}[b]<cond> rt, [rn, rm], etc.
+	   ->
+	   {ldr,str}[b]<cond> r0, [r2, r3].  */
+	{
+	  dsc->modinsn[0] = (insn1 & 0xfff0) | 0x2;
+	  dsc->modinsn[1] = (insn2 & 0x0ff0) | 0x3;
+	}
+
+      dsc->numinsns = 2;
+    }
+  else
+    {
+      /* In Thumb-32 instructions, the behavior is unpredictable when Rt is
+	 PC, while the behavior is undefined when Rn is PC.  In short, neither
+	 Rt nor Rn can be PC.  */
+
+      gdb_assert (0);
+    }
+
+  return 0;
+}
+
+
+static int
 arm_copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
 			    struct regcache *regs,
 			    struct displaced_step_closure *dsc,
@@ -6524,6 +6758,87 @@ arm_copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn,
   return 0;
 }
 
+static int
+thumb2_copy_block_xfer (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
+			struct regcache *regs,
+			struct displaced_step_closure *dsc)
+{
+  int rn = bits (insn1, 0, 3);
+  int load = bit (insn1, 4);
+  int writeback = bit (insn1, 5);
+
+  /* Block transfers which don't mention PC can be run directly
+     out-of-line.  */
+  if (rn != ARM_PC_REGNUM && (insn2 & 0x8000) == 0)
+    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "ldm/stm", dsc);
+
+  if (rn == ARM_PC_REGNUM)
+    {
+      warning (_("displaced: Unpredictable LDM or STM with "
+		 "base register r15"));
+      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+					  "unpredictable ldm/stm", dsc);
+    }
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying block transfer insn "
+			"%.4x%.4x\n", insn1, insn2);
+
+  /* Clear bit 13, since it should be always zero.  */
+  dsc->u.block.regmask = (insn2 & 0xdfff);
+  dsc->u.block.rn = rn;
+
+  dsc->u.block.load = bit (insn1, 4);
+  dsc->u.block.user = bit (insn1, 6);
+  dsc->u.block.increment = bit (insn1, 7);
+  dsc->u.block.before = bit (insn1, 8);
+  dsc->u.block.writeback = writeback;
+  dsc->u.block.cond = INST_AL;
+
+  if (load)
+    {
+      if (dsc->u.block.regmask == 0xffff)
+	{
+	  /* This branch cannot happen.  */
+	  gdb_assert (0);
+	}
+      else
+	{
+	  unsigned int regmask = dsc->u.block.regmask;
+	  unsigned int num_in_list = bitcount (regmask), new_regmask, bit = 1;
+	  unsigned int to = 0, from = 0, i, new_rn;
+
+	  for (i = 0; i < num_in_list; i++)
+	    dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
+
+	  if (writeback)
+	    insn1 &= ~(1 << 5);
+
+	  new_regmask = (1 << num_in_list) - 1;
+
+	  if (debug_displaced)
+	    fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
+				"{..., pc}: original reg list %.4x, modified "
+				"list %.4x\n"), rn, writeback ? "!" : "",
+				(int) dsc->u.block.regmask, new_regmask);
+
+	  dsc->modinsn[0] = insn1;
+	  dsc->modinsn[1] = (new_regmask & 0xffff);
+	  dsc->numinsns = 2;
+
+	  dsc->cleanup = &cleanup_block_load_pc;
+	}
+    }
+  else
+    {
+      dsc->modinsn[0] = insn1;
+      dsc->modinsn[1] = insn2;
+      dsc->numinsns = 2;
+      dsc->cleanup = &cleanup_block_store_pc;
+    }
+  return 0;
+}
+
 /* Cleanup/copy SVC (SWI) instructions.  These two functions are overridden
    for Linux, where some SVC instructions must be treated specially.  */
 
@@ -6609,6 +6924,23 @@ arm_copy_undef (struct gdbarch *gdbarch, uint32_t insn,
   return 0;
 }
 
+static int
+thumb_32bit_copy_undef (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
+                       struct displaced_step_closure *dsc)
+{
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying undefined insn "
+                       "%.4x %.4x\n", (unsigned short) insn1,
+                       (unsigned short) insn2);
+
+  dsc->modinsn[0] = insn1;
+  dsc->modinsn[1] = insn2;
+  dsc->numinsns = 2;
+
+  return 0;
+}
+
 /* Copy unpredictable instructions.  */
 
 static int
@@ -7005,6 +7337,43 @@ arm_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
   return 1;
 }
 
+/* Decode extension register load/store.  Exactly the same as
+   arm_decode_ext_reg_ld_st.  */
+
+static int
+thumb2_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint16_t insn1,
+			     uint16_t insn2,  struct regcache *regs,
+			     struct displaced_step_closure *dsc)
+{
+  unsigned int opcode = bits (insn1, 4, 8);
+
+  switch (opcode)
+    {
+    case 0x04: case 0x05:
+      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+					  "vfp/neon vmov", dsc);
+
+    case 0x08: case 0x0c: /* 01x00 */
+    case 0x0a: case 0x0e: /* 01x10 */
+    case 0x12: case 0x16: /* 10x10 */
+      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+					  "vfp/neon vstm/vpush", dsc);
+
+    case 0x09: case 0x0d: /* 01x01 */
+    case 0x0b: case 0x0f: /* 01x11 */
+    case 0x13: case 0x17: /* 10x11 */
+      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+					  "vfp/neon vldm/vpop", dsc);
+
+    case 0x10: case 0x14: case 0x18: case 0x1c:  /* vstr.  */
+    case 0x11: case 0x15: case 0x19: case 0x1d:  /* vldr.  */
+      return thumb2_copy_copro_load_store (gdbarch, insn1, insn2, regs, dsc);
+    }
+
+  /* Should be unreachable.  */
+  return 1;
+}
+
 static int
 arm_decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
 		      struct regcache *regs, struct displaced_step_closure *dsc)
@@ -7051,6 +7420,102 @@ arm_decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
     return arm_copy_undef (gdbarch, insn, dsc);  /* Possibly unreachable.  */
 }
 
+static int
+thumb2_decode_svc_copro (struct gdbarch *gdbarch, uint16_t insn1,
+			 uint16_t insn2, struct regcache *regs,
+			 struct displaced_step_closure *dsc)
+{
+  unsigned int coproc = bits (insn2, 8, 11);
+  unsigned int op1 = bits (insn1, 4, 9);
+  unsigned int bit_5_8 = bits (insn1, 5, 8);
+  unsigned int bit_9 = bit (insn1, 9);
+  unsigned int bit_4 = bit (insn1, 4);
+  unsigned int rn = bits (insn1, 0, 3);
+
+  if (bit_9 == 0)
+    {
+      if (bit_5_8 == 2)
+	{
+	  if ((coproc & 0xe) == 0xa) /* 64-bit xfer.  */
+	    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						"neon 64bit xfer", dsc);
+	  else
+	    {
+	      if (bit_4) /* MRRC/MRRC2 */
+		return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						    "mrrc/mrrc2", dsc);
+	      else /* MCRR/MCRR2 */
+		return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						    "mcrr/mcrr2", dsc);
+	    }
+	}
+      else if (bit_5_8 == 0) /* UNDEFINED.  */
+	return thumb_32bit_copy_undef (gdbarch, insn1, insn2, dsc);
+      else
+	{
+	   /* coproc is 101x.  SIMD/VFP, ext registers load/store.  */
+	  if ((coproc & 0xe) == 0xa)
+	    return thumb2_decode_ext_reg_ld_st (gdbarch, insn1, insn2, regs,
+						dsc);
+	  else /* coproc is not 101x.  */
+	    {
+	      if (bit_4 == 0) /* STC/STC2.  */
+		return thumb2_copy_copro_load_store (gdbarch, insn1, insn2,
+						     regs, dsc);
+	      else
+		{
+		  if (rn == 0xf) /* LDC/LDC2 literal.  */
+		    return thumb2_copy_copro_load_store (gdbarch, insn1, insn2,
+							 regs, dsc);
+		  else /* LDC/LDC2 immediate.  */
+		    return thumb2_copy_copro_load_store (gdbarch, insn1, insn2,
+							 regs, dsc);
+		}
+	    }
+	}
+    }
+  else
+    {
+      unsigned int op = bit (insn2, 4);
+      unsigned int bit_8 = bit (insn1, 8);
+
+      if (bit_8) /* Advanced SIMD */
+	return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+					    "neon", dsc);
+      else
+	{
+	  /* coproc is 101x.  */
+	  if ((coproc & 0xe) == 0xa)
+	    {
+	      if (op) /* 8,16,32-bit xfer.  */
+		return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						    "neon 8/16/32 bit xfer",
+						    dsc);
+	      else /* VFP data processing.  */
+		return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						    "vfp dataproc", dsc);
+	    }
+	  else
+	    {
+	      if (op)
+		{
+		  if (bit_4) /* MRC/MRC2 */
+		    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+							"mrc/mrc2", dsc);
+		  else /* MCR/MCR2 */
+		     return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+							"mcr/mcr2", dsc);
+		}
+	      else /* CDP/CDP 2 */
+		return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						    "cdp/cdp2", dsc);
+	    }
+	}
+    }
+
+  return 0;
+}
+
 static void
 install_pc_relative (struct gdbarch *gdbarch, struct regcache *regs,
 		     struct displaced_step_closure *dsc, int rd)
@@ -7100,6 +7565,42 @@ thumb_decode_pc_relative_16bit (struct gdbarch *gdbarch, uint16_t insn,
 }
 
 static int
+thumb_copy_pc_relative_32bit (struct gdbarch *gdbarch, struct regcache *regs,
+			      struct displaced_step_closure *dsc,
+			      int rd, unsigned int imm)
+{
+  /* Encoding T3: ADDS Rd, Rd, #imm */
+  dsc->modinsn[0] = (0xf100 | rd);
+  dsc->modinsn[1] = (0x0 | (rd << 8) | imm);
+
+  dsc->numinsns = 2;
+
+  install_pc_relative (gdbarch, regs, dsc, rd);
+
+  return 0;
+}
+
+static int
+thumb_decode_pc_relative_32bit (struct gdbarch *gdbarch, uint16_t insn1,
+				uint16_t insn2, struct regcache *regs,
+				struct displaced_step_closure *dsc)
+{
+  unsigned int rd = bits (insn2, 8, 11);
+  /* Since the immediate has the same encoding in both ADR and ADDS, we
+     simply extract the raw immediate encoding rather than computing the
+     immediate value.  When generating the ADDS instruction, we can simply
+     OR the immediate into ADDS.  */
+  unsigned int imm = (insn2 & 0x70ff) | (bit (insn1, 10) << 26);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog,
+			"displaced: copying thumb adr r%d, #%d insn %.4x%.4x\n",
+			rd, imm, insn1, insn2);
+
+  return thumb_copy_pc_relative_32bit (gdbarch, regs, dsc, rd, imm);
+}
+
+static int
 thumb_copy_16bit_ldr_literal (struct gdbarch *gdbarch, unsigned short insn1,
 			      struct regcache *regs,
 			      struct displaced_step_closure *dsc)
@@ -7354,12 +7855,211 @@ thumb_process_displaced_16bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
 		    _("thumb_process_displaced_16bit_insn: Instruction decode error"));
 }
 
+static int
+decode_thumb_32bit_ld_mem_hints (struct gdbarch *gdbarch,
+				 uint16_t insn1, uint16_t insn2,
+				 struct regcache *regs,
+				 struct displaced_step_closure *dsc)
+{
+  int rd = bits (insn2, 12, 15);
+  int user_mode = (bits (insn2, 8, 11) == 0xe);
+  int err = 0;
+  int writeback = 0;
+
+  switch (bits (insn1, 5, 6))
+    {
+    case 0: /* Load byte and memory hints */
+      if (rd == 0xf) /* PLD/PLI */
+	{
+	  if (bits (insn2, 6, 11))
+	    return thumb2_copy_preload (gdbarch, insn1, insn2, regs, dsc);
+	  else
+	    return thumb2_copy_preload_reg (gdbarch, insn1, insn2, regs, dsc);
+	}
+      else
+	{
+	  int op1 = bits (insn1, 7, 8);
+
+	  if ((op1 == 0 || op1 == 2) && bit (insn2, 11))
+	    writeback = bit (insn2, 8);
+
+	  return thumb2_copy_ldr_str_ldrb_strb (gdbarch, insn1, insn2, regs,
+						dsc, 1, 1, user_mode,
+						writeback);
+	}
+
+      break;
+    case 1: /* Load halfword and memory hints */
+      if (rd == 0xf) /* PLD{W} and Unalloc memory hint */
+	{
+	  if (bits (insn2, 6, 11))
+	    return thumb2_copy_preload (gdbarch, insn1, insn2, regs, dsc);
+	  else
+	    return thumb2_copy_preload_reg (gdbarch, insn1, insn2, regs, dsc);
+	}
+      else
+	{
+	  int op1 = bits (insn1, 7, 8);
+
+	  if ((op1 == 0 || op1 == 2) && bit (insn2, 11))
+	    writeback = bit (insn2, 8);
+	  return thumb2_copy_ldr_str_ldrb_strb (gdbarch, insn1, insn2, regs,
+						dsc, 1, 0, user_mode,
+						writeback);
+	}
+      break;
+    case 2: /* Load word */
+      {
+	int op1 = bits (insn1, 7, 8);
+
+	  if ((op1 == 0 || op1 == 2) && bit (insn2, 11))
+	    writeback = bit (insn2, 8);
+
+	return thumb2_copy_ldr_str_ldrb_strb (gdbarch, insn1, insn2, regs, dsc,
+					      1, 0, user_mode, writeback);
+	break;
+      }
+    default:
+      return thumb_32bit_copy_undef (gdbarch, insn1, insn2, dsc);
+      break;
+    }
+  return 0;
+}
+
 static void
 thumb_process_displaced_32bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
 				    uint16_t insn2, struct regcache *regs,
 				    struct displaced_step_closure *dsc)
 {
-  error (_("Displaced stepping is only supported in ARM mode and Thumb 16bit instructions"));
+  int err = 0;
+  unsigned short op = bit (insn2, 15);
+  unsigned int op1 = bits (insn1, 11, 12);
+
+  switch (op1)
+    {
+    case 1:
+      {
+	switch (bits (insn1, 9, 10))
+	  {
+	  case 0: /* load/store multiple */
+	    switch (bits (insn1, 7, 8))
+	      {
+	      case 0: case 3: /* SRS, RFE */
+		err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						   "srs/rfe", dsc);
+		break;
+	      case 1: case 2: /* LDM/STM/PUSH/POP */
+		/* These Thumb 32-bit insns have the same encodings as ARM
+		   counterparts.  */
+		err = thumb2_copy_block_xfer (gdbarch, insn1, insn2, regs, dsc);
+	      }
+	    break;
+	  case 1:
+	    /* Data-processing (shift register).  In the ARM architecture
+	       reference manual, this entry is "Data-processing (shifted
+	       register) on page A6-31".  However, the instructions in table
+	       A6-31 show that they are `alu_reg' instructions.  There are
+	       no alu_shifted_reg instructions in Thumb-2.  */
+	    err = thumb2_copy_alu_reg (gdbarch, insn1, insn2, regs,
+					       dsc);
+	    break;
+	  default: /* Coprocessor instructions */
+	    /* Thumb 32bit coprocessor instructions have the same encoding
+	       as ARM's.  */
+	    err = thumb2_decode_svc_copro (gdbarch, insn1, insn2, regs, dsc);
+	    break;
+	  }
+      break;
+      }
+    case 2: /* op1 = 2 */
+      if (op) /* Branch and misc control.  */
+	{
+	  if (bit (insn2, 14)) /* BLX/BL */
+	    err = thumb2_copy_b_bl_blx (gdbarch, insn1, insn2, regs, dsc);
+	  else if (!bits (insn2, 12, 14) && bits (insn1, 8, 10) != 0x7)
+	    /* Conditional Branch */
+	    err = thumb2_copy_b_bl_blx (gdbarch, insn1, insn2, regs, dsc);
+	  else
+	    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+					       "misc ctrl", dsc);
+	}
+      else
+	{
+	  if (bit (insn1, 9)) /* Data processing (plain binary imm) */
+	    {
+	      int op = bits (insn1, 4, 8);
+	      int rn = bits (insn1, 0, 4);
+	      if ((op == 0 || op == 0xa) && rn == 0xf)
+		err = thumb_decode_pc_relative_32bit (gdbarch, insn1, insn2,
+						      regs, dsc);
+	      else
+		err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						   "dp/pb", dsc);
+	    }
+	  else /* Data processing (modified immediate) */
+	    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+					       "dp/mi", dsc);
+	}
+      break;
+    case 3: /* op1 = 3 */
+      switch (bits (insn1, 9, 10))
+	{
+	case 0:
+	  if (bit (insn1, 4))
+	    err = decode_thumb_32bit_ld_mem_hints (gdbarch, insn1, insn2,
+						   regs, dsc);
+	  else
+	    {
+	      if (bit (insn1, 8)) /* NEON Load/Store */
+		err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						   "neon elt/struct load/store",
+						   dsc);
+	      else /* Store single data item */
+		{
+		  int user_mode = (bits (insn2, 8, 11) == 0xe);
+		  int byte = (bits (insn1, 5, 7) == 0
+			      || bits (insn1, 5, 7) == 4);
+		  int writeback = 0;
+
+		  if (bits (insn1, 5, 7) < 3 && bit (insn2, 11))
+		    writeback = bit (insn2, 8);
+
+		  err = thumb2_copy_ldr_str_ldrb_strb (gdbarch, insn1, insn2,
+						       regs, dsc, 0, byte,
+						       user_mode, writeback);
+		}
+	    }
+	  break;
+	case 1: /* op1 = 3, bits (9, 10) == 1 */
+	  switch (bits (insn1, 7, 8))
+	    {
+	    case 0: case 1: /* Data processing (register) */
+	      err = thumb2_copy_alu_reg (gdbarch, insn1, insn2, regs, dsc);
+	      break;
+	    case 2: /* Multiply and absolute difference */
+	      err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						 "mul/mua/diff", dsc);
+	      break;
+	    case 3: /* Long multiply and divide */
+	      err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						 "lmul/lmua", dsc);
+	      break;
+	    }
+	  break;
+	default: /* Coprocessor instructions */
+	  err = thumb2_decode_svc_copro (gdbarch, insn1, insn2, regs, dsc);
+	  break;
+	}
+      break;
+    default:
+      err = 1;
+    }
+
+  if (err)
+    internal_error (__FILE__, __LINE__,
+		    _("thumb_process_displaced_32bit_insn: Instruction decode error"));
+
 }
 
 static void
-- 
1.7.0.4


^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [try 2nd 7/8] Test case
  2011-03-24 14:11   ` [try 2nd 7/8] Test case Yao Qi
@ 2011-05-05 13:26     ` Yao Qi
  2011-05-11 13:15       ` [try 2nd 7/8] Test case: V3 Yao Qi
  0 siblings, 1 reply; 66+ messages in thread
From: Yao Qi @ 2011-05-05 13:26 UTC (permalink / raw)
  To: gdb-patches

[-- Attachment #1: Type: text/plain, Size: 134 bytes --]

On 03/24/2011 10:05 PM, Yao Qi wrote:
> Test cases for Thumb displaced stepping.
> 

Here is the updated one.

-- 
Yao (齐尧)

[-- Attachment #2: 0003-test-cases.patch --]
[-- Type: text/x-patch, Size: 11008 bytes --]

gdb/testsuite/

2011-05-05  Yao Qi  <yao@codesourcery.com>

	* gdb.arch/arm-disp-step.S (test_ldr_literal): Test for Thumb
	instructions.
	(test_adr_32bit, test_pop_pc): Likewise.
	(test_ldr_literal_16, test_cbz_cbnz, test_adr): New test for
	Thumb instructions.
	* gdb.arch/arm-disp-step.exp (test_ldm_stm_pc): Match $gdb_prompt
	in gdb_test_multiple.
	(test_ldr_literal_16, test_cbz_cbnz, test_adr): New.

---
 gdb/testsuite/gdb.arch/arm-disp-step.S   |   90 ++++++++++++++--
 gdb/testsuite/gdb.arch/arm-disp-step.exp |  171 ++++++++++++++++++++++-------
 2 files changed, 209 insertions(+), 52 deletions(-)

diff --git a/gdb/testsuite/gdb.arch/arm-disp-step.S b/gdb/testsuite/gdb.arch/arm-disp-step.S
index fa69e31..61f695b 100644
--- a/gdb/testsuite/gdb.arch/arm-disp-step.S
+++ b/gdb/testsuite/gdb.arch/arm-disp-step.S
@@ -48,20 +48,32 @@ test_ret_end:
 	bl test_ldm_stm_pc
 #endif
 
-	/* Test ldrX literal in ARM */
-#if !defined (__thumb__)
+	/* Test ldrX literal in ARM and Thumb-2 */
+#if !defined (__thumb__) || defined(__thumb2__)
 	bl test_ldr_literal
 #endif
 
-	/* Test 32-bit adr in ARM */
-#if !defined(__thumb__)
-	bl test_adr_32bit
+	/* Test ldr literal in Thumb */
+#if defined(__thumb__)
+	bl test_ldr_literal_16
 #endif
 
-#if !defined(__thumb__)
-	bl test_pop_pc
+	/* Test cbnz/cbz in Thumb-2 */
+#if defined(__thumb2__)
+	bl test_cbz_cbnz
 #endif
 
+	/* Test adr in Thumb and Thumb-2 */
+#if defined(__thumb__)
+	bl test_adr
+#endif
+	/* Test 32-bit adr in ARM and Thumb-2 */
+#if defined(__thumb2__) || !defined(__thumb__)
+	bl test_adr_32bit
+#endif
+
+	bl test_pop_pc
+	
 	/* Test str in ARM mode and Thumb-2 */
 #if !defined(__thumb__)
 	bl test_str_pc
@@ -136,8 +148,8 @@ test_ldm_stm_pc_ret:
 	.word	test_ldm_stm_pc_ret
 	.size test_ldm_stm_pc, .-test_ldm_stm_pc
 #endif
-
-#if !defined (__thumb__)
+	
+#if !defined (__thumb__) || defined(__thumb2__)
 	.global test_ldr_literal
 	.type test_ldr_literal, %function
 test_ldr_literal:
@@ -154,8 +166,66 @@ test_ldr_literal_end:
 	.size test_ldr_literal, .-test_ldr_literal
 #endif
 
+#if defined(__thumb__)
+	.global test_ldr_literal_16
+	.code   16
+	.thumb_func
+test_ldr_literal_16:
+	ldr	r0, .L2
+	.global test_ldr_literal_16_end
+test_ldr_literal_16_end:
+	bx lr
+	.align	2
+.L2:
+	.word	test_ldr_literal_16
+	.size test_ldr_literal_16, .-test_ldr_literal_16
+#endif
 
-#if !defined(__thumb__)
+#if defined(__thumb2__)
+	.global test_cbz_cbnz
+	.code   16
+	.thumb_func
+test_cbz_cbnz:
+	movs 	r0, #0
+	.global test_zero_cbnz
+test_zero_cbnz:
+	cbnz	r0, .L3
+	.global test_zero_cbz
+test_zero_cbz:
+	cbz	r0, .L3
+.L3:
+	movs	r0, #1
+	.global test_non_zero_cbz
+test_non_zero_cbz:
+	cbz	r0, .L4
+	.global test_non_zero_cbnz
+test_non_zero_cbnz:
+	cbnz	r0, .L4
+	nop
+.L4:
+	.global test_cbz_cbnz_end
+test_cbz_cbnz_end:
+	bx lr
+	.size test_cbz_cbnz, .-test_cbz_cbnz
+#endif
+
+#if defined(__thumb__)
+	.global test_adr
+	.code   16
+	.thumb_func
+test_adr:
+	adr	r0, .L8
+	nop
+	nop
+	nop
+.L8:
+	.global test_adr_end
+test_adr_end:
+	bx lr
+	.size test_adr, .-test_adr
+#endif
+
+#if defined(__thumb2__) || !defined(__thumb__)
 	.global test_adr_32bit
 #if defined(__thumb2__)
 	.code   16
diff --git a/gdb/testsuite/gdb.arch/arm-disp-step.exp b/gdb/testsuite/gdb.arch/arm-disp-step.exp
index 0427a04..4ae70f6 100644
--- a/gdb/testsuite/gdb.arch/arm-disp-step.exp
+++ b/gdb/testsuite/gdb.arch/arm-disp-step.exp
@@ -42,15 +42,17 @@ if { [gdb_compile "${srcdir}/${subdir}/${srcfile}" "${binfile}" executable [list
 # Test ldm/stm related to PC.
 proc test_ldm_stm_pc {} {
     global srcfile
+    global gdb_prompt
+
     # Try to set breakpoint on test_ldm_stm_pc.  If symbol 'test_ldm_stm_pc'
     # can't be resolved, test case is compiled in Thumb mode, skip it.
     gdb_test_multiple "break *test_ldm_stm_pc" "break test_ldm_stm_pc" {
-	-re "Breakpoint.*at.* file .*$srcfile, line.*" {
+	-re "Breakpoint.*at.* file .*$srcfile, line.*\r\n$gdb_prompt $" {
 	    pass "break test_ldm_stm_pc"
 	}
-	-re "Function \"test_ldm_stm_pc\" not defined\..*Make breakpoint pending on future shared library load.*y or .n.. $" {
-	    gdb_test "n" "" "Test case is compiled in Thumb mode"
-	    return
+	-re "No symbol.*\r\n$gdb_prompt $" {
+	    pass "break test_ldm_stm_pc"
+	    return 0
 	}
     }
 
@@ -104,10 +106,38 @@ proc test_ldr_literal {} {
        ".*bx lr.*"
 }
 
+proc test_ldr_literal_16 {} {
+    global srcfile
+    global gdb_prompt
+
+    gdb_test_multiple "break *test_ldr_literal_16" "break test_ldr_literal_16" {
+	-re "Breakpoint.*at.* file .*$srcfile, line.*\r\n$gdb_prompt $" {
+	    pass "break test_ldr_literal"
+	}
+	-re "No symbol.*\r\n$gdb_prompt $" {
+	    return 0
+	}
+    }
+    gdb_test "break *test_ldr_literal_16_end" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_ldr_literal_16_end"
+
+    gdb_continue_to_breakpoint "continue to test_ldr_literal_16" \
+	".*ldr.*r0\,.*L2.*"
+    gdb_continue_to_breakpoint "continue to test_ldr_literal_16_end" \
+	".*bx lr.*"
+}
+
 ##########################################
 # Test call/ret.
 proc test_call_ret {} {
     global srcfile
+    global testfile
+
+    gdb_test "break *test_call" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_call"
+
     gdb_test "break *test_call_end" \
 	"Breakpoint.*at.* file .*$srcfile, line.*" \
 	"break test_call_end"
@@ -118,9 +148,10 @@ proc test_call_ret {} {
 	"Breakpoint.*at.* file .*$srcfile, line.*" \
 	"break test_ret_end"
 
-    gdb_continue_to_breakpoint "continue to test_call_end" \
+    gdb_continue_to_breakpoint "test_call" ".*bl test_call_subr.*"
+    gdb_continue_to_breakpoint "test_call_end" \
 	".*@ Location test_call_end.*"
-    gdb_continue_to_breakpoint "continue to test_ret" \
+    gdb_continue_to_breakpoint "test_ret" \
 	".*bx lr.*"
     gdb_continue_to_breakpoint "continue to test_ret_end" \
 	".*@ Location test_ret_end.*"
@@ -158,7 +189,68 @@ proc test_ldr_from_pc {} {
 
     gdb_continue_to_breakpoint "continue to test_ldr_pc" \
 	".*ldr.*r1\,.*\[pc, #0\].*"
-    gdb_continue_to_breakpoint "continue to Lbranch" \
+    gdb_continue_to_breakpoint "continue to test_ldr_pc_ret" \
+	".*bx lr.*"
+}
+
+#########################################
+
+# Test cbz and cbnz
+proc test_cbz_cbnz {} {
+    global srcfile
+    global gdb_prompt
+
+    gdb_test_multiple "break *test_zero_cbnz" "break test_zero_cbnz" {
+	-re "Breakpoint.*at.* file .*$srcfile, line.*\r\n$gdb_prompt $" {
+	    pass "break test_ldr_literal"
+	}
+	-re "No symbol.*\r\n$gdb_prompt $" {
+	    return 0
+	}
+    }
+
+    gdb_test "break *test_zero_cbz" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_zero_cbz"
+    gdb_test "break *test_non_zero_cbnz" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_non_zero_cbnz"
+    gdb_test "break *test_non_zero_cbz" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_non_zero_cbz"
+
+    gdb_continue_to_breakpoint "continue to test_zero_cbnz" \
+	".*cbnz.*r0\,.*\.L3.*"
+    gdb_continue_to_breakpoint "continue to test_zero_cbz" \
+	".*cbz.*r0\,.*\.L3.*"
+    gdb_continue_to_breakpoint "continue to test_non_zero_cbz" \
+	".*cbz.*r0\,.*\.L4.*"
+    gdb_continue_to_breakpoint "continue to test_non_zero_cbnz" \
+	".*cbnz.*r0\,.*\.L4.*"
+}
+
+# Test adr
+
+proc test_adr {} {
+    global srcfile
+    global gdb_prompt
+
+    gdb_test_multiple "break *test_adr" "break test_adr" {
+	-re "Breakpoint.*at.* file .*$srcfile, line.*\r\n$gdb_prompt $" {
+	    pass "break test_adr"
+	}
+	-re "No symbol.*\r\n$gdb_prompt $" {
+	    return 0
+	}
+    }
+
+    gdb_test "break *test_adr_end" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_adr_end"
+
+    gdb_continue_to_breakpoint "test_adr" \
+	".*adr.*r0\,.*\.L8.*"
+    gdb_continue_to_breakpoint "test_adr_end" \
 	".*bx lr.*"
 }
 
@@ -167,28 +259,28 @@ proc test_adr_32bit {} {
     global gdb_prompt
 
     gdb_test_multiple "break *test_adr_32bit" "break test_adr_32bit" {
-       -re "Breakpoint.*at.* file .*$srcfile, line.*\r\n$gdb_prompt $" {
-           pass "break test_adr"
-       }
-       -re "No symbol.*\r\n$gdb_prompt $" {
-           return 0
-       }
+	-re "Breakpoint.*at.* file .*$srcfile, line.*\r\n$gdb_prompt $" {
+	    pass "break test_adr"
+	}
+	-re "No symbol.*\r\n$gdb_prompt $" {
+	    return 0
+	}
     }
 
     gdb_test "break *test_adr_32bit_after" \
-       "Breakpoint.*at.* file .*$srcfile, line.*" \
-       "break test_adr_32bit_after"
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_adr_32bit_after"
 
     gdb_test "break *test_adr_32bit_end" \
-       "Breakpoint.*at.* file .*$srcfile, line.*" \
-       "break test_adr_32bit_end"
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_adr_32bit_end"
 
     gdb_continue_to_breakpoint "test_adr_32bit" \
-       ".*adr.*r0\,.*\.L6.*"
+	".*adr.*r0\,.*\.L6.*"
     gdb_continue_to_breakpoint "test_adr_32bit_after" \
-       ".*adr.*r0\,.*\.L6.*"
+	".*adr.*r0\,.*\.L6.*"
     gdb_continue_to_breakpoint "test_adr_32bit_end" \
-       ".*bx lr.*"
+	".*bx lr.*"
 }
 
 #########################################
@@ -196,27 +288,29 @@ proc test_adr_32bit {} {
 proc test_pop_pc {} {
     global srcfile
     gdb_test "break *test_pop_pc_1" \
-       "Breakpoint.*at.* file .*$srcfile, line.*" \
-       "break test_pop_pc"
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_pop_pc"
     gdb_test "break *test_pop_pc_ret" \
-       "Breakpoint.*at.* file .*$srcfile, line.*" \
-       "break test_pop_pc_ret"
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_pop_pc_ret"
 
     gdb_continue_to_breakpoint "continue to test_pop_pc" \
-       ".*b.*\{r1\, pc\}.*"
+	".*b.*\{r1\, pc\}.*"
     gdb_continue_to_breakpoint "continue to test_pop_pc_ret" \
-        ".*bx lr.*"
+	".*bx lr.*"
 }
 
 ###########################################
 
 proc test_str_pc {} {
     global srcfile
+    global gdb_prompt
+
     gdb_test_multiple "break *test_str_pc" "break test_str_pc" {
-	-re "Breakpoint.*at.* file .*$srcfile, line.*" {
+	-re "Breakpoint.*at.* file .*$srcfile, line.*\r\n$gdb_prompt $" {
 	    pass "break test_str_pc"
 	}
-	-re "No symbol.*" {
+	-re "No symbol.*\r\n$gdb_prompt $" {
 	    pass "break test_str_pc"
 	    return
 	}
@@ -261,20 +355,6 @@ if ![runto_main] then {
 gdb_test_no_output "set displaced-stepping on"
 gdb_test "show displaced-stepping" ".* displaced stepping .* is on.*"
 
-gdb_test "break *test_call" \
-	"Breakpoint.*at.* file .*$srcfile, line.*" \
-	"break test_call"
-
-gdb_test_multiple "continue" "continue to test_call" {
-	-re ".*bl test_call_subr.*" {
-	    pass "continue to test_call"
-	}
-	-re "Displaced stepping is only supported in" {
-	    kfail "gdb/NNNN" $testfile
-	    return
-	}
-    }
-
 test_call_ret
 
 test_branch
@@ -285,11 +365,18 @@ test_ldm_stm_pc
 
 test_ldr_literal
 
+test_ldr_literal_16
+
+test_cbz_cbnz
+
+test_adr
+
 test_adr_32bit
 
 test_pop_pc
 
 test_str_pc
+
 ##########################################
 
 # Done, run program to exit.
-- 
1.7.0.4


^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [try 2nd 4/8] Displaced stepping for Thumb 16-bit insn
  2011-05-05 13:24     ` Yao Qi
@ 2011-05-10 13:58       ` Ulrich Weigand
  2011-05-11 13:06         ` Yao Qi
  0 siblings, 1 reply; 66+ messages in thread
From: Ulrich Weigand @ 2011-05-10 13:58 UTC (permalink / raw)
  To: Yao Qi; +Cc: gdb-patches

Yao Qi wrote:

> 	Support displaced stepping for Thumb 16-bit insns.
> 	* arm-tdep.c (THUMB_NOP) Define.
> 	(thumb_copy_unmodified_16bit): New.
> 	(thumb_copy_b, thumb_copy_bx_blx_reg): New.
> 	(thumb_copy_alu_reg): New.
> 	(arm_copy_svc): Move some common code to ...
> 	(copy_svc): ... here.  New.
> 	(thumb_copy_svc): New.
> 	(install_pc_relative): New.
> 	(thumb_copy_pc_relative_16bit): New.
> 	(thumb_decode_pc_relative_16bit): New.
> 	(thumb_copy_16bit_ldr_literal): New.
> 	(thumb_copy_cbnz_cbz): New.
> 	(cleanup_pop_pc_16bit): New.
> 	(thumb_copy_pop_pc_16bit): New.
> 	(thumb_process_displaced_16bit_insn): New.
> 	(thumb_process_displaced_32bit_insn): New.
> 	(thumb_process_displaced_insn): process thumb instruction.

This is looking pretty good now, thanks.  There is still one problem
that I noticed:

> +static int
> +thumb_copy_pop_pc_16bit (struct gdbarch *gdbarch, unsigned short insn1,
> +			 struct regcache *regs,
> +			 struct displaced_step_closure *dsc)
> +{
> +  dsc->u.block.regmask = insn1 & 0x00ff;
> +
> +  /* Rewrite instruction: POP {rX, rY, ...,rZ, PC}
> +     to :
> +
> +     (1) register list is not empty,
> +     Prepare: tmp[0] <- r8,
> +
> +     POP {rX};   PC is stored in rX
> +     MOV r8, rX; finally, PC is stored in r8
> +     POP {rX, rY, ...., rZ}
> +
> +     Cleanup: PC <-r8, r8 <- tmp[0]

It seems this approach is actually incorrect.  If you do a
  POP {rX, rY, ..., rZ, PC}
the value at the SP gets restored into rX, the value at rX+4
into rY and so on, and the value at the highest address gets
restored into PC.

With your replacement sequence, it would appear that you
instead get the value at the *lowest* address (just SP)
restored into the PC ...


Apart from this, just a few minor issues:

> +/* Common copy routine for svc instruciton.  */
>  
> +static int
> +copy_svc (struct gdbarch *gdbarch, struct regcache *regs,
> +	  struct displaced_step_closure *dsc)
> +{

I guess to keep in sync with terminology in the rest of the file,
this really should be called install_svc ...

> +      err = thumb_copy_unmodified_16bit (gdbarch, insn1,"shift/add/sub/mov/cmp",

Formatting: space after the comma (here and at a couple other places).

> +      thumb_process_displaced_32bit_insn(gdbarch, insn1, insn2, regs, dsc);
> +    }
> +  else
> +    thumb_process_displaced_16bit_insn(gdbarch, insn1, regs, dsc);

Formatting: space before (


Thanks,
Ulrich

-- 
  Dr. Ulrich Weigand
  GNU Toolchain for Linux on System z and Cell BE
  Ulrich.Weigand@de.ibm.com

^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [try 2nd 4/8] Displaced stepping for Thumb 16-bit insn
  2011-05-10 13:58       ` Ulrich Weigand
@ 2011-05-11 13:06         ` Yao Qi
  2011-05-16 17:19           ` Ulrich Weigand
  0 siblings, 1 reply; 66+ messages in thread
From: Yao Qi @ 2011-05-11 13:06 UTC (permalink / raw)
  To: Ulrich Weigand; +Cc: gdb-patches

[-- Attachment #1: Type: text/plain, Size: 3048 bytes --]

On 05/10/2011 09:58 PM, Ulrich Weigand wrote:
>> > +static int
>> > +thumb_copy_pop_pc_16bit (struct gdbarch *gdbarch, unsigned short insn1,
>> > +			 struct regcache *regs,
>> > +			 struct displaced_step_closure *dsc)
>> > +{
>> > +  dsc->u.block.regmask = insn1 & 0x00ff;
>> > +
>> > +  /* Rewrite instruction: POP {rX, rY, ...,rZ, PC}
>> > +     to :
>> > +
>> > +     (1) register list is not empty,
>> > +     Prepare: tmp[0] <- r8,
>> > +
>> > +     POP {rX};   PC is stored in rX
>> > +     MOV r8, rX; finally, PC is stored in r8
>> > +     POP {rX, rY, ...., rZ}
>> > +
>> > +     Cleanup: PC <-r8, r8 <- tmp[0]
> It seems this approach is actually incorrect.  If you do a
>   POP {rX, rY, ..., rZ, PC}
> the value at the SP gets restored into rX, the value at rX+4
> into rY and so on, and the value at the highest address gets
> restored into PC.
> 
> With your replacement sequence, it would appear that you
> instead get the value at the *lowest* address (just SP)
> restored into the PC ...
> 

Hmmm, you are right.  In the test case, I put the same address on the first
two slots on the stack, and pop them to r1 and pc respectively.  That is the
reason why this bug is not found by the test case.

In my new patch, there are three different cases to handle the POP instruction,
1.  register list is full, no free register.  The code sequence I am
using is like

     POP {r0, r1, ...., r6};
     POP {r7};
     MOV r8, r7;
     POP {r7};

after execution of this sequence, PC's value is stored in r7, and r7's
value is stored in r8.  In cleanup, we can set PC, r7, and r8 accordingly.

2.  register list is not full, and not empty.  In this case, we scan the
code to find a free register, rN.  Run the following code sequence,

     POP {rX, rY, ...., rZ};
     POP {rN};

After execution of this sequence, PC's value is stored in rN.  In
cleanup, we can set PC from rN.

3.  register list is empty.  This case is relatively simple.

     POP {r0}

In cleanup, we store r0's value to PC.

The testcase is updated according to these three cases, and the value of PC
after each POP instruction is checked.  I'll resend the test case patch later.

> 
> Apart from this, just a few minor issues:
> 
>> > +/* Common copy routine for svc instruciton.  */
>> >  
>> > +static int
>> > +copy_svc (struct gdbarch *gdbarch, struct regcache *regs,
>> > +	  struct displaced_step_closure *dsc)
>> > +{
> I guess to keep in sync with terminology in the rest of the file,
> this really should be called install_svc ...
> 

Well, the function `copy_svc' is a little bit different from other
install_* routines.  Anyway, I am OK to name it to install_svc.  Fixed.

>> > +      err = thumb_copy_unmodified_16bit (gdbarch, insn1,"shift/add/sub/mov/cmp",
> Formatting: space after the comma (here and at a couple other places).
>

Fixed.

>> > +      thumb_process_displaced_32bit_insn(gdbarch, insn1, insn2, regs, dsc);
>> > +    }
>> > +  else
>> > +    thumb_process_displaced_16bit_insn(gdbarch, insn1, regs, dsc);
> Formatting: space before (

Fixed.

-- 
Yao (齐尧)

[-- Attachment #2: 0001-Support-displaced-stepping-for-Thumb-16-bit-insns.patch --]
[-- Type: text/x-patch, Size: 18214 bytes --]

         Support displaced stepping for Thumb 16-bit insns.
         * arm-tdep.c (THUMB_NOP) Define.
         (thumb_copy_unmodified_16bit): New.
         (thumb_copy_b, thumb_copy_bx_blx_reg): New.
         (thumb_copy_alu_reg): New.
         (arm_copy_svc): Move some common code to ...
         (install_svc): ... here.  New.
         (thumb_copy_svc): New.
         (install_pc_relative): New.
         (thumb_copy_pc_relative_16bit): New.
         (thumb_decode_pc_relative_16bit): New.
         (thumb_copy_16bit_ldr_literal): New.
         (thumb_copy_cbnz_cbz): New.
         (cleanup_pop_pc_16bit): New.
         (thumb_copy_pop_pc_16bit): New.
         (thumb_process_displaced_16bit_insn): New.
         (thumb_process_displaced_32bit_insn): New.
         (thumb_process_displaced_insn): process thumb instruction.

---
 gdb/arm-tdep.c |  524 ++++++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 files changed, 512 insertions(+), 12 deletions(-)

diff --git a/gdb/arm-tdep.c b/gdb/arm-tdep.c
index 2dd8c9e..1421168 100644
--- a/gdb/arm-tdep.c
+++ b/gdb/arm-tdep.c
@@ -5118,6 +5118,7 @@ arm_adjust_breakpoint_address (struct gdbarch *gdbarch, CORE_ADDR bpaddr)
 
 /* NOP instruction (mov r0, r0).  */
 #define ARM_NOP				0xe1a00000
+#define THUMB_NOP 0x4600
 
 /* Helper for register reads for displaced stepping.  In particular, this
    returns the PC as it would be seen by the instruction at its original
@@ -5340,6 +5341,23 @@ arm_copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
   return 0;
 }
 
+/* Copy 16-bit Thumb(Thumb and 16-bit Thumb-2) instruction without any
+   modification.  */
+static int
+thumb_copy_unmodified_16bit (struct gdbarch *gdbarch, unsigned int insn,
+			     const char *iname,
+			     struct displaced_step_closure *dsc)
+{
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x, "
+			"opcode/class '%s' unmodified\n", insn,
+			iname);
+
+  dsc->modinsn[0] = insn;
+
+  return 0;
+}
+
 /* Preload instructions with immediate offset.  */
 
 static void
@@ -5586,6 +5604,44 @@ arm_copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
   return 0;
 }
 
+/* Copy B Thumb instructions.  */
+static int
+thumb_copy_b (struct gdbarch *gdbarch, unsigned short insn,
+	      struct displaced_step_closure *dsc)
+{
+  unsigned int cond = 0;
+  int offset = 0;
+  unsigned short bit_12_15 = bits (insn, 12, 15);
+  CORE_ADDR from = dsc->insn_addr;
+
+  if (bit_12_15 == 0xd)
+    {
+      offset = sbits (insn, 0, 7);
+      cond = bits (insn, 8, 11);
+    }
+  else if (bit_12_15 == 0xe) /* Encoding T2 */
+    {
+      offset = sbits ((insn << 1), 0, 11);
+       cond = INST_AL;
+    }
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog,
+			"displaced: copying b immediate insn %.4x "
+			"with offset %d\n", insn, offset);
+
+  dsc->u.branch.cond = cond;
+  dsc->u.branch.link = 0;
+  dsc->u.branch.exchange = 0;
+  dsc->u.branch.dest = from + 4 + offset;
+
+  dsc->modinsn[0] = THUMB_NOP;
+
+  dsc->cleanup = &cleanup_branch;
+
+  return 0;
+}
+
 /* Copy BX/BLX with register-specified destinations.  */
 
 static void
@@ -5631,6 +5687,26 @@ arm_copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
   return 0;
 }
 
+static int
+thumb_copy_bx_blx_reg (struct gdbarch *gdbarch, uint16_t insn,
+		       struct regcache *regs,
+		       struct displaced_step_closure *dsc)
+{
+  int link = bit (insn, 7);
+  unsigned int rm = bits (insn, 3, 6);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x",
+			(unsigned short) insn);
+
+  dsc->modinsn[0] = THUMB_NOP;
+
+  install_bx_blx_reg (gdbarch, regs, dsc, link, INST_AL, rm);
+
+  return 0;
+}
+
+
 /* Copy/cleanup arithmetic/logic instruction with immediate RHS.  */
 
 static void
@@ -5765,6 +5841,31 @@ arm_copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
   return 0;
 }
 
+static int
+thumb_copy_alu_reg (struct gdbarch *gdbarch, uint16_t insn,
+		    struct regcache *regs,
+		    struct displaced_step_closure *dsc)
+{
+  unsigned rn, rm, rd;
+
+  rd = bits (insn, 3, 6);
+  rn = (bit (insn, 7) << 3) | bits (insn, 0, 2);
+  rm = 2;
+
+  if (rd != ARM_PC_REGNUM && rn != ARM_PC_REGNUM)
+    return thumb_copy_unmodified_16bit(gdbarch, insn, "ALU reg", dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.4x\n",
+			"ALU", (unsigned short) insn);
+
+  dsc->modinsn[0] = ((insn & 0xff00) | 0x08);
+
+  install_alu_reg (gdbarch, regs, dsc, rd, rn, rm);
+
+  return 0;
+}
+
 /* Cleanup/copy arithmetic/logic insns with shifted register RHS.  */
 
 static void
@@ -6439,21 +6540,16 @@ cleanup_svc (struct gdbarch *gdbarch, struct regcache *regs,
   displaced_write_reg (regs, dsc, ARM_PC_REGNUM, resume_addr, BRANCH_WRITE_PC);
 }
 
-static int
-
-arm_copy_svc (struct gdbarch *gdbarch, uint32_t insn,
-	      struct regcache *regs, struct displaced_step_closure *dsc)
-{
 
-  if (debug_displaced)
-    fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.8lx\n",
-			(unsigned long) insn);
+/* Common copy routine for svc instruciton.  */
 
+static int
+install_svc (struct gdbarch *gdbarch, struct regcache *regs,
+	     struct displaced_step_closure *dsc)
+{
   /* Preparation: none.
      Insn: unmodified svc.
-     Cleanup: pc <- insn_addr + 4.  */
-
-  dsc->modinsn[0] = insn;
+     Cleanup: pc <- insn_addr + insn_size.  */
 
   /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
      instruction.  */
@@ -6467,7 +6563,34 @@ arm_copy_svc (struct gdbarch *gdbarch, uint32_t insn,
       dsc->cleanup = &cleanup_svc;
       return 0;
     }
+}
 
+static int
+arm_copy_svc (struct gdbarch *gdbarch, uint32_t insn,
+	      struct regcache *regs, struct displaced_step_closure *dsc)
+{
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.8lx\n",
+			(unsigned long) insn);
+
+  dsc->modinsn[0] = insn;
+
+  return install_svc (gdbarch, regs, dsc);
+}
+
+static int
+thumb_copy_svc (struct gdbarch *gdbarch, uint16_t insn,
+		struct regcache *regs, struct displaced_step_closure *dsc)
+{
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.4x\n",
+			insn);
+
+  dsc->modinsn[0] = insn;
+
+  return install_svc (gdbarch, regs, dsc);
 }
 
 /* Copy undefined instructions.  */
@@ -6929,11 +7052,388 @@ arm_decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
 }
 
 static void
+install_pc_relative (struct gdbarch *gdbarch, struct regcache *regs,
+		     struct displaced_step_closure *dsc, int rd)
+{
+  /* ADR Rd, #imm
+
+     Rewrite as:
+
+     Preparation: Rd <- PC
+     Insn: ADD Rd, #imm
+     Cleanup: Null.
+  */
+
+  /* Rd <- PC */
+  int val = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
+  displaced_write_reg (regs, dsc, rd, val, CANNOT_WRITE_PC);
+}
+
+static int
+thumb_copy_pc_relative_16bit (struct gdbarch *gdbarch, struct regcache *regs,
+			      struct displaced_step_closure *dsc,
+			      int rd, unsigned int imm)
+{
+
+  /* Encoding T2: ADDS Rd, #imm */
+  dsc->modinsn[0] = (0x3000 | (rd << 8) | imm);
+
+  install_pc_relative (gdbarch, regs, dsc, rd);
+
+  return 0;
+}
+
+static int
+thumb_decode_pc_relative_16bit (struct gdbarch *gdbarch, uint16_t insn,
+				struct regcache *regs,
+				struct displaced_step_closure *dsc)
+{
+  unsigned int rd = bits (insn, 8, 10);
+  unsigned int imm8 = bits (insn, 0, 7);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog,
+			"displaced: copying thumb adr r%d, #%d insn %.4x\n",
+			rd, imm8, insn);
+
+  return thumb_copy_pc_relative_16bit (gdbarch, regs, dsc, rd, imm8);
+}
+
+static int
+thumb_copy_16bit_ldr_literal (struct gdbarch *gdbarch, unsigned short insn1,
+			      struct regcache *regs,
+			      struct displaced_step_closure *dsc)
+{
+  unsigned int rt = bits (insn1, 8, 7);
+  unsigned int pc;
+  int imm8 = sbits (insn1, 0, 7);
+  CORE_ADDR from = dsc->insn_addr;
+
+  /* LDR Rd, #imm8
+
+     Rwrite as:
+
+     Preparation: tmp2 <- R2, tmp3 <- R3, R2 <- PC, R3 <- #imm8;
+                  if (Rd is not R0) tmp0 <- R0;
+     Insn: LDR R0, [R2, R3];
+     Cleanup: R2 <- tmp2, R3 <- tmp3,
+              if (Rd is not R0) Rd <- R0, R0 <- tmp0 */
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying thumb ldr literal "
+			"insn %.4x\n", insn1);
+
+  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
+  dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
+  dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
+  pc = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
+
+  displaced_write_reg (regs, dsc, 2, pc, CANNOT_WRITE_PC);
+  displaced_write_reg (regs, dsc, 3, imm8, CANNOT_WRITE_PC);
+
+  dsc->rd = rt;
+  dsc->u.ldst.xfersize = 4;
+  dsc->u.ldst.rn = 0;
+  dsc->u.ldst.immed = 0;
+  dsc->u.ldst.writeback = 0;
+  dsc->u.ldst.restore_r4 = 0;
+
+  dsc->modinsn[0] = 0x58d0; /* ldr r0, [r2, r3]*/
+
+  dsc->cleanup = &cleanup_load;
+
+  return 0;
+}
+
+/* Copy Thumb cbnz/cbz insruction.  */
+
+static int
+thumb_copy_cbnz_cbz (struct gdbarch *gdbarch, uint16_t insn1,
+		     struct regcache *regs,
+		     struct displaced_step_closure *dsc)
+{
+  int non_zero = bit (insn1, 11);
+  unsigned int imm5 = (bit (insn1, 9) << 6) | (bits (insn1, 3, 7) << 1);
+  CORE_ADDR from = dsc->insn_addr;
+  int rn = bits (insn1, 0, 2);
+  int rn_val = displaced_read_reg (regs, dsc, rn);
+
+  dsc->u.branch.cond = (rn_val && non_zero) || (!rn_val && !non_zero);
+  /* CBNZ and CBZ do not affect the condition flags.  If condition is true,
+     set it INST_AL, so cleanup_branch will know branch is taken, otherwise,
+     condition is false, let it be, cleanup_branch will do nothing.  */
+  if (dsc->u.branch.cond)
+    dsc->u.branch.cond = INST_AL;
+
+  dsc->u.branch.link = 0;
+  dsc->u.branch.exchange = 0;
+
+  dsc->u.branch.dest = from + 2 + imm5;
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s [r%d = 0x%x]"
+			" insn %.4x to %.8lx\n", non_zero ? "cbnz" : "cbz",
+			rn, rn_val, insn1, dsc->u.branch.dest);
+
+  dsc->modinsn[0] = THUMB_NOP;
+
+  dsc->cleanup = &cleanup_branch;
+  return 0;
+}
+
+static void
+cleanup_pop_pc_16bit(struct gdbarch *gdbarch, struct regcache *regs,
+		     struct displaced_step_closure *dsc)
+{
+
+  if (dsc->u.block.regmask == 0xff)
+    {
+      /* PC <- r7 */
+      int val = displaced_read_reg (regs, dsc, 7);
+      displaced_write_reg (regs, dsc, ARM_PC_REGNUM, val, BX_WRITE_PC);
+
+      /* r7 <- r8 */
+      val = displaced_read_reg (regs, dsc, 8);
+      displaced_write_reg (regs, dsc, 7, val, CANNOT_WRITE_PC);
+
+      /* r8 <- tmp[0] */
+      displaced_write_reg (regs, dsc, 8, dsc->tmp[0], CANNOT_WRITE_PC);
+    }
+  else /* Cleanup procedure of case #2 and case #3 can be unified.  */
+    {
+      int rx = 0;
+      int rx_val = 0;
+
+      if (dsc->u.block.regmask)
+	{
+	  for (rx = 0; rx < 8; rx++)
+	    if ((dsc->u.block.regmask & (1 << rx)) == 0)
+	      break;
+	}
+      else
+	rx = 0;
+
+      rx_val = displaced_read_reg (regs, dsc, rx);
+
+      displaced_write_reg (regs, dsc, ARM_PC_REGNUM, rx_val, BX_WRITE_PC);
+      displaced_write_reg (regs, dsc, rx, dsc->tmp[0], CANNOT_WRITE_PC);
+    }
+
+}
+
+static int
+thumb_copy_pop_pc_16bit (struct gdbarch *gdbarch, unsigned short insn1,
+			 struct regcache *regs,
+			 struct displaced_step_closure *dsc)
+{
+  dsc->u.block.regmask = insn1 & 0x00ff;
+
+  /* Rewrite instruction: POP {rX, rY, ...,rZ, PC}
+     to :
+
+     (1) register list is full, that is, r0-r7 are used.
+     Prepare: tmp[0] <- r8
+
+     POP {r0, r1, ...., r6}; remove PC and r7 from reglist
+     POP {r7};
+     MOV r8, r7; Move value of r7 to r8;
+     POP {r7}; Store PC value into r7.
+
+     Cleanup: PC <- r7, r7 <- r8, r8 <-tmp[0]
+
+     (2) register list is not empty, but no full,
+     Prepare: tmp[0] <- rN, rN is the first free register.
+
+     POP {rX, rY, ...., rZ}; remove PC from reglist.
+     POP {rN};   PC is stored in rN
+
+     Cleanup: PC <-rN, rN <- tmp[0]
+
+     (3) register list is empty.
+     Prepare: tmp[0] <- r0,
+
+     POP {r0}
+
+     Cleanup: PC <- r0, r0 <- tmp[0]
+  */
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog,
+			"displaced: copying thumb pop {%.8x, pc} insn %.4x\n",
+			dsc->u.block.regmask, insn1);
+
+  if (dsc->u.block.regmask == 0xff)
+    {
+      dsc->tmp[0] = displaced_read_reg (regs, dsc, 8);
+
+      dsc->modinsn[0] = (insn1 & 0xfe7f); /* POP {r0,r1,...,r6} */
+      dsc->modinsn[1] = 0xbc80; /* POP {r7} */
+      dsc->modinsn[2] = 0x46b8; /* MOV r8, r7 */
+      dsc->modinsn[3] = 0xbc80; /* POP {r7} */
+
+      dsc->numinsns = 4;
+    }
+  else if (dsc->u.block.regmask != 0)
+    {
+      int rn = 0;
+
+      /* Look for the first register not in register list.  */
+      for (rn = 0; rn < 8; rn++)
+	if ((dsc->u.block.regmask & (1 << rn)) == 0)
+	  break;
+      dsc->tmp[0] = displaced_read_reg (regs, dsc, rn);
+
+      dsc->modinsn[0] = (insn1 & 0xfeff);     /* POP {rX, rY, ..., rZ} */
+      dsc->modinsn[1] = (0xbc00 | (1 << rn)); /* POP {rN} */
+
+      dsc->numinsns = 2;
+    }
+  else
+    {
+      dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
+
+      dsc->modinsn[0] = 0xbc01; /* POP {r0} */
+
+      dsc->numinsns = 1;
+    }
+
+  dsc->cleanup = &cleanup_pop_pc_16bit;
+  return 0;
+}
+
+static void
+thumb_process_displaced_16bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
+				    struct regcache *regs,
+				    struct displaced_step_closure *dsc)
+{
+  unsigned short op_bit_12_15 = bits (insn1, 12, 15);
+  unsigned short op_bit_10_11 = bits (insn1, 10, 11);
+  int err = 0;
+
+  /* 16-bit thumb instructions.  */
+  switch (op_bit_12_15)
+    {
+      /* Shift (imme), add, subtract, move and compare.  */
+    case 0: case 1: case 2: case 3:
+      err = thumb_copy_unmodified_16bit (gdbarch, insn1,
+					 "shift/add/sub/mov/cmp",
+					 dsc);
+      break;
+    case 4:
+      switch (op_bit_10_11)
+	{
+	case 0: /* Data-processing */
+	  err = thumb_copy_unmodified_16bit (gdbarch, insn1,
+					     "data-processing",
+					     dsc);
+	  break;
+	case 1: /* Special data instructions and branch and exchange.  */
+	  {
+	    unsigned short op = bits (insn1, 7, 9);
+	    if (op == 6 || op == 7) /* BX or BLX */
+	      err = thumb_copy_bx_blx_reg (gdbarch, insn1, regs, dsc);
+	    else if (bits (insn1, 6, 7) != 0) /* ADD/MOV/CMP high registers.  */
+	      err = thumb_copy_alu_reg (gdbarch, insn1, regs, dsc);
+	    else
+	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "special data",
+						 dsc);
+	  }
+	  break;
+	default: /* LDR (literal) */
+	  err = thumb_copy_16bit_ldr_literal (gdbarch, insn1, regs, dsc);
+	}
+      break;
+    case 5: case 6: case 7: case 8: case 9: /* Load/Store single data item */
+      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "ldr/str", dsc);
+      break;
+    case 10:
+      if (op_bit_10_11 < 2) /* Generate PC-relative address */
+	err = thumb_decode_pc_relative_16bit (gdbarch, insn1, regs, dsc);
+      else /* Generate SP-relative address */
+	err = thumb_copy_unmodified_16bit (gdbarch, insn1, "sp-relative", dsc);
+      break;
+    case 11: /* Misc 16-bit instructions */
+      {
+	switch (bits (insn1, 8, 11))
+	  {
+	  case 1: case 3:  case 9: case 11: /* CBNZ, CBZ */
+	    err = thumb_copy_cbnz_cbz (gdbarch, insn1, regs, dsc);
+	    break;
+	  case 12: case 13: /* POP */
+	    if (bit (insn1, 8)) /* PC is in register list.  */
+	      err = thumb_copy_pop_pc_16bit (gdbarch, insn1, regs, dsc);
+	    else
+	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "pop", dsc);
+	    break;
+	  case 15: /* If-Then, and hints */
+	    if (bits (insn1, 0, 3))
+	      /* If-Then makes up to four following instructions conditional.
+		 IT instruction itself is not conditional, so handle it as a
+		 common unmodified instruction.  */
+	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "If-Then",
+						 dsc);
+	    else
+	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "hints", dsc);
+	    break;
+	  default:
+	    err = thumb_copy_unmodified_16bit (gdbarch, insn1, "misc", dsc);
+	  }
+      }
+      break;
+    case 12:
+      if (op_bit_10_11 < 2) /* Store multiple registers */
+	err = thumb_copy_unmodified_16bit (gdbarch, insn1, "stm", dsc);
+      else /* Load multiple registers */
+	err = thumb_copy_unmodified_16bit (gdbarch, insn1, "ldm", dsc);
+      break;
+    case 13: /* Conditional branch and supervisor call */
+      if (bits (insn1, 9, 11) != 7) /* conditional branch */
+	err = thumb_copy_b (gdbarch, insn1, dsc);
+      else
+	err = thumb_copy_svc (gdbarch, insn1, regs, dsc);
+      break;
+    case 14: /* Unconditional branch */
+      err = thumb_copy_b (gdbarch, insn1, dsc);
+      break;
+    default:
+      err = 1;
+    }
+
+  if (err)
+    internal_error (__FILE__, __LINE__,
+		    _("thumb_process_displaced_16bit_insn: Instruction decode error"));
+}
+
+static void
+thumb_process_displaced_32bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
+				    uint16_t insn2, struct regcache *regs,
+				    struct displaced_step_closure *dsc)
+{
+  error (_("Displaced stepping is only supported in ARM mode and Thumb 16bit instructions"));
+}
+
+static void
 thumb_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
 			      CORE_ADDR to, struct regcache *regs,
 			      struct displaced_step_closure *dsc)
 {
-  error (_("Displaced stepping is only supported in ARM mode"));
+  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
+  uint16_t insn1
+    = read_memory_unsigned_integer (from, 2, byte_order_for_code);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: process thumb insn %.4x "
+			"at %.8lx\n", insn1, (unsigned long) from);
+
+  dsc->is_thumb = 1;
+  dsc->insn_size = thumb_insn_size (insn1);
+  if (thumb_insn_size (insn1) == 4)
+    {
+      uint16_t insn2
+	= read_memory_unsigned_integer (from + 2, 2, byte_order_for_code);
+      thumb_process_displaced_32bit_insn (gdbarch, insn1, insn2, regs, dsc);
+    }
+  else
+    thumb_process_displaced_16bit_insn (gdbarch, insn1, regs, dsc);
 }
 
 void
-- 
1.7.0.4


^ permalink raw reply	[flat|nested] 66+ messages in thread

* [try 2nd 7/8] Test case: V3
  2011-05-05 13:26     ` Yao Qi
@ 2011-05-11 13:15       ` Yao Qi
  2011-05-17 17:24         ` Ulrich Weigand
  0 siblings, 1 reply; 66+ messages in thread
From: Yao Qi @ 2011-05-11 13:15 UTC (permalink / raw)
  To: gdb-patches

[-- Attachment #1: Type: text/plain, Size: 269 bytes --]

As one problem pointed out by Ulrich in the patch review,

  http://sourceware.org/ml/gdb-patches/2011-05/msg00240.html

test case is updated to test 16-bit Thumb pop instruction more
carefully, checking the PC value after each POP instruction.

-- 
Yao (齐尧)

[-- Attachment #2: 0003-test-case.patch --]
[-- Type: text/x-patch, Size: 14467 bytes --]

gdb/testsuite/

        * gdb.arch/arm-disp-step.S (test_ldr_literal): Test for Thumb
        instructions.
        (test_adr_32bit, test_pop_pc): Likewise.
        (test_ldr_literal_16, test_cbz_cbnz, test_adr): New test for
        Thumb instructions.
        * gdb.arch/arm-disp-step.exp (test_ldm_stm_pc): Match $gdb_prompt
        in gdb_test_multiple.
        (test_ldr_literal_16, test_cbz_cbnz, test_adr): New
---
 gdb/testsuite/gdb.arch/arm-disp-step.S   |  166 +++++++++++++++++++++--
 gdb/testsuite/gdb.arch/arm-disp-step.exp |  210 ++++++++++++++++++++++++------
 2 files changed, 319 insertions(+), 57 deletions(-)

diff --git a/gdb/testsuite/gdb.arch/arm-disp-step.S b/gdb/testsuite/gdb.arch/arm-disp-step.S
index fa69e31..1da7191 100644
--- a/gdb/testsuite/gdb.arch/arm-disp-step.S
+++ b/gdb/testsuite/gdb.arch/arm-disp-step.S
@@ -48,20 +48,32 @@ test_ret_end:
 	bl test_ldm_stm_pc
 #endif
 
-	/* Test ldrX literal in ARM */
-#if !defined (__thumb__)
+	/* Test ldrX literal in ARM and Thumb-2 */
+#if !defined (__thumb__) || defined(__thumb2__)
 	bl test_ldr_literal
 #endif
 
-	/* Test 32-bit adr in ARM */
-#if !defined(__thumb__)
-	bl test_adr_32bit
+	/* Test ldr literal in Thumb */
+#if defined(__thumb__)
+	bl test_ldr_literal_16
 #endif
 
-#if !defined(__thumb__)
-	bl test_pop_pc
+	/* Test cbnz/cbz in Thumb-2 */
+#if defined(__thumb2__)
+	bl test_cbz_cbnz
 #endif
 
+	/* Test adr in Thumb and Thumb-2 */
+#if defined(__thumb__)
+	bl test_adr
+#endif
+	/* Test 32-bit adr in ARM and Thumb-2 */
+#if defined(__thumb2__) || !defined(__thumb__)
+	bl test_adr_32bit
+#endif
+
+	bl test_pop_pc
+	
 	/* Test str in ARM mode and Thumb-2 */
 #if !defined(__thumb__)
 	bl test_str_pc
@@ -136,8 +148,8 @@ test_ldm_stm_pc_ret:
 	.word	test_ldm_stm_pc_ret
 	.size test_ldm_stm_pc, .-test_ldm_stm_pc
 #endif
-
-#if !defined (__thumb__)
+	
+#if !defined (__thumb__) || defined(__thumb2__)
 	.global test_ldr_literal
 	.type test_ldr_literal, %function
 test_ldr_literal:
@@ -154,8 +166,66 @@ test_ldr_literal_end:
 	.size test_ldr_literal, .-test_ldr_literal
 #endif
 
+#if defined(__thumb__)
+	.global test_ldr_literal_16
+	.code   16
+	.thumb_func
+test_ldr_literal_16:
+	ldr	r0, .L2
+	.global test_ldr_literal_16_end
+test_ldr_literal_16_end:
+	bx lr
+	.align	2
+.L2:
+	.word	test_ldr_literal_16
+	.size test_ldr_literal_16, .-test_ldr_literal_16
+#endif
 
-#if !defined(__thumb__)
+#if defined(__thumb2__)
+	.global test_cbz_cbnz
+	.code   16
+	.thumb_func
+test_cbz_cbnz:
+	movs 	r0, #0
+	.global test_zero_cbnz
+test_zero_cbnz:
+	cbnz	r0, .L3
+	.global test_zero_cbz
+test_zero_cbz:
+	cbz	r0, .L3
+.L3:
+	movs	r0, #1
+	.global test_non_zero_cbz
+test_non_zero_cbz:
+	cbz	r0, .L4
+	.global test_non_zero_cbnz
+test_non_zero_cbnz:
+	cbnz	r0, .L4
+	nop
+.L4:
+	.global test_cbz_cbnz_end
+test_cbz_cbnz_end:
+	bx lr
+	.size test_cbz_cbnz, .-test_cbz_cbnz
+#endif
+
+#if defined(__thumb__)
+	.global test_adr
+	.code   16
+	.thumb_func
+test_adr:
+	adr	r0, .L8
+	nop
+	nop
+	nop
+.L8:
+	.global test_adr_end
+test_adr_end:
+	bx lr
+	.size test_adr, .-test_adr
+#endif
+
+#if defined(__thumb2__) || !defined(__thumb__)
 	.global test_adr_32bit
 #if defined(__thumb2__)
 	.code   16
@@ -184,22 +254,90 @@ test_adr_32bit_end:
 #endif
 
 test_pop_pc:
-	ldr     r1, .L5
+	ldr     r1, .L1_right
+	ldr	r2, .L1_wrong
 #if defined(__thumb__)
 	movs    r0, #1
 	orrs    r1, r0
+	orrs	r2, r0
 #endif
 	push    {r1}
-	push    {r1}
+	push    {r2}
 	.global test_pop_pc_1
 test_pop_pc_1:
 	pop     {r1, pc}
+
+test_pop_pc_2_start:
+	ldr r1, .L2_right
+#if defined(__thumb__)
+	movs    r0, #1
+	orrs    r1, r0
+#endif
+	push	{r1}
+	.global test_pop_pc_2
+test_pop_pc_2:
+	pop	{pc}
+
+	/* Test pop instruction with full register list.  */
+test_pop_pc_3_start:
+	ldr     r1, .L3_right
+	ldr	r2, .L3_wrong
+#if defined(__thumb__)
+	movs    r0, #1
+	orrs    r1, r0
+	orrs	r2, r0
+#endif
+	push 	{r7}
+	push    {r1} /* Push the right address so that PC will get it.  */
+	/* Push the wrong address so r0-r7 will get the wrong address.  If PC
+	is set from any of them, we can get a FAIL.  */
+	push	{r2} 
+	push	{r2}
+	push	{r2}
+	push	{r2}
+	push	{r2}
+	push	{r2}
+	push	{r2}
+	push	{r2}
+test_pop_pc_3:
+	pop	{r0,r1,r2,r3,r4,r5,r6,r7,pc}
 	.global test_pop_pc_ret
 test_pop_pc_ret:
+	pop	{r7}
 	bx lr
+
+	.global test_pop_pc_1_right
+test_pop_pc_1_right:
+	b	test_pop_pc_2_start /* right */
+	.global test_pop_pc_1_wrong
+test_pop_pc_1_wrong:
+	b	test_pop_pc_2_start /* wrong */
+	.global test_pop_pc_2_right
+test_pop_pc_2_right:
+	b	test_pop_pc_3_start /* right */
+	.global test_pop_pc_2_wrong
+test_pop_pc_2_wrong:
+	b	test_pop_pc_3_start /* wrong */
+	.global test_pop_pc_3_right
+test_pop_pc_3_right:
+	b	test_pop_pc_ret /* right */
+	.global test_pop_pc_3_wrong
+test_pop_pc_3_wrong:
+	b	test_pop_pc_ret /* wrong */
+	
 	.align  2
-.L5:
-	.word   test_pop_pc_ret
+.L1_right:
+	.word   test_pop_pc_1_right
+.L1_wrong:
+	.word	test_pop_pc_1_wrong
+.L2_right:
+	.word   test_pop_pc_2_right
+.L2_wrong:
+	.word	test_pop_pc_2_wrong
+.L3_right:
+	.word   test_pop_pc_3_right
+.L3_wrong:
+	.word	test_pop_pc_3_wrong
 	.size test_pop_pc, .-test_pop_pc
 
 #if !defined(__thumb__)
diff --git a/gdb/testsuite/gdb.arch/arm-disp-step.exp b/gdb/testsuite/gdb.arch/arm-disp-step.exp
index 0427a04..994f08e 100644
--- a/gdb/testsuite/gdb.arch/arm-disp-step.exp
+++ b/gdb/testsuite/gdb.arch/arm-disp-step.exp
@@ -42,15 +42,17 @@ if { [gdb_compile "${srcdir}/${subdir}/${srcfile}" "${binfile}" executable [list
 # Test ldm/stm related to PC.
 proc test_ldm_stm_pc {} {
     global srcfile
+    global gdb_prompt
+
     # Try to set breakpoint on test_ldm_stm_pc.  If symbol 'test_ldm_stm_pc'
     # can't be resolved, test case is compiled in Thumb mode, skip it.
     gdb_test_multiple "break *test_ldm_stm_pc" "break test_ldm_stm_pc" {
-	-re "Breakpoint.*at.* file .*$srcfile, line.*" {
+	-re "Breakpoint.*at.* file .*$srcfile, line.*\r\n$gdb_prompt $" {
 	    pass "break test_ldm_stm_pc"
 	}
-	-re "Function \"test_ldm_stm_pc\" not defined\..*Make breakpoint pending on future shared library load.*y or .n.. $" {
-	    gdb_test "n" "" "Test case is compiled in Thumb mode"
-	    return
+	-re "No symbol.*\r\n$gdb_prompt $" {
+	    pass "break test_ldm_stm_pc"
+	    return 0
 	}
     }
 
@@ -104,10 +106,38 @@ proc test_ldr_literal {} {
        ".*bx lr.*"
 }
 
+proc test_ldr_literal_16 {} {
+    global srcfile
+    global gdb_prompt
+
+    gdb_test_multiple "break *test_ldr_literal_16" "break test_ldr_literal_16" {
+	-re "Breakpoint.*at.* file .*$srcfile, line.*\r\n$gdb_prompt $" {
+	    pass "break test_ldr_literal"
+	}
+	-re "No symbol.*\r\n$gdb_prompt $" {
+	    return 0
+	}
+    }
+    gdb_test "break *test_ldr_literal_16_end" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_ldr_literal_16_end"
+
+    gdb_continue_to_breakpoint "continue to test_ldr_literal_16" \
+	".*ldr.*r0\,.*L2.*"
+    gdb_continue_to_breakpoint "continue to test_ldr_literal_16_end" \
+	".*bx lr.*"
+}
+
 ##########################################
 # Test call/ret.
 proc test_call_ret {} {
     global srcfile
+    global testfile
+
+    gdb_test "break *test_call" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_call"
+
     gdb_test "break *test_call_end" \
 	"Breakpoint.*at.* file .*$srcfile, line.*" \
 	"break test_call_end"
@@ -118,9 +148,10 @@ proc test_call_ret {} {
 	"Breakpoint.*at.* file .*$srcfile, line.*" \
 	"break test_ret_end"
 
-    gdb_continue_to_breakpoint "continue to test_call_end" \
+    gdb_continue_to_breakpoint "test_call" ".*bl test_call_subr.*"
+    gdb_continue_to_breakpoint "test_call_end" \
 	".*@ Location test_call_end.*"
-    gdb_continue_to_breakpoint "continue to test_ret" \
+    gdb_continue_to_breakpoint "test_ret" \
 	".*bx lr.*"
     gdb_continue_to_breakpoint "continue to test_ret_end" \
 	".*@ Location test_ret_end.*"
@@ -158,7 +189,68 @@ proc test_ldr_from_pc {} {
 
     gdb_continue_to_breakpoint "continue to test_ldr_pc" \
 	".*ldr.*r1\,.*\[pc, #0\].*"
-    gdb_continue_to_breakpoint "continue to Lbranch" \
+    gdb_continue_to_breakpoint "continue to test_ldr_pc_ret" \
+	".*bx lr.*"
+}
+
+#########################################
+
+# Test cbz and cbnz
+proc test_cbz_cbnz {} {
+    global srcfile
+    global gdb_prompt
+
+    gdb_test_multiple "break *test_zero_cbnz" "break test_zero_cbnz" {
+	-re "Breakpoint.*at.* file .*$srcfile, line.*\r\n$gdb_prompt $" {
+	    pass "break test_ldr_literal"
+	}
+	-re "No symbol.*\r\n$gdb_prompt $" {
+	    return 0
+	}
+    }
+
+    gdb_test "break *test_zero_cbz" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_zero_cbz"
+    gdb_test "break *test_non_zero_cbnz" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_non_zero_cbnz"
+    gdb_test "break *test_non_zero_cbz" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_non_zero_cbz"
+
+    gdb_continue_to_breakpoint "continue to test_zero_cbnz" \
+	".*cbnz.*r0\,.*\.L3.*"
+    gdb_continue_to_breakpoint "continue to test_zero_cbz" \
+	".*cbz.*r0\,.*\.L3.*"
+    gdb_continue_to_breakpoint "continue to test_non_zero_cbz" \
+	".*cbz.*r0\,.*\.L4.*"
+    gdb_continue_to_breakpoint "continue to test_non_zero_cbnz" \
+	".*cbnz.*r0\,.*\.L4.*"
+}
+
+# Test adr
+
+proc test_adr {} {
+    global srcfile
+    global gdb_prompt
+
+    gdb_test_multiple "break *test_adr" "break test_adr" {
+	-re "Breakpoint.*at.* file .*$srcfile, line.*\r\n$gdb_prompt $" {
+	    pass "break test_adr"
+	}
+	-re "No symbol.*\r\n$gdb_prompt $" {
+	    return 0
+	}
+    }
+
+    gdb_test "break *test_adr_end" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_adr_end"
+
+    gdb_continue_to_breakpoint "test_adr" \
+	".*adr.*r0\,.*\.L8.*"
+    gdb_continue_to_breakpoint "test_adr_end" \
 	".*bx lr.*"
 }
 
@@ -167,28 +259,28 @@ proc test_adr_32bit {} {
     global gdb_prompt
 
     gdb_test_multiple "break *test_adr_32bit" "break test_adr_32bit" {
-       -re "Breakpoint.*at.* file .*$srcfile, line.*\r\n$gdb_prompt $" {
-           pass "break test_adr"
-       }
-       -re "No symbol.*\r\n$gdb_prompt $" {
-           return 0
-       }
+	-re "Breakpoint.*at.* file .*$srcfile, line.*\r\n$gdb_prompt $" {
+	    pass "break test_adr"
+	}
+	-re "No symbol.*\r\n$gdb_prompt $" {
+	    return 0
+	}
     }
 
     gdb_test "break *test_adr_32bit_after" \
-       "Breakpoint.*at.* file .*$srcfile, line.*" \
-       "break test_adr_32bit_after"
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_adr_32bit_after"
 
     gdb_test "break *test_adr_32bit_end" \
-       "Breakpoint.*at.* file .*$srcfile, line.*" \
-       "break test_adr_32bit_end"
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_adr_32bit_end"
 
     gdb_continue_to_breakpoint "test_adr_32bit" \
-       ".*adr.*r0\,.*\.L6.*"
+	".*adr.*r0\,.*\.L6.*"
     gdb_continue_to_breakpoint "test_adr_32bit_after" \
-       ".*adr.*r0\,.*\.L6.*"
+	".*adr.*r0\,.*\.L6.*"
     gdb_continue_to_breakpoint "test_adr_32bit_end" \
-       ".*bx lr.*"
+	".*bx lr.*"
 }
 
 #########################################
@@ -196,27 +288,66 @@ proc test_adr_32bit {} {
 proc test_pop_pc {} {
     global srcfile
     gdb_test "break *test_pop_pc_1" \
-       "Breakpoint.*at.* file .*$srcfile, line.*" \
-       "break test_pop_pc"
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_pop_pc_1"
+    gdb_test "break *test_pop_pc_2" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_pop_pc_2"
+    gdb_test "break *test_pop_pc_3" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_pop_pc_3"
+
     gdb_test "break *test_pop_pc_ret" \
-       "Breakpoint.*at.* file .*$srcfile, line.*" \
-       "break test_pop_pc_ret"
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_pop_pc_ret"
 
-    gdb_continue_to_breakpoint "continue to test_pop_pc" \
-       ".*b.*\{r1\, pc\}.*"
+    gdb_test "break *test_pop_pc_1_right" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_pop_pc_1_right"
+    gdb_test "break *test_pop_pc_1_wrong" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_pop_pc_1_wrong"
+    gdb_test "break *test_pop_pc_2_right" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_pop_pc_2_right"
+    gdb_test "break *test_pop_pc_2_wrong" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_pop_pc_2_wrong"
+    gdb_test "break *test_pop_pc_3_right" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_pop_pc_3_right"
+    gdb_test "break *test_pop_pc_3_wrong" \
+	"Breakpoint.*at.* file .*$srcfile, line.*" \
+	"break test_pop_pc_1_wrong"
+
+    gdb_continue_to_breakpoint "continue to test_pop_pc_1" \
+	".*b.*\{r1\, pc\}.*"
+    gdb_continue_to_breakpoint "continue to test_pop_pc_1_check" \
+	".*b.*right.*"
+
+    gdb_continue_to_breakpoint "continue to test_pop_pc_2" \
+	".*\{pc\}.*"
+    gdb_continue_to_breakpoint "continue to test_pop_pc_2_check" \
+	".*b.*right.*"
+    gdb_continue_to_breakpoint "continue to test_pop_pc_3" \
+	".*\{r0\,r1\,r2\,r3\,r4\,r5\,r6\,r7\,pc\}.*"
+    gdb_continue_to_breakpoint "continue to test_pop_pc_3_check" \
+	".*b.*right.*"
     gdb_continue_to_breakpoint "continue to test_pop_pc_ret" \
-        ".*bx lr.*"
+	".*r7.*"
 }
 
 ###########################################
 
 proc test_str_pc {} {
     global srcfile
+    global gdb_prompt
+
     gdb_test_multiple "break *test_str_pc" "break test_str_pc" {
-	-re "Breakpoint.*at.* file .*$srcfile, line.*" {
+	-re "Breakpoint.*at.* file .*$srcfile, line.*\r\n$gdb_prompt $" {
 	    pass "break test_str_pc"
 	}
-	-re "No symbol.*" {
+	-re "No symbol.*\r\n$gdb_prompt $" {
 	    pass "break test_str_pc"
 	    return
 	}
@@ -261,20 +392,6 @@ if ![runto_main] then {
 gdb_test_no_output "set displaced-stepping on"
 gdb_test "show displaced-stepping" ".* displaced stepping .* is on.*"
 
-gdb_test "break *test_call" \
-	"Breakpoint.*at.* file .*$srcfile, line.*" \
-	"break test_call"
-
-gdb_test_multiple "continue" "continue to test_call" {
-	-re ".*bl test_call_subr.*" {
-	    pass "continue to test_call"
-	}
-	-re "Displaced stepping is only supported in" {
-	    kfail "gdb/NNNN" $testfile
-	    return
-	}
-    }
-
 test_call_ret
 
 test_branch
@@ -285,11 +402,18 @@ test_ldm_stm_pc
 
 test_ldr_literal
 
+test_ldr_literal_16
+
+test_cbz_cbnz
+
+test_adr
+
 test_adr_32bit
 
 test_pop_pc
 
 test_str_pc
+
 ##########################################
 
 # Done, run program to exit.
-- 
1.7.0.4


^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [try 2nd 4/8] Displaced stepping for Thumb 16-bit insn
  2011-05-11 13:06         ` Yao Qi
@ 2011-05-16 17:19           ` Ulrich Weigand
  2011-05-17 14:29             ` Yao Qi
  0 siblings, 1 reply; 66+ messages in thread
From: Ulrich Weigand @ 2011-05-16 17:19 UTC (permalink / raw)
  To: Yao Qi; +Cc: gdb-patches

Yao Qi wrote:

> In my new patch, there are three different cases to handle POP instruction,
> 1.  register list is full, no free register.  The code sequence I am
> using is like
> 
>      POP {r0, r1, ...., r6};
>      POP {r7};

The above can use just a single POP {r0, ..., r7}, can't it?

>      MOV r8, r7;
>      POP {r7};
> 
> after execution of this sequence, PC's value is stored in r7, and r7's
> value is stored in r8.  In cleanup, we can set PC, r7, and r8 accordingly.
> 
> 2.  register list is not full, and not empty.  In this case, we scan the
> code to find a free register, rN.  Run the follow code sequence,
> 
>      POP {rX, rY, ...., rZ};
>      POP {rN};
> 
> After execution of this sequence, PC's value is stored in rN.  In
> cleanup, we can set PC from rN.

Have you looked at how the ARM case does it?  There, we still have just
a single POP { r0, ..., rN } that pops the right number of registers,
and then the cleanup function (cleanup_block_load_pc) reshuffles them.
It seems to me we could do the same (and actually use the same cleanup
function) for the Thumb case too ...

> 3.  register list is empty.  This case is relative simple.
> 
>      POP {r0}
> 
> In cleanup, we store r0's value to PC.

If we used cleanup_block_load_pc, this would handle the same case as well.

(Unfortunately, handling case 1 the same way looks somewhat difficult,
since cleanup_block_load_pc would expect the PC in register r8 ...)

> +cleanup_pop_pc_16bit(struct gdbarch *gdbarch, struct regcache *regs,
> +		     struct displaced_step_closure *dsc)

One more space before ( ...

> +  else /* Cleanup procedure of case #2 and case #3 can be unified.  */
> +    {
> +      int rx = 0;
> +      int rx_val = 0;
> +
> +      if (dsc->u.block.regmask)
> +	{
> +	  for (rx = 0; rx < 8; rx++)
> +	    if ((dsc->u.block.regmask & (1 << rx)) == 0)
> +	      break;
> +	}
> +      else
> +	rx = 0;

(This is irrelevant if we decide to use cleanup_block_load_pc, but:
the "if (dsc->u.block.regmask)" and "else rx = 0" are superfluous,
since the for loop will terminate with rx == 0 anyway if regmask
is zero.)

Thanks,
Ulrich

-- 
  Dr. Ulrich Weigand
  GNU Toolchain for Linux on System z and Cell BE
  Ulrich.Weigand@de.ibm.com

^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [try 2nd 4/8] Displaced stepping for Thumb 16-bit insn
  2011-05-16 17:19           ` Ulrich Weigand
@ 2011-05-17 14:29             ` Yao Qi
  2011-05-17 17:20               ` Ulrich Weigand
  0 siblings, 1 reply; 66+ messages in thread
From: Yao Qi @ 2011-05-17 14:29 UTC (permalink / raw)
  To: Ulrich Weigand; +Cc: gdb-patches

[-- Attachment #1: Type: text/plain, Size: 2408 bytes --]

On 05/17/2011 01:19 AM, Ulrich Weigand wrote:
> Yao Qi wrote:
>>
>>      POP {r0, r1, ...., r6};
>>      POP {r7};
> 
> The above can use just a single POP {r0, ..., r7}, can't it?

Yes, we can.  Why didn't I combine these two instructions then?

> Have you looked at how the ARM case does it?  There, we still have just
> a single POP { r0, ..., rN } that pops the right number of registers,
> and then the cleanup function (cleanup_block_load_pc) reshuffles them.
> It seems to me we could do the same (and actually use the same cleanup
> function) for the Thumb case too ...

Sure, we can reuse that for Thumb case here.  In this case, when
register list is not full, we could optimize it a little bit like what I
did in my last patch.  However, it is a separate issue, and can be
addressed separately.

>> 3.  register list is empty.  This case is relative simple.
>>
>>      POP {r0}
>>
>> In cleanup, we store r0's value to PC.
> 
> If we used cleanup_block_load_pc, this would handle the same case as well.
> 
> (Unfortunately, handling case 1 the same way looks somewhat difficult,
> since cleanup_block_load_pc would expect the PC in register r8 ...)
> 

In my new patch, there are two different cases to handle POP instruction.
1.  register list is full.  Use the following code sequence,

     POP {r0, r1, ...., r6, r7}; remove PC from reglist
     MOV r8, r7; Move value of r7 to r8;
     POP {r7}; Store PC value into r7.

Install cleanup routine cleanup_pop_pc_16bit_all (renamed from
cleanup_pop_pc_16bit)

2.  register list is not full.  Similar to arm part (arm_copy_block_xfer)

>> +cleanup_pop_pc_16bit(struct gdbarch *gdbarch, struct regcache *regs,
>> +		     struct displaced_step_closure *dsc)
> 
> One more space before ( ...
> 

Sorry about that.  Fixed.

>> +  else /* Cleanup procedure of case #2 and case #3 can be unified.  */
>> +    {
>> +      int rx = 0;
>> +      int rx_val = 0;
>> +
>> +      if (dsc->u.block.regmask)
>> +	{
>> +	  for (rx = 0; rx < 8; rx++)
>> +	    if ((dsc->u.block.regmask & (1 << rx)) == 0)
>> +	      break;
>> +	}
>> +      else
>> +	rx = 0;
> 
> (This is irrelevant if we decide to use cleanup_block_load_pc, but:
> the "if (dsc->u.block.regmask)" and "else rx = 0" are superfluous,
> since the for loop will terminate with rx == 0 anyway if regmask
> is zero.)

This part is removed since cleanup_block_load_pc is used.

-- 
Yao (齐尧)

[-- Attachment #2: 0001-Support-displaced-stepping-for-Thumb-16-bit-insns.patch --]
[-- Type: text/x-patch, Size: 17690 bytes --]

         Support displaced stepping for Thumb 16-bit insns.
         * arm-tdep.c (THUMB_NOP) Define.
         (thumb_copy_unmodified_16bit): New.
         (thumb_copy_b, thumb_copy_bx_blx_reg): New.
         (thumb_copy_alu_reg): New.
         (arm_copy_svc): Move some common code to ...
         (install_svc): ... here.  New.
         (thumb_copy_svc): New.
         (install_pc_relative): New.
         (thumb_copy_pc_relative_16bit): New.
         (thumb_decode_pc_relative_16bit): New.
         (thumb_copy_16bit_ldr_literal): New.
         (thumb_copy_cbnz_cbz): New.
         (cleanup_pop_pc_16bit_all): New.
         (thumb_copy_pop_pc_16bit): New.
         (thumb_process_displaced_16bit_insn): New.
         (thumb_process_displaced_32bit_insn): New.
         (thumb_process_displaced_insn): process thumb instruction.

---
 gdb/arm-tdep.c |  495 ++++++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 files changed, 483 insertions(+), 12 deletions(-)

diff --git a/gdb/arm-tdep.c b/gdb/arm-tdep.c
index 2dd8c9e..702a8a1 100644
--- a/gdb/arm-tdep.c
+++ b/gdb/arm-tdep.c
@@ -5118,6 +5118,7 @@ arm_adjust_breakpoint_address (struct gdbarch *gdbarch, CORE_ADDR bpaddr)
 
 /* NOP instruction (mov r0, r0).  */
 #define ARM_NOP				0xe1a00000
+#define THUMB_NOP 0x4600
 
 /* Helper for register reads for displaced stepping.  In particular, this
    returns the PC as it would be seen by the instruction at its original
@@ -5340,6 +5341,23 @@ arm_copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
   return 0;
 }
 
+/* Copy a 16-bit Thumb (Thumb and 16-bit Thumb-2) instruction without any
+   modification.  */
+static int
+thumb_copy_unmodified_16bit (struct gdbarch *gdbarch, unsigned int insn,
+			     const char *iname,
+			     struct displaced_step_closure *dsc)
+{
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x, "
+			"opcode/class '%s' unmodified\n", insn,
+			iname);
+
+  dsc->modinsn[0] = insn;
+
+  return 0;
+}
+
 /* Preload instructions with immediate offset.  */
 
 static void
@@ -5586,6 +5604,44 @@ arm_copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
   return 0;
 }
 
+/* Copy B Thumb instructions.  */
+static int
+thumb_copy_b (struct gdbarch *gdbarch, unsigned short insn,
+	      struct displaced_step_closure *dsc)
+{
+  unsigned int cond = 0;
+  int offset = 0;
+  unsigned short bit_12_15 = bits (insn, 12, 15);
+  CORE_ADDR from = dsc->insn_addr;
+
+  if (bit_12_15 == 0xd)
+    {
+      offset = sbits (insn, 0, 7);
+      cond = bits (insn, 8, 11);
+    }
+  else if (bit_12_15 == 0xe) /* Encoding T2 */
+    {
+      offset = sbits ((insn << 1), 0, 11);
+       cond = INST_AL;
+    }
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog,
+			"displaced: copying b immediate insn %.4x "
+			"with offset %d\n", insn, offset);
+
+  dsc->u.branch.cond = cond;
+  dsc->u.branch.link = 0;
+  dsc->u.branch.exchange = 0;
+  dsc->u.branch.dest = from + 4 + offset;
+
+  dsc->modinsn[0] = THUMB_NOP;
+
+  dsc->cleanup = &cleanup_branch;
+
+  return 0;
+}
+
 /* Copy BX/BLX with register-specified destinations.  */
 
 static void
@@ -5631,6 +5687,26 @@ arm_copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
   return 0;
 }
 
+static int
+thumb_copy_bx_blx_reg (struct gdbarch *gdbarch, uint16_t insn,
+		       struct regcache *regs,
+		       struct displaced_step_closure *dsc)
+{
+  int link = bit (insn, 7);
+  unsigned int rm = bits (insn, 3, 6);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x",
+			(unsigned short) insn);
+
+  dsc->modinsn[0] = THUMB_NOP;
+
+  install_bx_blx_reg (gdbarch, regs, dsc, link, INST_AL, rm);
+
+  return 0;
+}
+
+
 /* Copy/cleanup arithmetic/logic instruction with immediate RHS.  */
 
 static void
@@ -5765,6 +5841,31 @@ arm_copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
   return 0;
 }
 
+static int
+thumb_copy_alu_reg (struct gdbarch *gdbarch, uint16_t insn,
+		    struct regcache *regs,
+		    struct displaced_step_closure *dsc)
+{
+  unsigned rn, rm, rd;
+
+  rd = bits (insn, 3, 6);
+  rn = (bit (insn, 7) << 3) | bits (insn, 0, 2);
+  rm = 2;
+
+  if (rd != ARM_PC_REGNUM && rn != ARM_PC_REGNUM)
+    return thumb_copy_unmodified_16bit (gdbarch, insn, "ALU reg", dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.4x\n",
+			"ALU", (unsigned short) insn);
+
+  dsc->modinsn[0] = ((insn & 0xff00) | 0x08);
+
+  install_alu_reg (gdbarch, regs, dsc, rd, rn, rm);
+
+  return 0;
+}
+
 /* Cleanup/copy arithmetic/logic insns with shifted register RHS.  */
 
 static void
@@ -6439,21 +6540,16 @@ cleanup_svc (struct gdbarch *gdbarch, struct regcache *regs,
   displaced_write_reg (regs, dsc, ARM_PC_REGNUM, resume_addr, BRANCH_WRITE_PC);
 }
 
-static int
-
-arm_copy_svc (struct gdbarch *gdbarch, uint32_t insn,
-	      struct regcache *regs, struct displaced_step_closure *dsc)
-{
 
-  if (debug_displaced)
-    fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.8lx\n",
-			(unsigned long) insn);
+/* Common copy routine for the svc instruction.  */
 
+static int
+install_svc (struct gdbarch *gdbarch, struct regcache *regs,
+	     struct displaced_step_closure *dsc)
+{
   /* Preparation: none.
      Insn: unmodified svc.
-     Cleanup: pc <- insn_addr + 4.  */
-
-  dsc->modinsn[0] = insn;
+     Cleanup: pc <- insn_addr + insn_size.  */
 
   /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
      instruction.  */
@@ -6467,7 +6563,34 @@ arm_copy_svc (struct gdbarch *gdbarch, uint32_t insn,
       dsc->cleanup = &cleanup_svc;
       return 0;
     }
+}
+
+static int
+arm_copy_svc (struct gdbarch *gdbarch, uint32_t insn,
+	      struct regcache *regs, struct displaced_step_closure *dsc)
+{
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.8lx\n",
+			(unsigned long) insn);
+
+  dsc->modinsn[0] = insn;
+
+  return install_svc (gdbarch, regs, dsc);
+}
+
+static int
+thumb_copy_svc (struct gdbarch *gdbarch, uint16_t insn,
+		struct regcache *regs, struct displaced_step_closure *dsc)
+{
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.4x\n",
+			insn);
+
+  dsc->modinsn[0] = insn;
 
+  return install_svc (gdbarch, regs, dsc);
 }
 
 /* Copy undefined instructions.  */
@@ -6929,11 +7052,359 @@ arm_decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
 }
 
 static void
+install_pc_relative (struct gdbarch *gdbarch, struct regcache *regs,
+		     struct displaced_step_closure *dsc, int rd)
+{
+  /* ADR Rd, #imm
+
+     Rewrite as:
+
+     Preparation: Rd <- PC
+     Insn: ADD Rd, #imm
+     Cleanup: Null.
+  */
+
+  /* Rd <- PC */
+  int val = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
+  displaced_write_reg (regs, dsc, rd, val, CANNOT_WRITE_PC);
+}
+
+static int
+thumb_copy_pc_relative_16bit (struct gdbarch *gdbarch, struct regcache *regs,
+			      struct displaced_step_closure *dsc,
+			      int rd, unsigned int imm)
+{
+
+  /* Encoding T2: ADDS Rd, #imm */
+  dsc->modinsn[0] = (0x3000 | (rd << 8) | imm);
+
+  install_pc_relative (gdbarch, regs, dsc, rd);
+
+  return 0;
+}
+
+static int
+thumb_decode_pc_relative_16bit (struct gdbarch *gdbarch, uint16_t insn,
+				struct regcache *regs,
+				struct displaced_step_closure *dsc)
+{
+  unsigned int rd = bits (insn, 8, 10);
+  unsigned int imm8 = bits (insn, 0, 7);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog,
+			"displaced: copying thumb adr r%d, #%d insn %.4x\n",
+			rd, imm8, insn);
+
+  return thumb_copy_pc_relative_16bit (gdbarch, regs, dsc, rd, imm8);
+}
+
+static int
+thumb_copy_16bit_ldr_literal (struct gdbarch *gdbarch, unsigned short insn1,
+			      struct regcache *regs,
+			      struct displaced_step_closure *dsc)
+{
+  unsigned int rt = bits (insn1, 8, 7);
+  unsigned int pc;
+  int imm8 = sbits (insn1, 0, 7);
+  CORE_ADDR from = dsc->insn_addr;
+
+  /* LDR Rd, #imm8
+
+     Rewrite as:
+
+     Preparation: tmp2 <- R2, tmp3 <- R3, R2 <- PC, R3 <- #imm8;
+                  if (Rd is not R0) tmp0 <- R0;
+     Insn: LDR R0, [R2, R3];
+     Cleanup: R2 <- tmp2, R3 <- tmp3,
+              if (Rd is not R0) Rd <- R0, R0 <- tmp0 */
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying thumb ldr literal "
+			"insn %.4x\n", insn1);
+
+  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
+  dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
+  dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
+  pc = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
+
+  displaced_write_reg (regs, dsc, 2, pc, CANNOT_WRITE_PC);
+  displaced_write_reg (regs, dsc, 3, imm8, CANNOT_WRITE_PC);
+
+  dsc->rd = rt;
+  dsc->u.ldst.xfersize = 4;
+  dsc->u.ldst.rn = 0;
+  dsc->u.ldst.immed = 0;
+  dsc->u.ldst.writeback = 0;
+  dsc->u.ldst.restore_r4 = 0;
+
+  dsc->modinsn[0] = 0x58d0; /* ldr r0, [r2, r3]*/
+
+  dsc->cleanup = &cleanup_load;
+
+  return 0;
+}
+
+/* Copy Thumb cbnz/cbz instruction.  */
+
+static int
+thumb_copy_cbnz_cbz (struct gdbarch *gdbarch, uint16_t insn1,
+		     struct regcache *regs,
+		     struct displaced_step_closure *dsc)
+{
+  int non_zero = bit (insn1, 11);
+  unsigned int imm5 = (bit (insn1, 9) << 6) | (bits (insn1, 3, 7) << 1);
+  CORE_ADDR from = dsc->insn_addr;
+  int rn = bits (insn1, 0, 2);
+  int rn_val = displaced_read_reg (regs, dsc, rn);
+
+  dsc->u.branch.cond = (rn_val && non_zero) || (!rn_val && !non_zero);
+  /* CBNZ and CBZ do not affect the condition flags.  If condition is true,
+     set it INST_AL, so cleanup_branch will know branch is taken, otherwise,
+     condition is false, let it be, cleanup_branch will do nothing.  */
+  if (dsc->u.branch.cond)
+    dsc->u.branch.cond = INST_AL;
+
+  dsc->u.branch.link = 0;
+  dsc->u.branch.exchange = 0;
+
+  dsc->u.branch.dest = from + 2 + imm5;
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s [r%d = 0x%x]"
+			" insn %.4x to %.8lx\n", non_zero ? "cbnz" : "cbz",
+			rn, rn_val, insn1, dsc->u.branch.dest);
+
+  dsc->modinsn[0] = THUMB_NOP;
+
+  dsc->cleanup = &cleanup_branch;
+  return 0;
+}
+
+static void
+cleanup_pop_pc_16bit_all (struct gdbarch *gdbarch, struct regcache *regs,
+			  struct displaced_step_closure *dsc)
+{
+  /* PC <- r7 */
+  int val = displaced_read_reg (regs, dsc, 7);
+  displaced_write_reg (regs, dsc, ARM_PC_REGNUM, val, BX_WRITE_PC);
+
+  /* r7 <- r8 */
+  val = displaced_read_reg (regs, dsc, 8);
+  displaced_write_reg (regs, dsc, 7, val, CANNOT_WRITE_PC);
+
+  /* r8 <- tmp[0] */
+  displaced_write_reg (regs, dsc, 8, dsc->tmp[0], CANNOT_WRITE_PC);
+
+}
+
+static int
+thumb_copy_pop_pc_16bit (struct gdbarch *gdbarch, unsigned short insn1,
+			 struct regcache *regs,
+			 struct displaced_step_closure *dsc)
+{
+  dsc->u.block.regmask = insn1 & 0x00ff;
+
+  /* Rewrite instruction: POP {rX, rY, ...,rZ, PC}
+     to :
+
+     (1) register list is full, that is, r0-r7 are used.
+     Prepare: tmp[0] <- r8
+
+     POP {r0, r1, ...., r6, r7}; remove PC from reglist
+     MOV r8, r7; Move value of r7 to r8;
+     POP {r7}; Store PC value into r7.
+
+     Cleanup: PC <- r7, r7 <- r8, r8 <-tmp[0]
+
+     (2) register list is not full, supposing there are N registers in
+     register list (except PC, 0 <= N <= 7).
+     Prepare: for each i, 0 - N, tmp[i] <- ri.
+
+     POP {r0, r1, ...., rN};
+
+     Cleanup: Set registers in original reglist from r0 - rN.  Restore r0 - rN
+     from tmp[] properly.
+  */
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog,
+			"displaced: copying thumb pop {%.8x, pc} insn %.4x\n",
+			dsc->u.block.regmask, insn1);
+
+  if (dsc->u.block.regmask == 0xff)
+    {
+      dsc->tmp[0] = displaced_read_reg (regs, dsc, 8);
+
+      dsc->modinsn[0] = (insn1 & 0xfeff); /* POP {r0,r1,...,r6, r7} */
+      dsc->modinsn[1] = 0x46b8; /* MOV r8, r7 */
+      dsc->modinsn[2] = 0xbc80; /* POP {r7} */
+
+      dsc->numinsns = 3;
+      dsc->cleanup = &cleanup_pop_pc_16bit_all;
+    }
+  else
+    {
+      unsigned int num_in_list = bitcount (dsc->u.block.regmask);
+      unsigned int new_regmask, bit = 1;
+      unsigned int to = 0, from = 0, i, new_rn;
+
+      for (i = 0; i < num_in_list + 1; i++)
+	dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
+
+      new_regmask = (1 << (num_in_list + 1)) - 1;
+
+      if (debug_displaced)
+	fprintf_unfiltered (gdb_stdlog, _("displaced: POP "
+					  "{..., pc}: original reg list %.4x,"
+					  " modified list %.4x\n"),
+			    (int) dsc->u.block.regmask, new_regmask);
+
+      dsc->u.block.regmask |= 0x8000;
+      dsc->u.block.writeback = 0;
+      dsc->u.block.cond = INST_AL;
+
+      dsc->modinsn[0] = (insn1 & ~0x1ff) | (new_regmask & 0xff);
+
+      dsc->cleanup = &cleanup_block_load_pc;
+    }
+
+  return 0;
+}
+
+static void
+thumb_process_displaced_16bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
+				    struct regcache *regs,
+				    struct displaced_step_closure *dsc)
+{
+  unsigned short op_bit_12_15 = bits (insn1, 12, 15);
+  unsigned short op_bit_10_11 = bits (insn1, 10, 11);
+  int err = 0;
+
+  /* 16-bit thumb instructions.  */
+  switch (op_bit_12_15)
+    {
+      /* Shift (imme), add, subtract, move and compare.  */
+    case 0: case 1: case 2: case 3:
+      err = thumb_copy_unmodified_16bit (gdbarch, insn1,
+					 "shift/add/sub/mov/cmp",
+					 dsc);
+      break;
+    case 4:
+      switch (op_bit_10_11)
+	{
+	case 0: /* Data-processing */
+	  err = thumb_copy_unmodified_16bit (gdbarch, insn1,
+					     "data-processing",
+					     dsc);
+	  break;
+	case 1: /* Special data instructions and branch and exchange.  */
+	  {
+	    unsigned short op = bits (insn1, 7, 9);
+	    if (op == 6 || op == 7) /* BX or BLX */
+	      err = thumb_copy_bx_blx_reg (gdbarch, insn1, regs, dsc);
+	    else if (bits (insn1, 6, 7) != 0) /* ADD/MOV/CMP high registers.  */
+	      err = thumb_copy_alu_reg (gdbarch, insn1, regs, dsc);
+	    else
+	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "special data",
+						 dsc);
+	  }
+	  break;
+	default: /* LDR (literal) */
+	  err = thumb_copy_16bit_ldr_literal (gdbarch, insn1, regs, dsc);
+	}
+      break;
+    case 5: case 6: case 7: case 8: case 9: /* Load/Store single data item */
+      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "ldr/str", dsc);
+      break;
+    case 10:
+      if (op_bit_10_11 < 2) /* Generate PC-relative address */
+	err = thumb_decode_pc_relative_16bit (gdbarch, insn1, regs, dsc);
+      else /* Generate SP-relative address */
+	err = thumb_copy_unmodified_16bit (gdbarch, insn1, "sp-relative", dsc);
+      break;
+    case 11: /* Misc 16-bit instructions */
+      {
+	switch (bits (insn1, 8, 11))
+	  {
+	  case 1: case 3:  case 9: case 11: /* CBNZ, CBZ */
+	    err = thumb_copy_cbnz_cbz (gdbarch, insn1, regs, dsc);
+	    break;
+	  case 12: case 13: /* POP */
+	    if (bit (insn1, 8)) /* PC is in register list.  */
+	      err = thumb_copy_pop_pc_16bit (gdbarch, insn1, regs, dsc);
+	    else
+	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "pop", dsc);
+	    break;
+	  case 15: /* If-Then, and hints */
+	    if (bits (insn1, 0, 3))
+	      /* If-Then makes up to four following instructions conditional.
+		 IT instruction itself is not conditional, so handle it as a
+		 common unmodified instruction.  */
+	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "If-Then",
+						 dsc);
+	    else
+	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "hints", dsc);
+	    break;
+	  default:
+	    err = thumb_copy_unmodified_16bit (gdbarch, insn1, "misc", dsc);
+	  }
+      }
+      break;
+    case 12:
+      if (op_bit_10_11 < 2) /* Store multiple registers */
+	err = thumb_copy_unmodified_16bit (gdbarch, insn1, "stm", dsc);
+      else /* Load multiple registers */
+	err = thumb_copy_unmodified_16bit (gdbarch, insn1, "ldm", dsc);
+      break;
+    case 13: /* Conditional branch and supervisor call */
+      if (bits (insn1, 9, 11) != 7) /* conditional branch */
+	err = thumb_copy_b (gdbarch, insn1, dsc);
+      else
+	err = thumb_copy_svc (gdbarch, insn1, regs, dsc);
+      break;
+    case 14: /* Unconditional branch */
+      err = thumb_copy_b (gdbarch, insn1, dsc);
+      break;
+    default:
+      err = 1;
+    }
+
+  if (err)
+    internal_error (__FILE__, __LINE__,
+		    _("thumb_process_displaced_16bit_insn: Instruction decode error"));
+}
+
+static void
+thumb_process_displaced_32bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
+				    uint16_t insn2, struct regcache *regs,
+				    struct displaced_step_closure *dsc)
+{
+  error (_("Displaced stepping is only supported in ARM mode and Thumb 16bit instructions"));
+}
+
+static void
 thumb_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
 			      CORE_ADDR to, struct regcache *regs,
 			      struct displaced_step_closure *dsc)
 {
-  error (_("Displaced stepping is only supported in ARM mode"));
+  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
+  uint16_t insn1
+    = read_memory_unsigned_integer (from, 2, byte_order_for_code);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: process thumb insn %.4x "
+			"at %.8lx\n", insn1, (unsigned long) from);
+
+  dsc->is_thumb = 1;
+  dsc->insn_size = thumb_insn_size (insn1);
+  if (thumb_insn_size (insn1) == 4)
+    {
+      uint16_t insn2
+	= read_memory_unsigned_integer (from + 2, 2, byte_order_for_code);
+      thumb_process_displaced_32bit_insn (gdbarch, insn1, insn2, regs, dsc);
+    }
+  else
+    thumb_process_displaced_16bit_insn (gdbarch, insn1, regs, dsc);
 }
 
 void
-- 
1.7.0.4


^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [try 2nd 5/8] Displaced stepping for Thumb 32-bit insns
  2011-05-05 13:25     ` Yao Qi
@ 2011-05-17 17:14       ` Ulrich Weigand
  2011-05-23 11:32         ` Yao Qi
                           ` (2 more replies)
  0 siblings, 3 replies; 66+ messages in thread
From: Ulrich Weigand @ 2011-05-17 17:14 UTC (permalink / raw)
  To: Yao Qi; +Cc: gdb-patches

Yao Qi wrote:

> +static int
> +thumb2_copy_preload (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
> +		     struct regcache *regs, struct displaced_step_closure *dsc)
> +{
> +  unsigned int rn = bits (insn1, 0, 3);
> +  if (rn == ARM_PC_REGNUM)
> +    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "preload", dsc);
> +
> +  if (debug_displaced)
> +    fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.4x%.4x\n",
> +			insn1, insn2);
> +
> +  dsc->modinsn[0] = insn1 & 0xfff0;
> +  dsc->modinsn[1] = insn2;
> +  dsc->numinsns = 2;
> +
> +  install_preload (gdbarch, regs, dsc, rn);
> +
> +  return 0;
> +}

> +static int
> +thumb2_copy_preload_reg (struct gdbarch *gdbarch, uint16_t insn1,
> +			 uint16_t insn2, struct regcache *regs,
> +			 struct displaced_step_closure *dsc)
> +{
> +  unsigned int rn = bits (insn1, 0, 3);
> +  unsigned int rm = bits (insn2, 0, 3);
> +
> +  if (rn != ARM_PC_REGNUM && rm != ARM_PC_REGNUM)
> +    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "preload reg",
> +					dsc);
> +
> +  if (debug_displaced)
> +    fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.4x%.4x\n",
> +			insn1, insn1);
> +
> +  dsc->modinsn[0] = insn1 & 0xfff0;
> +  dsc->modinsn[1] = (insn2 & 0xfff0) | 0x1;
> +  dsc->numinsns = 2;
> +
> +  install_preload_reg (gdbarch, regs, dsc, rn, rm);
> +  return 0;
> +}

Handling of preload instructions seems wrong for a couple of reasons:

- In Thumb mode, PLD/PLI with register offset must not use PC as offset
  register, so those can just be copied unmodified.  The only instructions
  to be treated specially are the "literal" variants, which do encode
  PC-relative offsets.

  This means a separate thumb2_copy_preload_reg shouldn't be needed.

- However, you cannot just transform a PLD/PLI "literal" (i.e. PC + immediate)
  into an "immediate" (i.e. register + immediate) version, since in Thumb
  mode the "literal" version supports a 12-bit immediate, while the immediate
  version only supports an 8-bit immediate.

  I guess you could either add the immediate to the PC during preparation
  stage and then use an "immediate" instruction with immediate zero, or
  else load the immediate into a second register and use a "register"
  version of the instruction.


> +static int
> +thumb2_copy_copro_load_store (struct gdbarch *gdbarch, uint16_t insn1,
> +			      uint16_t insn2, struct regcache *regs,
> +			      struct displaced_step_closure *dsc)
> +{
> +  unsigned int rn = bits (insn1, 0, 3);
> +
> +  if (rn == ARM_PC_REGNUM)
> +    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
> +					"copro load/store", dsc);
> +
> +  if (debug_displaced)
> +    fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
> +			"load/store insn %.4x%.4x\n", insn1, insn2);
> +
> +  dsc->modinsn[0] = insn1 & 0xfff0;
> +  dsc->modinsn[1] = insn2;
> +  dsc->numinsns = 2;

This doesn't look right: you're replacing the RN register if it is anything
*but* 15 -- but those cases do not need to be replaced!

In fact, unless I'm missing something, in Thumb mode no coprocessor
instruction actually uses the PC (either RN == 15 indicates some other
operation, or else it is specified as unpredictable).  So those should
simply all be copied unmodified ...


> +static int
> +thumb2_copy_b_bl_blx (struct gdbarch *gdbarch, uint16_t insn1,
> +		      uint16_t insn2, struct regcache *regs,
> +		      struct displaced_step_closure *dsc)
> +{
> +  int link = bit (insn2, 14);
> +  int exchange = link && !bit (insn2, 12);
> +  int cond = INST_AL;
> +  long offset =0;
> +  int j1 = bit (insn2, 13);
> +  int j2 = bit (insn2, 11);
> +  int s = sbits (insn1, 10, 10);
> +  int i1 = !(j1 ^ bit (insn1, 10));
> +  int i2 = !(j2 ^ bit (insn1, 10));
> +
> +  if (!link && !exchange) /* B */
> +    {
> +      cond = bits (insn1, 6, 9);

Only encoding T3 has condition bits, not T4.

> +      offset = (bits (insn2, 0, 10) << 1);
> +      if (bit (insn2, 12)) /* Encoding T4 */
> +	{
> +	  offset |= (bits (insn1, 0, 9) << 12)
> +	    | (i2 << 22)
> +	    | (i1 << 23)
> +	    | (s << 24);
> +	}
> +      else /* Encoding T3 */
> +	offset |= (bits (insn1, 0, 5) << 12)
> +	  | (j1 << 18)
> +	  | (j2 << 19)
> +	  | (s << 20);
> +    }
> +  else
> +    {
> +      offset = (bits (insn1, 0, 9) << 12);
> +      offset |= ((i2 << 22) | (i1 << 23) | (s << 24));
> +      offset |= exchange ?
> +	(bits (insn2, 1, 10) << 2) : (bits (insn2, 0, 10) << 1);
> +    }
> +
> +  if (debug_displaced)
> +    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s immediate insn "
> +			"%.4x %.4x with offset %.8lx\n",
> +			(exchange) ? "blx" : "bl",
> +			insn1, insn2, offset);
> +
> +  dsc->modinsn[0] = THUMB_NOP;
> +
> +  install_b_bl_blx (gdbarch, regs, dsc, cond, exchange, 1, offset);

Why do you always pass 1 for link?  Shouldn't "link" be passed?

> +static int
> +thumb2_copy_alu_reg (struct gdbarch *gdbarch, uint16_t insn1,
> +		     uint16_t insn2, struct regcache *regs,
> +		     struct displaced_step_closure *dsc)
> +{
> +  unsigned int op2 = bits (insn2, 4, 7);
> +  int is_mov = (op2 == 0x0);
> +  unsigned int rn, rm, rd;
> +
> +  rn = bits (insn1, 0, 3); /* Rn */
> +  rm = bits (insn2, 0, 3); /* Rm */
> +  rd = bits (insn2, 8, 11); /* Rd */
> +
> +  /* In Thumb-2, rn, rm and rd can't be r15.  */
This isn't quite true ... otherwise we wouldn't need the routine at all.
> +  if (rn != ARM_PC_REGNUM && rm != ARM_PC_REGNUM
> +      && rd != ARM_PC_REGNUM)
> +    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "ALU reg", dsc);
> +
> +  if (debug_displaced)
> +    fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.4x%.4x\n",
> +			"ALU", insn1, insn2);
> +
> +  if (is_mov)
> +    dsc->modinsn[0] = insn1;
> +  else
> +    dsc->modinsn[0] = ((insn1 & 0xfff0) | 0x1);
> +
> +  dsc->modinsn[1] = ((insn2 & 0xf0f0) | 0x2);
> +  dsc->numinsns = 2;

This doesn't look right.  It looks like this function is called for all
instructions in tables A6-22 through A6-26; those encodings differ
significantly in how their fields are used.  Some of them have the
Rn, Rm, Rd fields as above, but others just have some of them.  For
some, a register field content of 15 does indeed refer to the PC and
needs to be replaced; for others a register field content of 15 means
instead that a different operation is to be performed (e.g. ADD vs TST,
EOR vs TEQ ...) and so it must *not* be replaced; and for yet others,
a register field content of 15 is unpredictable.

In fact, I think only a very small number of instructions in this
category actually may refer to the PC (only MOV?), so there needs
to the be more instruction decoding to actually identify those.

>  static int
> +thumb2_copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint16_t insn1,
> +			       uint16_t insn2,  struct regcache *regs,
> +			       struct displaced_step_closure *dsc,
> +			       int load, int byte, int usermode, int writeback)

Hmmm ... this function is called for *halfwords* as well, not just for
bytes and words.  This means the "byte" operand is no longer sufficient
to uniquely determine the size -- note that when calling down to the
install_ routine, xfersize is always set to 1 or 4.

> +{
> +  int immed = !bit (insn1, 9);
> +  unsigned int rt = bits (insn2, 12, 15);
> +  unsigned int rn = bits (insn1, 0, 3);
> +  unsigned int rm = bits (insn2, 0, 3);  /* Only valid if !immed.  */
> +
> +  if (rt != ARM_PC_REGNUM && rn != ARM_PC_REGNUM && rm != ARM_PC_REGNUM)
rm shouldn't be checked if immed is true
> +    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "load/store",
> +					dsc);
> +
> +  if (debug_displaced)
> +    fprintf_unfiltered (gdb_stdlog,
> +			"displaced: copying %s%s r%d [r%d] insn %.4x%.4x\n",
> +			load ? (byte ? "ldrb" : "ldr")
> +			     : (byte ? "strb" : "str"), usermode ? "t" : "",
> +			rt, rn, insn1, insn2);
> +
> +  install_ldr_str_ldrb_strb (gdbarch, regs, dsc, load, immed, writeback, byte,
> +			     usermode, rt, rm, rn);
> +
> +  if (load || rt != ARM_PC_REGNUM)
> +    {
> +      dsc->u.ldst.restore_r4 = 0;
> +
> +      if (immed)
> +	/* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
> +	   ->
> +	   {ldr,str}[b]<cond> r0, [r2, #imm].  */
> +	{
> +	  dsc->modinsn[0] = (insn1 & 0xfff0) | 0x2;
> +	  dsc->modinsn[1] = insn2 & 0x0fff;
> +	}
> +      else
> +	/* {ldr,str}[b]<cond> rt, [rn, rm], etc.
> +	   ->
> +	   {ldr,str}[b]<cond> r0, [r2, r3].  */
> +	{
> +	  dsc->modinsn[0] = (insn1 & 0xfff0) | 0x2;
> +	  dsc->modinsn[1] = (insn2 & 0x0ff0) | 0x3;
> +	}
> +
> +      dsc->numinsns = 2;
> +    }
> +  else
> +    {
> +      /* In Thumb-32 instructions, the behavior is unpredictable when Rt is
> +	 PC, while the behavior is undefined when Rn is PC.  Shortly, neither
> +	 Rt nor Rn can be PC.  */
> +
> +      gdb_assert (0);
> +    }
> +
> +  return 0;
> +}

> +/* Decode extension register load/store.  Exactly the same as
> +   arm_decode_ext_reg_ld_st.  */
> +
> +static int
> +thumb2_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint16_t insn1,
> +			     uint16_t insn2,  struct regcache *regs,
> +			     struct displaced_step_closure *dsc)
> +{
> +  unsigned int opcode = bits (insn1, 4, 8);
> +
> +  switch (opcode)
> +    {
> +    case 0x04: case 0x05:
> +      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
> +					  "vfp/neon vmov", dsc);
> +
> +    case 0x08: case 0x0c: /* 01x00 */
> +    case 0x0a: case 0x0e: /* 01x10 */
> +    case 0x12: case 0x16: /* 10x10 */
> +      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
> +					  "vfp/neon vstm/vpush", dsc);
> +
> +    case 0x09: case 0x0d: /* 01x01 */
> +    case 0x0b: case 0x0f: /* 01x11 */
> +    case 0x13: case 0x17: /* 10x11 */
> +      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
> +					  "vfp/neon vldm/vpop", dsc);
> +
> +    case 0x10: case 0x14: case 0x18: case 0x1c:  /* vstr.  */
> +    case 0x11: case 0x15: case 0x19: case 0x1d:  /* vldr.  */
> +      return thumb2_copy_copro_load_store (gdbarch, insn1, insn2, regs, dsc);

See the comment at thumb2_copy_copro_load_store: since that function will
always copy the instruction unmodified, so can this function.


> +static int
> +thumb2_decode_svc_copro (struct gdbarch *gdbarch, uint16_t insn1,
> +			 uint16_t insn2, struct regcache *regs,
> +			 struct displaced_step_closure *dsc)
> +{
> +  unsigned int coproc = bits (insn2, 8, 11);
> +  unsigned int op1 = bits (insn1, 4, 9);
> +  unsigned int bit_5_8 = bits (insn1, 5, 8);
> +  unsigned int bit_9 = bit (insn1, 9);
> +  unsigned int bit_4 = bit (insn1, 4);
> +  unsigned int rn = bits (insn1, 0, 3);
> +
> +  if (bit_9 == 0)
> +    {
> +      if (bit_5_8 == 2)
> +	{
> +	  if ((coproc & 0xe) == 0xa) /* 64-bit xfer.  */
> +	    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
> +						"neon 64bit xfer", dsc);
> +	  else
> +	    {
> +	      if (bit_4) /* MRRC/MRRC2 */
> +		return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
> +						    "mrrc/mrrc2", dsc);
> +	      else /* MCRR/MCRR2 */
> +		return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
> +						    "mcrr/mcrr2", dsc);
> +	    }
> +	}
> +      else if (bit_5_8 == 0) /* UNDEFINED.  */
> +	return thumb_32bit_copy_undef (gdbarch, insn1, insn2, dsc);
> +      else
> +	{
> +	   /*coproc is 101x.  SIMD/VFP, ext registers load/store.  */
> +	  if ((coproc & 0xe) == 0xa)
> +	    return thumb2_decode_ext_reg_ld_st (gdbarch, insn1, insn2, regs,
> +						dsc);
> +	  else /* coproc is not 101x.  */
> +	    {
> +	      if (bit_4 == 0) /* STC/STC2.  */
> +		return thumb2_copy_copro_load_store (gdbarch, insn1, insn2,
> +						     regs, dsc);
> +	      else
> +		{
> +		  if (rn == 0xf) /* LDC/LDC2 literal.  */
> +		    return thumb2_copy_copro_load_store (gdbarch, insn1, insn2,
> +							 regs, dsc);
> +		  else /* LDC/LDC2 immeidate.  */
> +		    return thumb2_copy_copro_load_store (gdbarch, insn1, insn2,
> +							 regs, dsc);
> +		}
> +	    }

See above ... I don't think any of those instructions can ever use the PC
in Thumb mode, so this can be simplified.

> +	}
> +    }
> +  else
> +    {
> +      unsigned int op = bit (insn2, 4);
> +      unsigned int bit_8 = bit (insn1, 8);
> +
> +      if (bit_8) /* Advanced SIMD */
> +	return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
> +					    "neon", dsc);
> +      else
> +	{
> +	  /*coproc is 101x.  */
> +	  if ((coproc & 0xe) == 0xa)
> +	    {
> +	      if (op) /* 8,16,32-bit xfer.  */
> +		return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
> +						    "neon 8/16/32 bit xfer",
> +						    dsc);
> +	      else /* VFP data processing.  */
> +		return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
> +						    "vfp dataproc", dsc);
> +	    }
> +	  else
> +	    {
> +	      if (op)
> +		{
> +		  if (bit_4) /* MRC/MRC2 */
> +		    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
> +							"mrc/mrc2", dsc);
> +		  else /* MCR/MCR2 */
> +		     return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
> +							"mcr/mcr2", dsc);
> +		}
> +	      else /* CDP/CDP 2 */
> +		return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
> +						    "cdp/cdp2", dsc);
> +	    }

Likewise I'm not sure there is any need to decode to such depth, if the
instructions can in the end all be copied unmodified.


>  static int
> +thumb_copy_pc_relative_32bit (struct gdbarch *gdbarch, struct regcache *regs,
> +			      struct displaced_step_closure *dsc,
> +			      int rd, unsigned int imm)
> +{
> +  /* Encoding T3: ADDS Rd, Rd, #imm */
Why do you refer to ADDS?  The instruction you generate is ADD (with no S bit),
which is actually correct -- so it seems just the comment is wrong.
> +  dsc->modinsn[0] = (0xf100 | rd);
> +  dsc->modinsn[1] = (0x0 | (rd << 8) | imm);
> +
> +  dsc->numinsns = 2;
> +
> +  install_pc_relative (gdbarch, regs, dsc, rd);
> +
> +  return 0;
> +}
> +
> +static int
> +thumb_decode_pc_relative_32bit (struct gdbarch *gdbarch, uint16_t insn1,
> +				uint16_t insn2, struct regcache *regs,
> +				struct displaced_step_closure *dsc)
> +{
> +  unsigned int rd = bits (insn2, 8, 11);
> +  /* Since immeidate has the same encoding in both ADR and ADDS, so we simply
typo
> +     extract raw immediate encoding rather than computing immediate.  When
> +     generating ADDS instruction, we can simply perform OR operation to set
> +     immediate into ADDS.  */
See above for ADDS vs. ADD.
> +  unsigned int imm = (insn2 & 0x70ff) | (bit (insn1, 10) << 26);

The last bit will get lost, since thumb_copy_pc_relative_32bit only or's
the value to the second 16-bit halfword.

> +  if (debug_displaced)
> +    fprintf_unfiltered (gdb_stdlog,
> +			"displaced: copying thumb adr r%d, #%d insn %.4x%.4x\n",
> +			rd, imm, insn1, insn2);
> +
> +  return thumb_copy_pc_relative_32bit (gdbarch, regs, dsc, rd, imm);
> +}

B.t.w. I think the distinction between a _decode_ and a _copy_ routine is
pointless in this case since the _decode_ routine is only ever called for
one single instruction that matches ... it doesn't actually decode anything.


> +static int
> +decode_thumb_32bit_ld_mem_hints (struct gdbarch *gdbarch,
> +				 uint16_t insn1, uint16_t insn2,
> +				 struct regcache *regs,
> +				 struct displaced_step_closure *dsc)
> +{
> +  int rd = bits (insn2, 12, 15);
> +  int user_mode = (bits (insn2, 8, 11) == 0xe);
> +  int err = 0;
> +  int writeback = 0;
> +
> +  switch (bits (insn1, 5, 6))
> +    {
> +    case 0: /* Load byte and memory hints */
> +      if (rd == 0xf) /* PLD/PLI */
> +	{
> +	  if (bits (insn2, 6, 11))
This check doesn't look right to me.
> +	    return thumb2_copy_preload (gdbarch, insn1, insn2, regs, dsc);
> +	  else
> +	    return thumb2_copy_preload_reg (gdbarch, insn1, insn2, regs, dsc);

In any case, see the comments above on handling preload instructions.  You
should only need to handle the "literal" variants.

> +	}
> +      else
> +	{
> +	  int op1 = bits (insn1, 7, 8);
> +
> +	  if ((op1 == 0 || op1 == 2) && bit (insn2, 11))
> +	    writeback = bit (insn2, 8);
> +
> +	  return thumb2_copy_ldr_str_ldrb_strb (gdbarch, insn1, insn2, regs,
> +						dsc, 1, 1, user_mode,
> +						writeback);
> +	}
> +
> +      break;
> +    case 1: /* Load halfword and memory hints */
> +      if (rd == 0xf) /* PLD{W} and Unalloc memory hint */
> +	{
> +	  if (bits (insn2, 6, 11))
> +	    return thumb2_copy_preload (gdbarch, insn1, insn2, regs, dsc);
> +	  else
> +	    return thumb2_copy_preload_reg (gdbarch, insn1, insn2, regs, dsc);
See above.
> +	}
> +      else
> +	{
> +	  int op1 = bits (insn1, 7, 8);
> +
> +	  if ((op1 == 0 || op1 == 2) && bit (insn2, 11))
> +	    writeback = bit (insn2, 8);
> +	  return thumb2_copy_ldr_str_ldrb_strb (gdbarch, insn1, insn2, regs,
> +						dsc, 1, 0, user_mode,
> +						writeback);
> +	}
> +      break;
> +    case 2: /* Load word */
> +      {
> +	int op1 = bits (insn1, 7, 8);
> +
> +	  if ((op1 == 0 || op1 == 2) && bit (insn2, 11))
> +	    writeback = bit (insn2, 8);
> +
> +	return thumb2_copy_ldr_str_ldrb_strb (gdbarch, insn1, insn2, regs, dsc,
> +					      1, 0, user_mode, writeback);
> +	break;
> +      }
> +    default:
> +      return thumb_32bit_copy_undef (gdbarch, insn1, insn2, dsc);
> +      break;
> +    }
> +  return 0;
> +}


>  static void
>  thumb_process_displaced_32bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
>  				    uint16_t insn2, struct regcache *regs,
>  				    struct displaced_step_closure *dsc)
>  {
> -  error (_("Displaced stepping is only supported in ARM mode and Thumb 16bit instructions"));
> +  int err = 0;
> +  unsigned short op = bit (insn2, 15);
> +  unsigned int op1 = bits (insn1, 11, 12);
> +
> +  switch (op1)
> +    {
> +    case 1:
> +      {
> +	switch (bits (insn1, 9, 10))
> +	  {
> +	  case 0: /* load/store multiple */
> +	    switch (bits (insn1, 7, 8))
> +	      {
> +	      case 0: case 3: /* SRS, RFE */
> +		err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
> +						   "srs/rfe", dsc);
> +		break;
> +	      case 1: case 2: /* LDM/STM/PUSH/POP */
> +		/* These Thumb 32-bit insns have the same encodings as ARM
> +		   counterparts.  */
"same encodings" isn't quite true ...
> +		err = thumb2_copy_block_xfer (gdbarch, insn1, insn2, regs, dsc);
> +	      }
> +	    break;

Hmm, it seems this case is missing code to handle the load/store dual,
load/store exclusive, and table branch instructions (page A6-24 / table A6-17);
there should be a check whether bit 6 is zero or one somewhere.

> +	  case 1:
> +	    /* Data-processing (shift register).  In ARM archtecture reference
> +	       manual, this entry is
> +	       "Data-processing (shifted register) on page A6-31".  However,
> +	    instructions in table A6-31 shows that they are `alu_reg'
> +	    instructions.  There is no alu_shifted_reg instructions in
> +	    Thumb-2.  */

Well ... they are not *register*-shifted register instructions like
there are in ARM mode (i.e. register shifted by another register),
but they are still *shifted* register instructions (i.e. register
shifted by an immediate).

> +	    err = thumb2_copy_alu_reg (gdbarch, insn1, insn2, regs,
> +					       dsc);
(see comments at that function ...)
> +	    break;
> +	  default: /* Coprocessor instructions */
> +	    /* Thumb 32bit coprocessor instructions have the same encoding
> +	       as ARM's.  */
(see above as to "same encoding" ... also, some ARM coprocessor instruction
may in fact use the PC, while no Thumb coprocessor instruction can ... so
there is probably no need to decode them further at this point)
> +	    err = thumb2_decode_svc_copro (gdbarch, insn1, insn2, regs, dsc);
> +	    break;
> +	  }
> +      break;
> +      }


> +    case 2: /* op1 = 2 */
> +      if (op) /* Branch and misc control.  */
> +	{
> +	  if (bit (insn2, 14)) /* BLX/BL */
> +	    err = thumb2_copy_b_bl_blx (gdbarch, insn1, insn2, regs, dsc);
> +	  else if (!bits (insn2, 12, 14) && bits (insn1, 8, 10) != 0x7)
I don't understand this condition, but it looks wrong to me ...

> +	    /* Conditional Branch */
> +	    err = thumb2_copy_b_bl_blx (gdbarch, insn1, insn2, regs, dsc);
> +	  else
> +	    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
> +					       "misc ctrl", dsc);
> +	}
> +      else
> +	{
> +	  if (bit (insn1, 9)) /* Data processing (plain binary imm) */
> +	    {
> +	      int op = bits (insn1, 4, 8);
> +	      int rn = bits (insn1, 0, 4);
> +	      if ((op == 0 || op == 0xa) && rn == 0xf)
> +		err = thumb_decode_pc_relative_32bit (gdbarch, insn1, insn2,
> +						      regs, dsc);
> +	      else
> +		err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
> +						   "dp/pb", dsc);
> +	    }
> +	  else /* Data processing (modified immeidate) */
> +	    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
> +					       "dp/mi", dsc);
> +	}
> +      break;
> +    case 3: /* op1 = 3 */
> +      switch (bits (insn1, 9, 10))
> +	{
> +	case 0:
> +	  if (bit (insn1, 4))
> +	    err = decode_thumb_32bit_ld_mem_hints (gdbarch, insn1, insn2,
> +						   regs, dsc);
> +	  else
> +	    {
> +	      if (bit (insn1, 8)) /* NEON Load/Store */
> +		err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
> +						   "neon elt/struct load/store",
> +						   dsc);
> +	      else /* Store single data item */
> +		{
> +		  int user_mode = (bits (insn2, 8, 11) == 0xe);
> +		  int byte = (bits (insn1, 5, 7) == 0
> +			      || bits (insn1, 5, 7) == 4);
> +		  int writeback = 0;
> +
> +		  if (bits (insn1, 5, 7) < 3 && bit (insn2, 11))
> +		    writeback = bit (insn2, 8);

If things get this complicated, a decode routine might be appropriate.
> +
> +		  err = thumb2_copy_ldr_str_ldrb_strb (gdbarch, insn1, insn2,
> +						       regs, dsc, 0, byte,
> +						       user_mode, writeback);
> +		}
> +	    }
> +	  break;
> +	case 1: /* op1 = 3, bits (9, 10) == 1 */
> +	  switch (bits (insn1, 7, 8))
> +	    {
> +	    case 0: case 1: /* Data processing (register) */
> +	      err = thumb2_copy_alu_reg (gdbarch, insn1, insn2, regs, dsc);
> +	      break;
> +	    case 2: /* Multiply and absolute difference */
> +	      err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
> +						 "mul/mua/diff", dsc);
> +	      break;
> +	    case 3: /* Long multiply and divide */
> +	      err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
> +						 "lmul/lmua", dsc);
> +	      break;
> +	    }
> +	  break;
> +	default: /* Coprocessor instructions */
> +	  err = thumb2_decode_svc_copro (gdbarch, insn1, insn2, regs, dsc);
> +	  break;
> +	}
> +      break;
> +    default:
> +      err = 1;
> +    }
> +
> +  if (err)
> +    internal_error (__FILE__, __LINE__,
> +		    _("thumb_process_displaced_32bit_insn: Instruction decode error"));
> +
>  }

Thanks,
Ulrich

-- 
  Dr. Ulrich Weigand
  GNU Toolchain for Linux on System z and Cell BE
  Ulrich.Weigand@de.ibm.com

^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [try 2nd 4/8] Displaced stepping for Thumb 16-bit insn
  2011-05-17 14:29             ` Yao Qi
@ 2011-05-17 17:20               ` Ulrich Weigand
  0 siblings, 0 replies; 66+ messages in thread
From: Ulrich Weigand @ 2011-05-17 17:20 UTC (permalink / raw)
  To: Yao Qi; +Cc: gdb-patches

Yao Qi wrote:

> > Have you looked at how the ARM case does it?  There, we still have just
> > a single POP { r0, ..., rN } that pops the right number of registers,
> > and then the cleanup function (cleanup_block_load_pc) reshuffles them.
> > It seems to me we could do the same (and actually use the same cleanup
> > function) for the Thumb case too ...
> 
> Sure, we can reuse that for Thumb case here.  In this case, when
> register list is not full, we could optimize it a little bit like what I
> did in my last patch.  However, it is a separate issue, and can be
> addressed separately.

OK, sounds good.

>          Support displaced stepping for Thumb 16-bit insns.
>          * arm-tdep.c (THUMB_NOP) Define.
>          (thumb_copy_unmodified_16bit): New.
>          (thumb_copy_b, thumb_copy_bx_blx_reg): New.
>          (thumb_copy_alu_reg): New.
>          (arm_copy_svc): Move some common code to ...
>          (install_svc): ... here.  New.
>          (thumb_copy_svc): New.
>          (install_pc_relative): New.
>          (thumb_copy_pc_relative_16bit): New.
>          (thumb_decode_pc_relative_16bit): New.
>          (thumb_copy_16bit_ldr_literal): New.
>          (thumb_copy_cbnz_cbz): New.
>          (cleanup_pop_pc_16bit_all): New.
>          (thumb_copy_pop_pc_16bit): New.
>          (thumb_process_displaced_16bit_insn): New.
>          (thumb_process_displaced_32bit_insn): New.
>          (thumb_process_displaced_insn): process thumb instruction.

I didn't find anything else :-)   Thanks for your patience in working
through all those iterations!

This version is OK.

Bye,
Ulrich

-- 
  Dr. Ulrich Weigand
  GNU Toolchain for Linux on System z and Cell BE
  Ulrich.Weigand@de.ibm.com

^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [try 2nd 7/8] Test case: V3
  2011-05-11 13:15       ` [try 2nd 7/8] Test case: V3 Yao Qi
@ 2011-05-17 17:24         ` Ulrich Weigand
  0 siblings, 0 replies; 66+ messages in thread
From: Ulrich Weigand @ 2011-05-17 17:24 UTC (permalink / raw)
  To: Yao Qi; +Cc: gdb-patches

Yao Qi wrote:

>         * gdb.arch/arm-disp-step.S (test_ldr_literal): Test for Thumb
>         instructions.
>         (test_adr_32bit, test_pop_pc): Likewise.
>         (test_ldr_literal_16, test_cbz_cbnz, test_adr): New test for
>         Thumb instructions.
>         * gdb.arch/arm-disp-step.exp (test_ldm_stm_pc): Match $gdb_prompt
>         in gdb_test_multiple.
>         (test_ldr_literal_16, test_cbz_cbnz, test_adr): New

This looks good to me.

However, I think this can only go in after 32-bit Thumb2 support is in,
otherwise some of the new tests would fail.

Bye,
Ulrich

-- 
  Dr. Ulrich Weigand
  GNU Toolchain for Linux on System z and Cell BE
  Ulrich.Weigand@de.ibm.com

^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [try 2nd 5/8] Displaced stepping for Thumb 32-bit insns
  2011-05-17 17:14       ` Ulrich Weigand
  2011-05-23 11:32         ` Yao Qi
@ 2011-05-23 11:32         ` Yao Qi
  2011-05-27 22:11           ` Ulrich Weigand
  2011-07-06 10:55         ` Yao Qi
  2 siblings, 1 reply; 66+ messages in thread
From: Yao Qi @ 2011-05-23 11:32 UTC (permalink / raw)
  To: Ulrich Weigand; +Cc: gdb-patches

On 05/18/2011 01:14 AM, Ulrich Weigand wrote:
>> > +static int
>> > +thumb2_copy_copro_load_store (struct gdbarch *gdbarch, uint16_t insn1,
>> > +			      uint16_t insn2, struct regcache *regs,
>> > +			      struct displaced_step_closure *dsc)
>> > +{
>> > +  unsigned int rn = bits (insn1, 0, 3);
>> > +
>> > +  if (rn == ARM_PC_REGNUM)
>> > +    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
>> > +					"copro load/store", dsc);
>> > +
>> > +  if (debug_displaced)
>> > +    fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
>> > +			"load/store insn %.4x%.4x\n", insn1, insn2);
>> > +
>> > +  dsc->modinsn[0] = insn1 & 0xfff0;
>> > +  dsc->modinsn[1] = insn2;
>> > +  dsc->numinsns = 2;
> This doesn't look right: you're replacing the RN register if it is anything
> *but* 15 -- but those cases do not need to be replaced!
> 

Sorry, the condition check should be reversed.

> In fact, unless I'm missing something, in Thumb mode no coprocessor
> instruction actually uses the PC (either RN == 15 indicates some other
> operation, or else it is specified as unpredictable).  So those should
> simply all be copied unmodified ...
> 

I can understand almost all of your comments except this one.  I think you
are right, but there are still some cases that PC is used in this
category of instructions.

thumb2_copy_copro_load_store covers instructions STC/STC2, VLDR/VSTR and
LDC/LDC2 (literal and immediate).  I re-read the ARM ARM again, and found that,

STC/STC2 doesn't use PC.  ARM ARM said "if n == 15 && (wback ||
CurrentInstrSet() != InstrSet_ARM) then UNPREDICTABLE;"

VSTR doesn't use PC.  ARM ARM said "if n == 15 && CurrentInstrSet() !=
InstrSet_ARM then UNPREDICTABLE;"

However, LDC/LDC2/VLDR can use PC.

VLDR<c><q>{.32} <Sd>, [PC, #+/-<imm>]

LDC, LDC2 (literal or immediate)
LDC{L}<c> <coproc>,<CRd>,[PC],<option>

I can write a real VLDR instruction using PC successfully.  Still no
luck fixing 'Illegal instruction' when running a program having LDC/LDC2
using the PC register, but I think LDC/LDC2 should be able to use the PC
register.  Am I missing something here?

-- 
Yao (齐尧)

^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [try 2nd 5/8] Displaced stepping for Thumb 32-bit insns
  2011-05-17 17:14       ` Ulrich Weigand
@ 2011-05-23 11:32         ` Yao Qi
  2011-05-23 11:32         ` Yao Qi
  2011-07-06 10:55         ` Yao Qi
  2 siblings, 0 replies; 66+ messages in thread
From: Yao Qi @ 2011-05-23 11:32 UTC (permalink / raw)
  To: Ulrich Weigand; +Cc: gdb-patches

On 05/18/2011 01:14 AM, Ulrich Weigand wrote:
>> > +static int
>> > +thumb2_copy_copro_load_store (struct gdbarch *gdbarch, uint16_t insn1,
>> > +			      uint16_t insn2, struct regcache *regs,
>> > +			      struct displaced_step_closure *dsc)
>> > +{
>> > +  unsigned int rn = bits (insn1, 0, 3);
>> > +
>> > +  if (rn == ARM_PC_REGNUM)
>> > +    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
>> > +					"copro load/store", dsc);
>> > +
>> > +  if (debug_displaced)
>> > +    fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
>> > +			"load/store insn %.4x%.4x\n", insn1, insn2);
>> > +
>> > +  dsc->modinsn[0] = insn1 & 0xfff0;
>> > +  dsc->modinsn[1] = insn2;
>> > +  dsc->numinsns = 2;
> This doesn't look right: you're replacing the RN register if it is anything
> *but* 15 -- but those cases do not need to be replaced!
> 

Sorry, the condition check should be reversed.

> In fact, unless I'm missing something, in Thumb mode no coprocessor
> instruction actually uses the PC (either RN == 15 indicates some other
> operation, or else it is specified as unpredictable).  So those should
> simply all be copied unmodified ...
> 

I can understand almost all of your comments except this one.  I think you
are right, but there are still some cases that PC is used in this
category of instructions.

thumb2_copy_copro_load_store covers instructions STC/STC2, VLDR/VSTR and
LDC/LDC2 (literal and immediate).  I re-read the ARM ARM again, and found that,

STC/STC2 doesn't use PC.  ARM ARM said "if n == 15 && (wback ||
CurrentInstrSet() != InstrSet_ARM) then UNPREDICTABLE;"

VSTR doesn't use PC.  ARM ARM said "if n == 15 && CurrentInstrSet() !=
InstrSet_ARM then UNPREDICTABLE;"

However, LDC/LDC2/VLDR can use PC.

VLDR<c><q>{.32} <Sd>, [PC, #+/-<imm>]

LDC, LDC2 (literal or immediate)
LDC{L}<c> <coproc>,<CRd>,[PC],<option>

I can write a real VLDR instruction using PC successfully.  Still no
luck fixing 'Illegal instruction' when running a program having LDC/LDC2
using the PC register, but I think LDC/LDC2 should be able to use the PC
register.  Am I missing something here?

-- 
Yao (齐尧)

^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [try 2nd 5/8] Displaced stepping for Thumb 32-bit insns
  2011-05-23 11:32         ` Yao Qi
@ 2011-05-27 22:11           ` Ulrich Weigand
  0 siblings, 0 replies; 66+ messages in thread
From: Ulrich Weigand @ 2011-05-27 22:11 UTC (permalink / raw)
  To: Yao Qi; +Cc: gdb-patches

Yao Qi wrote:
> On 05/18/2011 01:14 AM, Ulrich Weigand wrote:
> > In fact, unless I'm missing something, in Thumb mode no coprocessor
> > instruction actually uses the PC (either RN == 15 indicates some other
> > operation, or else it is specified as unpredictable).  So those should
> > simply all be copied unmodified ...
> 
> I can understand almost of your comments except this one.  I think you
> are right, but there are still some cases that PC is used in this
> category of instructions.
> 
> thumb2_copy_copro_load_store covers instructions STC/STC2, VLDR/VSTR and
> LDC/LDC2 (literal and immediate).  I re-read ARM ARM again, and find that,
> 
> STC/STC2 doesn't use PC.  ARM ARM said "if n == 15 && (wback ||
> CurrentInstrSet() != InstrSet_ARM) then UNPREDICTABLE;"
> 
> VSTR doesn't use PC.  ARM ARM said "if n == 15 && CurrentInstrSet() !=
> InstrSet_ARM then UNPREDICTABLE;"
> 
> However, LDC/LDC2/VLDR can use PC.
> 
> VLDR<c><q>{.32} <Sd>, [PC, #+/-<imm>]
> 
> LDC, LDC2 (literal or immediate)
> LDC{L}<c> <coproc>,<CRd>,[PC],<option>
> 
> I can write a real VLDR instruction using PC successfully.  Still no
> luck to fix 'Illegal instruction' when running program having LDC/LDC2
> using PC register, but I think LDC/LDC2 should be able to use PC
> register.  Am I missing something here?

No, you're right -- I had overlooked those.  LDC/LDC2/VLDR must
indeed be handled here.

Bye,
Ulrich

-- 
  Dr. Ulrich Weigand
  GNU Toolchain for Linux on System z and Cell BE
  Ulrich.Weigand@de.ibm.com

^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [try 2nd 5/8] Displaced stepping for Thumb 32-bit insns
  2011-05-17 17:14       ` Ulrich Weigand
  2011-05-23 11:32         ` Yao Qi
  2011-05-23 11:32         ` Yao Qi
@ 2011-07-06 10:55         ` Yao Qi
  2011-07-15 19:57           ` Ulrich Weigand
  2 siblings, 1 reply; 66+ messages in thread
From: Yao Qi @ 2011-07-06 10:55 UTC (permalink / raw)
  To: Ulrich Weigand; +Cc: gdb-patches

[-- Attachment #1: Type: text/plain, Size: 16949 bytes --]

On 05/18/2011 01:14 AM, Ulrich Weigand wrote:
> Yao Qi wrote:
> 

>> +static int
>> +thumb2_copy_preload_reg (struct gdbarch *gdbarch, uint16_t insn1,
>> +			 uint16_t insn2, struct regcache *regs,
>> +			 struct displaced_step_closure *dsc)
>> +{
>> +  unsigned int rn = bits (insn1, 0, 3);
>> +  unsigned int rm = bits (insn2, 0, 3);
>> +
>> +  if (rn != ARM_PC_REGNUM && rm != ARM_PC_REGNUM)
>> +    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "preload reg",
>> +					dsc);
>> +
>> +  if (debug_displaced)
>> +    fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.4x%.4x\n",
>> +			insn1, insn1);
>> +
>> +  dsc->modinsn[0] = insn1 & 0xfff0;
>> +  dsc->modinsn[1] = (insn2 & 0xfff0) | 0x1;
>> +  dsc->numinsns = 2;
>> +
>> +  install_preload_reg (gdbarch, regs, dsc, rn, rm);
>> +  return 0;
>> +}
> 
> Handling of preload instructions seems wrong for a couple of reasons:
> 
> - In Thumb mode, PLD/PLI with register offset must not use PC as offset
>   register, so those can just be copied unmodified.  The only instructions
>   to be treated specially are the "literal" variants, which do encode
>   PC-relative offsets.
> 
>   This means a separate thumb2_copy_preload_reg shouldn't be needed.
> 

Right.  thumb2_copy_preload_reg is removed.

> - However, you cannot just transform a PLD/PLI "literal" (i.e. PC + immediate)
>   into an "immediate" (i.e. register + immediate) version, since in Thumb
>   mode the "literal" version supports a 12-bit immediate, while the immediate
>   version only supports an 8-bit immediate.
> 
>   I guess you could either add the immediate to the PC during preparation
>   stage and then use an "immediate" instruction with immediate zero, or
>   else load the immediate into a second register and use a "register"
>   version of the instruction.
> 

The former may not be correct.  PC should be set at the address of `copy
area' in displaced stepping, instead of any other arbitrary values.  The
alternative to the former approach is to compute the new immediate value
according to the new PC value we will set (new PC value is
dsc->scratch_base).  However, in this way, we have to worry about the
overflow of new computed 12-bit immediate.

The latter one sounds better, because we don't have to worry about
overflow problem, and cleanup_preload can be still used as cleanup
routine in this case.

> 
>> +static int
>> +thumb2_copy_copro_load_store (struct gdbarch *gdbarch, uint16_t insn1,
>> +			      uint16_t insn2, struct regcache *regs,
>> +			      struct displaced_step_closure *dsc)
>> +{
>> +  unsigned int rn = bits (insn1, 0, 3);
>> +
>> +  if (rn == ARM_PC_REGNUM)
>> +    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
>> +					"copro load/store", dsc);
>> +
>> +  if (debug_displaced)
>> +    fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
>> +			"load/store insn %.4x%.4x\n", insn1, insn2);
>> +
>> +  dsc->modinsn[0] = insn1 & 0xfff0;
>> +  dsc->modinsn[1] = insn2;
>> +  dsc->numinsns = 2;
> 
> This doesn't look right: you're replacing the RN register if it is anything
> *but* 15 -- but those cases do not need to be replaced!
> 

Oh, sorry, it is a logic error.  The code should be like

if (rn != ARM_PC_REGNUM)
  return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "copro
load/store", dsc);

>> +static int
>> +thumb2_copy_b_bl_blx (struct gdbarch *gdbarch, uint16_t insn1,
>> +		      uint16_t insn2, struct regcache *regs,
>> +		      struct displaced_step_closure *dsc)
>> +
>> +  if (!link && !exchange) /* B */
>> +    {
>> +      cond = bits (insn1, 6, 9);
> 
> Only encoding T3 has condition bits, not T4.
> 

Oh, right.  Fixed.

>> +
>> +  dsc->modinsn[0] = THUMB_NOP;
>> +
>> +  install_b_bl_blx (gdbarch, regs, dsc, cond, exchange, 1, offset);
> 
> Why do you always pass 1 for link?  Shouldn't "link" be passed?
> 

"link" should be passed.  Fixed.

>> +static int
>> +thumb2_copy_alu_reg (struct gdbarch *gdbarch, uint16_t insn1,
>> +		     uint16_t insn2, struct regcache *regs,
>> +		     struct displaced_step_closure *dsc)
>> +{
>> +  unsigned int op2 = bits (insn2, 4, 7);
>> +  int is_mov = (op2 == 0x0);
>> +  unsigned int rn, rm, rd;
>> +
>> +  rn = bits (insn1, 0, 3); /* Rn */
>> +  rm = bits (insn2, 0, 3); /* Rm */
>> +  rd = bits (insn2, 8, 11); /* Rd */
>> +
>> +  /* In Thumb-2, rn, rm and rd can't be r15.  */
> This isn't quite true ... otherwise we wouldn't need the routine at all.

This line of comment is out of date.  Remove it.

>> +  if (rn != ARM_PC_REGNUM && rm != ARM_PC_REGNUM
>> +      && rd != ARM_PC_REGNUM)
>> +    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "ALU reg", dsc);
>> +
>> +  if (debug_displaced)
>> +    fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.4x%.4x\n",
>> +			"ALU", insn1, insn2);
>> +
>> +  if (is_mov)
>> +    dsc->modinsn[0] = insn1;
>> +  else
>> +    dsc->modinsn[0] = ((insn1 & 0xfff0) | 0x1);
>> +
>> +  dsc->modinsn[1] = ((insn2 & 0xf0f0) | 0x2);
>> +  dsc->numinsns = 2;
> 
> This doesn't look right.  It looks like this function is called for all
> instructions in tables A6-22 through A6-26; those encodings differ
> significantly in how their fields are used.  Some of them have the
> Rn, Rm, Rd fields as above, but others just have some of them.  For
> some, a register field content of 15 does indeed refer to the PC and
> needs to be replaced; for others a register field content of 15 means
> instead that a different operation is to be performed (e.g. ADD vs TST,
> EOR vs TEQ ...) and so it must *not* be replaced; and for yet others,
> a register field content of 15 is unpredictable.
> 
> In fact, I think only a very small number of instructions in this
> category actually may refer to the PC (only MOV?), so there needs
> to the be more instruction decoding to actually identify those.
> 

thumb2_copy_alu_reg is called for two groups of instructions,
1.  A6.3.11 Data-processing (shifted register)
2.  A6.3.12 Data-processing (register)

PC is not used in group #2.  Even in group #1, PC is only used in MOV.
This routine thumb2_copy_alu_reg is deleted, and
thumb2_decode_dp_shift_reg is added to decode group #1.

>>  static int
>> +thumb2_copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint16_t insn1,
>> +			       uint16_t insn2,  struct regcache *regs,
>> +			       struct displaced_step_closure *dsc,
>> +			       int load, int byte, int usermode, int writeback)
> 
> Hmmm ... this function is called for *halfwords* as well, not just for
> bytes and words.  This means the "byte" operand is no longer sufficient
> to uniquely determine the size -- note that when calling down to the
> install_ routine, xfersize is always set to 1 or 4.
> 

I thought "halfword" could be treated as "word" in this case, so I didn't
distinguish them.  I renamed "thumb2_copy_ldr_str_ldrb_strb" to
"thumb2_copy_load_store", and changed parameter BYTE to SIZE.  The install_
routine and arm_ routine are updated as well.

>> +{
>> +  int immed = !bit (insn1, 9);
>> +  unsigned int rt = bits (insn2, 12, 15);
>> +  unsigned int rn = bits (insn1, 0, 3);
>> +  unsigned int rm = bits (insn2, 0, 3);  /* Only valid if !immed.  */
>> +
>> +  if (rt != ARM_PC_REGNUM && rn != ARM_PC_REGNUM && rm != ARM_PC_REGNUM)
> rm shouldn't be checked if immed is true

Fixed.

>> +    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "load/store",
>> +					dsc);


>> +/* Decode extension register load/store.  Exactly the same as
>> +   arm_decode_ext_reg_ld_st.  */
>> +
>> +static int
>> +thumb2_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint16_t insn1,
>> +			     uint16_t insn2,  struct regcache *regs,
>> +			     struct displaced_step_closure *dsc)
>> +{
>> +
>> +    case 0x10: case 0x14: case 0x18: case 0x1c:  /* vstr.  */
>> +    case 0x11: case 0x15: case 0x19: case 0x1d:  /* vldr.  */
>> +      return thumb2_copy_copro_load_store (gdbarch, insn1, insn2, regs, dsc);
> 
> See the comment at thumb2_copy_copro_load_store: since that function will
> always copy the instruction unmodified, so can this function.
> 
> 

As we discussed, VLDR may still use PC, so I call
thumb_copy_unmodified_32bit for VSTR in my new patch.

>> +static int
>> +thumb2_decode_svc_copro (struct gdbarch *gdbarch, uint16_t insn1,
>> +			 uint16_t insn2, struct regcache *regs,
>> +			 struct displaced_step_closure *dsc)
>> +{
[...]
> 
> See above ... I don't think any of those instructions can ever use the PC
> in Thumb mode, so this can be simplified.
> 

It is simplified to some extent in new patch.

>> +	}
>> +    }
>> +  else
>> +    {
>> +      unsigned int op = bit (insn2, 4);
>> +      unsigned int bit_8 = bit (insn1, 8);
>> +
>> +      if (bit_8) /* Advanced SIMD */
>> +	return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
>> +					    "neon", dsc);
>> +      else
>> +	{
>> +	  /*coproc is 101x.  */
>> +	  if ((coproc & 0xe) == 0xa)
>> +	    {
>> +	      if (op) /* 8,16,32-bit xfer.  */
>> +		return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
>> +						    "neon 8/16/32 bit xfer",
>> +						    dsc);
>> +	      else /* VFP data processing.  */
>> +		return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
>> +						    "vfp dataproc", dsc);
>> +	    }
>> +	  else
>> +	    {
>> +	      if (op)
>> +		{
>> +		  if (bit_4) /* MRC/MRC2 */
>> +		    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
>> +							"mrc/mrc2", dsc);
>> +		  else /* MCR/MCR2 */
>> +		     return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
>> +							"mcr/mcr2", dsc);
>> +		}
>> +	      else /* CDP/CDP 2 */
>> +		return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
>> +						    "cdp/cdp2", dsc);
>> +	    }
> 
> Likewise I'm not sure there is any need to decode to such depth, if the
> instruction in the end all can be copied unmodified.

OK.  Patch length can be reduced then.

>>  static int
>> +thumb_copy_pc_relative_32bit (struct gdbarch *gdbarch, struct regcache *regs,
>> +			      struct displaced_step_closure *dsc,
>> +			      int rd, unsigned int imm)
>> +{
>> +  /* Encoding T3: ADDS Rd, Rd, #imm */
> Why do you refer to ADDS?  The instruction you generate is ADD (with no S bit),
> which is actually correct -- so it seems just the comment is wrong.

It is a mistake in the comment.  ADR doesn't update flags, so we don't have
the S bit in ADD.

>> +  dsc->modinsn[0] = (0xf100 | rd);
>> +  dsc->modinsn[1] = (0x0 | (rd << 8) | imm);
>> +
>> +  dsc->numinsns = 2;
>> +
>> +  install_pc_relative (gdbarch, regs, dsc, rd);
>> +
>> +  return 0;
>> +}
>> +
>> +static int
>> +thumb_decode_pc_relative_32bit (struct gdbarch *gdbarch, uint16_t insn1,
>> +				uint16_t insn2, struct regcache *regs,
>> +				struct displaced_step_closure *dsc)
>> +{
>> +  unsigned int rd = bits (insn2, 8, 11);
>> +  /* Since immeidate has the same encoding in both ADR and ADDS, so we simply
> typo
>> +     extract raw immediate encoding rather than computing immediate.  When
>> +     generating ADDS instruction, we can simply perform OR operation to set
>> +     immediate into ADDS.  */
> See above for ADDS vs. ADD.

s/ADDS/ADD/ in comments.

>> +  unsigned int imm = (insn2 & 0x70ff) | (bit (insn1, 10) << 26);
> 
> The last bit will get lost, since thumb_copy_pc_relative_32bit only or's
> the value to the second 16-bit halfword.

Then, we separately set bit 10 (the i bit) in dsc->modinsn[0] according to
the original insn1's i bit.


>> +  if (debug_displaced)
>> +    fprintf_unfiltered (gdb_stdlog,
>> +			"displaced: copying thumb adr r%d, #%d insn %.4x%.4x\n",
>> +			rd, imm, insn1, insn2);
>> +
>> +  return thumb_copy_pc_relative_32bit (gdbarch, regs, dsc, rd, imm);
>> +}
> 
> B.t.w. I think the distinction between a _decode_ and a _copy_ routine is
> pointless in this case since the _decode_ routine is only ever called for
> one single instruction that matches ... it doesn't actually decode anything.
> 

thumb_decode_pc_relative_32bit is merged to thumb_copy_pc_relative_32bit.

> 
>> +static int
>> +decode_thumb_32bit_ld_mem_hints (struct gdbarch *gdbarch,
>> +				 uint16_t insn1, uint16_t insn2,
>> +				 struct regcache *regs,
>> +				 struct displaced_step_closure *dsc)
>> +{
>> +  int rd = bits (insn2, 12, 15);
>> +  int user_mode = (bits (insn2, 8, 11) == 0xe);
>> +  int err = 0;
>> +  int writeback = 0;
>> +
>> +  switch (bits (insn1, 5, 6))
>> +    {
>> +    case 0: /* Load byte and memory hints */
>> +      if (rd == 0xf) /* PLD/PLI */
>> +	{
>> +	  if (bits (insn2, 6, 11))
> This check doesn't look right to me.

This part is re-written.

>> +	    return thumb2_copy_preload (gdbarch, insn1, insn2, regs, dsc);
>> +	  else
>> +	    return thumb2_copy_preload_reg (gdbarch, insn1, insn2, regs, dsc);
> 
> In any case, see the comments above on handling preload instructions.  You
> should only need to handle the "literal" variants.
> 

Right.  thumb2_copy_preload_reg is removed, and this part of code is
adjusted as well.

> 
> 
>>  static void
>>  thumb_process_displaced_32bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
>>  				    uint16_t insn2, struct regcache *regs,
>>  				    struct displaced_step_closure *dsc)
>>  {
>> -  error (_("Displaced stepping is only supported in ARM mode and Thumb 16bit instructions"));
>> +  int err = 0;
>> +  unsigned short op = bit (insn2, 15);
>> +  unsigned int op1 = bits (insn1, 11, 12);
>> +
>> +  switch (op1)
>> +    {
>> +    case 1:
>> +      {
>> +	switch (bits (insn1, 9, 10))
>> +	  {
>> +	  case 0: /* load/store multiple */
>> +	    switch (bits (insn1, 7, 8))
>> +	      {
>> +	      case 0: case 3: /* SRS, RFE */
>> +		err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
>> +						   "srs/rfe", dsc);
>> +		break;
>> +	      case 1: case 2: /* LDM/STM/PUSH/POP */
>> +		/* These Thumb 32-bit insns have the same encodings as ARM
>> +		   counterparts.  */
> "same encodings" isn't quite true ...

This line of comment is out of date.  Removed.

>> +		err = thumb2_copy_block_xfer (gdbarch, insn1, insn2, regs, dsc);
>> +	      }
>> +	    break;
> 
> Hmm, it seems this case is missing code to handle the load/store dual,
> load/store exclusive, and table branch instructions (page A6-24 / table A6-17);
> there should be a check whether bit 6 is zero or one somewhere.
> 

A routine thumb2_copy_table_branch is added to handle table branch
instructions.  Load/store dual and load/store exclusive don't use PC, so
they are copied unmodified.

>> +	  case 1:
>> +	    /* Data-processing (shift register).  In ARM archtecture reference
>> +	       manual, this entry is
>> +	       "Data-processing (shifted register) on page A6-31".  However,
>> +	    instructions in table A6-31 shows that they are `alu_reg'
>> +	    instructions.  There is no alu_shifted_reg instructions in
>> +	    Thumb-2.  */
> 
> Well ... they are not *register*-shifted register instructions like
> there are in ARM mode (i.e. register shifted by another register),
> but they are still *shifted* register instructions (i.e. register
> shifted by an immediate).
> 

Thanks for the clarification.  Only the 1st sentence of the comment is kept;
the rest is removed.

>> +	    err = thumb2_copy_alu_reg (gdbarch, insn1, insn2, regs,
>> +					       dsc);
> (see comments at that function ...)

Add a new function thumb2_decode_dp_shift_reg and call it here.

>> +	    break;
>> +	  default: /* Coprocessor instructions */
>> +	    /* Thumb 32bit coprocessor instructions have the same encoding
>> +	       as ARM's.  */
> (see above as to "same encoding" ... also, some ARM coprocessor instruction
> may in fact use the PC, while no Thumb coprocessor instruction can ... so
> there is probably no need to decode them further at this point)

As we discussed, LDC/LDC2/VLDR may still use PC.  Leave it there.

> 
>> +    case 2: /* op1 = 2 */
>> +      if (op) /* Branch and misc control.  */
>> +	{
>> +	  if (bit (insn2, 14)) /* BLX/BL */
>> +	    err = thumb2_copy_b_bl_blx (gdbarch, insn1, insn2, regs, dsc);
>> +	  else if (!bits (insn2, 12, 14) && bits (insn1, 8, 10) != 0x7)
> I don't understand this condition, but it looks wrong to me ...
> 

This condition is about "Conditional Branch".  The 2nd half of condition
should be "bits (insn1, 7, 9) != 0x7", corresponding to the first line
of table A6-13 "op1 = 0x0, op is not x111xxx".

>> +	      else /* Store single data item */
>> +		{
>> +		  int user_mode = (bits (insn2, 8, 11) == 0xe);
>> +		  int byte = (bits (insn1, 5, 7) == 0
>> +			      || bits (insn1, 5, 7) == 4);
>> +		  int writeback = 0;
>> +
>> +		  if (bits (insn1, 5, 7) < 3 && bit (insn2, 11))
>> +		    writeback = bit (insn2, 8);
> 
> If things get this complicated, a decode routine might be appropriate.

OK, moved this logic into a new function
"decode_thumb_32bit_store_single_data_item".

Note that patch sits on top of this patch,

  [patch] refactor arm-tdep.c:install_ldr_str_ldrb_strb to handle halfword
  http://sourceware.org/ml/gdb-patches/2011-07/msg00183.html

-- 
Yao

[-- Attachment #2: 0003-Support-displaced-stepping-for-Thumb-32-bit-insns.patch --]
[-- Type: text/x-patch, Size: 28770 bytes --]

         Support displaced stepping for Thumb 32-bit insns.

         * arm-tdep.c (thumb_copy_unmodified_32bit): New.
         (thumb2_copy_preload): New.
         (thumb2_copy_copro_load_store): New.
         (thumb2_copy_b_bl_blx): New.
         (thumb2_copy_alu_imm): New.
         (thumb2_copy_load_store): New.
         (thumb2_copy_block_xfer): New.
         (thumb_32bit_copy_undef): New.
         (thumb_32bit_copy_unpred): New.
         (thumb2_decode_ext_reg_ld_st): New.
         (thumb2_decode_svc_copro): New.
         (decode_thumb_32bit_store_single_data_item): New.
         (thumb_copy_pc_relative_32bit): New.
         (thumb_decode_pc_relative_32bit): New.
         (decode_thumb_32bit_ld_mem_hints): New.
         (thumb2_copy_table_branch): New
         (thumb_process_displaced_32bit_insn): Process Thumb 32-bit
         instructions.
---
 gdb/arm-tdep.c |  840 +++++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 files changed, 839 insertions(+), 1 deletions(-)

diff --git a/gdb/arm-tdep.c b/gdb/arm-tdep.c
index b0074bd..bd92193 100644
--- a/gdb/arm-tdep.c
+++ b/gdb/arm-tdep.c
@@ -5341,6 +5341,23 @@ arm_copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
   return 0;
 }
 
+static int
+thumb_copy_unmodified_32bit (struct gdbarch *gdbarch, uint16_t insn1,
+			     uint16_t insn2, const char *iname,
+			     struct displaced_step_closure *dsc)
+{
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x %.4x, "
+			"opcode/class '%s' unmodified\n", insn1, insn2,
+			iname);
+
+  dsc->modinsn[0] = insn1;
+  dsc->modinsn[1] = insn2;
+  dsc->numinsns = 2;
+
+  return 0;
+}
+
 /* Copy 16-bit Thumb(Thumb and 16-bit Thumb-2) instruction without any
    modification.  */
 static int
@@ -5408,6 +5425,54 @@ arm_copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
   return 0;
 }
 
+static int
+thumb2_copy_preload (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
+		     struct regcache *regs, struct displaced_step_closure *dsc)
+{
+  unsigned int rn = bits (insn1, 0, 3);
+  unsigned int u_bit = bit (insn1, 7);
+  int imm12 = bits (insn2, 0, 11);
+  ULONGEST pc_val;
+
+  if (rn != ARM_PC_REGNUM)
+    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "preload", dsc);
+
+  /* PC is only allowed to use in PLI (immeidate,literal) Encoding T3, and
+     PLD (literal) Encoding T1.  */
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog,
+			"displaced: copying pld/pli pc (0x%x) %c imm12 %.4x\n",
+			(unsigned int) dsc->insn_addr, u_bit ? '+' : '-',
+			imm12);
+
+  if (!u_bit)
+    imm12 = -1 * imm12;
+
+  /* Rewrite instruction {pli/pld} PC imm12 into:
+     Preapre: tmp[0] <- r0, tmp[1] <- r1, r0 <- pc, r1 <- imm12
+
+     {pli/pld} [r0, r1]
+
+     Cleanup: r0 <- tmp[0], r1 <- tmp[1].  */
+
+  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
+  dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
+
+  pc_val = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
+
+  displaced_write_reg (regs, dsc, 0, pc_val, CANNOT_WRITE_PC);
+  displaced_write_reg (regs, dsc, 1, imm12, CANNOT_WRITE_PC);
+  dsc->u.preload.immed = 0;
+
+  /* {pli/pld} [r0, r1] */
+  dsc->modinsn[0] = insn1 & 0xff00;
+  dsc->modinsn[1] = 0xf001;
+  dsc->numinsns = 2;
+
+  dsc->cleanup = &cleanup_preload;
+  return 0;
+}
+
 /* Preload instructions with register offset.  */
 
 static void
@@ -5517,6 +5582,30 @@ arm_copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
   return 0;
 }
 
+static int
+thumb2_copy_copro_load_store (struct gdbarch *gdbarch, uint16_t insn1,
+			      uint16_t insn2, struct regcache *regs,
+			      struct displaced_step_closure *dsc)
+{
+  unsigned int rn = bits (insn1, 0, 3);
+
+  if (rn == ARM_PC_REGNUM)
+    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+					"copro load/store", dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
+			"load/store insn %.4x%.4x\n", insn1, insn2);
+
+  dsc->modinsn[0] = insn1 & 0xfff0;
+  dsc->modinsn[1] = insn2;
+  dsc->numinsns = 2;
+
+  install_copro_load_store (gdbarch, regs, dsc, bit (insn1, 9), rn);
+
+  return 0;
+}
+
 /* Clean up branch instructions (actually perform the branch, by setting
    PC).  */
 
@@ -5604,6 +5693,61 @@ arm_copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
   return 0;
 }
 
+static int
+thumb2_copy_b_bl_blx (struct gdbarch *gdbarch, uint16_t insn1,
+		      uint16_t insn2, struct regcache *regs,
+		      struct displaced_step_closure *dsc)
+{
+  int link = bit (insn2, 14);
+  int exchange = link && !bit (insn2, 12);
+  int cond = INST_AL;
+  long offset =0;
+  int j1 = bit (insn2, 13);
+  int j2 = bit (insn2, 11);
+  int s = sbits (insn1, 10, 10);
+  int i1 = !(j1 ^ bit (insn1, 10));
+  int i2 = !(j2 ^ bit (insn1, 10));
+
+  if (!link && !exchange) /* B */
+    {
+      offset = (bits (insn2, 0, 10) << 1);
+      if (bit (insn2, 12)) /* Encoding T4 */
+	{
+	  offset |= (bits (insn1, 0, 9) << 12)
+	    | (i2 << 22)
+	    | (i1 << 23)
+	    | (s << 24);
+	  cond = INST_AL;
+	}
+      else /* Encoding T3 */
+	{
+	  offset |= (bits (insn1, 0, 5) << 12)
+	    | (j1 << 18)
+	    | (j2 << 19)
+	    | (s << 20);
+	  cond = bits (insn1, 6, 9);
+	}
+    }
+  else
+    {
+      offset = (bits (insn1, 0, 9) << 12);
+      offset |= ((i2 << 22) | (i1 << 23) | (s << 24));
+      offset |= exchange ?
+	(bits (insn2, 1, 10) << 2) : (bits (insn2, 0, 10) << 1);
+    }
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s insn "
+			"%.4x %.4x with offset %.8lx\n",
+			link ? (exchange) ? "blx" : "bl" : "b",
+			insn1, insn2, offset);
+
+  dsc->modinsn[0] = THUMB_NOP;
+
+  install_b_bl_blx (gdbarch, regs, dsc, cond, exchange, link, offset);
+  return 0;
+}
+
 /* Copy B Thumb instructions.  */
 static int
 thumb_copy_b (struct gdbarch *gdbarch, unsigned short insn,
@@ -5767,6 +5911,58 @@ arm_copy_alu_imm (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
   return 0;
 }
 
+static int
+thumb2_copy_alu_imm (struct gdbarch *gdbarch, uint16_t insn1,
+		     uint16_t insn2, struct regcache *regs,
+		     struct displaced_step_closure *dsc)
+{
+  unsigned int op = bits (insn1, 5, 8);
+  unsigned int rn, rm, rd;
+  ULONGEST rd_val, rn_val;
+
+  rn = bits (insn1, 0, 3); /* Rn */
+  rm = bits (insn2, 0, 3); /* Rm */
+  rd = bits (insn2, 8, 11); /* Rd */
+
+  /* This routine is only called for instruction MOV.  */
+  gdb_assert (op == 0x2 && rn == 0xf);
+
+  if (rm != ARM_PC_REGNUM && rd != ARM_PC_REGNUM)
+    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "ALU imm", dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.4x%.4x\n",
+			"ALU", insn1, insn2);
+
+  /* Instruction is of form:
+
+     <op><cond> rd, [rn,] #imm
+
+     Rewrite as:
+
+     Preparation: tmp1, tmp2 <- r0, r1;
+		  r0, r1 <- rd, rn
+     Insn: <op><cond> r0, r1, #imm
+     Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
+  */
+
+  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
+  dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
+  rn_val = displaced_read_reg (regs, dsc, rn);
+  rd_val = displaced_read_reg (regs, dsc, rd);
+  displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
+  displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
+  dsc->rd = rd;
+
+  dsc->modinsn[0] = insn1;
+  dsc->modinsn[1] = ((insn2 & 0xf0f0) | 0x1);
+  dsc->numinsns = 2;
+
+  dsc->cleanup = &cleanup_alu_imm;
+
+  return 0;
+}
+
 /* Copy/cleanup arithmetic/logic insns with register RHS.  */
 
 static void
@@ -6135,6 +6331,69 @@ install_load_store (struct gdbarch *gdbarch, struct regcache *regs,
 }
 
 static int
+thumb2_copy_load_store (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
+			struct regcache *regs,
+			struct displaced_step_closure *dsc, int load, int size,
+			int usermode, int writeback)
+{
+  int immed = !bit (insn1, 9);
+  unsigned int rt = bits (insn2, 12, 15);
+  unsigned int rn = bits (insn1, 0, 3);
+  unsigned int rm = bits (insn2, 0, 3);  /* Only valid if !immed.  */
+
+  if (rt != ARM_PC_REGNUM && rn != ARM_PC_REGNUM
+      && (immed || rm != ARM_PC_REGNUM))
+    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "load/store",
+					dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog,
+			"displaced: copying %s%s r%d [r%d] insn %.4x%.4x\n",
+			load ? (size == 1 ? "ldrb" : (size == 2 ? "ldrh" : "ldr"))
+			: (size == 1 ? "strb" : (size == 2 ? "strh" : "str")),
+			usermode ? "t" : "",
+			rt, rn, insn1, insn2);
+
+  install_load_store (gdbarch, regs, dsc, load, immed, writeback, size,
+		      usermode, rt, rm, rn);
+
+  if (load || rt != ARM_PC_REGNUM)
+    {
+      dsc->u.ldst.restore_r4 = 0;
+
+      if (immed)
+	/* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
+	   ->
+	   {ldr,str}[b]<cond> r0, [r2, #imm].  */
+	{
+	  dsc->modinsn[0] = (insn1 & 0xfff0) | 0x2;
+	  dsc->modinsn[1] = insn2 & 0x0fff;
+	}
+      else
+	/* {ldr,str}[b]<cond> rt, [rn, rm], etc.
+	   ->
+	   {ldr,str}[b]<cond> r0, [r2, r3].  */
+	{
+	  dsc->modinsn[0] = (insn1 & 0xfff0) | 0x2;
+	  dsc->modinsn[1] = (insn2 & 0x0ff0) | 0x3;
+	}
+
+      dsc->numinsns = 2;
+    }
+  else
+    {
+      /* In Thumb-32 instructions, the behavior is unpredictable when Rt is
+	 PC, while the behavior is undefined when Rn is PC.  Shortly, neither
+	 Rt nor Rn can be PC.  */
+
+      gdb_assert (0);
+    }
+
+  return 0;
+}
+
+
+static int
 arm_copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
 			    struct regcache *regs,
 			    struct displaced_step_closure *dsc,
@@ -6524,6 +6783,87 @@ arm_copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn,
   return 0;
 }
 
+static int
+thumb2_copy_block_xfer (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
+			struct regcache *regs,
+			struct displaced_step_closure *dsc)
+{
+  int rn = bits (insn1, 0, 3);
+  int load = bit (insn1, 4);
+  int writeback = bit (insn1, 5);
+
+  /* Block transfers which don't mention PC can be run directly
+     out-of-line.  */
+  if (rn != ARM_PC_REGNUM && (insn2 & 0x8000) == 0)
+    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "ldm/stm", dsc);
+
+  if (rn == ARM_PC_REGNUM)
+    {
+      warning (_("displaced: Unpredictable LDM or STM with "
+		 "base register r15"));
+      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+					  "unpredictable ldm/stm", dsc);
+    }
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying block transfer insn "
+			"%.4x%.4x\n", insn1, insn2);
+
+  /* Clear bit 13, since it should be always zero.  */
+  dsc->u.block.regmask = (insn2 & 0xdfff);
+  dsc->u.block.rn = rn;
+
+  dsc->u.block.load = bit (insn1, 4);
+  dsc->u.block.user = bit (insn1, 6);
+  dsc->u.block.increment = bit (insn1, 7);
+  dsc->u.block.before = bit (insn1, 8);
+  dsc->u.block.writeback = writeback;
+  dsc->u.block.cond = INST_AL;
+
+  if (load)
+    {
+      if (dsc->u.block.regmask == 0xffff)
+	{
+	  /* This branch is impossible to happen.  */
+	  gdb_assert (0);
+	}
+      else
+	{
+	  unsigned int regmask = dsc->u.block.regmask;
+	  unsigned int num_in_list = bitcount (regmask), new_regmask, bit = 1;
+	  unsigned int to = 0, from = 0, i, new_rn;
+
+	  for (i = 0; i < num_in_list; i++)
+	    dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
+
+	  if (writeback)
+	    insn1 &= ~(1 << 5);
+
+	  new_regmask = (1 << num_in_list) - 1;
+
+	  if (debug_displaced)
+	    fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
+				"{..., pc}: original reg list %.4x, modified "
+				"list %.4x\n"), rn, writeback ? "!" : "",
+				(int) dsc->u.block.regmask, new_regmask);
+
+	  dsc->modinsn[0] = insn1;
+	  dsc->modinsn[1] = (new_regmask & 0xffff);
+	  dsc->numinsns = 2;
+
+	  dsc->cleanup = &cleanup_block_load_pc;
+	}
+    }
+  else
+    {
+      dsc->modinsn[0] = insn1;
+      dsc->modinsn[1] = insn2;
+      dsc->numinsns = 2;
+      dsc->cleanup = &cleanup_block_store_pc;
+    }
+  return 0;
+}
+
 /* Cleanup/copy SVC (SWI) instructions.  These two functions are overridden
    for Linux, where some SVC instructions must be treated specially.  */
 
@@ -6609,6 +6949,23 @@ arm_copy_undef (struct gdbarch *gdbarch, uint32_t insn,
   return 0;
 }
 
+static int
+thumb_32bit_copy_undef (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
+                       struct displaced_step_closure *dsc)
+{
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying undefined insn "
+                       "%.4x %.4x\n", (unsigned short) insn1,
+                       (unsigned short) insn2);
+
+  dsc->modinsn[0] = insn1;
+  dsc->modinsn[1] = insn2;
+  dsc->numinsns = 2;
+
+  return 0;
+}
+
 /* Copy unpredictable instructions.  */
 
 static int
@@ -6624,6 +6981,23 @@ arm_copy_unpred (struct gdbarch *gdbarch, uint32_t insn,
   return 0;
 }
 
+/* Copy 32-bit Thumb unpredictable instructions unmodified (Thumb-2
+   counterpart of arm_copy_unpred): both halfwords are copied to the
+   scratch area as-is.  */
+static int
+thumb_32bit_copy_unpred (struct gdbarch *gdbarch, uint16_t insn1,
+			 uint16_t insn2, struct displaced_step_closure *dsc)
+{
+  if (debug_displaced)
+    /* Fixed spelling of "unpredictable" in the debug message.  */
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying unpredictable insn "
+			"%.4x %.4x\n", (unsigned short) insn1,
+			(unsigned short) insn2);
+
+  dsc->modinsn[0] = insn1;
+  dsc->modinsn[1] = insn2;
+  dsc->numinsns = 2;
+
+  return 0;
+}
+
 /* The decode_* functions are instruction decoding helpers.  They mostly follow
    the presentation in the ARM ARM.  */
 
@@ -7005,6 +7379,91 @@ arm_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
   return 1;
 }
 
+/* Decode Thumb-2 data-processing (shifted register) instructions.  Per
+   the grouping below, MOV (register) is the only one that may use the
+   PC; everything else is copied unmodified.  */
+
+static int
+thumb2_decode_dp_shift_reg (struct gdbarch *gdbarch, uint16_t insn1,
+			    uint16_t insn2,  struct regcache *regs,
+			    struct displaced_step_closure *dsc)
+{
+  /* Data processing (shift register) instructions can be grouped according to
+     their encodings:
+
+     1. Insn X Rn :insn1,3-0 Rd: insn2,8-11, Rm: insn2,3-0. Rd=15 & S=1, Insn Y.
+     Rn != PC, Rm != PC.
+     X: AND, Y: TST (REG)
+     X: EOR, Y: TEQ (REG)
+     X: ADD, Y: CMN (REG)
+     X: SUB, Y: CMP (REG)
+
+     2. Insn X Rn : insn1,3-0, Rm: insn2, 3-0; Rm != PC, Rn != PC
+     Insn X: TST, TEQ, PKH, CMN, and CMP.
+
+     3. Insn X Rn:insn1,3-0 Rd:insn2,8-11, Rm:insn2, 3-0. Rn != PC, Rd != PC,
+     Rm != PC.
+     X: BIC, ADC, SBC, and RSB.
+
+     4. Insn X Rn:insn1,3-0 Rd:insn2,8-11, Rm:insn2,3-0.  Rd = 15, Insn Y.
+     X: ORR, Y: MOV (REG).
+     X: ORN, Y: MVN (REG).
+
+     5.  Insn X Rd: insn2, 8-11, Rm: insn2, 3-0.
+     X: MVN, Rd != PC, Rm != PC
+     X: MOV: Rd/Rm can be PC.
+
+     PC is only allowed to be used in instruction MOV.
+*/
+
+  unsigned int op = bits (insn1, 5, 8);
+  unsigned int rn = bits (insn1, 0, 3);
+
+  /* NOTE(review): MOV (register) is dispatched to the ALU-immediate
+     helper here -- verify that thumb2_copy_alu_imm handles the register
+     form correctly.  */
+  if (op == 0x2 && rn == 0xf) /* MOV */
+    return thumb2_copy_alu_imm (gdbarch, insn1, insn2, regs, dsc);
+  else
+    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+					"dp (shift reg)", dsc);
+}
+
+
+/* Decode extension register load/store.  Exactly the same as
+   arm_decode_ext_reg_ld_st.  Dispatches on opcode bits 4-8 of the first
+   halfword; only VLDR (which may be PC-relative) needs a modified copy
+   via thumb2_copy_copro_load_store -- all other forms are copied
+   unmodified.  */
+
+static int
+thumb2_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint16_t insn1,
+			     uint16_t insn2,  struct regcache *regs,
+			     struct displaced_step_closure *dsc)
+{
+  unsigned int opcode = bits (insn1, 4, 8);
+
+  switch (opcode)
+    {
+    case 0x04: case 0x05:
+      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+					  "vfp/neon vmov", dsc);
+
+    case 0x08: case 0x0c: /* 01x00 */
+    case 0x0a: case 0x0e: /* 01x10 */
+    case 0x12: case 0x16: /* 10x10 */
+      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+					  "vfp/neon vstm/vpush", dsc);
+
+    case 0x09: case 0x0d: /* 01x01 */
+    case 0x0b: case 0x0f: /* 01x11 */
+    case 0x13: case 0x17: /* 10x11 */
+      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+					  "vfp/neon vldm/vpop", dsc);
+
+    case 0x10: case 0x14: case 0x18: case 0x1c:  /* vstr.  */
+      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+					  "vstr", dsc);
+    case 0x11: case 0x15: case 0x19: case 0x1d:  /* vldr.  */
+      return thumb2_copy_copro_load_store (gdbarch, insn1, insn2, regs, dsc);
+    }
+
+  /* Should be unreachable.  */
+  return 1;
+}
+
 static int
 arm_decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
 		      struct regcache *regs, struct displaced_step_closure *dsc)
@@ -7051,6 +7510,49 @@ arm_decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
     return arm_copy_undef (gdbarch, insn, dsc);  /* Possibly unreachable.  */
 }
 
+/* Decode Thumb-2 coprocessor and SIMD/VFP instructions (the Thumb-2
+   counterpart of arm_decode_svc_copro).  Removed the unused locals OP1
+   and RN that the original declared but never read.  */
+static int
+thumb2_decode_svc_copro (struct gdbarch *gdbarch, uint16_t insn1,
+			 uint16_t insn2, struct regcache *regs,
+			 struct displaced_step_closure *dsc)
+{
+  unsigned int coproc = bits (insn2, 8, 11);
+  unsigned int bit_5_8 = bits (insn1, 5, 8);
+  unsigned int bit_9 = bit (insn1, 9);
+  unsigned int bit_4 = bit (insn1, 4);
+
+  if (bit_9 == 0)
+    {
+      if (bit_5_8 == 2)
+	return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+					    "neon 64bit xfer/mrrc/mrrc2/mcrr/mcrr2",
+					    dsc);
+      else if (bit_5_8 == 0) /* UNDEFINED.  */
+	return thumb_32bit_copy_undef (gdbarch, insn1, insn2, dsc);
+      else
+	{
+	  /* coproc is 101x: SIMD/VFP, ext registers load/store.  */
+	  if ((coproc & 0xe) == 0xa)
+	    return thumb2_decode_ext_reg_ld_st (gdbarch, insn1, insn2, regs,
+						dsc);
+	  else /* coproc is not 101x.  */
+	    {
+	      if (bit_4 == 0) /* STC/STC2.  */
+		return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						    "stc/stc2", dsc);
+	      else /* LDC/LDC2 {literal, immediate}.  */
+		return thumb2_copy_copro_load_store (gdbarch, insn1, insn2,
+						     regs, dsc);
+	    }
+	}
+    }
+  else
+    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "coproc", dsc);
+
+  return 0;
+}
+
 static void
 install_pc_relative (struct gdbarch *gdbarch, struct regcache *regs,
 		     struct displaced_step_closure *dsc, int rd)
@@ -7100,6 +7602,35 @@ thumb_decode_pc_relative_16bit (struct gdbarch *gdbarch, uint16_t insn,
 }
 
 static int
+thumb_copy_pc_relative_32bit (struct gdbarch *gdbarch, uint16_t insn1,
+			      uint16_t insn2, struct regcache *regs,
+			      struct displaced_step_closure *dsc)
+{
+  /* Copy 32-bit Thumb ADR (PC-relative address computation): rewrite it
+     as "ADD Rd, Rd, #imm" and let install_pc_relative handle the
+     PC-relative setup of Rd.  */
+  unsigned int rd = bits (insn2, 8, 11);
+  /* Since the immediate has the same encoding in both ADR and ADD, we
+     simply extract the raw immediate encoding rather than computing the
+     immediate value.  When generating the ADD instruction, we can simply
+     OR the immediate into the ADD encoding.  */
+  unsigned int imm_3_8 = insn2 & 0x70ff;
+  unsigned int imm_i = insn1 & 0x0400; /* Clear all bits except bit 10.  */
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog,
+			"displaced: copying thumb adr r%d, #%d:%d insn %.4x%.4x\n",
+			rd, imm_i, imm_3_8, insn1, insn2);
+
+  /* Encoding T3: ADD Rd, Rd, #imm */
+  dsc->modinsn[0] = (0xf100 | rd | imm_i);
+  dsc->modinsn[1] = ((rd << 8) | imm_3_8);
+
+  dsc->numinsns = 2;
+
+  install_pc_relative (gdbarch, regs, dsc, rd);
+
+  return 0;
+}
+
+static int
 thumb_copy_16bit_ldr_literal (struct gdbarch *gdbarch, unsigned short insn1,
 			      struct regcache *regs,
 			      struct displaced_step_closure *dsc)
@@ -7181,6 +7712,51 @@ thumb_copy_cbnz_cbz (struct gdbarch *gdbarch, uint16_t insn1,
   return 0;
 }
 
+/* Copy Table Branch Byte/Halfword (TBB/TBH).  The table entry is read
+   from target memory and the resulting branch is emulated through
+   cleanup_branch.  */
+static int
+thumb2_copy_table_branch (struct gdbarch *gdbarch, uint16_t insn1,
+			  uint16_t insn2, struct regcache *regs,
+			  struct displaced_step_closure *dsc)
+{
+  ULONGEST rn_val, rm_val;
+  int is_tbh = bit (insn2, 4); /* Bit 4 selects TBH (halfword table).  */
+  CORE_ADDR halfwords = 0;
+  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
+
+  rn_val = displaced_read_reg (regs, dsc, bits (insn1, 0, 3));
+  rm_val = displaced_read_reg (regs, dsc, bits (insn2, 0, 3));
+
+  /* NOTE(review): target_read_memory failures are ignored here, leaving
+     HALFWORDS as 0 and producing a wrong branch target -- consider
+     propagating the error.  */
+  if (is_tbh)
+    {
+      gdb_byte buf[2];
+
+      target_read_memory (rn_val + 2 * rm_val, buf, 2);
+      halfwords = extract_unsigned_integer (buf, 2, byte_order);
+    }
+  else
+    {
+      gdb_byte buf[1];
+
+      target_read_memory (rn_val + rm_val, buf, 1);
+      halfwords = extract_unsigned_integer (buf, 1, byte_order);
+    }
+
+  if (debug_displaced)
+    /* The third value is the fetched table entry; the original format
+       string mislabelled it as a second "offset".  */
+    fprintf_unfiltered (gdb_stdlog, "displaced: %s base 0x%x offset 0x%x"
+			" table entry 0x%x\n", is_tbh ? "tbh" : "tbb",
+			(unsigned int) rn_val, (unsigned int) rm_val,
+			(unsigned int) halfwords);
+
+  /* Branch target is the instruction's PC (insn address + 4) plus twice
+     the table entry.  */
+  dsc->u.branch.cond = INST_AL;
+  dsc->u.branch.link = 0;
+  dsc->u.branch.exchange = 0;
+  dsc->u.branch.dest = dsc->insn_addr + 4 + 2 * halfwords;
+
+  dsc->cleanup = &cleanup_branch;
+
+  return 0;
+}
+
 static void
 cleanup_pop_pc_16bit_all (struct gdbarch *gdbarch, struct regcache *regs,
 			  struct displaced_step_closure *dsc)
@@ -7374,12 +7950,274 @@ thumb_process_displaced_16bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
 		    _("thumb_process_displaced_16bit_insn: Instruction decode error"));
 }
 
+/* Decode Thumb-2 load byte/halfword/word and memory hint (PLD/PLI)
+   instructions and dispatch them to the matching copy helper.
+
+   Fixes relative to the previous version:
+   - the byte-case unpredictable test compared a 4-bit field against
+     0x1110 (binary notation for 0xe) and used mask 0x6 against value
+     0x9, both of which could never match; it now mirrors the halfword
+     case below;
+   - a missing "return" in the halfword hint path silently dropped the
+     helper's result and copied nothing;
+   - removed the unused ERR local and the shadowing redeclarations of
+     OP1.  */
+static int
+decode_thumb_32bit_ld_mem_hints (struct gdbarch *gdbarch,
+				 uint16_t insn1, uint16_t insn2,
+				 struct regcache *regs,
+				 struct displaced_step_closure *dsc)
+{
+  int rt = bits (insn2, 12, 15);
+  int rn = bits (insn1, 0, 3);
+  int op1 = bits (insn1, 7, 8);
+  int user_mode = (bits (insn2, 8, 11) == 0xe);
+  int writeback = 0;
+
+  switch (bits (insn1, 5, 6))
+    {
+    case 0: /* Load byte and memory hints */
+      if (rt == 0xf) /* PLD/PLI */
+	{
+	  if (rn == 0xf)
+	    {
+	      /* PLD literal or Encoding T3 of PLI(immediate, literal).  */
+	      return thumb2_copy_preload (gdbarch, insn1, insn2, regs, dsc);
+	    }
+	  else
+	    {
+	      switch (op1)
+		{
+		case 0: case 2:
+		  /* Unpredictable encodings, matching the halfword case
+		     below (was "== 0x1110" and "& 0x6 == 0x9", which
+		     could never be true for a 4-bit field).  */
+		  if (bits (insn2, 8, 11) == 0xe
+		      || (bits (insn2, 8, 11) & 0x9) == 0x9)
+		    return thumb_32bit_copy_unpred (gdbarch, insn1, insn2, dsc);
+		  else
+		    /* PLI/PLD (register, immediate) doesn't use PC.  */
+		    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+							"pli/pld", dsc);
+		case 1: /* PLD/PLDW (immediate) */
+		case 3: /* PLI (immediate, literal) */
+		  return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						      "pli/pld", dsc);
+		}
+	    }
+	}
+      else
+	{
+	  if ((op1 == 0 || op1 == 2) && bit (insn2, 11))
+	    writeback = bit (insn2, 8);
+
+	  return thumb2_copy_load_store (gdbarch, insn1, insn2, regs, dsc, 1, 1,
+					 user_mode, writeback);
+	}
+
+      break;
+    case 1: /* Load halfword and memory hints.  */
+      if (rt == 0xf) /* PLD{W} and Unalloc memory hint.  */
+	{
+	  if (rn == 0xf)
+	    {
+	      if (op1 == 0 || op1 == 1)
+		return thumb_32bit_copy_unpred (gdbarch, insn1, insn2, dsc);
+	      else
+		return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						    "unalloc memhint", dsc);
+	    }
+	  else
+	    {
+	      if ((op1 == 0 || op1 == 2)
+		  && (bits (insn2, 8, 11) == 0xe
+		      || ((bits (insn2, 8, 11) & 0x9) == 0x9)))
+		return thumb_32bit_copy_unpred (gdbarch, insn1, insn2, dsc);
+	      else
+		/* The "return" was missing here, so the copy result was
+		   discarded and the caller saw success with nothing
+		   copied.  */
+		return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						    "pld/unalloc memhint", dsc);
+	    }
+	}
+      else
+	{
+	  if ((op1 == 0 || op1 == 2) && bit (insn2, 11))
+	    writeback = bit (insn2, 8);
+	  return thumb2_copy_load_store (gdbarch, insn1, insn2, regs, dsc, 1,
+					 2, user_mode, writeback);
+	}
+      break;
+    case 2: /* Load word */
+      if ((op1 == 0 || op1 == 2) && bit (insn2, 11))
+	writeback = bit (insn2, 8);
+
+      return thumb2_copy_load_store (gdbarch, insn1, insn2, regs, dsc, 1, 4,
+				     user_mode, writeback);
+    default:
+      return thumb_32bit_copy_undef (gdbarch, insn1, insn2, dsc);
+    }
+  return 0;
+}
+
+
+/* Decode Thumb-2 "store single data item" instructions (STRB/STRH/STR
+   forms) and hand them to thumb2_copy_load_store as a store of the
+   decoded access size.  NOTE(review): op1 values 3 and 7 fall through
+   the switch with SIZE left at 0 -- presumably those encodings never
+   reach this function, but verify.  */
+static int
+decode_thumb_32bit_store_single_data_item (struct gdbarch *gdbarch,
+					   uint16_t insn1, uint16_t insn2,
+					   struct regcache *regs,
+					   struct displaced_step_closure *dsc)
+{
+  int user_mode = (bits (insn2, 8, 11) == 0xe);
+  int size = 0;
+  int writeback = 0;
+  int op1 = bits (insn1, 5, 7);
+
+  switch (op1)
+    {
+    case 0: case 4: size = 1; break;
+    case 1: case 5: size = 2; break;
+    case 2: case 6: size = 4; break;
+    }
+  if (bits (insn1, 5, 7) < 3 && bit (insn2, 11))
+    writeback = bit (insn2, 8);
+
+  return thumb2_copy_load_store (gdbarch, insn1, insn2, regs,
+				 dsc, 0, size, user_mode,
+				 writeback);
+
+}
+
 static void
 thumb_process_displaced_32bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
 				    uint16_t insn2, struct regcache *regs,
 				    struct displaced_step_closure *dsc)
 {
-  error (_("Displaced stepping is only supported in ARM mode and Thumb 16bit instructions"));
+  /* Dispatch a 32-bit Thumb instruction to the matching copy/decode
+     helper, keyed on the major opcode fields (see the ARM ARM Thumb-2
+     encoding tables).  Any helper failure is a decode error.  */
+  int err = 0;
+  unsigned short op = bit (insn2, 15);
+  unsigned int op1 = bits (insn1, 11, 12);
+
+  switch (op1)
+    {
+    case 1:
+      {
+	switch (bits (insn1, 9, 10))
+	  {
+	  case 0:
+	    if (bit (insn1, 6))
+	      {
+		/* Load/store {dual, exclusive}, table branch.  */
+		if (bits (insn1, 7, 8) == 1 && bits (insn1, 4, 5) == 1
+		    && bits (insn2, 5, 7) == 0)
+		  err = thumb2_copy_table_branch (gdbarch, insn1, insn2, regs,
+						  dsc);
+		else
+		  /* PC is not allowed to be used in load/store {dual,
+		     exclusive} instructions.  */
+		  err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						     "load/store dual/ex", dsc);
+	      }
+	    else /* load/store multiple */
+	      {
+		switch (bits (insn1, 7, 8))
+		  {
+		  case 0: case 3: /* SRS, RFE */
+		    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						       "srs/rfe", dsc);
+		    break;
+		  case 1: case 2: /* LDM/STM/PUSH/POP */
+		    err = thumb2_copy_block_xfer (gdbarch, insn1, insn2, regs, dsc);
+		    break;
+		  }
+	      }
+	    break;
+
+	  case 1:
+	    /* Data-processing (shift register).  */
+	    err = thumb2_decode_dp_shift_reg (gdbarch, insn1, insn2, regs,
+					      dsc);
+	    break;
+	  default: /* Coprocessor instructions.  */
+	    /* Thumb 32bit coprocessor instructions have the same encoding
+	       as ARM's.  */
+	    err = thumb2_decode_svc_copro (gdbarch, insn1, insn2, regs, dsc);
+	    break;
+	  }
+      break;
+      }
+    case 2: /* op1 = 2 */
+      if (op) /* Branch and misc control.  */
+	{
+	  if (bit (insn2, 14)) /* BLX/BL */
+	    err = thumb2_copy_b_bl_blx (gdbarch, insn1, insn2, regs, dsc);
+	  else if (!bits (insn2, 12, 14) && bits (insn1, 7, 9) != 0x7)
+	    /* Conditional Branch */
+	    err = thumb2_copy_b_bl_blx (gdbarch, insn1, insn2, regs, dsc);
+	  else
+	    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+					       "misc ctrl", dsc);
+	}
+      else
+	{
+	  if (bit (insn1, 9)) /* Data processing (plain binary imm).  */
+	    {
+	      int op = bits (insn1, 4, 8);
+	      /* NOTE(review): bits 0-4 includes bit 4; Rn is presumably
+		 bits 0-3.  The == 0xf test still matches ADR because bit
+		 4 is 0 in those encodings, but verify.  */
+	      int rn = bits (insn1, 0, 4);
+	      if ((op == 0 || op == 0xa) && rn == 0xf)
+		err = thumb_copy_pc_relative_32bit (gdbarch, insn1, insn2,
+						    regs, dsc);
+	      else
+		err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						   "dp/pb", dsc);
+	    }
+	  else /* Data processing (modified immediate) */
+	    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+					       "dp/mi", dsc);
+	}
+      break;
+    case 3: /* op1 = 3 */
+      switch (bits (insn1, 9, 10))
+	{
+	case 0:
+	  if ((bits (insn1, 4, 6) & 0x5) == 0x1)
+	    err = decode_thumb_32bit_ld_mem_hints (gdbarch, insn1, insn2,
+						   regs, dsc);
+	  else
+	    {
+	      if (bit (insn1, 8)) /* NEON Load/Store */
+		err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						   "neon elt/struct load/store",
+						   dsc);
+	      else /* Store single data item */
+		err = decode_thumb_32bit_store_single_data_item (gdbarch,
+								 insn1, insn2,
+								 regs, dsc);
+
+	    }
+	  break;
+	case 1: /* op1 = 3, bits (9, 10) == 1 */
+	  switch (bits (insn1, 7, 8))
+	    {
+	    case 0: case 1: /* Data processing (register) */
+	      err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						 "dp(reg)", dsc);
+	      break;
+	    case 2: /* Multiply and absolute difference */
+	      err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						 "mul/mua/diff", dsc);
+	      break;
+	    case 3: /* Long multiply and divide */
+	      err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						 "lmul/lmua", dsc);
+	      break;
+	    }
+	  break;
+	default: /* Coprocessor instructions */
+	  err = thumb2_decode_svc_copro (gdbarch, insn1, insn2, regs, dsc);
+	  break;
+	}
+      break;
+    default:
+      err = 1;
+    }
+
+  if (err)
+    internal_error (__FILE__, __LINE__,
+		    _("thumb_process_displaced_32bit_insn: Instruction decode error"));
+
 }
 
 static void
-- 
1.7.0.4


^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [try 2nd 5/8] Displaced stepping for Thumb 32-bit insns
  2011-07-06 10:55         ` Yao Qi
@ 2011-07-15 19:57           ` Ulrich Weigand
  2011-07-18  9:26             ` Yao Qi
  0 siblings, 1 reply; 66+ messages in thread
From: Ulrich Weigand @ 2011-07-15 19:57 UTC (permalink / raw)
  To: Yao Qi; +Cc: gdb-patches

Hi Yao,

I just sent a review of your latest patch, but it doesn't show up on
gdb-patches ...  Did I just mess up CC, or did you not get it at all?

Thanks,
Ulrich

-- 
  Dr. Ulrich Weigand
  GNU Toolchain for Linux on System z and Cell BE
  Ulrich.Weigand@de.ibm.com

^ permalink raw reply	[flat|nested] 66+ messages in thread

* Re: [try 2nd 5/8] Displaced stepping for Thumb 32-bit insns
  2011-07-15 19:57           ` Ulrich Weigand
@ 2011-07-18  9:26             ` Yao Qi
  0 siblings, 0 replies; 66+ messages in thread
From: Yao Qi @ 2011-07-18  9:26 UTC (permalink / raw)
  To: Ulrich Weigand; +Cc: gdb-patches

On 07/16/2011 02:56 AM, Ulrich Weigand wrote:
> Hi Yao,
> 
> I just sent a review of your latest patch, but it doesn't show up on
> gdb-patches ...  Did I just mess up CC, or did you not get it at all?
> 

I got your review mail, but gdb-patches@ was not copied.  I'll reply to
that mail and copy gdb-patches@.  Again, thanks for your careful and
patient review.

-- 
Yao (齐尧)

^ permalink raw reply	[flat|nested] 66+ messages in thread

end of thread, other threads:[~2011-07-18  4:05 UTC | newest]

Thread overview: 66+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2010-12-25 14:17 [patch 0/3] Displaced stepping for 16-bit Thumb instructions Yao Qi
2010-12-25 14:22 ` [patch 1/3] " Yao Qi
2011-02-17 19:09   ` Ulrich Weigand
2010-12-25 17:09 ` [patch 2/3] " Yao Qi
2011-02-17 19:46   ` Ulrich Weigand
2011-02-18  6:33     ` Yao Qi
2011-02-18 12:18       ` Ulrich Weigand
2011-02-21  7:41         ` Yao Qi
2011-02-21 20:14           ` Ulrich Weigand
2011-02-25 18:09             ` Yao Qi
2011-02-25 20:17               ` Ulrich Weigand
2011-02-26 14:07                 ` Yao Qi
2011-02-28 17:37                   ` Ulrich Weigand
2011-03-01  9:01                     ` Yao Qi
2011-03-01 16:11                       ` Ulrich Weigand
2010-12-25 17:54 ` [patch 3/3] " Yao Qi
2010-12-27 15:15   ` Yao Qi
2011-02-17 20:55   ` Ulrich Weigand
2011-02-18  7:30     ` Yao Qi
2011-02-18 13:25       ` Ulrich Weigand
2011-02-28  2:04     ` Displaced stepping 0003: " Yao Qi
2010-12-29  5:48 ` [patch 0/3] Displaced stepping " Yao Qi
2011-01-13 12:38 ` Yao Qi
2011-02-10  6:48 ` Ping 2 " Yao Qi
2011-02-26 17:50 ` Displaced stepping 0002: refactor and create some copy helpers Yao Qi
2011-02-28 17:53   ` Ulrich Weigand
2011-02-28  2:15 ` Displaced stepping 0004: wip: 32-bit Thumb instructions Yao Qi
2011-03-24 13:49 ` [try 2nd 0/8] Displaced stepping for " Yao Qi
2011-03-24 13:56   ` [try 2nd 1/8] Fix cleanup_branch to take Thumb into account Yao Qi
2011-04-06 20:46     ` Ulrich Weigand
2011-04-07  3:45       ` Yao Qi
2011-03-24 13:58   ` [try 2nd 2/8] Rename copy_* functions to arm_copy_* Yao Qi
2011-04-06 20:51     ` Ulrich Weigand
2011-04-07  8:02       ` Yao Qi
2011-04-19  9:07         ` Yao Qi
2011-04-26 17:09         ` Ulrich Weigand
2011-04-27 10:27           ` Yao Qi
2011-04-27 13:32             ` Ulrich Weigand
2011-04-28  5:05               ` Yao Qi
2011-03-24 14:01   ` [try 2nd 3/8] Refactor copy_svc_os Yao Qi
2011-04-06 20:55     ` Ulrich Weigand
2011-04-07  4:19       ` Yao Qi
2011-03-24 14:05   ` [try 2nd 4/8] Displaced stepping for Thumb 16-bit insn Yao Qi
2011-05-05 13:24     ` Yao Qi
2011-05-10 13:58       ` Ulrich Weigand
2011-05-11 13:06         ` Yao Qi
2011-05-16 17:19           ` Ulrich Weigand
2011-05-17 14:29             ` Yao Qi
2011-05-17 17:20               ` Ulrich Weigand
2011-03-24 14:05   ` [try 2nd 5/8] Displaced stepping for Thumb 32-bit insns Yao Qi
2011-05-05 13:25     ` Yao Qi
2011-05-17 17:14       ` Ulrich Weigand
2011-05-23 11:32         ` Yao Qi
2011-05-23 11:32         ` Yao Qi
2011-05-27 22:11           ` Ulrich Weigand
2011-07-06 10:55         ` Yao Qi
2011-07-15 19:57           ` Ulrich Weigand
2011-07-18  9:26             ` Yao Qi
2011-03-24 14:06   ` [try 2nd 6/8] Rename some functions to arm_* Yao Qi
2011-04-06 20:52     ` Ulrich Weigand
2011-04-07  4:26       ` Yao Qi
2011-03-24 14:11   ` [try 2nd 7/8] Test case Yao Qi
2011-05-05 13:26     ` Yao Qi
2011-05-11 13:15       ` [try 2nd 7/8] Test case: V3 Yao Qi
2011-05-17 17:24         ` Ulrich Weigand
2011-03-24 15:14   ` [try 2nd 8/8] NEWS Yao Qi

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).