public inbox for systemtap@sourceware.org
* [PATCH]kprobe booster for IA64
@ 2006-06-06  6:35 Masami Hiramatsu
  2006-06-07  8:17 ` bibo,mao
  2006-06-29  3:16 ` Keshavamurthy Anil S
  0 siblings, 2 replies; 13+ messages in thread
From: Masami Hiramatsu @ 2006-06-06  6:35 UTC (permalink / raw)
  To: Keshavamurthy, Anil S, Ananth N Mavinakayanahalli, SystemTAP
  Cc: Yumiko Sugita, Satoshi Oshima, Hideo Aoki,
	Prasanna S Panchamukhi, Jim Keniston

[-- Attachment #1: Type: text/plain, Size: 7628 bytes --]

Hi, Anil and Ananth

I ported the kprobe-booster to the IA64 architecture.
The patch applies against 2.6.17-rc5-mm3. Could you review it?

This patch modifies kprobes as follows:
- Boost if the target bundle uses neither the B nor the X unit.
- Introduce an INST_FLAG_BOOSTABLE value for ainsn.inst_flag.
  If this flag is set, the kprobe is boostable.
- Change the instruction buffer (ainsn.insn) to a three-element
  array of bundles. The 2nd and 3rd elements of this array are
  used for direct (boosted) execution.

And this patch is Lindent clean ;)
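
In other words, the layout of the three-bundle buffer is:

	insn[0] : the target bundle with the break instruction inserted
	          (arch_arm_kprobe() writes this one to the probe point)
	insn[1] : an unmodified copy of the original bundle, executed
	          directly when the probe is boosted
	insn[2] : a brl bundle that branches back to the bundle
	          following p->addr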

I measured the overhead of the booster by using
the benchmark kernel module attached to this mail.

noprobe: 436 machine cycles
noboost: 1162 machine cycles
boosted: 829 machine cycles

CPU spec: Itanium2 1.3GHz (2CPUs)

-- 
Masami HIRAMATSU
2nd Research Dept.
Hitachi, Ltd., Systems Development Laboratory
E-mail: hiramatu@sdl.hitachi.co.jp

 arch/ia64/kernel/kprobes.c |   84 +++++++++++++++++++++++++++++++++++++++++----
 include/asm-ia64/kprobes.h |    9 +++-
 2 files changed, 85 insertions(+), 8 deletions(-)
diff --exclude=CVS -Narup a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
--- a/arch/ia64/kernel/kprobes.c	2006-06-05 13:02:54.000000000 +0900
+++ b/arch/ia64/kernel/kprobes.c	2006-06-05 13:34:07.000000000 +0900
@@ -78,6 +78,35 @@ static enum instruction_type bundle_enco
 };

 /*
+ * In this function, we check whether the target bundle can modify
+ * the IP.
+ */
+static __always_inline int can_boost(uint template)
+{
+	template &= 0x1e;
+	if (template >= 0x10 ||	/* including B unit */
+	    template == 0x04 ||	/* including X unit */
+	    template == 0x06) {	/* undefined */
+		return 0;
+	}
+	return 1;
+}
+
+/* Insert a long branch instruction */
+static __always_inline void set_brl_inst(void *from, void *to)
+{
+	s64 rel = ((s64) to - (s64) from) >> 4;
+	bundle_t *brl;
+	brl = (bundle_t *) ((u64) from & ~0xf);
+	brl->quad0.template = 0x05;	/* [MLX](stop) */
+	brl->quad0.slot0 = NOP_M_INST;	/* nop.m 0x0 */
+	brl->quad0.slot1_p0 = ((rel >> 20) & 0x7fffffffff) << 2;
+	brl->quad1.slot1_p1 = (((rel >> 20) & 0x7fffffffff) << 2) >> (64 - 46);
+	/* brl.cond.sptk.many.clr rel<<4 (qp=0) */
+	brl->quad1.slot2 = BRL_INST(rel >> 59, rel & 0xfffff);
+}
+
+/*
  * In this function we check to see if the instruction
  * is IP relative instruction and update the kprobe
  * inst flag accordingly
@@ -125,6 +154,10 @@ static void __kprobes update_kprobe_inst
 		  break;
 		}
 	}
+
+	if (can_boost(template)) {
+		p->ainsn.inst_flag |= INST_FLAG_BOOSTABLE;
+	}
 	return;
 }

@@ -218,7 +251,7 @@ static void __kprobes prepare_break_inst
 					 struct kprobe *p)
 {
 	unsigned long break_inst = BREAK_INST;
-	bundle_t *bundle = &p->ainsn.insn.bundle;
+	bundle_t *bundle = &p->ainsn.insn[0].bundle;

 	/*
 	 * Copy the original kprobe_inst qualifying predicate(qp)
@@ -249,6 +282,14 @@ static void __kprobes prepare_break_inst
 	 * single step on original instruction
 	 */
 	update_kprobe_inst_flag(template, slot, major_opcode, kprobe_inst, p);
+
+	/* If the bundle can be boosted, prepare boost bundles */
+	if (p->ainsn.inst_flag & INST_FLAG_BOOSTABLE) {
+		memcpy(&p->ainsn.insn[1].bundle, &p->opcode.bundle,
+		       sizeof(bundle_t));
+		set_brl_inst(&p->ainsn.insn[2].bundle,
+			     (bundle_t *) p->addr + 1);
+	}
 }

 static void __kprobes get_kprobe_inst(bundle_t *bundle, uint slot,
@@ -424,10 +465,10 @@ int __kprobes arch_prepare_kprobe(struct
 	unsigned long *kprobe_addr = (unsigned long *)(addr & ~0xFULL);
 	unsigned long kprobe_inst=0;
 	unsigned int slot = addr & 0xf, template, major_opcode = 0;
-	bundle_t *bundle = &p->ainsn.insn.bundle;
+	bundle_t *bundle = &p->ainsn.insn[0].bundle;

 	memcpy(&p->opcode.bundle, kprobe_addr, sizeof(bundle_t));
-	memcpy(&p->ainsn.insn.bundle, kprobe_addr, sizeof(bundle_t));
+	memcpy(&p->ainsn.insn[0].bundle, kprobe_addr, sizeof(bundle_t));

  	template = bundle->quad0.template;

@@ -454,7 +495,7 @@ void __kprobes arch_arm_kprobe(struct kp
 	unsigned long addr = (unsigned long)p->addr;
 	unsigned long arm_addr = addr & ~0xFULL;

-	memcpy((char *)arm_addr, &p->ainsn.insn.bundle, sizeof(bundle_t));
+	memcpy((char *)arm_addr, &p->ainsn.insn[0].bundle, sizeof(bundle_t));
 	flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
 }

@@ -471,7 +512,7 @@ void __kprobes arch_disarm_kprobe(struct
 /*
  * We are resuming execution after a single step fault, so the pt_regs
  * structure reflects the register state after we executed the instruction
- * located in the kprobe (p->ainsn.insn.bundle).  We still need to adjust
+ * located in the kprobe (p->ainsn.insn[0].bundle).  We still need to adjust
  * the ip to point back to the original stack address. To set the IP address
  * to original stack address, handle the case where we need to fixup the
  * relative IP address and/or fixup branch register.
@@ -488,7 +529,7 @@ static void __kprobes resume_execution(s
  	if (slot == 1 && bundle_encoding[template][1] == L)
  		slot = 2;

-	if (p->ainsn.inst_flag) {
+	if (p->ainsn.inst_flag & ~INST_FLAG_BOOSTABLE) {

 		if (p->ainsn.inst_flag & INST_FLAG_FIX_RELATIVE_IP_ADDR) {
 			/* Fix relative IP address */
@@ -563,6 +604,24 @@ static void __kprobes prepare_ss(struct
 	ia64_psr(regs)->ss = 1;
 }

+/* prepare to execute directly */
+static void __kprobes prepare_boost(struct kprobe *p, struct pt_regs *regs)
+{
+	unsigned long slot = (unsigned long)p->addr & 0xf;
+
+	regs->cr_iip = (unsigned long)&p->ainsn.insn[1].bundle & ~0xFULL;
+
+	if (slot > 2)
+		slot = 0;
+
+	ia64_psr(regs)->ri = slot;
+
+	/* turn off single stepping */
+	ia64_psr(regs)->ss = 0;
+
+	reset_current_kprobe();
+}
+
 static int __kprobes is_ia64_break_inst(struct pt_regs *regs)
 {
 	unsigned int slot = ia64_psr(regs)->ri;
@@ -602,6 +661,11 @@ static int __kprobes pre_kprobes_handler
 	struct pt_regs *regs = args->regs;
 	kprobe_opcode_t *addr = (kprobe_opcode_t *)instruction_pointer(regs);
 	struct kprobe_ctlblk *kcb;
+#ifdef CONFIG_PREEMPT
+	unsigned pre_preempt_count = preempt_count();
+#else
+	unsigned pre_preempt_count = 1;
+#endif

 	/*
 	 * We don't want to be preempted for the entire
@@ -681,6 +745,14 @@ static int __kprobes pre_kprobes_handler
 		 */
 		return 1;

+	if (pre_preempt_count && p->ainsn.inst_flag == INST_FLAG_BOOSTABLE &&
+	    !p->post_handler) {
+		/* Boost up -- we can execute copied instructions directly */
+		prepare_boost(p, regs);
+		preempt_enable_no_resched();
+		return 1;
+	}
+
 ss_probe:
 	prepare_ss(p, regs);
 	kcb->kprobe_status = KPROBE_HIT_SS;
diff --exclude=CVS -Narup a/include/asm-ia64/kprobes.h b/include/asm-ia64/kprobes.h
--- a/include/asm-ia64/kprobes.h	2006-06-05 13:03:07.000000000 +0900
+++ b/include/asm-ia64/kprobes.h	2006-06-05 13:34:07.000000000 +0900
@@ -29,8 +29,12 @@
 #include <linux/percpu.h>
 #include <asm/break.h>

-#define MAX_INSN_SIZE   16
+#define MAX_INSN_SIZE   3	/* 3 bundles */
 #define BREAK_INST	(long)(__IA64_BREAK_KPROBE << 6)
+#define NOP_M_INST	(long)(1<<27)
+#define BRL_INST(i1,i2) (long)((0xcL << 37) |	/* brl */  \
+			       (1L << 12) |	/* many */ \
+			       (((i1) & 1) << 36) | ((i2) << 13))	/* imm */

 typedef union cmp_inst {
 	struct {
@@ -108,10 +112,11 @@ struct fnptr {
 /* Architecture specific copy of original instruction*/
 struct arch_specific_insn {
 	/* copy of the instruction to be emulated */
-	kprobe_opcode_t insn;
+	kprobe_opcode_t insn[3];
  #define INST_FLAG_FIX_RELATIVE_IP_ADDR		1
  #define INST_FLAG_FIX_BRANCH_REG		2
  #define INST_FLAG_BREAK_INST			4
+ #define INST_FLAG_BOOSTABLE			8
  	unsigned long inst_flag;
  	unsigned short target_br_reg;
 };


[-- Attachment #2: probe_bench.c --]
[-- Type: text/plain, Size: 1341 bytes --]

/*
 * boost probe bench 
 * Copyright (c) 2006 Hitachi,Ltd.,
 * Created by Masami Hiramatsu<hiramatu@sdl.hitachi.co.jp>
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/kprobes.h>

MODULE_AUTHOR("M.Hiramatsu");
MODULE_LICENSE("GPL");

int dummy_function(int n)
{
	int k,l=0;
	for (k=1; k<n; k++) {
		l += (k + 1) / k;
	}
	return l;
}

static int probe_handler (struct kprobe * kp,
			  struct pt_regs * regs)
{
	return 0;
}

#include <linux/time.h>
#define CALLB 14
#define CALLN (1<<CALLB)

static int bench_probe(void) 
{
	int i;
	cycles_t c = get_cycles();
	for (i=0; i<CALLN; i++) {
		dummy_function(10);
	}
	return (get_cycles() - c)>>CALLB;
}

static struct kprobe kp;

static int install_probe(void) 
{
	int ret = -10000;
	
	kp.pre_handler = probe_handler;
	kp.addr = *(void **)dummy_function;

	printk("noprobe: %d machine cycles\n", bench_probe());
	ret = register_kprobe(&kp);
	if (ret != 0) {
		printk("probe install error: %d\n",ret);
		return -EINVAL;
	}
	printk("boosted: %d machine cycles \n", bench_probe());
	kp.ainsn.inst_flag &= ~INST_FLAG_BOOSTABLE;
	printk("noboost: %d machine cycles\n", bench_probe());
	
	unregister_kprobe(&kp);
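	/* returning nonzero makes the module load fail, so nothing stays resident */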
	return -1;
}

static void uninstall_probe(void)
{
	printk("module removed\n");
}

module_init(install_probe);
module_exit(uninstall_probe);


* Re: [PATCH]kprobe booster for IA64
  2006-06-06  6:35 [PATCH]kprobe booster for IA64 Masami Hiramatsu
@ 2006-06-07  8:17 ` bibo,mao
  2006-06-08 14:31   ` Masami Hiramatsu
  2006-06-29  3:16 ` Keshavamurthy Anil S
  1 sibling, 1 reply; 13+ messages in thread
From: bibo,mao @ 2006-06-07  8:17 UTC (permalink / raw)
  To: Masami Hiramatsu
  Cc: Keshavamurthy, Anil S, Ananth N Mavinakayanahalli, SystemTAP,
	Yumiko Sugita, Satoshi Oshima, Hideo Aoki,
	Prasanna S Panchamukhi, Jim Keniston

Hiramatsu-san,
If the probed instruction can cause a fault, there may be a problem.
Because the original instruction is copied into the 2nd element of
ainsn.insn, it executes at a different address, so the
search_exception_tables() result will be different as well.
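
To illustrate (simplified; fault_ip here is just shorthand for the
faulting instruction address):

	entry = search_exception_tables(fault_ip);
	/* original code : fault_ip is in the probed function -> entry found */
	/* boosted copy  : fault_ip is in p->ainsn.insn[1]    -> no entry    */

A fault that would normally be fixed up would then be treated as an
unexpected kernel fault.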

Thanks
bibo,mao

Masami Hiramatsu wrote:
> Hi, Anil and Ananth
> 
> I ported the kprobe-booster to the IA64 architecture.
> The patch applies against 2.6.17-rc5-mm3. Could you review it?
> 
> This patch modifies kprobes as follows:
> - Boost if the target bundle uses neither the B nor the X unit.
> - Introduce an INST_FLAG_BOOSTABLE value for ainsn.inst_flag.
>   If this flag is set, the kprobe is boostable.
> - Change the instruction buffer (ainsn.insn) to a three-element
>   array of bundles. The 2nd and 3rd elements of this array are
>   used for direct (boosted) execution.
> 
> And this patch is Lindent clean ;)
> 
> I measured the overhead of the booster by using
> the benchmark kernel module attached to this mail.
> 
> noprobe: 436 machine cycles
> noboost: 1162 machine cycles
> boosted: 829 machine cycles
> 
> CPU spec: Itanium2 1.3GHz (2CPUs)
> 
> 
> 
> ------------------------------------------------------------------------
> 
> /*
>  * boost probe bench 
>  * Copyright (c) 2006 Hitachi,Ltd.,
>  * Created by Masami Hiramatsu<hiramatu@sdl.hitachi.co.jp>
>  */
> #include <linux/module.h>
> #include <linux/kernel.h>
> #include <linux/init.h>
> #include <linux/kprobes.h>
> 
> MODULE_AUTHOR("M.Hiramatsu");
> MODULE_LICENSE("GPL");
> 
> int dummy_function(int n)
> {
> 	int k,l=0;
> 	for (k=1; k<n; k++) {
> 		l += (k + 1) / k;
> 	}
> 	return l;
> }
> 
> static int probe_handler (struct kprobe * kp,
> 			  struct pt_regs * regs)
> {
> 	return 0;
> }
> 
> #include <linux/time.h>
> #define CALLB 14
> #define CALLN (1<<CALLB)
> 
> static int bench_probe(void) 
> {
> 	int i;
> 	cycles_t c = get_cycles();
> 	for (i=0; i<CALLN; i++) {
> 		dummy_function(10);
> 	}
> 	return (get_cycles() - c)>>CALLB;
> }
> 
> static struct kprobe kp;
> 
> static int install_probe(void) 
> {
> 	int ret = -10000;
> 	
> 	kp.pre_handler = probe_handler;
> 	kp.addr = *(void **)dummy_function;
> 
> 	printk("noprobe: %d machine cycles\n", bench_probe());
> 	ret = register_kprobe(&kp);
> 	if (ret != 0) {
> 		printk("probe install error: %d\n",ret);
> 		return -EINVAL;
> 	}
> 	printk("boosted: %d machine cycles \n", bench_probe());
> 	kp.ainsn.inst_flag &= ~INST_FLAG_BOOSTABLE;
> 	printk("noboost: %d machine cycles\n", bench_probe());
> 	
> 	unregister_kprobe(&kp);
> 	return -1;
> }
> 
> static void uninstall_probe(void)
> {
> 	printk("module removed\n");
> }
> 
> module_init(install_probe);
> module_exit(uninstall_probe);


* Re: [PATCH]kprobe booster for IA64
  2006-06-07  8:17 ` bibo,mao
@ 2006-06-08 14:31   ` Masami Hiramatsu
  2006-06-09  3:03     ` bibo,mao
  0 siblings, 1 reply; 13+ messages in thread
From: Masami Hiramatsu @ 2006-06-08 14:31 UTC (permalink / raw)
  To: bibo,mao
  Cc: Keshavamurthy, Anil S, Ananth N Mavinakayanahalli, SystemTAP,
	Yumiko Sugita, Satoshi Oshima, Hideo Aoki,
	Prasanna S Panchamukhi, Jim Keniston

Hi bibo,

bibo,mao wrote:
> Hiramatsu-san,
> If the probed instruction can cause a fault, there may be a problem.
> Because the original instruction is copied into the 2nd element of
> ainsn.insn, it executes at a different address, so the
> search_exception_tables() result will be different as well.

Thank you for the good advice.
As far as I can see, search_exception_tables() checks whether a
page-fault address is one that is expected to fault.
If that is correct, I think the problem can be avoided by disabling
the booster when the table contains the target address.
For example:

	if (can_boost(template) && !search_exception_tables(p->addr)) {
		p->ainsn.inst_flag |= INST_FLAG_BOOSTABLE;
	}

What do you think about this idea?

Thanks

> 
> Thanks
> bibo,mao
> 
> Masami Hiramatsu wrote:
>> Hi, Anil and Ananth
>>
>> I ported the kprobe-booster to the IA64 architecture.
>> This patch can be applied against 2.6.17-rc5-mm3.
>> And here is the patch. Could you review it?
>>
>> This patch modifies kprobe as below;
>> - Boost if the target bundle don't use B and X unit.
>> - Introduce INST_FLAG_BOOSTABLE value for ainsn.insn_flag.
>>   If this flag is set, the kprobe is boostable.
>> - Change instruction buffer(ainsn.insn) to an array of
>>   bundles which has three elements. The 2nd element and the
>>   3rd element of this array are used for dynamic execution.
>>
>> And this patch is Lindent clean ;)
>>
>> I measured the overhead of the booster by using
>> the benchmark kernel module attached to this mail.
>>
>> noprobe: 436 machine cycles
>> noboost: 1162 machine cycles
>> boosted: 829 machine cycles
>>
>> CPU spec: Itanium2 1.3GHz (2CPUs)
>>
>>
>>
-- 
Masami HIRAMATSU
2nd Research Dept.
Hitachi, Ltd., Systems Development Laboratory
E-mail: hiramatu@sdl.hitachi.co.jp


* Re: [PATCH]kprobe booster for IA64
  2006-06-08 14:31   ` Masami Hiramatsu
@ 2006-06-09  3:03     ` bibo,mao
  2006-06-15  5:46       ` Masami Hiramatsu
  0 siblings, 1 reply; 13+ messages in thread
From: bibo,mao @ 2006-06-09  3:03 UTC (permalink / raw)
  To: Masami Hiramatsu
  Cc: Keshavamurthy, Anil S, Ananth N Mavinakayanahalli, SystemTAP,
	Yumiko Sugita, Satoshi Oshima, Hideo Aoki,
	Prasanna S Panchamukhi, Jim Keniston

Masami Hiramatsu wrote:
> Hi bibo,
> 
> bibo,mao wrote:
>  > Hiramatsu-san,
>  > If the probed instruction can cause a fault, there may be a problem.
>  > Because the original instruction is copied into the 2nd element of
>  > ainsn.insn, it executes at a different address, so the
>  > search_exception_tables() result will be different as well.
> 
> Thank you for the good advice.
> As far as I can see, search_exception_tables() checks whether a
> page-fault address is one that is expected to fault.
> If that is correct, I think the problem can be avoided by disabling
> the booster when the table contains the target address.
> For example:
> 
>         if (can_boost(template) && !search_exception_tables(p->addr)) {
>                 p->ainsn.inst_flag |= INST_FLAG_BOOSTABLE;
>         }
> 
> What do you think about this idea?
> 
> Thanks
> 
That works for me. BTW, on IA64 one bundle has three instructions, so I
think the whole bundle should be checked against the exception table.

Thanks
bibo,mao


* Re: [PATCH]kprobe booster for IA64
  2006-06-09  3:03     ` bibo,mao
@ 2006-06-15  5:46       ` Masami Hiramatsu
  2006-06-27  0:16         ` Masami Hiramatsu
  0 siblings, 1 reply; 13+ messages in thread
From: Masami Hiramatsu @ 2006-06-15  5:46 UTC (permalink / raw)
  To: bibo,mao
  Cc: Keshavamurthy, Anil S, Ananth N Mavinakayanahalli, SystemTAP,
	Yumiko Sugita, Satoshi Oshima, Hideo Aoki,
	Prasanna S Panchamukhi, Jim Keniston

Hi, bibo

bibo,mao wrote:
> That works for me. BTW, on IA64 one bundle has three instructions, so I
> think the whole bundle should be checked against the exception table.

I updated the kprobe-booster against 2.6.17-rc6-mm2; the updated patch
is attached to this mail. In this version, can_boost() checks the
exception table by using search_exception_tables().

Unfortunately, search_exception_tables() on IA64 seems to be broken.
As far as I can see, it doesn't work correctly because the lookup
routine expects the address format of an exception_table_entry to be
"IP + slot", but the compiler (gcc-3.4.5) generates it as
"IP + (slot << 2)". Thus the lookup routine always fails to find the
corresponding entry.
I have already made a patch to fix this bug and will send it to
Tony Luck, the Linux-IA64 maintainer, as soon as possible.
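
To illustrate, for a hypothetical entry covering slot 1 of a bundle
at address A, the two computations disagree:

	A + slot;		/* A + 1 : what the lookup routine expects */
	A + (slot << 2);	/* A + 4 : what gcc-3.4.5 actually emits   */

so the comparison never matches and the entry is missed.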

Thanks,

-- 
Masami HIRAMATSU
2nd Research Dept.
Hitachi, Ltd., Systems Development Laboratory
E-mail: hiramatu@sdl.hitachi.co.jp


 arch/ia64/kernel/kprobes.c |   90 ++++++++++++++++++++++++++++++++++++++++++---
 include/asm-ia64/kprobes.h |    9 +++-
 2 files changed, 91 insertions(+), 8 deletions(-)
diff --exclude=CVS -Narup a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
--- a/arch/ia64/kernel/kprobes.c	2006-06-13 15:04:18.000000000 +0900
+++ b/arch/ia64/kernel/kprobes.c	2006-06-14 16:15:35.000000000 +0900
@@ -78,6 +78,41 @@ static enum instruction_type bundle_enco
 };

 /*
+ * In this function, we check whether the target bundle can modify
+ * the IP and whether it may raise an exception.
+ */
+static __always_inline int can_boost(uint template, unsigned long addr,
+				     uint slot)
+{
+	addr &= ~0xFULL;
+	do {
+		if (search_exception_tables(addr + (++slot)))	/* we must check the next slot. */
+			return 0;	/* exception may occur in this bundle*/
+	} while (slot < 3);
+	template &= 0x1e;
+	if (template >= 0x10 ||	/* including B unit */
+	    template == 0x04 ||	/* including X unit */
+	    template == 0x06) {	/* undefined */
+		return 0;
+	}
+	return 1;
+}
+
+/* Insert a long branch instruction */
+static __always_inline void set_brl_inst(void *from, void *to)
+{
+	s64 rel = ((s64) to - (s64) from) >> 4;
+	bundle_t *brl;
+	brl = (bundle_t *) ((u64) from & ~0xf);
+	brl->quad0.template = 0x05;	/* [MLX](stop) */
+	brl->quad0.slot0 = NOP_M_INST;	/* nop.m 0x0 */
+	brl->quad0.slot1_p0 = ((rel >> 20) & 0x7fffffffff) << 2;
+	brl->quad1.slot1_p1 = (((rel >> 20) & 0x7fffffffff) << 2) >> (64 - 46);
+	/* brl.cond.sptk.many.clr rel<<4 (qp=0) */
+	brl->quad1.slot2 = BRL_INST(rel >> 59, rel & 0xfffff);
+}
+
+/*
  * In this function we check to see if the instruction
  * is IP relative instruction and update the kprobe
  * inst flag accordingly
@@ -125,6 +160,10 @@ static void __kprobes update_kprobe_inst
 		  break;
 		}
 	}
+
+	if (can_boost(template, (unsigned long)p->addr, slot)) {
+		p->ainsn.inst_flag |= INST_FLAG_BOOSTABLE;
+	}
 	return;
 }

@@ -218,7 +257,7 @@ static void __kprobes prepare_break_inst
 					 struct kprobe *p)
 {
 	unsigned long break_inst = BREAK_INST;
-	bundle_t *bundle = &p->ainsn.insn.bundle;
+	bundle_t *bundle = &p->ainsn.insn[0].bundle;

 	/*
 	 * Copy the original kprobe_inst qualifying predicate(qp)
@@ -249,6 +288,14 @@ static void __kprobes prepare_break_inst
 	 * single step on original instruction
 	 */
 	update_kprobe_inst_flag(template, slot, major_opcode, kprobe_inst, p);
+
+	/* If the bundle can be boosted, prepare boost bundles */
+	if (p->ainsn.inst_flag & INST_FLAG_BOOSTABLE) {
+		memcpy(&p->ainsn.insn[1].bundle, &p->opcode.bundle,
+		       sizeof(bundle_t));
+		set_brl_inst(&p->ainsn.insn[2].bundle,
+			     (bundle_t *) p->addr + 1);
+	}
 }

 static void __kprobes get_kprobe_inst(bundle_t *bundle, uint slot,
@@ -424,10 +471,10 @@ int __kprobes arch_prepare_kprobe(struct
 	unsigned long *kprobe_addr = (unsigned long *)(addr & ~0xFULL);
 	unsigned long kprobe_inst=0;
 	unsigned int slot = addr & 0xf, template, major_opcode = 0;
-	bundle_t *bundle = &p->ainsn.insn.bundle;
+	bundle_t *bundle = &p->ainsn.insn[0].bundle;

 	memcpy(&p->opcode.bundle, kprobe_addr, sizeof(bundle_t));
-	memcpy(&p->ainsn.insn.bundle, kprobe_addr, sizeof(bundle_t));
+	memcpy(&p->ainsn.insn[0].bundle, kprobe_addr, sizeof(bundle_t));

  	template = bundle->quad0.template;

@@ -454,7 +501,7 @@ void __kprobes arch_arm_kprobe(struct kp
 	unsigned long addr = (unsigned long)p->addr;
 	unsigned long arm_addr = addr & ~0xFULL;

-	memcpy((char *)arm_addr, &p->ainsn.insn.bundle, sizeof(bundle_t));
+	memcpy((char *)arm_addr, &p->ainsn.insn[0].bundle, sizeof(bundle_t));
 	flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
 }

@@ -471,7 +518,7 @@ void __kprobes arch_disarm_kprobe(struct
 /*
  * We are resuming execution after a single step fault, so the pt_regs
  * structure reflects the register state after we executed the instruction
- * located in the kprobe (p->ainsn.insn.bundle).  We still need to adjust
+ * located in the kprobe (p->ainsn.insn[0].bundle).  We still need to adjust
  * the ip to point back to the original stack address. To set the IP address
  * to original stack address, handle the case where we need to fixup the
  * relative IP address and/or fixup branch register.
@@ -488,7 +535,7 @@ static void __kprobes resume_execution(s
  	if (slot == 1 && bundle_encoding[template][1] == L)
  		slot = 2;

-	if (p->ainsn.inst_flag) {
+	if (p->ainsn.inst_flag & ~INST_FLAG_BOOSTABLE) {

 		if (p->ainsn.inst_flag & INST_FLAG_FIX_RELATIVE_IP_ADDR) {
 			/* Fix relative IP address */
@@ -563,6 +610,24 @@ static void __kprobes prepare_ss(struct
 	ia64_psr(regs)->ss = 1;
 }

+/* prepare to execute directly */
+static void __kprobes prepare_boost(struct kprobe *p, struct pt_regs *regs)
+{
+	unsigned long slot = (unsigned long)p->addr & 0xf;
+
+	regs->cr_iip = (unsigned long)&p->ainsn.insn[1].bundle & ~0xFULL;
+
+	if (slot > 2)
+		slot = 0;
+
+	ia64_psr(regs)->ri = slot;
+
+	/* turn off single stepping */
+	ia64_psr(regs)->ss = 0;
+
+	reset_current_kprobe();
+}
+
 static int __kprobes is_ia64_break_inst(struct pt_regs *regs)
 {
 	unsigned int slot = ia64_psr(regs)->ri;
@@ -602,6 +667,11 @@ static int __kprobes pre_kprobes_handler
 	struct pt_regs *regs = args->regs;
 	kprobe_opcode_t *addr = (kprobe_opcode_t *)instruction_pointer(regs);
 	struct kprobe_ctlblk *kcb;
+#ifdef CONFIG_PREEMPT
+	unsigned pre_preempt_count = preempt_count();
+#else
+	unsigned pre_preempt_count = 1;
+#endif

 	/*
 	 * We don't want to be preempted for the entire
@@ -681,6 +751,14 @@ static int __kprobes pre_kprobes_handler
 		 */
 		return 1;

+	if (pre_preempt_count && p->ainsn.inst_flag == INST_FLAG_BOOSTABLE &&
+	    !p->post_handler) {
+		/* Boost up -- we can execute copied instructions directly */
+		prepare_boost(p, regs);
+		preempt_enable_no_resched();
+		return 1;
+	}
+
 ss_probe:
 	prepare_ss(p, regs);
 	kcb->kprobe_status = KPROBE_HIT_SS;
diff --exclude=CVS -Narup a/include/asm-ia64/kprobes.h b/include/asm-ia64/kprobes.h
--- a/include/asm-ia64/kprobes.h	2006-06-13 15:04:36.000000000 +0900
+++ b/include/asm-ia64/kprobes.h	2006-06-14 10:12:49.000000000 +0900
@@ -29,8 +29,12 @@
 #include <linux/percpu.h>
 #include <asm/break.h>

-#define MAX_INSN_SIZE   16
+#define MAX_INSN_SIZE   3	/* 3 bundles */
 #define BREAK_INST	(long)(__IA64_BREAK_KPROBE << 6)
+#define NOP_M_INST	(long)(1<<27)
+#define BRL_INST(i1,i2) (long)((0xcL << 37) |	/* brl */  \
+			       (1L << 12) |	/* many */ \
+			       (((i1) & 1) << 36) | ((i2) << 13))	/* imm */

 typedef union cmp_inst {
 	struct {
@@ -108,10 +112,11 @@ struct fnptr {
 /* Architecture specific copy of original instruction*/
 struct arch_specific_insn {
 	/* copy of the instruction to be emulated */
-	kprobe_opcode_t insn;
+	kprobe_opcode_t insn[3];
  #define INST_FLAG_FIX_RELATIVE_IP_ADDR		1
  #define INST_FLAG_FIX_BRANCH_REG		2
  #define INST_FLAG_BREAK_INST			4
+ #define INST_FLAG_BOOSTABLE			8
  	unsigned long inst_flag;
  	unsigned short target_br_reg;
 };




* Re: [PATCH]kprobe booster for IA64
  2006-06-15  5:46       ` Masami Hiramatsu
@ 2006-06-27  0:16         ` Masami Hiramatsu
  0 siblings, 0 replies; 13+ messages in thread
From: Masami Hiramatsu @ 2006-06-27  0:16 UTC (permalink / raw)
  To: Masami Hiramatsu
  Cc: bibo,mao, Keshavamurthy, Anil S, Ananth N Mavinakayanahalli,
	SystemTAP, Yumiko Sugita, Satoshi Oshima, Hideo Aoki,
	Prasanna S Panchamukhi, Jim Keniston

Hi,

Masami Hiramatsu wrote:
>  /*
> + * In this function, we check whether the target bundle can modify
> + * the IP and whether it may raise an exception.
> + */
> +static __always_inline int can_boost(uint template, unsigned long addr,
> +				     uint slot)
> +{
> +	addr &= ~0xFULL;
> +	do {
> +		if (search_exception_tables(addr + (++slot)))	/* we must check the next slot. */

I mixed up traps with the other faults. We must check for possible
faults starting from the target slot itself. So I must change it to:

> +		if (search_exception_tables(addr + (slot++)))

Thanks,

-- 
Masami HIRAMATSU
2nd Research Dept.
Hitachi, Ltd., Systems Development Laboratory
E-mail: hiramatu@sdl.hitachi.co.jp



* Re: [PATCH]kprobe booster for IA64
  2006-06-06  6:35 [PATCH]kprobe booster for IA64 Masami Hiramatsu
  2006-06-07  8:17 ` bibo,mao
@ 2006-06-29  3:16 ` Keshavamurthy Anil S
  2006-07-04 22:56   ` Masami Hiramatsu
  1 sibling, 1 reply; 13+ messages in thread
From: Keshavamurthy Anil S @ 2006-06-29  3:16 UTC (permalink / raw)
  To: Masami Hiramatsu
  Cc: Keshavamurthy, Anil S, Ananth N Mavinakayanahalli, SystemTAP,
	Yumiko Sugita, Satoshi Oshima, Hideo Aoki,
	Prasanna S Panchamukhi, Jim Keniston

On Tue, Jun 06, 2006 at 03:35:40PM +0900, Masami Hiramatsu wrote:

> +#ifdef CONFIG_PREEMPT
> +	unsigned pre_preempt_count = preempt_count();
> +#else
> +	unsigned pre_preempt_count = 1;
> +#endif
> 
>  	/*
>  	 * We don't want to be preempted for the entire
> @@ -681,6 +745,14 @@ static int __kprobes pre_kprobes_handler
>  		 */
>  		return 1;
> 
> +	if (pre_preempt_count && p->ainsn.inst_flag == INST_FLAG_BOOSTABLE &&
> +	    !p->post_handler) {

pre_preempt_count will always be one here, since
notify_die()->atomic_notifier_call_chain()->rcu_read_lock()->preempt_disable().
So currently you might be preparing for boosting even on a
preemptible code path. Can you verify this?

-Anil


* Re: [PATCH]kprobe booster for IA64
  2006-06-29  3:16 ` Keshavamurthy Anil S
@ 2006-07-04 22:56   ` Masami Hiramatsu
  2006-07-12  6:22     ` Preemption-safe kprobe-booster(Re: [PATCH]kprobe booster for IA64) Masami Hiramatsu
  0 siblings, 1 reply; 13+ messages in thread
From: Masami Hiramatsu @ 2006-07-04 22:56 UTC (permalink / raw)
  To: Keshavamurthy Anil S
  Cc: Ananth N Mavinakayanahalli, SystemTAP, Yumiko Sugita,
	Satoshi Oshima, Hideo Aoki, Prasanna S Panchamukhi, Jim Keniston

Hi, Anil

Keshavamurthy Anil S wrote:
> pre_preempt_count will always be one here, since
> notify_die()->atomic_notifier_call_chain()->rcu_read_lock()->preempt_disable().
> So currently you might be preparing for boosting even on a
> preemptible code path. Can you verify this?

Thank you for the advice!
I hadn't realized that. Now I have verified it and had some ideas.
The problem comes from reusing the insn buffer, because
some preempted processes may still be inside the buffer.
So I think we can avoid this problem as follows:
first, we disable the kprobe (remove the break); next, we
wait until all preempted processes have been woken up; and last,
we release the insn buffer for reuse. Then it will be safe,
because no process remains inside the buffer.

For this purpose, I'd like to use stop_machine_run().
However, we can't call it on every release because
it is very costly.
I think we can resolve this by using a garbage collector.

I describe my idea below:
A kprobe frees its insn slot (buffer) after it is disabled.
When an insn slot is freed, it is just marked as
garbage. When get_insn_slot() can't find any free slot,
it calls the garbage collector to try to refill the free slots.
The garbage collector uses stop_machine_run() to ensure safety.
What do you think about this idea?
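
Roughly, I imagine something like this (every name below is made up
for illustration; this is just a sketch, not an implementation):

/* allocation path with lazy reclamation -- sketch only */
kprobe_opcode_t *get_insn_slot(void)
{
	kprobe_opcode_t *slot;

	slot = find_free_slot();	/* hypothetical helper */
	if (!slot) {
		/*
		 * No free slot left: reclaim the slots marked as
		 * garbage. stop_machine_run() makes sure no CPU can
		 * still be executing inside a reclaimed buffer.
		 */
		collect_garbage_slots();	/* hypothetical helper */
		slot = find_free_slot();
	}
	return slot;
}

void free_insn_slot(kprobe_opcode_t *slot)
{
	/* don't recycle yet -- just mark the slot as garbage */
	mark_slot_dirty(slot);	/* hypothetical helper */
}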

Best regards,

-- 
Masami HIRAMATSU
2nd Research Dept.
Hitachi, Ltd., Systems Development Laboratory
E-mail: hiramatu@sdl.hitachi.co.jp
(New) E-mail: masami.hiramatsu.pt@hitachi.com (available from July 8th)






* Preemption-safe kprobe-booster(Re: [PATCH]kprobe booster for IA64)
  2006-07-04 22:56   ` Masami Hiramatsu
@ 2006-07-12  6:22     ` Masami Hiramatsu
  2006-07-12  8:09       ` Keshavamurthy Anil S
  0 siblings, 1 reply; 13+ messages in thread
From: Masami Hiramatsu @ 2006-07-12  6:22 UTC (permalink / raw)
  To: Keshavamurthy Anil S
  Cc: Ananth N Mavinakayanahalli, SystemTAP, Yumiko Sugita,
	Satoshi Oshima, Hideo Aoki, Prasanna S Panchamukhi, Jim Keniston

Hi, Anil

Masami Hiramatsu wrote:
> Hi, Anil
> 
> Keshavamurthy Anil S wrote:
>> pre_preempt_count will always be one here, since
>> notify_die()->atomic_notifier_call_chain()->rcu_read_lock()->preempt_disable().
>> So currently you might be preparing for boosting even on a
>> preemptible code path. Can you verify this?
> 
> Thank you for the advice!
> I hadn't realized that. Now I have verified it and had some ideas.
> The problem comes from reusing the insn buffer, because
> some preempted processes may still be inside the buffer.
> So I think we can avoid this problem as follows:
> first, we disable the kprobe (remove the break); next, we
> wait until all preempted processes have been woken up; and last,
> we release the insn buffer for reuse. Then it will be safe,
> because no process remains inside the buffer.
> 
> For this purpose, I'd like to use stop_machine_run().

Oshima-san and I found that stop_machine_run() was not enough
to ensure safety. Instead, we focused on synchronize_sched().
As far as we know, this function waits until all processes
are expired. A preempted process can't be expired; only
processes that scheduled out by themselves are expired.
This means those processes have already left the kprobe's slots.
So, after that, we can release/reuse these slots safely.

> However, we can't call it on every release because
> it is very costly.

synchronize_sched() is also costly.

> I think we can resolve this by using a garbage collector.
> 
> I describe my idea below:
> A kprobe frees its insn slot (buffer) after it is disabled.
> When an insn slot is freed, it is just marked as
> garbage. When get_insn_slot() can't find any free slot,
> it calls the garbage collector to try to refill the free slots.

And the garbage collector uses synchronize_sched() to ensure safety.

> What do you think about this idea?
> 
> Best regards,
> 

Thanks,

-- 
Masami HIRAMATSU
2nd Research Dept.
Hitachi, Ltd., Systems Development Laboratory
E-mail: masami.hiramatsu.pt@hitachi.com




* Re: Preemption-safe kprobe-booster(Re: [PATCH]kprobe booster for IA64)
  2006-07-12  6:22     ` Preemption-safe kprobe-booster(Re: [PATCH]kprobe booster for IA64) Masami Hiramatsu
@ 2006-07-12  8:09       ` Keshavamurthy Anil S
  2006-07-13 11:25         ` Masami Hiramatsu
  0 siblings, 1 reply; 13+ messages in thread
From: Keshavamurthy Anil S @ 2006-07-12  8:09 UTC (permalink / raw)
  To: Masami Hiramatsu
  Cc: Keshavamurthy Anil S, Ananth N Mavinakayanahalli, SystemTAP,
	Yumiko Sugita, Satoshi Oshima, Hideo Aoki,
	Prasanna S Panchamukhi, Jim Keniston

On Wed, Jul 12, 2006 at 03:22:37PM +0900, Masami Hiramatsu wrote:
> Hi, Anil
> 
> > Thank you for the advice!
> > I hadn't realized that. Now I have verified it and had some ideas.
> > The problem comes from reusing the insn buffer, because
> > some preempted processes may still be inside the buffer.
> > So I think we can avoid this problem as follows:
> > first, we disable the kprobe (remove the break); next, we
> > wait until all preempted processes have been woken up; and last,
No, you can't wait until all preempted processes have been woken up.
Say, for example, I have a thread that sleeps indefinitely,
waiting on some signal or some inter-process message
or something like that. This thread may never get a chance to
run during the lifetime (uptime) of the system. Then your logic
of waiting until all processes are woken up will not work.

> > For this purpose, I'd like to use stop_machine_run().
> 
> Oshima-san and I found that stop_machine_run() was not enough
> to ensure safety. Instead, we focused on synchronize_sched().

You are correct, stop_machine_run() will not help you here.

> As far as we know, this function waits until all processes
> are expired. A preempted process can't be expired; only
> processes that scheduled out by themselves are expired.
> This means those processes have already left the kprobe's slots.
> So, after that, we can release/reuse these slots safely.
I am not sure about the behavior of synchronize_sched() on
preemptible kernels. When you say this function waits until
all processes are expired, do you mean that a process's allotted time
slice expires and it is therefore scheduled out? If that is true,
then it could very well happen that when we finish executing
the original instruction in the booster buffer, and just before
jumping back to the original code sequence, this process's time slice
expires and it is scheduled out; synchronize_sched()
then returns and you try to reuse the buffer :(
Please confirm the behavior of synchronize_sched() before going
ahead and implementing the code.
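
The interleaving I have in mind is roughly this (hypothetical):

	probed task                          slot-reclaim path
	-----------                          -----------------
	executes copied insn in the buffer
	time slice expires, scheduled out
	                                     synchronize_sched() returns
	                                     buffer freed and reused
	resumes inside the now-stale buffer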

-Anil


* Re: Preemption-safe kprobe-booster(Re: [PATCH]kprobe booster for  IA64)
  2006-07-12  8:09       ` Keshavamurthy Anil S
@ 2006-07-13 11:25         ` Masami Hiramatsu
  2006-07-14 18:41           ` Keshavamurthy Anil S
  0 siblings, 1 reply; 13+ messages in thread
From: Masami Hiramatsu @ 2006-07-13 11:25 UTC (permalink / raw)
  To: Keshavamurthy Anil S
  Cc: Ananth N Mavinakayanahalli, SystemTAP, Yumiko Sugita,
	Satoshi Oshima, Hideo Aoki, Prasanna S Panchamukhi, Jim Keniston

Hi, Anil
Thank you for the precise advice.

Keshavamurthy Anil S wrote:
> On Wed, Jul 12, 2006 at 03:22:37PM +0900, Masami Hiramatsu wrote:
>> As far as we know, this function waits until all processes
>> are expired. A preempted process can't be expired; only
>> processes that scheduled out by themselves are expired.
>> This means those processes have already left the kprobe's slots.
>> So, after that, we can release/reuse these slots safely.
> I am not sure about the behavior of synchronize_sched() on
> preemptible kernels. When you say this function waits until
> all processes are expired, do you mean that a process's allotted time
> slice expires and it is therefore scheduled out? If that is true,
> then it could very well happen that when we finish executing
> the original instruction in the booster buffer, and just before
> jumping back to the original code sequence, this process's time slice
> expires and it is scheduled out; synchronize_sched()
> then returns and you try to reuse the buffer :(
> Please confirm the behavior of synchronize_sched() before going
> ahead and implementing the code.

I confirmed it and found that synchronize_sched() is an interface
that waits for RCU quiescent states. An RCU quiescent state means
one of the following:
- a process switch
- a switch to user space
- the idle state
As you say, that is not enough to ensure the safety of the
kprobe-booster.

Now we are searching for another good solution to this problem.

In the meantime, I'd like to disable the booster when CONFIG_PREEMPT
is defined, until this problem is solved. I attached the patch to
this mail. Please review it.

Thanks,

-- 
Masami HIRAMATSU
2nd Research Dept.
Hitachi, Ltd., Systems Development Laboratory
E-mail: masami.hiramatsu.pt@hitachi.com

 kprobes.c |    9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)
diff --exclude=CVS -Narup a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
--- a/arch/i386/kernel/kprobes.c	2006-07-10 12:11:43.000000000 +0900
+++ b/arch/i386/kernel/kprobes.c	2006-07-10 15:41:17.000000000 +0900
@@ -256,11 +256,6 @@ static int __kprobes kprobe_handler(stru
 	int ret = 0;
 	kprobe_opcode_t *addr;
 	struct kprobe_ctlblk *kcb;
-#ifdef CONFIG_PREEMPT
-	unsigned pre_preempt_count = preempt_count();
-#else
-	unsigned pre_preempt_count = 1;
-#endif

 	addr = (kprobe_opcode_t *)(regs->eip - sizeof(kprobe_opcode_t));

@@ -338,13 +333,15 @@ static int __kprobes kprobe_handler(stru
 		return 1;

 ss_probe:
-	if (pre_preempt_count && p->ainsn.boostable == 1 && !p->post_handler){
+#ifdef CONFIG_PREEMPT
+	if (p->ainsn.boostable == 1 && !p->post_handler){
 		/* Boost up -- we can execute copied instructions directly */
 		reset_current_kprobe();
 		regs->eip = (unsigned long)p->ainsn.insn;
 		preempt_enable_no_resched();
 		return 1;
 	}
+#endif
 	prepare_singlestep(p, regs);
 	kcb->kprobe_status = KPROBE_HIT_SS;
 	return 1;


* Re: Preemption-safe kprobe-booster(Re: [PATCH]kprobe booster for IA64)
  2006-07-13 11:25         ` Masami Hiramatsu
@ 2006-07-14 18:41           ` Keshavamurthy Anil S
  2006-07-15  6:20             ` Masami Hiramatsu
  0 siblings, 1 reply; 13+ messages in thread
From: Keshavamurthy Anil S @ 2006-07-14 18:41 UTC (permalink / raw)
  To: Masami Hiramatsu
  Cc: Keshavamurthy Anil S, Ananth N Mavinakayanahalli, SystemTAP,
	Yumiko Sugita, Satoshi Oshima, Hideo Aoki,
	Prasanna S Panchamukhi, Jim Keniston

On Thu, Jul 13, 2006 at 08:25:17PM +0900, Masami Hiramatsu wrote:
> Hi, Anil
> Thank you for the precise advice.
> 
> Keshavamurthy Anil S wrote:
> > On Wed, Jul 12, 2006 at 03:22:37PM +0900, Masami Hiramatsu wrote:
> Now we are searching for another good solution to this problem.
> 
> In the meantime, I'd like to disable the booster when CONFIG_PREEMPT
> is defined, until this problem is solved. I attached the patch to
> this mail. Please review it.
> 

--Comment here, please:
 /* The booster is not supported on preemptible kernels */
> +#ifdef CONFIG_PREEMPT
   ^^^^^^ Should be #ifndef
> +	if (p->ainsn.boostable == 1 && !p->post_handler){
>  		/* Boost up -- we can execute copied instructions directly */
>  		reset_current_kprobe();
>  		regs->eip = (unsigned long)p->ainsn.insn;
>  		preempt_enable_no_resched();
>  		return 1;
>  	}
> +#endif
>  	prepare_singlestep(p, regs);
>  	kcb->kprobe_status = KPROBE_HIT_SS;
>  	return 1;

Also you may want to do the same thing for kretprobe booster.

Thanks,
Anil


* Re: Preemption-safe kprobe-booster(Re: [PATCH]kprobe booster for  IA64)
  2006-07-14 18:41           ` Keshavamurthy Anil S
@ 2006-07-15  6:20             ` Masami Hiramatsu
  0 siblings, 0 replies; 13+ messages in thread
From: Masami Hiramatsu @ 2006-07-15  6:20 UTC (permalink / raw)
  To: Keshavamurthy Anil S
  Cc: Ananth N Mavinakayanahalli, SystemTAP, Yumiko Sugita,
	Satoshi Oshima, Hideo Aoki, Prasanna S Panchamukhi, Jim Keniston

Hi, Anil

Keshavamurthy Anil S wrote:
> --Comment here, please:
>  /* The booster is not supported on preemptible kernels */
>> +#ifdef CONFIG_PREEMPT
>    ^^^^^^ Should be #ifndef

That's right; I fixed it.

>> +	if (p->ainsn.boostable == 1 && !p->post_handler){
>>  		/* Boost up -- we can execute copied instructions directly */
>>  		reset_current_kprobe();
>>  		regs->eip = (unsigned long)p->ainsn.insn;
>>  		preempt_enable_no_resched();
>>  		return 1;
>>  	}
>> +#endif
>>  	prepare_singlestep(p, regs);
>>  	kcb->kprobe_status = KPROBE_HIT_SS;
>>  	return 1;
> 
> Also you may want to do the same thing for kretprobe booster.

I think the kretprobe-booster is not affected by kernel preemption,
because all of the code on the kretprobe-booster's execution path
is static assembly code, which is never removed.

> 
> Thanks,
> Anil
> 
> 

Thanks,

-- 
Masami HIRAMATSU
2nd Research Dept.
Hitachi, Ltd., Systems Development Laboratory
E-mail: masami.hiramatsu.pt@hitachi.com



Thread overview: 13+ messages
2006-06-06  6:35 [PATCH]kprobe booster for IA64 Masami Hiramatsu
2006-06-07  8:17 ` bibo,mao
2006-06-08 14:31   ` Masami Hiramatsu
2006-06-09  3:03     ` bibo,mao
2006-06-15  5:46       ` Masami Hiramatsu
2006-06-27  0:16         ` Masami Hiramatsu
2006-06-29  3:16 ` Keshavamurthy Anil S
2006-07-04 22:56   ` Masami Hiramatsu
2006-07-12  6:22     ` Preemption-safe kprobe-booster(Re: [PATCH]kprobe booster for IA64) Masami Hiramatsu
2006-07-12  8:09       ` Keshavamurthy Anil S
2006-07-13 11:25         ` Masami Hiramatsu
2006-07-14 18:41           ` Keshavamurthy Anil S
2006-07-15  6:20             ` Masami Hiramatsu
