* [RFC][Patch 2/2][take3]kprobe: kprobe-booster against 2.6.15-rc5-mm3 for i386
@ 2005-12-22 13:24 Masami Hiramatsu
2006-01-24 9:04 ` Ananth N Mavinakayanahalli
0 siblings, 1 reply; 4+ messages in thread
From: Masami Hiramatsu @ 2005-12-22 13:24 UTC (permalink / raw)
To: ananth, maneesh, anil.s.keshavamurthy
Cc: systemtap, Yumiko Sugita, Satoshi Oshima, Hideo Aoki
Hi,
Here is a patch of kprobe-booster for i386 arch.
This patch works with kernel preemption. On preemptable kernel,
kprobe-booster works only when the preemption is disabled.
--
Masami HIRAMATSU
2nd Research Dept.
Hitachi, Ltd., Systems Development Laboratory
E-mail: hiramatu@sdl.hitachi.co.jp
arch/i386/kernel/kprobes.c | 83 +++++++++++++++++++++++++++++++++++++++++++--
include/asm-i386/kprobes.h | 4 ++
2 files changed, 85 insertions(+), 2 deletions(-)
diff -Narup a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
--- a/arch/i386/kernel/kprobes.c 2005-12-20 19:57:56.000000000 +0900
+++ b/arch/i386/kernel/kprobes.c 2005-12-20 20:05:01.000000000 +0900
@@ -41,6 +41,49 @@ void jprobe_return_end(void);
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+/* insert a jmp code */
+static inline void set_jmp_op(void *from, void *to)
+{
+ struct __arch_jmp_op {
+ char op;
+ long raddr;
+ } __attribute__((packed)) *jop;
+ jop = (struct __arch_jmp_op *)from;
+ jop->raddr = (long)(to) - ((long)(from) + 5);
+ jop->op = RELATIVEJUMP_INSTRUCTION;
+}
+
+/*
+ * returns non-zero if opcodes can be boosted.
+ */
+static inline int can_boost(kprobe_opcode_t opcode)
+{
+ switch (opcode & 0xf0 ) {
+ case 0x70:
+ return 0; /* can't boost conditional jump */
+ case 0x90:
+ /* can't boost call and pushf */
+ return opcode != 0x9a && opcode != 0x9c;
+ case 0xc0:
+ /* can't boost undefined opcodes and soft-interruptions */
+ return (0xc1 < opcode && opcode < 0xc6) ||
+ (0xc7 < opcode && opcode < 0xcc) || opcode == 0xcf;
+ case 0xd0:
+ /* can boost AA* and XLAT */
+ return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
+ case 0xe0:
+ /* can boost in/out and (may be) jmps */
+ return (0xe3 < opcode && opcode != 0xe8);
+ case 0xf0:
+ /* clear and set flags can be boost */
+ return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
+ default:
+ /* currently, can't boost 2 bytes opcodes */
+ return opcode != 0x0f;
+ }
+}
+
+
/*
* returns non-zero if opcode modifies the interrupt flag.
*/
@@ -60,6 +103,11 @@ int __kprobes arch_prepare_kprobe(struct
{
memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
p->opcode = *p->addr;
+ if (can_boost(p->opcode)) {
+ p->ainsn.boostable = 0;
+ } else {
+ p->ainsn.boostable = -1;
+ }
return 0;
}
@@ -227,6 +275,19 @@ static int __kprobes kprobe_handler(stru
/* handler has already set things up, so skip ss setup */
return 1;
+ if (p->ainsn.boostable == 1 &&
+#ifdef CONFIG_PREEMPT
+ preempt_count() != 1 && /* This enables booster when the
+ direct execution path aren't preempted. */
+#endif /* CONFIG_PREEMPT */
+ !p->post_handler && !p->break_handler ) {
+ /* Boost up -- we can execute copied instructions directly */
+ reset_current_kprobe();
+ regs->eip = (unsigned long)&p->ainsn.insn;
+ preempt_enable_no_resched();
+ return 1;
+ }
+
ss_probe:
prepare_singlestep(p, regs);
kcb->kprobe_status = KPROBE_HIT_SS;
@@ -332,6 +393,8 @@ int __kprobes trampoline_probe_handler(s
* 2) If the single-stepped instruction was a call, the return address
* that is atop the stack is the address following the copied instruction.
* We need to make it the address following the original instruction.
+ *
+ * This function also checks instruction size for preparing direct execution.
*/
static void __kprobes resume_execution(struct kprobe *p,
struct pt_regs *regs, struct kprobe_ctlblk *kcb)
@@ -352,6 +415,7 @@ static void __kprobes resume_execution(s
case 0xca:
case 0xea: /* jmp absolute -- eip is correct */
/* eip is already adjusted, no more changes required */
+ p->ainsn.boostable = 1;
goto no_change;
case 0xe8: /* call relative - Fix return addr */
*tos = orig_eip + (*tos - copy_eip);
@@ -359,18 +423,33 @@ static void __kprobes resume_execution(s
case 0xff:
if ((p->ainsn.insn[1] & 0x30) == 0x10) {
/* call absolute, indirect */
- /* Fix return addr; eip is correct. */
+ /* Fix return addr; eip is correct
+ But this is not boostable */
*tos = orig_eip + (*tos - copy_eip);
goto no_change;
} else if (((p->ainsn.insn[1] & 0x31) == 0x20) || /* jmp near, absolute indirect */
((p->ainsn.insn[1] & 0x31) == 0x21)) { /* jmp far, absolute indirect */
- /* eip is correct. */
+ /* eip is correct. And this is boostable */
+ p->ainsn.boostable = 1;
goto no_change;
}
default:
break;
}
+ if (p->ainsn.boostable == 0) {
+ if ( regs->eip > copy_eip &&
+ (regs->eip - copy_eip) + 5 < MAX_INSN_SIZE) {
+ /* these instructions can be executed directly if it
+ jumps back to correct address. */
+ set_jmp_op((void *)regs->eip,
+ (void *)orig_eip + (regs->eip - copy_eip));
+ p->ainsn.boostable = 1;
+ } else {
+ p->ainsn.boostable = -1;
+ }
+ }
+
regs->eip = orig_eip + (regs->eip - copy_eip);
no_change:
diff -Narup a/include/asm-i386/kprobes.h b/include/asm-i386/kprobes.h
--- a/include/asm-i386/kprobes.h 2005-12-20 19:55:51.000000000 +0900
+++ b/include/asm-i386/kprobes.h 2005-12-20 20:03:11.000000000 +0900
@@ -31,6 +31,7 @@ struct pt_regs;
typedef u8 kprobe_opcode_t;
#define BREAKPOINT_INSTRUCTION 0xcc
+#define RELATIVEJUMP_INSTRUCTION 0xe9
#define MAX_INSN_SIZE 16
#define MAX_STACK_SIZE 64
#define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \
@@ -48,6 +49,9 @@ void kretprobe_trampoline(void);
struct arch_specific_insn {
/* copy of the original instruction */
kprobe_opcode_t insn[MAX_INSN_SIZE];
+ /* If this flag is not 0, this kprobe can be boost when its
+ post_handler and break_handler is not set. */
+ int boostable;
};
struct prev_kprobe {
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [RFC][Patch 2/2][take3]kprobe: kprobe-booster against 2.6.15-rc5-mm3 for i386
2005-12-22 13:24 [RFC][Patch 2/2][take3]kprobe: kprobe-booster against 2.6.15-rc5-mm3 for i386 Masami Hiramatsu
@ 2006-01-24 9:04 ` Ananth N Mavinakayanahalli
2006-01-24 13:22 ` Masami Hiramatsu
0 siblings, 1 reply; 4+ messages in thread
From: Ananth N Mavinakayanahalli @ 2006-01-24 9:04 UTC (permalink / raw)
To: Masami Hiramatsu
Cc: maneesh, anil.s.keshavamurthy, systemtap, Yumiko Sugita,
Satoshi Oshima, Hideo Aoki
On Thu, Dec 22, 2005 at 10:24:04PM +0900, Masami Hiramatsu wrote:
> Hi,
>
> Here is a patch of kprobe-booster for i386 arch.
>
> This patch works with kernel preemption. On preemptable kernel,
> kprobe-booster works only when the preemption is disabled.
I don't know the intricacies of the IA32 instruction set as well as the
opcode tricks you could play. However, some comments (^^^) below.
Ananth
>
> --
> Masami HIRAMATSU
> 2nd Research Dept.
> Hitachi, Ltd., Systems Development Laboratory
> E-mail: hiramatu@sdl.hitachi.co.jp
>
> arch/i386/kernel/kprobes.c | 83 +++++++++++++++++++++++++++++++++++++++++++--
> include/asm-i386/kprobes.h | 4 ++
> 2 files changed, 85 insertions(+), 2 deletions(-)
> diff -Narup a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
> --- a/arch/i386/kernel/kprobes.c 2005-12-20 19:57:56.000000000 +0900
> +++ b/arch/i386/kernel/kprobes.c 2005-12-20 20:05:01.000000000 +0900
> @@ -41,6 +41,49 @@ void jprobe_return_end(void);
> DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
> DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
>
> +/* insert a jmp code */
> +static inline void set_jmp_op(void *from, void *to)
> +{
> + struct __arch_jmp_op {
> + char op;
> + long raddr;
> + } __attribute__((packed)) *jop;
> + jop = (struct __arch_jmp_op *)from;
> + jop->raddr = (long)(to) - ((long)(from) + 5);
> + jop->op = RELATIVEJUMP_INSTRUCTION;
> +}
> +
> +/*
> + * returns non-zero if opcodes can be boosted.
> + */
> +static inline int can_boost(kprobe_opcode_t opcode)
> +{
> + switch (opcode & 0xf0 ) {
> + case 0x70:
> + return 0; /* can't boost conditional jump */
> + case 0x90:
> + /* can't boost call and pushf */
> + return opcode != 0x9a && opcode != 0x9c;
> + case 0xc0:
> + /* can't boost undefined opcodes and soft-interruptions */
> + return (0xc1 < opcode && opcode < 0xc6) ||
> + (0xc7 < opcode && opcode < 0xcc) || opcode == 0xcf;
> + case 0xd0:
> + /* can boost AA* and XLAT */
> + return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
> + case 0xe0:
> + /* can boost in/out and (may be) jmps */
> + return (0xe3 < opcode && opcode != 0xe8);
> + case 0xf0:
> + /* clear and set flags can be boost */
> + return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
> + default:
> + /* currently, can't boost 2 bytes opcodes */
> + return opcode != 0x0f;
> + }
> +}
> +
> +
> /*
> * returns non-zero if opcode modifies the interrupt flag.
> */
> @@ -60,6 +103,11 @@ int __kprobes arch_prepare_kprobe(struct
> {
> memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
> p->opcode = *p->addr;
> + if (can_boost(p->opcode)) {
> + p->ainsn.boostable = 0;
> + } else {
> + p->ainsn.boostable = -1;
> + }
> return 0;
> }
>
> @@ -227,6 +275,19 @@ static int __kprobes kprobe_handler(stru
> /* handler has already set things up, so skip ss setup */
> return 1;
>
> + if (p->ainsn.boostable == 1 &&
> +#ifdef CONFIG_PREEMPT
> + preempt_count() != 1 && /* This enables booster when the
> + direct execution path aren't preempted. */
^^^^^
Why is the comparison against an absolute value? preempt_disable/enable()
can be nested and on each such invocation, the count is
incremented/decremented respectively - this number can be 0 or hold
any positive value.
And multiline comments in
/*
*
*/
format please
> +#endif /* CONFIG_PREEMPT */
> + !p->post_handler && !p->break_handler ) {
> + /* Boost up -- we can execute copied instructions directly */
> + reset_current_kprobe();
> + regs->eip = (unsigned long)&p->ainsn.insn;
> + preempt_enable_no_resched();
> + return 1;
> + }
> +
> ss_probe:
> prepare_singlestep(p, regs);
> kcb->kprobe_status = KPROBE_HIT_SS;
> @@ -332,6 +393,8 @@ int __kprobes trampoline_probe_handler(s
> * 2) If the single-stepped instruction was a call, the return address
> * that is atop the stack is the address following the copied instruction.
> * We need to make it the address following the original instruction.
> + *
> + * This function also checks instruction size for preparing direct execution.
> */
> static void __kprobes resume_execution(struct kprobe *p,
> struct pt_regs *regs, struct kprobe_ctlblk *kcb)
> @@ -352,6 +415,7 @@ static void __kprobes resume_execution(s
> case 0xca:
> case 0xea: /* jmp absolute -- eip is correct */
> /* eip is already adjusted, no more changes required */
> + p->ainsn.boostable = 1;
> goto no_change;
> case 0xe8: /* call relative - Fix return addr */
> *tos = orig_eip + (*tos - copy_eip);
> @@ -359,18 +423,33 @@ static void __kprobes resume_execution(s
> case 0xff:
> if ((p->ainsn.insn[1] & 0x30) == 0x10) {
> /* call absolute, indirect */
> - /* Fix return addr; eip is correct. */
> + /* Fix return addr; eip is correct
> + But this is not boostable */
^^^ Please fix comment style
> *tos = orig_eip + (*tos - copy_eip);
> goto no_change;
> } else if (((p->ainsn.insn[1] & 0x31) == 0x20) || /* jmp near, absolute indirect */
> ((p->ainsn.insn[1] & 0x31) == 0x21)) { /* jmp far, absolute indirect */
> - /* eip is correct. */
> + /* eip is correct. And this is boostable */
> + p->ainsn.boostable = 1;
> goto no_change;
> }
> default:
> break;
> }
>
> + if (p->ainsn.boostable == 0) {
> + if ( regs->eip > copy_eip &&
^^^ Coding-style please :)
> + (regs->eip - copy_eip) + 5 < MAX_INSN_SIZE) {
> + /* these instructions can be executed directly if it
> + jumps back to correct address. */
^^^ Comment style fix please
> + set_jmp_op((void *)regs->eip,
> + (void *)orig_eip + (regs->eip - copy_eip));
> + p->ainsn.boostable = 1;
> + } else {
> + p->ainsn.boostable = -1;
> + }
> + }
> +
> regs->eip = orig_eip + (regs->eip - copy_eip);
>
> no_change:
> diff -Narup a/include/asm-i386/kprobes.h b/include/asm-i386/kprobes.h
> --- a/include/asm-i386/kprobes.h 2005-12-20 19:55:51.000000000 +0900
> +++ b/include/asm-i386/kprobes.h 2005-12-20 20:03:11.000000000 +0900
> @@ -31,6 +31,7 @@ struct pt_regs;
>
> typedef u8 kprobe_opcode_t;
> #define BREAKPOINT_INSTRUCTION 0xcc
> +#define RELATIVEJUMP_INSTRUCTION 0xe9
> #define MAX_INSN_SIZE 16
> #define MAX_STACK_SIZE 64
> #define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \
> @@ -48,6 +49,9 @@ void kretprobe_trampoline(void);
> struct arch_specific_insn {
> /* copy of the original instruction */
> kprobe_opcode_t insn[MAX_INSN_SIZE];
> + /* If this flag is not 0, this kprobe can be boost when its
> + post_handler and break_handler is not set. */
^^^
Pls fix comment style
> + int boostable;
> };
>
> struct prev_kprobe {
>
>
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [RFC][Patch 2/2][take3]kprobe: kprobe-booster against 2.6.15-rc5-mm3 for i386
2006-01-24 9:04 ` Ananth N Mavinakayanahalli
@ 2006-01-24 13:22 ` Masami Hiramatsu
2006-01-24 14:17 ` Masami Hiramatsu
0 siblings, 1 reply; 4+ messages in thread
From: Masami Hiramatsu @ 2006-01-24 13:22 UTC (permalink / raw)
To: ananth
Cc: maneesh, anil.s.keshavamurthy, systemtap, Yumiko Sugita,
Satoshi Oshima, Hideo Aoki
Hi, Ananth
Thank you for review.
Ananth N Mavinakayanahalli wrote:
>>@@ -227,6 +275,19 @@ static int __kprobes kprobe_handler(stru
>> /* handler has already set things up, so skip ss setup */
>> return 1;
>>
>>+ if (p->ainsn.boostable == 1 &&
>>+#ifdef CONFIG_PREEMPT
>>+ preempt_count() != 1 && /* This enables booster when the
>>+ direct execution path aren't preempted. */
> ^^^^^
> Why is the comparison against an absolute value? preempt_disable/enable()
> can be nested and on each such invocation, the count is
> incremented/decremented respectively - this number can be 0 or hold
> any positive value.
I could not find validate function/macro of preempt_count(), so I used an
absolute value.
I have an idea to remove the absolute value.
I will store the value of preempt_count() into an local variable before
the preempt_disable() calling, and will check the local variable instead
of preempt_count() itself. Thus I can write as below:
#ifdef CONFIG_PREEMPT
!(local_preempt_count) &&
#endif
Is this OK?
> And multiline comments in
> /*
> *
> */
> format please
OK. I will fix that.
--
Masami HIRAMATSU
2nd Research Dept.
Hitachi, Ltd., Systems Development Laboratory
E-mail: hiramatu@sdl.hitachi.co.jp
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [RFC][Patch 2/2][take3]kprobe: kprobe-booster against 2.6.15-rc5-mm3 for i386
2006-01-24 13:22 ` Masami Hiramatsu
@ 2006-01-24 14:17 ` Masami Hiramatsu
0 siblings, 0 replies; 4+ messages in thread
From: Masami Hiramatsu @ 2006-01-24 14:17 UTC (permalink / raw)
To: ananth
Cc: Masami Hiramatsu, maneesh, anil.s.keshavamurthy, systemtap,
Yumiko Sugita, Satoshi Oshima, Hideo Aoki
Hi,
Here is a cleaned patch of kprobe-booster.
- fix coding style.
- remove an absolute number.
--
Masami HIRAMATSU
2nd Research Dept.
Hitachi, Ltd., Systems Development Laboratory
E-mail: hiramatu@sdl.hitachi.co.jp
arch/i386/kernel/kprobes.c | 92 ++++++++++++++++++++++++++++++++++++++++++++-
include/asm-i386/kprobes.h | 6 ++
2 files changed, 96 insertions(+), 2 deletions(-)
diff -Narup a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
--- a/arch/i386/kernel/kprobes.c 2006-01-24 22:43:17.000000000 +0900
+++ b/arch/i386/kernel/kprobes.c 2006-01-24 23:12:23.000000000 +0900
@@ -41,6 +41,49 @@ void jprobe_return_end(void);
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+/* insert a jmp code */
+static inline void set_jmp_op(void *from, void *to)
+{
+ struct __arch_jmp_op {
+ char op;
+ long raddr;
+ } __attribute__((packed)) *jop;
+ jop = (struct __arch_jmp_op *)from;
+ jop->raddr = (long)(to) - ((long)(from) + 5);
+ jop->op = RELATIVEJUMP_INSTRUCTION;
+}
+
+/*
+ * returns non-zero if opcodes can be boosted.
+ */
+static inline int can_boost(kprobe_opcode_t opcode)
+{
+ switch (opcode & 0xf0) {
+ case 0x70:
+ return 0; /* can't boost conditional jump */
+ case 0x90:
+ /* can't boost call and pushf */
+ return opcode != 0x9a && opcode != 0x9c;
+ case 0xc0:
+ /* can't boost undefined opcodes and soft-interruptions */
+ return (0xc1 < opcode && opcode < 0xc6) ||
+ (0xc7 < opcode && opcode < 0xcc) || opcode == 0xcf;
+ case 0xd0:
+ /* can boost AA* and XLAT */
+ return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
+ case 0xe0:
+ /* can boost in/out and (may be) jmps */
+ return (0xe3 < opcode && opcode != 0xe8);
+ case 0xf0:
+ /* clear and set flags can be boosted */
+ return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
+ default:
+ /* currently, can't boost 2 bytes opcodes */
+ return opcode != 0x0f;
+ }
+}
+
+
/*
* returns non-zero if opcode modifies the interrupt flag.
*/
@@ -60,6 +103,11 @@ int __kprobes arch_prepare_kprobe(struct
{
memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
p->opcode = *p->addr;
+ if (can_boost(p->opcode)) {
+ p->ainsn.boostable = 0;
+ } else {
+ p->ainsn.boostable = -1;
+ }
return 0;
}
@@ -146,6 +194,9 @@ static int __kprobes kprobe_handler(stru
kprobe_opcode_t *addr = NULL;
unsigned long *lp;
struct kprobe_ctlblk *kcb;
+#ifdef CONFIG_PREEMPT
+ unsigned pre_preempt_count = preempt_count();
+#endif /* CONFIG_PREEMPT */
/*
* We don't want to be preempted for the entire
@@ -240,6 +291,21 @@ static int __kprobes kprobe_handler(stru
/* handler has already set things up, so skip ss setup */
return 1;
+ if (p->ainsn.boostable == 1 &&
+#ifdef CONFIG_PREEMPT
+ !(pre_preempt_count) && /*
+ * This enables booster when the direct
+ * execution path isn't preempted.
+ */
+#endif /* CONFIG_PREEMPT */
+ !p->post_handler && !p->break_handler) {
+ /* Boost up -- we can execute copied instructions directly */
+ reset_current_kprobe();
+ regs->eip = (unsigned long)&p->ainsn.insn;
+ preempt_enable_no_resched();
+ return 1;
+ }
+
ss_probe:
prepare_singlestep(p, regs);
kcb->kprobe_status = KPROBE_HIT_SS;
@@ -345,6 +411,8 @@ int __kprobes trampoline_probe_handler(s
* 2) If the single-stepped instruction was a call, the return address
* that is atop the stack is the address following the copied instruction.
* We need to make it the address following the original instruction.
+ *
+ * This function also checks instruction size for preparing direct execution.
*/
static void __kprobes resume_execution(struct kprobe *p,
struct pt_regs *regs, struct kprobe_ctlblk *kcb)
@@ -365,6 +433,7 @@ static void __kprobes resume_execution(s
case 0xca:
case 0xea: /* jmp absolute -- eip is correct */
/* eip is already adjusted, no more changes required */
+ p->ainsn.boostable = 1;
goto no_change;
case 0xe8: /* call relative - Fix return addr */
*tos = orig_eip + (*tos - copy_eip);
@@ -372,18 +441,37 @@ static void __kprobes resume_execution(s
case 0xff:
if ((p->ainsn.insn[1] & 0x30) == 0x10) {
/* call absolute, indirect */
- /* Fix return addr; eip is correct. */
+ /*
+ * Fix return addr; eip is correct.
+ * But this is not boostable
+ */
*tos = orig_eip + (*tos - copy_eip);
goto no_change;
} else if (((p->ainsn.insn[1] & 0x31) == 0x20) || /* jmp near, absolute indirect */
((p->ainsn.insn[1] & 0x31) == 0x21)) { /* jmp far, absolute indirect */
- /* eip is correct. */
+ /* eip is correct. And this is boostable */
+ p->ainsn.boostable = 1;
goto no_change;
}
default:
break;
}
+ if (p->ainsn.boostable == 0) {
+ if ((regs->eip > copy_eip) &&
+ (regs->eip - copy_eip) + 5 < MAX_INSN_SIZE) {
+ /*
+ * These instructions can be executed directly if they
+ * jump back to the correct address.
+ */
+ set_jmp_op((void *)regs->eip,
+ (void *)orig_eip + (regs->eip - copy_eip));
+ p->ainsn.boostable = 1;
+ } else {
+ p->ainsn.boostable = -1;
+ }
+ }
+
regs->eip = orig_eip + (regs->eip - copy_eip);
no_change:
diff -Narup a/include/asm-i386/kprobes.h b/include/asm-i386/kprobes.h
--- a/include/asm-i386/kprobes.h 2006-01-24 19:07:39.000000000 +0900
+++ b/include/asm-i386/kprobes.h 2006-01-24 22:54:35.000000000 +0900
@@ -31,6 +31,7 @@ struct pt_regs;
typedef u8 kprobe_opcode_t;
#define BREAKPOINT_INSTRUCTION 0xcc
+#define RELATIVEJUMP_INSTRUCTION 0xe9
#define MAX_INSN_SIZE 16
#define MAX_STACK_SIZE 64
#define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \
@@ -48,6 +49,11 @@ void kretprobe_trampoline(void);
struct arch_specific_insn {
/* copy of the original instruction */
kprobe_opcode_t insn[MAX_INSN_SIZE];
+ /*
+ * If this flag is not 0, this kprobe can be boosted when its
+ * post_handler and break_handler are not set.
+ */
+ int boostable;
};
struct prev_kprobe {
^ permalink raw reply [flat|nested] 4+ messages in thread
end of thread, other threads:[~2006-01-24 14:17 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2005-12-22 13:24 [RFC][Patch 2/2][take3]kprobe: kprobe-booster against 2.6.15-rc5-mm3 for i386 Masami Hiramatsu
2006-01-24 9:04 ` Ananth N Mavinakayanahalli
2006-01-24 13:22 ` Masami Hiramatsu
2006-01-24 14:17 ` Masami Hiramatsu
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).