public inbox for systemtap@sourceware.org
 help / color / mirror / Atom feed
* [PATCH] [1/5] Switch Kprobes inline functions to __kprobes for i386
@ 2006-04-10  5:57 Prasanna S Panchamukhi
  2006-04-10  5:58 ` [PATCH] [2/5] Switch Kprobes inline functions to __kprobes for x86_64 Prasanna S Panchamukhi
  0 siblings, 1 reply; 7+ messages in thread
From: Prasanna S Panchamukhi @ 2006-04-10  5:57 UTC (permalink / raw)
  To: akpm, Andi Kleen, davem
  Cc: linux-kernel, ananth, anil.s.keshavamurthy, systemtap

Hi,

Please find the set of patches that removes the inline keyword from kprobes
routines and adds __kprobes. These patches have been tested on i386, x86_64,
ppc64 and ia64 architectures.

Thanks
Prasanna

Andrew Morton pointed out that the compiler might not inline the functions
marked inline in kprobes, thereby allowing the insertion of probes
on these kprobes routines, which might cause recursion. This patch
removes all such inline markers and adds the routines to the kprobes
section, thereby disallowing probes on all such routines. Some of the
routines can still be inlined, since these routines get executed after
kprobes has done the necessary setup for reentrancy.

Signed-off-by: Prasanna S Panchamukhi <prasanna@in.ibm.com>


 arch/i386/kernel/kprobes.c |   18 +++++++++---------
 1 files changed, 9 insertions(+), 9 deletions(-)

diff -puN arch/i386/kernel/kprobes.c~kprobes-remove-inline-kprobe-functions arch/i386/kernel/kprobes.c
--- linux-2.6.17-rc1-mm2/arch/i386/kernel/kprobes.c~kprobes-remove-inline-kprobe-functions	2006-04-10 10:58:56.000000000 +0530
+++ linux-2.6.17-rc1-mm2-prasanna/arch/i386/kernel/kprobes.c	2006-04-10 10:58:56.000000000 +0530
@@ -43,7 +43,7 @@ DEFINE_PER_CPU(struct kprobe *, current_
 DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
 /* insert a jmp code */
-static inline void set_jmp_op(void *from, void *to)
+static __always_inline void set_jmp_op(void *from, void *to)
 {
 	struct __arch_jmp_op {
 		char op;
@@ -57,7 +57,7 @@ static inline void set_jmp_op(void *from
 /*
  * returns non-zero if opcodes can be boosted.
  */
-static inline int can_boost(kprobe_opcode_t opcode)
+static __always_inline int can_boost(kprobe_opcode_t opcode)
 {
 	switch (opcode & 0xf0 ) {
 	case 0x70:
@@ -88,7 +88,7 @@ static inline int can_boost(kprobe_opcod
 /*
  * returns non-zero if opcode modifies the interrupt flag.
  */
-static inline int is_IF_modifier(kprobe_opcode_t opcode)
+static int __kprobes is_IF_modifier(kprobe_opcode_t opcode)
 {
 	switch (opcode) {
 	case 0xfa:		/* cli */
@@ -138,7 +138,7 @@ void __kprobes arch_remove_kprobe(struct
 	mutex_unlock(&kprobe_mutex);
 }
 
-static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
 	kcb->prev_kprobe.kp = kprobe_running();
 	kcb->prev_kprobe.status = kcb->kprobe_status;
@@ -146,7 +146,7 @@ static inline void save_previous_kprobe(
 	kcb->prev_kprobe.saved_eflags = kcb->kprobe_saved_eflags;
 }
 
-static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
 	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
 	kcb->kprobe_status = kcb->prev_kprobe.status;
@@ -154,7 +154,7 @@ static inline void restore_previous_kpro
 	kcb->kprobe_saved_eflags = kcb->prev_kprobe.saved_eflags;
 }
 
-static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
 				struct kprobe_ctlblk *kcb)
 {
 	__get_cpu_var(current_kprobe) = p;
@@ -164,7 +164,7 @@ static inline void set_current_kprobe(st
 		kcb->kprobe_saved_eflags &= ~IF_MASK;
 }
 
-static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
+static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
 {
 	regs->eflags |= TF_MASK;
 	regs->eflags &= ~IF_MASK;
@@ -507,7 +507,7 @@ no_change:
  * Interrupts are disabled on entry as trap1 is an interrupt gate and they
  * remain disabled thoroughout this function.
  */
-static inline int post_kprobe_handler(struct pt_regs *regs)
+static int __kprobes post_kprobe_handler(struct pt_regs *regs)
 {
 	struct kprobe *cur = kprobe_running();
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
@@ -543,7 +543,7 @@ out:
 	return 1;
 }
 
-static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+static int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 {
 	struct kprobe *cur = kprobe_running();
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

_
-- 
Prasanna S Panchamukhi
Linux Technology Center
India Software Labs, IBM Bangalore
Email: prasanna@in.ibm.com
Ph: 91-80-51776329

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH] [2/5] Switch Kprobes inline functions to __kprobes for x86_64
  2006-04-10  5:57 [PATCH] [1/5] Switch Kprobes inline functions to __kprobes for i386 Prasanna S Panchamukhi
@ 2006-04-10  5:58 ` Prasanna S Panchamukhi
  2006-04-10  5:59   ` [PATCH] [3/5] Switch Kprobes inline functions to __kprobes for ppc64 Prasanna S Panchamukhi
  2006-04-11  2:43   ` [PATCH] [2/5] Switch Kprobes inline functions to __kprobes for x86_64 Andi Kleen
  0 siblings, 2 replies; 7+ messages in thread
From: Prasanna S Panchamukhi @ 2006-04-10  5:58 UTC (permalink / raw)
  To: akpm, Andi Kleen, davem
  Cc: linux-kernel, ananth, anil.s.keshavamurthy, systemtap

Andrew Morton pointed out that the compiler might not inline the functions
marked inline in kprobes, thereby allowing the insertion of probes
on these kprobes routines, which might cause recursion. This patch
removes all such inline markers and adds the routines to the kprobes
section, thereby disallowing probes on all such routines. Some of the
routines can still be inlined, since these routines get executed after
kprobes has done the necessary setup for reentrancy.

Signed-off-by: Prasanna S Panchamukhi <prasanna@in.ibm.com>


 arch/x86_64/kernel/kprobes.c |   10 +++++-----
 1 files changed, 5 insertions(+), 5 deletions(-)

diff -puN arch/x86_64/kernel/kprobes.c~kprobes-remove-inline-kprobe-functions-x86_64 arch/x86_64/kernel/kprobes.c
--- linux-2.6.17-rc1-mm2/arch/x86_64/kernel/kprobes.c~kprobes-remove-inline-kprobe-functions-x86_64	2006-04-10 10:59:25.000000000 +0530
+++ linux-2.6.17-rc1-mm2-prasanna/arch/x86_64/kernel/kprobes.c	2006-04-10 10:59:25.000000000 +0530
@@ -53,7 +53,7 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kpr
 /*
  * returns non-zero if opcode modifies the interrupt flag.
  */
-static inline int is_IF_modifier(kprobe_opcode_t *insn)
+static __always_inline int is_IF_modifier(kprobe_opcode_t *insn)
 {
 	switch (*insn) {
 	case 0xfa:		/* cli */
@@ -84,7 +84,7 @@ int __kprobes arch_prepare_kprobe(struct
  * If it does, return the address of the 32-bit displacement word.
  * If not, return null.
  */
-static inline s32 *is_riprel(u8 *insn)
+static s32 __kprobes *is_riprel(u8 *insn)
 {
 #define W(row,b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,ba,bb,bc,bd,be,bf)		      \
 	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
@@ -229,7 +229,7 @@ void __kprobes arch_remove_kprobe(struct
 	mutex_unlock(&kprobe_mutex);
 }
 
-static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
 	kcb->prev_kprobe.kp = kprobe_running();
 	kcb->prev_kprobe.status = kcb->kprobe_status;
@@ -237,7 +237,7 @@ static inline void save_previous_kprobe(
 	kcb->prev_kprobe.saved_rflags = kcb->kprobe_saved_rflags;
 }
 
-static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
 	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
 	kcb->kprobe_status = kcb->prev_kprobe.status;
@@ -245,7 +245,7 @@ static inline void restore_previous_kpro
 	kcb->kprobe_saved_rflags = kcb->prev_kprobe.saved_rflags;
 }
 
-static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
 				struct kprobe_ctlblk *kcb)
 {
 	__get_cpu_var(current_kprobe) = p;

_
-- 
Prasanna S Panchamukhi
Linux Technology Center
India Software Labs, IBM Bangalore
Email: prasanna@in.ibm.com
Ph: 91-80-51776329

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH] [3/5] Switch Kprobes inline functions to __kprobes for ppc64
  2006-04-10  5:58 ` [PATCH] [2/5] Switch Kprobes inline functions to __kprobes for x86_64 Prasanna S Panchamukhi
@ 2006-04-10  5:59   ` Prasanna S Panchamukhi
  2006-04-10  6:00     ` [PATCH] [4/5] Switch Kprobes inline functions to __kprobes for ia64 Prasanna S Panchamukhi
  2006-04-11  2:43   ` [PATCH] [2/5] Switch Kprobes inline functions to __kprobes for x86_64 Andi Kleen
  1 sibling, 1 reply; 7+ messages in thread
From: Prasanna S Panchamukhi @ 2006-04-10  5:59 UTC (permalink / raw)
  To: akpm, Andi Kleen, davem
  Cc: linux-kernel, ananth, anil.s.keshavamurthy, systemtap

Andrew Morton pointed out that the compiler might not inline the functions
marked inline in kprobes, thereby allowing the insertion of probes
on these kprobes routines, which might cause recursion. This patch
removes all such inline markers and adds the routines to the kprobes
section, thereby disallowing probes on all such routines. Some of the
routines can still be inlined, since these routines get executed after
kprobes has done the necessary setup for reentrancy.

Signed-off-by: Prasanna S Panchamukhi <prasanna@in.ibm.com>


 arch/powerpc/kernel/kprobes.c |   14 +++++++-------
 1 files changed, 7 insertions(+), 7 deletions(-)

diff -puN arch/powerpc/kernel/kprobes.c~kprobes-remove-inline-kprobe-functions-ppc64 arch/powerpc/kernel/kprobes.c
--- linux-2.6.17-rc1-mm2/arch/powerpc/kernel/kprobes.c~kprobes-remove-inline-kprobe-functions-ppc64	2006-04-10 11:00:06.000000000 +0530
+++ linux-2.6.17-rc1-mm2-prasanna/arch/powerpc/kernel/kprobes.c	2006-04-10 11:00:06.000000000 +0530
@@ -88,7 +88,7 @@ void __kprobes arch_remove_kprobe(struct
 	mutex_unlock(&kprobe_mutex);
 }
 
-static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
+static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
 {
 	kprobe_opcode_t insn = *p->ainsn.insn;
 
@@ -101,21 +101,21 @@ static inline void prepare_singlestep(st
 		regs->nip = (unsigned long)p->ainsn.insn;
 }
 
-static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
 	kcb->prev_kprobe.kp = kprobe_running();
 	kcb->prev_kprobe.status = kcb->kprobe_status;
 	kcb->prev_kprobe.saved_msr = kcb->kprobe_saved_msr;
 }
 
-static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
 	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
 	kcb->kprobe_status = kcb->prev_kprobe.status;
 	kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr;
 }
 
-static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
 				struct kprobe_ctlblk *kcb)
 {
 	__get_cpu_var(current_kprobe) = p;
@@ -141,7 +141,7 @@ void __kprobes arch_prepare_kretprobe(st
 	}
 }
 
-static inline int kprobe_handler(struct pt_regs *regs)
+static int __kprobes kprobe_handler(struct pt_regs *regs)
 {
 	struct kprobe *p;
 	int ret = 0;
@@ -334,7 +334,7 @@ static void __kprobes resume_execution(s
 		regs->nip = (unsigned long)p->addr + 4;
 }
 
-static inline int post_kprobe_handler(struct pt_regs *regs)
+static int __kprobes post_kprobe_handler(struct pt_regs *regs)
 {
 	struct kprobe *cur = kprobe_running();
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
@@ -370,7 +370,7 @@ out:
 	return 1;
 }
 
-static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+static int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 {
 	struct kprobe *cur = kprobe_running();
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

_
-- 
Prasanna S Panchamukhi
Linux Technology Center
India Software Labs, IBM Bangalore
Email: prasanna@in.ibm.com
Ph: 91-80-51776329

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH] [4/5] Switch Kprobes inline functions to __kprobes for ia64
  2006-04-10  5:59   ` [PATCH] [3/5] Switch Kprobes inline functions to __kprobes for ppc64 Prasanna S Panchamukhi
@ 2006-04-10  6:00     ` Prasanna S Panchamukhi
  2006-04-10  6:01       ` [PATCH] [5/5] Switch Kprobes inline functions to __kprobes for sparc64 Prasanna S Panchamukhi
  0 siblings, 1 reply; 7+ messages in thread
From: Prasanna S Panchamukhi @ 2006-04-10  6:00 UTC (permalink / raw)
  To: akpm, Andi Kleen, davem
  Cc: linux-kernel, ananth, anil.s.keshavamurthy, systemtap

Andrew Morton pointed out that the compiler might not inline the functions
marked inline in kprobes, thereby allowing the insertion of probes
on these kprobes routines, which might cause recursion. This patch
removes all such inline markers and adds the routines to the kprobes
section, thereby disallowing probes on all such routines. Some of the
routines can still be inlined, since these routines get executed after
kprobes has done the necessary setup for reentrancy.

Signed-off-by: Prasanna S Panchamukhi <prasanna@in.ibm.com>


 arch/ia64/kernel/kprobes.c |   10 +++++-----
 1 files changed, 5 insertions(+), 5 deletions(-)

diff -puN arch/ia64/kernel/kprobes.c~kprobes-remove-inline-kprobe-functions-ia64 arch/ia64/kernel/kprobes.c
--- linux-2.6.17-rc1-mm2/arch/ia64/kernel/kprobes.c~kprobes-remove-inline-kprobe-functions-ia64	2006-04-10 11:00:37.000000000 +0530
+++ linux-2.6.17-rc1-mm2-prasanna/arch/ia64/kernel/kprobes.c	2006-04-10 11:00:37.000000000 +0530
@@ -251,7 +251,7 @@ static void __kprobes prepare_break_inst
 	update_kprobe_inst_flag(template, slot, major_opcode, kprobe_inst, p);
 }
 
-static inline void get_kprobe_inst(bundle_t *bundle, uint slot,
+static void __kprobes get_kprobe_inst(bundle_t *bundle, uint slot,
 	       	unsigned long *kprobe_inst, uint *major_opcode)
 {
 	unsigned long kprobe_inst_p0, kprobe_inst_p1;
@@ -278,7 +278,7 @@ static inline void get_kprobe_inst(bundl
 }
 
 /* Returns non-zero if the addr is in the Interrupt Vector Table */
-static inline int in_ivt_functions(unsigned long addr)
+static int __kprobes in_ivt_functions(unsigned long addr)
 {
 	return (addr >= (unsigned long)__start_ivt_text
 		&& addr < (unsigned long)__end_ivt_text);
@@ -308,19 +308,19 @@ static int __kprobes valid_kprobe_addr(i
 	return 0;
 }
 
-static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
 	kcb->prev_kprobe.kp = kprobe_running();
 	kcb->prev_kprobe.status = kcb->kprobe_status;
 }
 
-static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
 	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
 	kcb->kprobe_status = kcb->prev_kprobe.status;
 }
 
-static inline void set_current_kprobe(struct kprobe *p,
+static void __kprobes set_current_kprobe(struct kprobe *p,
 			struct kprobe_ctlblk *kcb)
 {
 	__get_cpu_var(current_kprobe) = p;

_
-- 
Prasanna S Panchamukhi
Linux Technology Center
India Software Labs, IBM Bangalore
Email: prasanna@in.ibm.com
Ph: 91-80-51776329

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH] [5/5] Switch Kprobes inline functions to __kprobes for sparc64
  2006-04-10  6:00     ` [PATCH] [4/5] Switch Kprobes inline functions to __kprobes for ia64 Prasanna S Panchamukhi
@ 2006-04-10  6:01       ` Prasanna S Panchamukhi
  2006-04-10  6:11         ` David S. Miller
  0 siblings, 1 reply; 7+ messages in thread
From: Prasanna S Panchamukhi @ 2006-04-10  6:01 UTC (permalink / raw)
  To: akpm, Andi Kleen, davem
  Cc: linux-kernel, ananth, anil.s.keshavamurthy, systemtap

Andrew Morton pointed out that the compiler might not inline the functions
marked inline in kprobes, thereby allowing the insertion of probes
on these kprobes routines, which might cause recursion. This patch
removes all such inline markers and adds the routines to the kprobes
section, thereby disallowing probes on all such routines. Some of the
routines can still be inlined, since these routines get executed after
kprobes has done the necessary setup for reentrancy.

Signed-off-by: Prasanna S Panchamukhi <prasanna@in.ibm.com>


 arch/sparc64/kernel/kprobes.c |   12 ++++++------
 1 files changed, 6 insertions(+), 6 deletions(-)

diff -puN arch/sparc64/kernel/kprobes.c~kprobes-remove-inline-kprobe-functions-sparc64 arch/sparc64/kernel/kprobes.c
--- linux-2.6.17-rc1-mm2/arch/sparc64/kernel/kprobes.c~kprobes-remove-inline-kprobe-functions-sparc64	2006-04-10 11:01:07.000000000 +0530
+++ linux-2.6.17-rc1-mm2-prasanna/arch/sparc64/kernel/kprobes.c	2006-04-10 11:01:07.000000000 +0530
@@ -63,7 +63,7 @@ void __kprobes arch_disarm_kprobe(struct
 	flushi(p->addr);
 }
 
-static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
 	kcb->prev_kprobe.kp = kprobe_running();
 	kcb->prev_kprobe.status = kcb->kprobe_status;
@@ -71,7 +71,7 @@ static inline void save_previous_kprobe(
 	kcb->prev_kprobe.orig_tstate_pil = kcb->kprobe_orig_tstate_pil;
 }
 
-static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
 	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
 	kcb->kprobe_status = kcb->prev_kprobe.status;
@@ -79,7 +79,7 @@ static inline void restore_previous_kpro
 	kcb->kprobe_orig_tstate_pil = kcb->prev_kprobe.orig_tstate_pil;
 }
 
-static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
 				struct kprobe_ctlblk *kcb)
 {
 	__get_cpu_var(current_kprobe) = p;
@@ -87,7 +87,7 @@ static inline void set_current_kprobe(st
 	kcb->kprobe_orig_tstate_pil = (regs->tstate & TSTATE_PIL);
 }
 
-static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs,
+static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs,
 			struct kprobe_ctlblk *kcb)
 {
 	regs->tstate |= TSTATE_PIL;
@@ -273,7 +273,7 @@ static void __kprobes resume_execution(s
 			kcb->kprobe_orig_tstate_pil);
 }
 
-static inline int post_kprobe_handler(struct pt_regs *regs)
+static int __kprobes post_kprobe_handler(struct pt_regs *regs)
 {
 	struct kprobe *cur = kprobe_running();
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
@@ -300,7 +300,7 @@ out:
 	return 1;
 }
 
-static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+static int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 {
 	struct kprobe *cur = kprobe_running();
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

_
-- 
Prasanna S Panchamukhi
Linux Technology Center
India Software Labs, IBM Bangalore
Email: prasanna@in.ibm.com
Ph: 91-80-51776329

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH] [5/5] Switch Kprobes inline functions to __kprobes for  sparc64
  2006-04-10  6:01       ` [PATCH] [5/5] Switch Kprobes inline functions to __kprobes for sparc64 Prasanna S Panchamukhi
@ 2006-04-10  6:11         ` David S. Miller
  0 siblings, 0 replies; 7+ messages in thread
From: David S. Miller @ 2006-04-10  6:11 UTC (permalink / raw)
  To: prasanna; +Cc: akpm, ak, linux-kernel, ananth, anil.s.keshavamurthy, systemtap

From: Prasanna S Panchamukhi <prasanna@in.ibm.com>
Date: Mon, 10 Apr 2006 11:31:28 +0530

> Andrew Morton pointed out that compiler might not inline the functions
> marked for inline in kprobes. There by allowing the insertion of probes
> on these kprobes routines, which might cause recursion. This patch
> removes all such inline and adds them to kprobes section there by
> disallowing probes on all such routines. Some of the routines can
> even still be inlined, since these routines gets executed after
> the kprobes had done necessay setup for reentrancy.
> 
> Signed-off-by: Prasanna S Panchamukhi <prasanna@in.ibm.com>

Signed-off-by: David S. Miller <davem@davemloft.net>

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH] [2/5] Switch Kprobes inline functions to __kprobes for x86_64
  2006-04-10  5:58 ` [PATCH] [2/5] Switch Kprobes inline functions to __kprobes for x86_64 Prasanna S Panchamukhi
  2006-04-10  5:59   ` [PATCH] [3/5] Switch Kprobes inline functions to __kprobes for ppc64 Prasanna S Panchamukhi
@ 2006-04-11  2:43   ` Andi Kleen
  1 sibling, 0 replies; 7+ messages in thread
From: Andi Kleen @ 2006-04-11  2:43 UTC (permalink / raw)
  To: prasanna
  Cc: akpm, davem, linux-kernel, ananth, anil.s.keshavamurthy, systemtap

On Monday 10 April 2006 07:58, Prasanna S Panchamukhi wrote:
> Andrew Morton pointed out that compiler might not inline the functions
> marked for inline in kprobes. There by allowing the insertion of probes
> on these kprobes routines, which might cause recursion. This patch
> removes all such inline and adds them to kprobes section there by
> disallowing probes on all such routines. Some of the routines can
> even still be inlined, since these routines gets executed after
> the kprobes had done necessay setup for reentrancy.

Ok for me.

-Andi

^ permalink raw reply	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2006-04-11  2:43 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2006-04-10  5:57 [PATCH] [1/5] Switch Kprobes inline functions to __kprobes for i386 Prasanna S Panchamukhi
2006-04-10  5:58 ` [PATCH] [2/5] Switch Kprobes inline functions to __kprobes for x86_64 Prasanna S Panchamukhi
2006-04-10  5:59   ` [PATCH] [3/5] Switch Kprobes inline functions to __kprobes for ppc64 Prasanna S Panchamukhi
2006-04-10  6:00     ` [PATCH] [4/5] Switch Kprobes inline functions to __kprobes for ia64 Prasanna S Panchamukhi
2006-04-10  6:01       ` [PATCH] [5/5] Switch Kprobes inline functions to __kprobes for sparc64 Prasanna S Panchamukhi
2006-04-10  6:11         ` David S. Miller
2006-04-11  2:43   ` [PATCH] [2/5] Switch Kprobes inline functions to __kprobes for x86_64 Andi Kleen

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).