public inbox for systemtap@sourceware.org
* [RFC/PATCH] replace preempt_* calls with rcu_read_* variants
@ 2006-03-14 13:44 Ananth N Mavinakayanahalli
  2006-03-14 14:00 ` Mathieu Desnoyers
  2006-03-14 22:12 ` Keshavamurthy Anil S
  0 siblings, 2 replies; 3+ messages in thread
From: Ananth N Mavinakayanahalli @ 2006-03-14 13:44 UTC (permalink / raw)
  To: SystemTAP

Hi,

Can somebody please remind me why we are using preempt_disable/enable()
instead of rcu_read_lock/unlock() to RCU-protect the kprobe handlers?

We do use preempt_enable_no_resched() in the code. Is there any fallout if
we check need_resched()?

Realistically, I'd like us to adhere to the RCU locking framework. The
RT changes are trickling into the mainline, and they include changes to
the RCU infrastructure. So rcu_read_lock(), which as of now is just a
preempt_disable(), may change to something more than that in the future.

Basically, a plain preempt_disable/enable() may not protect us inside an
RCU read-side critical section for much longer.
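
To make the intent concrete, the pattern involved is roughly the
following (a simplified sketch only, not the actual kprobes code;
lookup_and_run() and remove_probe() are made-up names for illustration):

#include <linux/kprobes.h>
#include <linux/rcupdate.h>

/* Reader side -- runs from the breakpoint/fault path */
static int lookup_and_run(unsigned long addr, struct pt_regs *regs)
{
        struct kprobe *p;
        int ret = 0;

        rcu_read_lock();                /* today: preempt_disable() */
        p = get_kprobe((void *)addr);   /* walk of the RCU-protected hlist */
        if (p && p->pre_handler)
                ret = p->pre_handler(p, regs);
        rcu_read_unlock();              /* today: preempt_enable_no_resched() */
        return ret;
}

/* Updater side -- unregister path (simplified) */
static void remove_probe(struct kprobe *p)
{
        hlist_del_rcu(&p->hlist);
        synchronize_rcu();      /* wait for every reader to leave its
                                 * read-side critical section before the
                                 * probe and its insn slot are freed */
}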

The following patch is against 2.6.16-rc6 - tested on IA32.

Comments? Thoughts?

Ananth
---

Signed-off-by: Ananth N Mavinakayanahalli <ananth@in.ibm.com>


 arch/i386/kernel/kprobes.c    |   27 +++++++++++----------------
 arch/ia64/kernel/kprobes.c    |   27 +++++++++++----------------
 arch/powerpc/kernel/kprobes.c |   27 +++++++++++----------------
 arch/sparc64/kernel/kprobes.c |   19 ++++++++-----------
 arch/x86_64/kernel/kprobes.c  |   27 +++++++++++----------------
 5 files changed, 52 insertions(+), 75 deletions(-)

Index: linux-2.6.16-rc6/arch/i386/kernel/kprobes.c
===================================================================
--- linux-2.6.16-rc6.orig/arch/i386/kernel/kprobes.c
+++ linux-2.6.16-rc6/arch/i386/kernel/kprobes.c
@@ -31,7 +31,6 @@
 #include <linux/config.h>
 #include <linux/kprobes.h>
 #include <linux/ptrace.h>
-#include <linux/preempt.h>
 #include <asm/cacheflush.h>
 #include <asm/kdebug.h>
 #include <asm/desc.h>
@@ -159,11 +158,8 @@ static int __kprobes kprobe_handler(stru
 	unsigned long *lp;
 	struct kprobe_ctlblk *kcb;
 
-	/*
-	 * We don't want to be preempted for the entire
-	 * duration of kprobe processing
-	 */
-	preempt_disable();
+	/* Make entire duration of kprobe processing RCU safe */
+	rcu_read_lock();
 	kcb = get_kprobe_ctlblk();
 
 	/* Check if the application is using LDT entry for its code segment and
@@ -258,7 +254,7 @@ ss_probe:
 	return 1;
 
 no_kprobe:
-	preempt_enable_no_resched();
+	rcu_read_unlock();
 	return ret;
 }
 
@@ -326,12 +322,11 @@ int __kprobes trampoline_probe_handler(s
 
 	reset_current_kprobe();
 	spin_unlock_irqrestore(&kretprobe_lock, flags);
-	preempt_enable_no_resched();
+	rcu_read_unlock();
 
 	/*
-	 * By returning a non-zero value, we are telling
-	 * kprobe_handler() that we don't want the post_handler
-	 * to run (and have re-enabled preemption)
+	 * By returning a non-zero value, we are telling kprobe_handler()
+	 * that we don't want the post_handler to run
 	 */
         return 1;
 }
@@ -435,7 +430,7 @@ static inline int post_kprobe_handler(st
 	}
 	reset_current_kprobe();
 out:
-	preempt_enable_no_resched();
+	rcu_read_unlock();
 
 	/*
 	 * if somebody else is singlestepping across a probe point, eflags
@@ -461,7 +456,7 @@ static inline int kprobe_fault_handler(s
 		regs->eflags |= kcb->kprobe_old_eflags;
 
 		reset_current_kprobe();
-		preempt_enable_no_resched();
+		rcu_read_unlock();
 	}
 	return 0;
 }
@@ -487,11 +482,11 @@ int __kprobes kprobe_exceptions_notify(s
 	case DIE_GPF:
 	case DIE_PAGE_FAULT:
 		/* kprobe_running() needs smp_processor_id() */
-		preempt_disable();
+		rcu_read_lock();
 		if (kprobe_running() &&
 		    kprobe_fault_handler(args->regs, args->trapnr))
 			ret = NOTIFY_STOP;
-		preempt_enable();
+		rcu_read_unlock();
 		break;
 	default:
 		break;
@@ -558,7 +553,7 @@ int __kprobes longjmp_break_handler(stru
 		*regs = kcb->jprobe_saved_regs;
 		memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
 		       MIN_STACK_SIZE(stack_addr));
-		preempt_enable_no_resched();
+		rcu_read_unlock();
 		return 1;
 	}
 	return 0;
Index: linux-2.6.16-rc6/arch/ia64/kernel/kprobes.c
===================================================================
--- linux-2.6.16-rc6.orig/arch/ia64/kernel/kprobes.c
+++ linux-2.6.16-rc6/arch/ia64/kernel/kprobes.c
@@ -28,7 +28,6 @@
 #include <linux/ptrace.h>
 #include <linux/string.h>
 #include <linux/slab.h>
-#include <linux/preempt.h>
 #include <linux/moduleloader.h>
 
 #include <asm/pgtable.h>
@@ -387,12 +386,11 @@ int __kprobes trampoline_probe_handler(s
 
 	reset_current_kprobe();
 	spin_unlock_irqrestore(&kretprobe_lock, flags);
-	preempt_enable_no_resched();
+	rcu_read_unlock();
 
 	/*
-	 * By returning a non-zero value, we are telling
-	 * kprobe_handler() that we don't want the post_handler
-	 * to run (and have re-enabled preemption)
+	 * By returning a non-zero value, we are telling kprobe_handler()
+	 * that we don't want the post_handler to run
 	 */
 	return 1;
 }
@@ -602,11 +600,8 @@ static int __kprobes pre_kprobes_handler
 	kprobe_opcode_t *addr = (kprobe_opcode_t *)instruction_pointer(regs);
 	struct kprobe_ctlblk *kcb;
 
-	/*
-	 * We don't want to be preempted for the entire
-	 * duration of kprobe processing
-	 */
-	preempt_disable();
+	/* Make the entire duration of kprobe processing RCU safe */
+	rcu_read_lock();
 	kcb = get_kprobe_ctlblk();
 
 	/* Handle recursion cases */
@@ -686,7 +681,7 @@ ss_probe:
 	return 1;
 
 no_kprobe:
-	preempt_enable_no_resched();
+	rcu_read_unlock();
 	return ret;
 }
 
@@ -713,7 +708,7 @@ static int __kprobes post_kprobes_handle
 	reset_current_kprobe();
 
 out:
-	preempt_enable_no_resched();
+	rcu_read_unlock();
 	return 1;
 }
 
@@ -728,7 +723,7 @@ static int __kprobes kprobes_fault_handl
 	if (kcb->kprobe_status & KPROBE_HIT_SS) {
 		resume_execution(cur, regs);
 		reset_current_kprobe();
-		preempt_enable_no_resched();
+		rcu_read_unlock();
 	}
 
 	return 0;
@@ -755,11 +750,11 @@ int __kprobes kprobe_exceptions_notify(s
 		break;
 	case DIE_PAGE_FAULT:
 		/* kprobe_running() needs smp_processor_id() */
-		preempt_disable();
+		rcu_read_lock();
 		if (kprobe_running() &&
 			kprobes_fault_handler(args->regs, args->trapnr))
 			ret = NOTIFY_STOP;
-		preempt_enable();
+		rcu_read_unlock();
 	default:
 		break;
 	}
@@ -851,7 +846,7 @@ int __kprobes longjmp_break_handler(stru
 		bytes );
 	invalidate_stacked_regs();
 
-	preempt_enable_no_resched();
+	rcu_read_unlock();
 	return 1;
 }
 
Index: linux-2.6.16-rc6/arch/powerpc/kernel/kprobes.c
===================================================================
--- linux-2.6.16-rc6.orig/arch/powerpc/kernel/kprobes.c
+++ linux-2.6.16-rc6/arch/powerpc/kernel/kprobes.c
@@ -30,7 +30,6 @@
 #include <linux/config.h>
 #include <linux/kprobes.h>
 #include <linux/ptrace.h>
-#include <linux/preempt.h>
 #include <asm/cacheflush.h>
 #include <asm/kdebug.h>
 #include <asm/sstep.h>
@@ -147,11 +146,8 @@ static inline int kprobe_handler(struct 
 	unsigned int *addr = (unsigned int *)regs->nip;
 	struct kprobe_ctlblk *kcb;
 
-	/*
-	 * We don't want to be preempted for the entire
-	 * duration of kprobe processing
-	 */
-	preempt_disable();
+	/* Make the entire duration of kprobe processing RCU safe */
+	rcu_read_lock();
 	kcb = get_kprobe_ctlblk();
 
 	/* Check we're not actually recursing */
@@ -235,7 +231,7 @@ ss_probe:
 	return 1;
 
 no_kprobe:
-	preempt_enable_no_resched();
+	rcu_read_unlock();
 	return ret;
 }
 
@@ -304,12 +300,11 @@ int __kprobes trampoline_probe_handler(s
 
 	reset_current_kprobe();
 	spin_unlock_irqrestore(&kretprobe_lock, flags);
-	preempt_enable_no_resched();
+	rcu_read_unlock();
 
         /*
-         * By returning a non-zero value, we are telling
-         * kprobe_handler() that we don't want the post_handler
-         * to run (and have re-enabled preemption)
+         * By returning a non-zero value, we are telling kprobe_handler()
+	 * that we don't want the post_handler to run
          */
         return 1;
 }
@@ -356,7 +351,7 @@ static inline int post_kprobe_handler(st
 	}
 	reset_current_kprobe();
 out:
-	preempt_enable_no_resched();
+	rcu_read_unlock();
 
 	/*
 	 * if somebody else is singlestepping across a probe point, msr
@@ -383,7 +378,7 @@ static inline int kprobe_fault_handler(s
 		regs->msr |= kcb->kprobe_saved_msr;
 
 		reset_current_kprobe();
-		preempt_enable_no_resched();
+		rcu_read_unlock();
 	}
 	return 0;
 }
@@ -408,11 +403,11 @@ int __kprobes kprobe_exceptions_notify(s
 		break;
 	case DIE_PAGE_FAULT:
 		/* kprobe_running() needs smp_processor_id() */
-		preempt_disable();
+		rcu_read_lock();
 		if (kprobe_running() &&
 		    kprobe_fault_handler(args->regs, args->trapnr))
 			ret = NOTIFY_STOP;
-		preempt_enable();
+		rcu_read_unlock();
 		break;
 	default:
 		break;
@@ -453,7 +448,7 @@ int __kprobes longjmp_break_handler(stru
 	 * saved regs...
 	 */
 	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
-	preempt_enable_no_resched();
+	rcu_read_unlock();
 	return 1;
 }
 
Index: linux-2.6.16-rc6/arch/sparc64/kernel/kprobes.c
===================================================================
--- linux-2.6.16-rc6.orig/arch/sparc64/kernel/kprobes.c
+++ linux-2.6.16-rc6/arch/sparc64/kernel/kprobes.c
@@ -107,11 +107,8 @@ static int __kprobes kprobe_handler(stru
 	int ret = 0;
 	struct kprobe_ctlblk *kcb;
 
-	/*
-	 * We don't want to be preempted for the entire
-	 * duration of kprobe processing
-	 */
-	preempt_disable();
+	/* Make the entire duration of kprobe processing RCU safe */
+	rcu_read_lock();
 	kcb = get_kprobe_ctlblk();
 
 	if (kprobe_running()) {
@@ -177,7 +174,7 @@ ss_probe:
 	return 1;
 
 no_kprobe:
-	preempt_enable_no_resched();
+	rcu_read_unlock();
 	return ret;
 }
 
@@ -293,7 +290,7 @@ static inline int post_kprobe_handler(st
 	}
 	reset_current_kprobe();
 out:
-	preempt_enable_no_resched();
+	rcu_read_unlock();
 
 	return 1;
 }
@@ -310,7 +307,7 @@ static inline int kprobe_fault_handler(s
 		resume_execution(cur, regs, kcb);
 
 		reset_current_kprobe();
-		preempt_enable_no_resched();
+		rcu_read_unlock();
 	}
 	return 0;
 }
@@ -336,11 +333,11 @@ int __kprobes kprobe_exceptions_notify(s
 	case DIE_GPF:
 	case DIE_PAGE_FAULT:
 		/* kprobe_running() needs smp_processor_id() */
-		preempt_disable();
+		rcu_read_lock();
 		if (kprobe_running() &&
 		    kprobe_fault_handler(args->regs, args->trapnr))
 			ret = NOTIFY_STOP;
-		preempt_enable();
+		rcu_read_unlock();
 		break;
 	default:
 		break;
@@ -430,7 +427,7 @@ int __kprobes longjmp_break_handler(stru
 		       &(kcb->jprobe_saved_stack),
 		       sizeof(kcb->jprobe_saved_stack));
 
-		preempt_enable_no_resched();
+		rcu_read_unlock();
 		return 1;
 	}
 	return 0;
Index: linux-2.6.16-rc6/arch/x86_64/kernel/kprobes.c
===================================================================
--- linux-2.6.16-rc6.orig/arch/x86_64/kernel/kprobes.c
+++ linux-2.6.16-rc6/arch/x86_64/kernel/kprobes.c
@@ -36,7 +36,6 @@
 #include <linux/ptrace.h>
 #include <linux/string.h>
 #include <linux/slab.h>
-#include <linux/preempt.h>
 
 #include <asm/cacheflush.h>
 #include <asm/pgtable.h>
@@ -292,11 +291,8 @@ int __kprobes kprobe_handler(struct pt_r
 	kprobe_opcode_t *addr = (kprobe_opcode_t *)(regs->rip - sizeof(kprobe_opcode_t));
 	struct kprobe_ctlblk *kcb;
 
-	/*
-	 * We don't want to be preempted for the entire
-	 * duration of kprobe processing
-	 */
-	preempt_disable();
+	/* Make the entire duration of kprobe processing RCU safe */
+	rcu_read_lock();
 	kcb = get_kprobe_ctlblk();
 
 	/* Check we're not actually recursing */
@@ -383,7 +379,7 @@ ss_probe:
 	return 1;
 
 no_kprobe:
-	preempt_enable_no_resched();
+	rcu_read_unlock();
 	return ret;
 }
 
@@ -451,12 +447,11 @@ int __kprobes trampoline_probe_handler(s
 
 	reset_current_kprobe();
 	spin_unlock_irqrestore(&kretprobe_lock, flags);
-	preempt_enable_no_resched();
+	rcu_read_unlock();
 
         /*
-         * By returning a non-zero value, we are telling
-         * kprobe_handler() that we don't want the post_handler
-	 * to run (and have re-enabled preemption)
+         * By returning a non-zero value, we are telling kprobe_handler()
+	 * that we don't want the post_handler to run
          */
         return 1;
 }
@@ -561,7 +556,7 @@ int __kprobes post_kprobe_handler(struct
 	}
 	reset_current_kprobe();
 out:
-	preempt_enable_no_resched();
+	rcu_read_unlock();
 
 	/*
 	 * if somebody else is singlestepping across a probe point, eflags
@@ -587,7 +582,7 @@ int __kprobes kprobe_fault_handler(struc
 		regs->eflags |= kcb->kprobe_old_rflags;
 
 		reset_current_kprobe();
-		preempt_enable_no_resched();
+		rcu_read_unlock();
 	}
 	return 0;
 }
@@ -613,11 +608,11 @@ int __kprobes kprobe_exceptions_notify(s
 	case DIE_GPF:
 	case DIE_PAGE_FAULT:
 		/* kprobe_running() needs smp_processor_id() */
-		preempt_disable();
+		rcu_read_lock();
 		if (kprobe_running() &&
 		    kprobe_fault_handler(args->regs, args->trapnr))
 			ret = NOTIFY_STOP;
-		preempt_enable();
+		rcu_read_unlock();
 		break;
 	default:
 		break;
@@ -683,7 +678,7 @@ int __kprobes longjmp_break_handler(stru
 		*regs = kcb->jprobe_saved_regs;
 		memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
 		       MIN_STACK_SIZE(stack_addr));
-		preempt_enable_no_resched();
+		rcu_read_unlock();
 		return 1;
 	}
 	return 0;


* Re: [RFC/PATCH] replace preempt_* calls with rcu_read_* variants
  2006-03-14 13:44 [RFC/PATCH] replace preempt_* calls with rcu_read_* variants Ananth N Mavinakayanahalli
@ 2006-03-14 14:00 ` Mathieu Desnoyers
  2006-03-14 22:12 ` Keshavamurthy Anil S
  1 sibling, 0 replies; 3+ messages in thread
From: Mathieu Desnoyers @ 2006-03-14 14:00 UTC (permalink / raw)
  To: Ananth N Mavinakayanahalli; +Cc: SystemTAP

Just beware that preempt_enable() should not be called from the scheduler, as it
can trigger a wakeup and cause a deadlock.
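
The difference is easiest to see from the (approximate) CONFIG_PREEMPT
definitions in include/linux/preempt.h of that era -- trimmed here for
clarity, so treat this as a sketch rather than the exact source:

#define preempt_enable_no_resched() \
do { \
        barrier(); \
        dec_preempt_count(); \
} while (0)

/* preempt_check_resched() may call straight into the scheduler: */
#define preempt_check_resched() \
do { \
        if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
                preempt_schedule(); \
} while (0)

#define preempt_enable() \
do { \
        preempt_enable_no_resched(); \
        barrier(); \
        preempt_check_resched(); \
} while (0)

So a handler that fires inside the scheduler (or with a runqueue lock
held) must not end up in preempt_check_resched().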

Mathieu

* Ananth N Mavinakayanahalli (ananth@in.ibm.com) wrote:
> Hi,
> 
> Can somebody please remind me why we are using preempt_disable/enable()
> instead of rcu_read_lock/unlock() to RCU-protect the kprobe handlers?
> 
> We do use preempt_enable_no_resched() in the code. Is there any fallout if
> we check need_resched()?
> 
> Realistically, I'd like us to adhere to the RCU locking framework. The
> RT changes are trickling into the mainline, and they include changes to
> the RCU infrastructure. So rcu_read_lock(), which as of now is just a
> preempt_disable(), may change to something more than that in the future.
> 
> Basically, a plain preempt_disable/enable() may not protect us inside an
> RCU read-side critical section for much longer.
> 
> The following patch is against 2.6.16-rc6 - tested on IA32.
> 
> Comments? Thoughts?
> 
> Ananth
> [patch snipped]
OpenPGP public key:              http://krystal.dyndns.org:8080/key/compudj.gpg
Key fingerprint:     8CD5 52C3 8E3C 4140 715F  BA06 3F25 A8FE 3BAE 9A68 


* Re: [RFC/PATCH] replace preempt_* calls with rcu_read_* variants
  2006-03-14 13:44 [RFC/PATCH] replace preempt_* calls with rcu_read_* variants Ananth N Mavinakayanahalli
  2006-03-14 14:00 ` Mathieu Desnoyers
@ 2006-03-14 22:12 ` Keshavamurthy Anil S
  1 sibling, 0 replies; 3+ messages in thread
From: Keshavamurthy Anil S @ 2006-03-14 22:12 UTC (permalink / raw)
  To: Ananth N Mavinakayanahalli; +Cc: SystemTAP

On Tue, Mar 14, 2006 at 05:44:15AM -0800, Ananth N Mavinakayanahalli wrote:
> 
>    Hi,
> 
>    Can somebody please remind me why we are using preempt_disable/enable()
>    instead of rcu_read_lock/unlock() to RCU-protect the kprobe handlers?

"In some realtime-friendly RCU implementations, the RCU read-side critical
sections do not disable preemption. So, if you are using synchronize_sched()
on the update side, you need to use preempt_disable() (or some other
primitive that disables preemption, for example, any of the primitives
that disable hardware interrupts) on the read side."

http://www.ussg.iu.edu/hypermail/linux/kernel/0510.2/0537.html
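
In other words, the read-side and update-side primitives have to be
chosen as a matched pair. A rough sketch of the two pairings
(illustrative only; reader_today() and friends are made-up names,
not functions in the tree):

#include <linux/kprobes.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>

/* Pairing kprobes relies on today: the reader disables preemption,
 * the updater waits with synchronize_sched(). */
static int reader_today(struct kprobe *p, struct pt_regs *regs)
{
        int ret;

        preempt_disable();
        ret = p->pre_handler ? p->pre_handler(p, regs) : 0;
        preempt_enable_no_resched();
        return ret;
}

static void updater_today(void)
{
        /* after unhooking the probe from its hash list ... */
        synchronize_sched();    /* waits until every CPU has scheduled,
                                 * i.e. left any preempt-disabled region */
}

/* Pairing the proposed patch would need: rcu_read_lock() readers must
 * be matched by synchronize_rcu() on the update side, because in a
 * preemptible-RCU implementation rcu_read_lock() no longer disables
 * preemption, so synchronize_sched() would not wait for such readers. */
static int reader_proposed(struct kprobe *p, struct pt_regs *regs)
{
        int ret;

        rcu_read_lock();
        ret = p->pre_handler ? p->pre_handler(p, regs) : 0;
        rcu_read_unlock();
        return ret;
}

static void updater_proposed(void)
{
        synchronize_rcu();
}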

-thanks,
-Anil


