public inbox for systemtap@sourceware.org
From: Frederic Weisbecker <fweisbec@gmail.com>
To: Masami Hiramatsu <mhiramat@redhat.com>
Cc: Ingo Molnar <mingo@elte.hu>,
		Ananth N Mavinakayanahalli <ananth@in.ibm.com>,
		lkml <linux-kernel@vger.kernel.org>,
		systemtap <systemtap@sources.redhat.com>,
		DLE <dle-develop@lists.sourceforge.net>,
		Jim Keniston <jkenisto@us.ibm.com>,
		Srikar Dronamraju <srikar@linux.vnet.ibm.com>,
		Christoph Hellwig <hch@infradead.org>,
		Steven Rostedt <rostedt@goodmis.org>,
		"H. Peter Anvin" <hpa@zytor.com>,
		Anders Kaseorg <andersk@ksplice.com>,
		Tim Abbott <tabbott@ksplice.com>,
		Andi Kleen <andi@firstfloor.org>,
		Jason Baron <jbaron@redhat.com>,
		Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Subject: Re: [PATCH -tip v5 03/10] kprobes: Introduce kprobes jump optimization
Date: Tue, 24 Nov 2009 02:44:00 -0000
Message-ID: <20091124024417.GA6752@nowhere>
In-Reply-To: <20091123232141.22071.53317.stgit@dhcp-100-2-132.bos.redhat.com>

On Mon, Nov 23, 2009 at 06:21:41PM -0500, Masami Hiramatsu wrote:
> +config OPTPROBES
> +	bool "Kprobes jump optimization support (EXPERIMENTAL)"
> +	default y
> +	depends on KPROBES
> +	depends on !PREEMPT


Why does it depend on !PREEMPT?



> +	depends on HAVE_OPTPROBES
> +	select KALLSYMS_ALL
> +	help
> +	  This option will allow kprobes to optimize breakpoints
> +	  into jumps, reducing probe overhead.
> +
>  config HAVE_EFFICIENT_UNALIGNED_ACCESS
>  	bool
>  	help
> @@ -99,6 +110,8 @@ config HAVE_KPROBES
>  config HAVE_KRETPROBES
>  	bool
>  
> +config HAVE_OPTPROBES
> +	bool
>  #
>  # An arch should select this if it provides all these things:
>  #
> diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
> index 1b672f7..aed1f95 100644
> --- a/include/linux/kprobes.h
> +++ b/include/linux/kprobes.h
> @@ -122,6 +122,11 @@ struct kprobe {
>  /* Kprobe status flags */
>  #define KPROBE_FLAG_GONE	1 /* breakpoint has already gone */
>  #define KPROBE_FLAG_DISABLED	2 /* probe is temporarily disabled */
> +#define KPROBE_FLAG_OPTIMIZED	4 /*
> +				   * probe is really optimized.
> +				   * NOTE:
> +				   * this flag is only for optimized_kprobe.
> +				   */
>  
>  /* Has this kprobe gone ? */
>  static inline int kprobe_gone(struct kprobe *p)
> @@ -134,6 +139,12 @@ static inline int kprobe_disabled(struct kprobe *p)
>  {
>  	return p->flags & (KPROBE_FLAG_DISABLED | KPROBE_FLAG_GONE);
>  }
> +
> +/* Is this kprobe really running the optimized path? */
> +static inline int kprobe_optimized(struct kprobe *p)
> +{
> +	return p->flags & KPROBE_FLAG_OPTIMIZED;
> +}
>  /*
>   * Special probe type that uses setjmp-longjmp type tricks to resume
>   * execution at a specified entry with a matching prototype corresponding
> @@ -249,6 +260,31 @@ extern kprobe_opcode_t *get_insn_slot(void);
>  extern void free_insn_slot(kprobe_opcode_t *slot, int dirty);
>  extern void kprobes_inc_nmissed_count(struct kprobe *p);
>  
> +#ifdef CONFIG_OPTPROBES
> +/*
> + * Internal structure for direct jump optimized probe
> + */
> +struct optimized_kprobe {
> +	struct kprobe kp;
> +	struct list_head list;	/* list for optimizing queue */
> +	struct arch_optimized_insn optinsn;
> +};
> +
> +/* Architecture dependent functions for direct jump optimization */
> +extern int arch_prepared_optinsn(struct arch_optimized_insn *optinsn);
> +extern int arch_check_optimized_kprobe(struct optimized_kprobe *op);
> +extern int arch_prepare_optimized_kprobe(struct optimized_kprobe *op);
> +extern void arch_remove_optimized_kprobe(struct optimized_kprobe *op);
> +extern int  arch_optimize_kprobe(struct optimized_kprobe *op);
> +extern void arch_unoptimize_kprobe(struct optimized_kprobe *op);
> +extern kprobe_opcode_t *get_optinsn_slot(void);
> +extern void free_optinsn_slot(kprobe_opcode_t *slot, int dirty);
> +extern int arch_within_optimized_kprobe(struct optimized_kprobe *op,
> +					unsigned long addr);
> +
> +extern void opt_pre_handler(struct kprobe *p, struct pt_regs *regs);
> +#endif /* CONFIG_OPTPROBES */
> +
>  /* Get the kprobe at this addr (if any) - called with preemption disabled */
>  struct kprobe *get_kprobe(void *addr);
>  void kretprobe_hash_lock(struct task_struct *tsk,
> diff --git a/kernel/kprobes.c b/kernel/kprobes.c
> index 10d2ed5..15aa797 100644
> --- a/kernel/kprobes.c
> +++ b/kernel/kprobes.c
> @@ -44,6 +44,7 @@
>  #include <linux/debugfs.h>
>  #include <linux/kdebug.h>
>  #include <linux/memory.h>
> +#include <linux/cpu.h>
>  
>  #include <asm-generic/sections.h>
>  #include <asm/cacheflush.h>
> @@ -301,6 +302,31 @@ void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
>  	__free_insn_slot(&kprobe_insn_slots, slot, dirty);
>  	mutex_unlock(&kprobe_insn_mutex);
>  }
> +#ifdef CONFIG_OPTPROBES
> +/* For optimized_kprobe buffer */
> +static DEFINE_MUTEX(kprobe_optinsn_mutex); /* Protects kprobe_optinsn_slots */
> +static struct kprobe_insn_cache kprobe_optinsn_slots = {
> +	.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
> +	/* .insn_size is initialized later */
> +	.nr_garbage = 0,
> +};
> +/* Get a slot for optimized_kprobe buffer */
> +kprobe_opcode_t __kprobes *get_optinsn_slot(void)
> +{
> +	kprobe_opcode_t *ret = NULL;
> +	mutex_lock(&kprobe_optinsn_mutex);
> +	ret = __get_insn_slot(&kprobe_optinsn_slots);
> +	mutex_unlock(&kprobe_optinsn_mutex);
> +	return ret;
> +}



Just a small nitpick: could you add a blank line between the
variable declarations and the rest? And also just before the
return? It makes the code a bit easier to review.
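
Something like this, i.e. the same function from the patch with
just the blank lines added:

kprobe_opcode_t __kprobes *get_optinsn_slot(void)
{
	kprobe_opcode_t *ret = NULL;

	mutex_lock(&kprobe_optinsn_mutex);
	ret = __get_insn_slot(&kprobe_optinsn_slots);
	mutex_unlock(&kprobe_optinsn_mutex);

	return ret;
}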



> +
> +void __kprobes free_optinsn_slot(kprobe_opcode_t * slot, int dirty)
> +{
> +	mutex_lock(&kprobe_optinsn_mutex);
> +	__free_insn_slot(&kprobe_optinsn_slots, slot, dirty);
> +	mutex_unlock(&kprobe_optinsn_mutex);
> +}
> +#endif
>  #endif
>  
>  /* We have preemption disabled.. so it is safe to use __ versions */
> @@ -334,20 +360,270 @@ struct kprobe __kprobes *get_kprobe(void *addr)
>  	return NULL;
>  }
>  
> +static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);
> +
> +/* Return true if the kprobe is an aggregator */
> +static inline int kprobe_aggrprobe(struct kprobe *p)
> +{
> +	return p->pre_handler == aggr_pre_handler;
> +}
> +
> +/*
> + * Keep all fields in the kprobe consistent
> + */
> +static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
> +{
> +	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
> +	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
> +}
> +
> +#ifdef CONFIG_OPTPROBES
> +/*
> + * Call all pre_handlers on the list, but ignore their return values.
> + * This must be called from the arch-dep optimized caller.
> + */
> +void __kprobes opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
> +{
> +	struct kprobe *kp;
> +
> +	list_for_each_entry_rcu(kp, &p->list, list) {
> +		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
> +			set_kprobe_instance(kp);
> +			kp->pre_handler(kp, regs);
> +		}
> +		reset_kprobe_instance();
> +	}
> +}
> +
> +/* Return true(!0) if the kprobe is ready for optimization. */
> +static inline int kprobe_optready(struct kprobe *p)
> +{
> +	struct optimized_kprobe *op;
> +	if (kprobe_aggrprobe(p)) {
> +		op = container_of(p, struct optimized_kprobe, kp);
> +		return arch_prepared_optinsn(&op->optinsn);
> +	}
> +	return 0;
> +}
> +
> +/* Return an optimized kprobe which replaces instructions including addr. */
> +struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
> +{
> +	int i;
> +	struct kprobe *p = NULL;
> +	struct optimized_kprobe *op;
> +	for (i = 0; !p && i < MAX_OPTIMIZED_LENGTH; i++)
> +		p = get_kprobe((void *)(addr - i));
> +
> +	if (p && kprobe_optready(p)) {
> +		op = container_of(p, struct optimized_kprobe, kp);
> +		if (arch_within_optimized_kprobe(op, addr))
> +			return p;
> +	}
> +	return NULL;
> +}
> +
> +/* Optimization staging list, protected by kprobe_mutex */
> +static LIST_HEAD(optimizing_list);
> +
> +static void kprobe_optimizer(struct work_struct *work);
> +static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
> +#define OPTIMIZE_DELAY 5
> +
> +/* Kprobe jump optimizer */
> +static __kprobes void kprobe_optimizer(struct work_struct *work)
> +{
> +	struct optimized_kprobe *op, *tmp;
> +
> +	/* Lock modules while optimizing kprobes */
> +	mutex_lock(&module_mutex);
> +	mutex_lock(&kprobe_mutex);
> +	if (kprobes_all_disarmed)
> +		goto end;
> +
> +	/* Wait a quiescence period to ensure all interrupts are done */
> +	synchronize_sched();



It's not clear to me why you are doing that.
Is this waiting for pending int 3 kprobe handlers
to complete? If so, why, and what does that prevent?

Also, why is it a delayed work? I'm not sure what we are
waiting for here.


> +
> +	get_online_cpus();	/* Use online_cpus while optimizing */



And this comment doesn't tell us much about what this brings us.
The changelog says it is there to avoid a text_mutex deadlock,
but I'm not sure why we would deadlock without it.

Again, I think this dance of live patching protected only by
int 3, where that patching is itself a necessary preliminary
stage, is a complicated sequence that could be simplified by
doing all of the patching in one pass under stop_machine().
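
Something like this untested sketch is what I have in mind
(patch_jump() and do_patch() are made-up names, not part of this
patchset, and error handling is ignored):

#include <linux/kprobes.h>	/* __kprobes */
#include <linux/stop_machine.h>
#include <asm/alternative.h>	/* text_poke() */

struct patch_args {
	void *addr;		/* probed instruction address */
	const void *insn;	/* relative jump to the detour buffer */
	size_t len;		/* size of the replaced instructions */
};

/* Runs on one CPU with all the others halted, interrupts off. */
static int __kprobes do_patch(void *data)
{
	struct patch_args *args = data;

	text_poke(args->addr, args->insn, args->len);

	return 0;
}

static void __kprobes patch_jump(void *addr, const void *insn, size_t len)
{
	struct patch_args args = { .addr = addr, .insn = insn, .len = len };

	/* NULL cpumask: pick any CPU to run do_patch(), stop the rest. */
	stop_machine(do_patch, &args, NULL);
}

That way no CPU can be executing in the middle of the
instructions being replaced, and the whole int 3 protection
dance goes away.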


Thread overview: 37+ messages
2009-11-23 23:18 [PATCH -tip v5 00/10] kprobes: Kprobes jump optimization support Masami Hiramatsu
2009-11-23 23:18 ` [PATCH -tip v5 02/10] kprobes: Introduce generic insn_slot framework Masami Hiramatsu
2009-11-23 23:18 ` [PATCH -tip v5 01/10] kprobes/x86: Cleanup RELATIVEJUMP_INSTRUCTION to RELATIVEJUMP_OPCODE Masami Hiramatsu
2009-11-23 23:19 ` [PATCH -tip v5 06/10] kprobes/x86: Cleanup save/restore registers Masami Hiramatsu
2009-11-24  2:51   ` Frederic Weisbecker
2009-11-24 15:36     ` Masami Hiramatsu
2009-11-24 20:19       ` Frederic Weisbecker
2009-11-24 15:40     ` Frank Ch. Eigler
2009-11-24 20:20       ` Frederic Weisbecker
2009-11-23 23:19 ` [PATCH -tip v5 03/10] kprobes: Introduce kprobes jump optimization Masami Hiramatsu
2009-11-24  2:44   ` Frederic Weisbecker [this message]
2009-11-24  3:31     ` Frederic Weisbecker
2009-11-24 15:32       ` Masami Hiramatsu
2009-11-24 20:14         ` Frederic Weisbecker
2009-11-24 20:59           ` Masami Hiramatsu
2009-11-25 21:09             ` Steven Rostedt
2009-11-25 21:30               ` Masami Hiramatsu
2009-11-24 21:12           ` H. Peter Anvin
2009-11-24 15:31     ` Masami Hiramatsu
2009-11-24 19:45       ` Frederic Weisbecker
2009-11-24 21:15         ` Masami Hiramatsu
2009-11-23 23:19 ` [PATCH -tip v5 05/10] kprobes/x86: Boost probes when reentering Masami Hiramatsu
2009-11-23 23:19 ` [PATCH -tip v5 07/10] kprobes/x86: Support kprobes jump optimization on x86 Masami Hiramatsu
2009-11-24  3:15   ` Frederic Weisbecker
2009-11-24 16:27   ` Jason Baron
2009-11-24 17:46     ` Masami Hiramatsu
2009-11-25 16:12       ` Masami Hiramatsu
2009-11-24 16:42   ` H. Peter Anvin
2009-11-24 17:01     ` Masami Hiramatsu
2009-11-23 23:19 ` [PATCH -tip v5 04/10] kprobes: Jump optimization sysctl interface Masami Hiramatsu
2009-11-23 23:19 ` [PATCH -tip v5 09/10] [RFC] x86: Introduce generic jump patching without stop_machine Masami Hiramatsu
2009-11-23 23:19 ` [PATCH -tip v5 08/10] kprobes: Add documents of jump optimization Masami Hiramatsu
2009-11-23 23:19 ` [PATCH -tip v5 10/10] [RFC] kprobes/x86: Use text_poke_fixup() for " Masami Hiramatsu
2009-11-24  2:03 ` [PATCH -tip v5 00/10] kprobes: Kprobes jump optimization support Frederic Weisbecker
2009-11-24  3:20   ` Frederic Weisbecker
2009-11-24  7:53     ` Ingo Molnar
2009-11-24 16:03       ` Masami Hiramatsu
