diff --git a/runtime/stp_utrace.c b/runtime/stp_utrace.c
index bb2d663..d2d5b70 100644
--- a/runtime/stp_utrace.c
+++ b/runtime/stp_utrace.c
@@ -26,6 +26,7 @@
 #include <trace/events/syscalls.h>
 #include "stp_task_work.c"
 #include "linux/stp_tracepoint.h"
+#include "syscall.h"
 
 #include "stp_helper_lock.h"
@@ -116,6 +117,8 @@ static void utrace_report_exec(void *cb_data __attribute__ ((unused)),
 #define __UTRACE_REGISTERED	1
 static atomic_t utrace_state = ATOMIC_INIT(__UTRACE_UNREGISTERED);
 
+static int __stp_utrace_syscall_tracing = 0;
+
 // If wake_up_state() is exported, use it.
 #if defined(STAPCONF_WAKE_UP_STATE_EXPORTED)
 #define stp_wake_up_state wake_up_state
@@ -2064,6 +2067,23 @@ static void utrace_report_syscall_entry(void *cb_data __attribute__ ((unused)),
 	if (atomic_read(&utrace_state) != __UTRACE_REGISTERED)
 		return;
 
+	/* If we're only doing syscall tracing for VMA tracking, then
+	 * let's prefilter the syscall numbers that we need before
+	 * calling the handlers.
+	 *
+	 * This allows us to avoid finding the utrace struct, which
+	 * avoids some locking. */
+	if (!__stp_utrace_syscall_tracing) {
+		long syscall_no = _stp_syscall_get_nr(task, regs);
+		if (syscall_no != MMAP_SYSCALL_NO(task)
+		    && syscall_no != MMAP2_SYSCALL_NO(task)
+		    && syscall_no != MPROTECT_SYSCALL_NO(task)
+		    && syscall_no != MUNMAP_SYSCALL_NO(task))
+			return;
+	}
+
+	/* Grab the utrace struct for this task. */
 	utrace = task_utrace_struct(task);
 
 	/* FIXME: Is this 100% correct? */
@@ -2103,6 +2123,23 @@ static void utrace_report_syscall_exit(void *cb_data __attribute__ ((unused)),
 	if (atomic_read(&utrace_state) != __UTRACE_REGISTERED)
 		return;
 
+	/* If we're only doing syscall tracing for VMA tracking, then
+	 * let's prefilter the syscall numbers that we need before
+	 * calling the handlers.
+	 *
+	 * This allows us to avoid finding the utrace struct, which
+	 * avoids some locking. */
+	if (!__stp_utrace_syscall_tracing) {
+		long syscall_no = _stp_syscall_get_nr(task, regs);
+		if (syscall_no != MMAP_SYSCALL_NO(task)
+		    && syscall_no != MMAP2_SYSCALL_NO(task)
+		    && syscall_no != MPROTECT_SYSCALL_NO(task)
+		    && syscall_no != MUNMAP_SYSCALL_NO(task))
+			return;
+	}
+
+	/* Grab the utrace struct for this task. */
 	utrace = task_utrace_struct(task);
 
 	/* FIXME: Is this 100% correct? */
@@ -2469,4 +2506,9 @@ static void utrace_report_work(struct task_work *work)
 	stp_task_work_func_done();
 }
 
+/* If this is called, we're doing utrace-based syscall tracing. */
+static void stap_utrace_syscall_tracing(void)
+{
+	__stp_utrace_syscall_tracing = 1;
+}
 #endif	/* _STP_UTRACE_C */
diff --git a/runtime/stp_utrace.h b/runtime/stp_utrace.h
index 9f162bb..34968b1 100644
--- a/runtime/stp_utrace.h
+++ b/runtime/stp_utrace.h
@@ -329,4 +329,7 @@ static inline enum utrace_resume_action utrace_resume_action(u32 action)
 	return action & UTRACE_RESUME_MASK;
 }
 
+/* If this is called, we're doing utrace-based syscall tracing. */
+static void stap_utrace_syscall_tracing(void);
+
 #endif	/* _STP_UTRACE_H */
diff --git a/tapset-utrace.cxx b/tapset-utrace.cxx
index 8580800..154c335 100644
--- a/tapset-utrace.cxx
+++ b/tapset-utrace.cxx
@@ -1208,6 +1208,12 @@ utrace_derived_probe_group::emit_module_linux_init (systemtap_session& s)
     return;
 
   s.op->newline() << "/* ---- utrace probes ---- */";
+  if (flags_seen[UDPF_SYSCALL] || flags_seen[UDPF_SYSCALL_RETURN])
+    {
+      s.op->newline() << "#if !defined(CONFIG_UTRACE)";
+      s.op->newline() << "stap_utrace_syscall_tracing();";
+      s.op->newline() << "#endif";
+    }
   s.op->newline() << "for (i=0; i<ARRAY_SIZE(stap_utrace_probes); i++) {";
   s.op->newline(1) << "struct stap_utrace_probe *p = &stap_utrace_probes[i];";
   s.op->newline() << "probe_point = p->probe->pp;"; // for error messages