public inbox for systemtap@sourceware.org
* [SYSTEMTAP/PATCH 3/4] rt : stp_utrace.c : replace utrace->lock with raw_lock
  2014-09-09  7:08 [SYSTEMTAP/PATCH 0/4] RT aware systemtap patch set Santosh Shukla
  2014-09-09  7:08 ` [SYSTEMTAP/PATCH 1/4] rt : replace __stp_tf_task_work_list_lock to raw_ Santosh Shukla
@ 2014-09-09  7:08 ` Santosh Shukla
  2014-09-09  7:08 ` [SYSTEMTAP/PATCH 2/4] rt : replace read write lock with rcu Santosh Shukla
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 13+ messages in thread
From: Santosh Shukla @ 2014-09-09  7:08 UTC (permalink / raw)
  To: systemtap; +Cc: sshukla

On a preempt-rt kernel, the BUG_ON below was observed at the time of launching
a stap script. Replacing the spinlock with a raw spinlock fixes the problem.

[  159.433464] Preemption disabled at:[<ffffffff810b74d6>] remove_wait_queue+0x36/0x40

[  159.433466] CPU: 8 PID: 6723 Comm: bash Tainted: GF       W  O 3.14.12-rt-rt9+ #1
[  159.433467] Hardware name: Intel Corporation S2600CP/S2600CP, BIOS SE5C600.86B.02.01.0002.082220131453 08/22/2013
[  159.433471]  ffff88042d917d80 ffff88040f623d90 ffffffff81602b13 ffff8804121e3980
[  159.433474]  ffff88040f623da0 ffffffff815fd2fb ffff88040f623df8 ffffffff81606017
[  159.433478]  ffff88040f623fd8 0000000000017d80 0000000000017d80 ffffffff810bcbcb
[  159.433478] Call Trace:
[  159.433481]  [<ffffffff81602b13>] dump_stack+0x4e/0x7a
[  159.433484]  [<ffffffff815fd2fb>] __schedule_bug+0x9f/0xad
[  159.433486]  [<ffffffff81606017>] __schedule+0x627/0x6a0
[  159.433489]  [<ffffffff810bcbcb>] ? task_blocks_on_rt_mutex+0x19b/0x220
[  159.433492]  [<ffffffff816060c0>] schedule+0x30/0xa0
[  159.433495]  [<ffffffff81607a9d>] rt_spin_lock_slowlock+0xbd/0x1f0
[  159.433498]  [<ffffffff81608645>] rt_spin_lock+0x25/0x30
[  159.433503]  [<ffffffffa076fbf5>] start_report+0x45/0xb0 [stap_c108d00c22143294d42db713b804dbb9_10325]
[  159.433508]  [<ffffffffa0773e38>] utrace_report_syscall_exit+0x88/0x110 [stap_c108d00c22143294d42db713b804dbb9_10325]
[  159.433511]  [<ffffffff81023d30>] syscall_trace_leave+0x100/0x130
[  159.433514]  [<ffffffff8161114b>] int_check_syscall_exit_work+0x34/0x3d

Signed-off-by: Santosh Shukla <sshukla@mvista.com>
---
 runtime/stp_utrace.c |   82 +++++++++++++++++++++++++-------------------------
 1 file changed, 41 insertions(+), 41 deletions(-)

diff --git a/runtime/stp_utrace.c b/runtime/stp_utrace.c
index 0c9d0eb..b145baa 100644
--- a/runtime/stp_utrace.c
+++ b/runtime/stp_utrace.c
@@ -56,7 +56,7 @@
  * in time to have their callbacks seen.
  */
 struct utrace {
-	spinlock_t lock;
+	raw_spinlock_t lock;
 	struct list_head attached, attaching;
 
 	struct utrace_engine *reporting;
@@ -332,7 +332,7 @@ static void utrace_cleanup(struct utrace *utrace)
 
 	/* Free engines associated with the struct utrace, starting
 	 * with the 'attached' list then doing the 'attaching' list. */
-	spin_lock(&utrace->lock);
+	raw_spin_lock(&utrace->lock);
 	list_for_each_entry_safe(engine, next, &utrace->attached, entry) {
 #ifdef STP_TF_DEBUG
 	    printk(KERN_ERR "%s:%d - removing engine\n",
@@ -365,7 +365,7 @@ static void utrace_cleanup(struct utrace *utrace)
 				: "UNKNOWN"));
 		utrace->report_work_added = 0;
 	}
-	spin_unlock(&utrace->lock);
+	raw_spin_unlock(&utrace->lock);
 
 	/* Free the struct utrace itself. */
 	kmem_cache_free(utrace_cachep, utrace);
@@ -460,7 +460,7 @@ static bool utrace_task_alloc(struct task_struct *task)
 
 	if (unlikely(!utrace))
 		return false;
-	spin_lock_init(&utrace->lock);
+	raw_spin_lock_init(&utrace->lock);
 	INIT_LIST_HEAD(&utrace->attached);
 	INIT_LIST_HEAD(&utrace->attaching);
 	utrace->resume = UTRACE_RESUME;
@@ -501,7 +501,7 @@ static void utrace_free(struct utrace *utrace)
 	spin_unlock(&task_utrace_lock);
 
 	/* Free the utrace struct. */
-	spin_lock(&utrace->lock);
+	raw_spin_lock(&utrace->lock);
 #ifdef STP_TF_DEBUG
 	if (unlikely(utrace->reporting)
 	    || unlikely(!list_empty(&utrace->attached))
@@ -530,7 +530,7 @@ static void utrace_free(struct utrace *utrace)
 				: "UNKNOWN"));
 		utrace->report_work_added = 0;
 	}
-	spin_unlock(&utrace->lock);
+	raw_spin_unlock(&utrace->lock);
 
 	kmem_cache_free(utrace_cachep, utrace);
 }
@@ -606,7 +606,7 @@ static int utrace_add_engine(struct task_struct *target,
 {
 	int ret;
 
-	spin_lock(&utrace->lock);
+	raw_spin_lock(&utrace->lock);
 
 	ret = -EEXIST;
 	if ((flags & UTRACE_ATTACH_EXCLUSIVE) &&
@@ -654,7 +654,7 @@ static int utrace_add_engine(struct task_struct *target,
 	utrace_engine_get(engine);
 	ret = 0;
 unlock:
-	spin_unlock(&utrace->lock);
+	raw_spin_unlock(&utrace->lock);
 
 	return ret;
 }
@@ -703,11 +703,11 @@ static struct utrace_engine *utrace_attach_task(
 	if (!(flags & UTRACE_ATTACH_CREATE)) {
 		if (unlikely(!utrace))
 			return ERR_PTR(-ENOENT);
-		spin_lock(&utrace->lock);
+		raw_spin_lock(&utrace->lock);
 		engine = find_matching_engine(utrace, flags, ops, data);
 		if (engine)
 			utrace_engine_get(engine);
-		spin_unlock(&utrace->lock);
+		raw_spin_unlock(&utrace->lock);
 		return engine ?: ERR_PTR(-ENOENT);
 	}
 
@@ -823,14 +823,14 @@ static struct utrace *get_utrace_lock(struct task_struct *target,
 	}
 
 	utrace = task_utrace_struct(target);
-	spin_lock(&utrace->lock);
+	raw_spin_lock(&utrace->lock);
 	if (unlikely(utrace->reap) || unlikely(!engine->ops) ||
 	    unlikely(engine->ops == &utrace_detached_ops)) {
 		/*
 		 * By the time we got the utrace lock,
 		 * it had been reaped or detached already.
 		 */
-		spin_unlock(&utrace->lock);
+		raw_spin_unlock(&utrace->lock);
 		utrace = ERR_PTR(-ESRCH);
 		if (!attached && engine->ops == &utrace_detached_ops)
 			utrace = ERR_PTR(-ERESTARTSYS);
@@ -992,7 +992,7 @@ static int utrace_set_events(struct task_struct *target,
 			ret = -EINPROGRESS;
 	}
 unlock:
-	spin_unlock(&utrace->lock);
+	raw_spin_unlock(&utrace->lock);
 
 	return ret;
 }
@@ -1138,7 +1138,7 @@ static bool utrace_reset(struct task_struct *task, struct utrace *utrace)
 	 */
 	rcu_read_lock();
 	utrace->utrace_flags = flags;
-	spin_unlock(&utrace->lock);
+	raw_spin_unlock(&utrace->lock);
 	rcu_read_unlock();
 
 	put_detached_list(&detached);
@@ -1154,7 +1154,7 @@ static void utrace_finish_stop(void)
 	 */
 	if (unlikely(__fatal_signal_pending(current))) {
 		struct utrace *utrace = task_utrace_struct(current);
-		spin_unlock_wait(&utrace->lock);
+		raw_spin_unlock_wait(&utrace->lock);
 	}
 }
 
@@ -1168,7 +1168,7 @@ static void utrace_stop(struct task_struct *task, struct utrace *utrace,
 			enum utrace_resume_action action)
 {
 relock:
-	spin_lock(&utrace->lock);
+	raw_spin_lock(&utrace->lock);
 
 	if (action < utrace->resume) {
 		/*
@@ -1215,7 +1215,7 @@ relock:
 
 	if (unlikely(__fatal_signal_pending(task))) {
 		spin_unlock_irq(&task->sighand->siglock);
-		spin_unlock(&utrace->lock);
+		raw_spin_unlock(&utrace->lock);
 		return;
 	}
 
@@ -1230,7 +1230,7 @@ relock:
 		task->signal->flags = SIGNAL_STOP_STOPPED;
 
 	spin_unlock_irq(&task->sighand->siglock);
-	spin_unlock(&utrace->lock);
+	raw_spin_unlock(&utrace->lock);
 
 	schedule();
 
@@ -1266,7 +1266,7 @@ static void utrace_maybe_reap(struct task_struct *target, struct utrace *utrace,
 	struct utrace_engine *engine, *next;
 	struct list_head attached;
 
-	spin_lock(&utrace->lock);
+	raw_spin_lock(&utrace->lock);
 
 	if (reap) {
 		/*
@@ -1280,7 +1280,7 @@ static void utrace_maybe_reap(struct task_struct *target, struct utrace *utrace,
 		utrace->reap = 1;
 
 		if (utrace->utrace_flags & _UTRACE_DEATH_EVENTS) {
-			spin_unlock(&utrace->lock);
+			raw_spin_unlock(&utrace->lock);
 			return;
 		}
 	} else {
@@ -1318,7 +1318,7 @@ static void utrace_maybe_reap(struct task_struct *target, struct utrace *utrace,
 	list_replace_init(&utrace->attached, &attached);
 	list_splice_tail_init(&utrace->attaching, &attached);
 
-	spin_unlock(&utrace->lock);
+	raw_spin_unlock(&utrace->lock);
 
 	list_for_each_entry_safe(engine, next, &attached, entry) {
 		if (engine->flags & UTRACE_EVENT(REAP))
@@ -1493,7 +1493,7 @@ static int utrace_control(struct task_struct *target,
 	if (unlikely(target->exit_state)) {
 		ret = utrace_control_dead(target, utrace, action);
 		if (ret) {
-			spin_unlock(&utrace->lock);
+			raw_spin_unlock(&utrace->lock);
 			return ret;
 		}
 		reset = true;
@@ -1627,7 +1627,7 @@ static int utrace_control(struct task_struct *target,
 	if (reset)
 		utrace_reset(target, utrace);
 	else
-		spin_unlock(&utrace->lock);
+		raw_spin_unlock(&utrace->lock);
 
 	return ret;
 }
@@ -1686,7 +1686,7 @@ static int utrace_barrier(struct task_struct *target,
 			 */
 			if (utrace->reporting != engine)
 				ret = 0;
-			spin_unlock(&utrace->lock);
+			raw_spin_unlock(&utrace->lock);
 			if (!ret)
 				break;
 		}
@@ -1724,12 +1724,12 @@ static enum utrace_resume_action start_report(struct utrace *utrace)
 	enum utrace_resume_action resume = utrace->resume;
 	if (utrace->pending_attach ||
 	    (resume > UTRACE_STOP && resume < UTRACE_RESUME)) {
-		spin_lock(&utrace->lock);
+		raw_spin_lock(&utrace->lock);
 		splice_attaching(utrace);
 		resume = utrace->resume;
 		if (resume > UTRACE_STOP)
 			utrace->resume = UTRACE_RESUME;
-		spin_unlock(&utrace->lock);
+		raw_spin_unlock(&utrace->lock);
 	}
 	return resume;
 }
@@ -1739,7 +1739,7 @@ static inline void finish_report_reset(struct task_struct *task,
 				       struct utrace_report *report)
 {
 	if (unlikely(report->spurious || report->detaches)) {
-		spin_lock(&utrace->lock);
+		raw_spin_lock(&utrace->lock);
 		if (utrace_reset(task, utrace))
 			report->action = UTRACE_RESUME;
 	}
@@ -1762,7 +1762,7 @@ static void finish_report(struct task_struct *task, struct utrace *utrace,
 		resume = will_not_stop ? UTRACE_REPORT : UTRACE_RESUME;
 
 	if (resume < utrace->resume) {
-		spin_lock(&utrace->lock);
+		raw_spin_lock(&utrace->lock);
 		utrace->resume = resume;
 		if (! utrace->task_work_added) {
 			int rc = stp_task_work_add(task, &utrace->work);
@@ -1778,7 +1778,7 @@ static void finish_report(struct task_struct *task, struct utrace *utrace,
 				       __FUNCTION__, __LINE__, rc);
 			}
 		}
-		spin_unlock(&utrace->lock);
+		raw_spin_unlock(&utrace->lock);
 	}
 
 	finish_report_reset(task, utrace, report);
@@ -1799,9 +1799,9 @@ static void finish_callback_report(struct task_struct *task,
 		 * This way, a 0 return is an unambiguous indicator that any
 		 * callback returning UTRACE_DETACH has indeed caused detach.
 		 */
-		spin_lock(&utrace->lock);
+		raw_spin_lock(&utrace->lock);
 		engine->ops = &utrace_detached_ops;
-		spin_unlock(&utrace->lock);
+		raw_spin_unlock(&utrace->lock);
 	}
 
 	/*
@@ -1820,16 +1820,16 @@ static void finish_callback_report(struct task_struct *task,
 			report->resume_action = action;
 
 		if (engine_wants_stop(engine)) {
-			spin_lock(&utrace->lock);
+			raw_spin_lock(&utrace->lock);
 			clear_engine_wants_stop(engine);
-			spin_unlock(&utrace->lock);
+			raw_spin_unlock(&utrace->lock);
 		}
 
 		return;
 	}
 
 	if (!engine_wants_stop(engine)) {
-		spin_lock(&utrace->lock);
+		raw_spin_lock(&utrace->lock);
 		/*
 		 * If utrace_control() came in and detached us
 		 * before we got the lock, we must not stop now.
@@ -1838,7 +1838,7 @@ static void finish_callback_report(struct task_struct *task,
 			report->detaches = true;
 		else
 			mark_engine_wants_stop(utrace, engine);
-		spin_unlock(&utrace->lock);
+		raw_spin_unlock(&utrace->lock);
 	}
 }
 
@@ -2216,9 +2216,9 @@ static void utrace_finish_vfork(struct task_struct *task)
 	struct utrace *utrace = task_utrace_struct(task);
 
 	if (utrace->vfork_stop) {
-		spin_lock(&utrace->lock);
+		raw_spin_lock(&utrace->lock);
 		utrace->vfork_stop = 0;
-		spin_unlock(&utrace->lock);
+		raw_spin_unlock(&utrace->lock);
 		utrace_stop(task, utrace, UTRACE_RESUME); /* XXX */
 	}
 }
@@ -2293,12 +2293,12 @@ static void utrace_report_death(void *cb_data __attribute__ ((unused)),
 		}
 	}
 	else {
-		spin_lock(&utrace->lock);
+		raw_spin_lock(&utrace->lock);
 		BUG_ON(utrace->death);
 		utrace->death = 1;
 		utrace->resume = UTRACE_RESUME;
 		splice_attaching(utrace);
-		spin_unlock(&utrace->lock);
+		raw_spin_unlock(&utrace->lock);
 
 		REPORT_CALLBACKS(, task, utrace, &report, UTRACE_EVENT(DEATH),
 				 report_death, engine, -1/*group_dead*/,
@@ -2439,12 +2439,12 @@ static void utrace_report_work(struct task_work *work)
 	might_sleep();
 	utrace->report_work_added = 0;
 
-	spin_lock(&utrace->lock);
+	raw_spin_lock(&utrace->lock);
 	BUG_ON(utrace->death);
 	utrace->death = 1;
 	utrace->resume = UTRACE_RESUME;
 	splice_attaching(utrace);
-	spin_unlock(&utrace->lock);
+	raw_spin_unlock(&utrace->lock);
 
 	REPORT_CALLBACKS(, task, utrace, &report, UTRACE_EVENT(DEATH),
 			 report_death, engine, -1/*group_dead*/,
-- 
1.7.9.5


* [SYSTEMTAP/PATCH 0/4] RT aware systemtap patch set
@ 2014-09-09  7:08 Santosh Shukla
  2014-09-09  7:08 ` [SYSTEMTAP/PATCH 1/4] rt : replace __stp_tf_task_work_list_lock to raw_ Santosh Shukla
                   ` (4 more replies)
  0 siblings, 5 replies; 13+ messages in thread
From: Santosh Shukla @ 2014-09-09  7:08 UTC (permalink / raw)
  To: systemtap; +Cc: sshukla

Hi,

I wanted to run systemtap on -rt kernel version 3.14.12-rt9 and noticed a bunch
of preemption BUG_ONs. This is an initial effort to make systemtap RT-aware.
Tested on the 3.14.12-rt kernel. The patchset is based on the stap upstream
tree [1], built at commit-id [2]. It can work on the master branch with a
little tweaking.

I have also tested this patch set on the 3.10.40-rt38 kernel and noticed a few
preemption BUG_ONs, but those were coming from the kernel itself; nothing more
was observed on the systemtap side.

Change summary -
- Replaced read locks with rcu_read_lock, such that the substitution for
  read_lock_irqsave is rcu_read_lock + local_irq_save, and the substitution
  for read_unlock_irqrestore is local_irq_restore followed by rcu_read_unlock
  (a compact sketch follows this list).

- Replaced write_lock_irqsave/irqrestore with raw_spin_lock_irqsave/irqrestore
  for the -rt kernel. On a non-rt kernel those raw_ locks should be replaced
  by normal spinlocks.

- Replaced the hlist API with the _rcu variants.
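
As a compact sketch, the substitution pattern looks roughly like this
(placeholder names, not the actual patched symbols):

#include <linux/spinlock.h>
#include <linux/rculist.h>

struct map_entry { struct hlist_node hlist; };
static DEFINE_RAW_SPINLOCK(map_lock);	/* was: DEFINE_RWLOCK(map_lock) */
static HLIST_HEAD(map_head);

/* Reader side: was read_lock_irqsave()/read_unlock_irqrestore(). */
static void reader(void)
{
	struct map_entry *e;
	unsigned long flags;

	rcu_read_lock();
	local_irq_save(flags);
	hlist_for_each_entry_rcu(e, &map_head, hlist)
		;	/* inspect e under RCU protection */
	local_irq_restore(flags);
	rcu_read_unlock();
}

/* Writer side: was write_lock_irqsave()/write_unlock_irqrestore(). */
static void writer(struct map_entry *e)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&map_lock, flags);
	hlist_add_head_rcu(&e->hlist, &map_head);
	raw_spin_unlock_irqrestore(&map_lock, flags);
}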

Test scripts used for testing:
/usr/local/stap/bin/stap -v testsuite/systemtap.examples/network/netdev.stp
/usr/local/stap/bin/stap -v testsuite/systemtap.examples/network/tcpdumplike.stp

A few other example test scripts used:
cat ../test-indent.stp
probe kernel.function("*@net/socket.c").call
{
	          printf ("%s -> %s\n", thread_indent(1), probefunc())
}
probe kernel.function("*@net/socket.c").return
{
	          printf ("%s <- %s\n", thread_indent(-1), probefunc())
}


I'd like to know your feedback and comments on the patch set. Also, does it
make sense to maintain an -rt version of systemtap upstream.. do we care?

[1] git://sourceware.org/git/systemtap.git
[2] git checkout 8f0fcd995f7f650a2ee0a94539f90c99e6d19e1d

Santosh Shukla (4):
  rt : replace __stp_tf_task_work_list_lock to raw_
  rt : replace read write lock with rcu
  rt : stp_utrace.c : replace utrace->lock with raw_lock
  rt : replace utrace_struct lock to raw lock

 runtime/linux/addr-map.c        |   16 ++++---
 runtime/linux/runtime.h         |    2 +-
 runtime/linux/task_finder2.c    |   14 +++---
 runtime/linux/task_finder_map.c |   38 ++++++++-------
 runtime/stp_utrace.c            |  100 +++++++++++++++++++--------------------
 runtime/task_finder_vma.c       |   43 +++++++++--------
 6 files changed, 112 insertions(+), 101 deletions(-)

-- 
1.7.9.5


* [SYSTEMTAP/PATCH 2/4] rt : replace read write lock with rcu
  2014-09-09  7:08 [SYSTEMTAP/PATCH 0/4] RT aware systemtap patch set Santosh Shukla
  2014-09-09  7:08 ` [SYSTEMTAP/PATCH 1/4] rt : replace __stp_tf_task_work_list_lock to raw_ Santosh Shukla
  2014-09-09  7:08 ` [SYSTEMTAP/PATCH 3/4] rt : stp_utrace.c : replace utrace->lock with raw_lock Santosh Shukla
@ 2014-09-09  7:08 ` Santosh Shukla
  2014-09-09  7:09 ` [SYSTEMTAP/PATCH 4/4] rt : replace utrace_struct lock to raw lock Santosh Shukla
  2014-09-15 10:17 ` [SYSTEMTAP/PATCH 0/4] RT aware systemtap patch set Santosh Shukla
  4 siblings, 0 replies; 13+ messages in thread
From: Santosh Shukla @ 2014-09-09  7:08 UTC (permalink / raw)
  To: systemtap; +Cc: sshukla

Noticed that in -rt mode, read_lock and write_lock lead to several BUG_ONs:

[ 1033.544821]  ffff880035463d38 ffffffff815fcaed ffff880035463d90 ffffffff816057f7
[ 1033.544826]  ffff880035463fd8 0000000000017e80 0000000000017e80 ffffffff810be5fb
[ 1033.544826] Call Trace:
[ 1033.544830]  [<ffffffff81602329>] dump_stack+0x4e/0x7a
[ 1033.544834]  [<ffffffff815fcaed>] __schedule_bug+0x9f/0xad
[ 1033.544838]  [<ffffffff816057f7>] __schedule+0x627/0x6a0
[ 1033.544842]  [<ffffffff810be5fb>] ? task_blocks_on_rt_mutex+0x19b/0x220
[ 1033.544846]  [<ffffffff816058a0>] schedule+0x30/0xa0
[ 1033.544850]  [<ffffffff8160727d>] rt_spin_lock_slowlock+0xbd/0x1f0
[ 1033.544856]  [<ffffffff81607df5>] __rt_spin_lock+0x25/0x30
[ 1033.544858]  [<ffffffff816080b0>] rt_read_lock+0x30/0x40
[ 1033.544861]  [<ffffffff816080ce>] rt_read_lock_irqsave+0xe/0x20
[ 1033.544867]  [<ffffffffa08a9b89>] __stp_tf_get_map_entry+0x19/0xc0 [stap_e40dcb2c46d7c0a2fb8d70ba343e393a_15235]
[ 1033.544873]  [<ffffffffa08afedd>] __stp_utrace_task_finder_target_syscall_exit+0x5d/0x350 [stap_e40dcb2c46d7c0a2fb8d70ba343e393a_15235]
[ 1033.544889]  [<ffffffffa08a91c5>] utrace_report_syscall_exit+0xc5/0x110 [stap_e40dcb2c46d7c0a2fb8d70ba343e393a_15235]
[ 1033.544893]  [<ffffffff81023ce0>] syscall_trace_leave+0x100/0x130
[ 1033.544896]  [<ffffffff8161090b>] int_check_syscall_exit_work+0x34/0x3d
[ 1033.544902] CPU: 5 PID: 20766 Comm: make Tainted: GF       W  O 3.14.12-rt-rt9+ #2
[ 1033.544904] Hardware name: Dell Inc. PowerEdge T620/0658N7, BIOS 1.4.6 10/26/2012
[ 1033.544912]  ffff880236f57e80 ffff8801cab21d28 ffffffff81602329 ffff8802243064a0
[ 1033.544919]  ffff8801cab21d38 ffffffff815fcaed ffff8801cab21d90 ffffffff816057f7
[ 1033.544924]  ffff8801cab21fd8 0000000000017e80 0000000000017e80 ffffffff810be5fb
[ 1033.544925] Call Trace:
[ 1033.544929]  [<ffffffff81602329>] dump_stack+0x4e/0x7a
[ 1033.544934]  [<ffffffff815fcaed>] __schedule_bug+0x9f/0xad
[ 1033.544938]  [<ffffffff816057f7>] __schedule+0x627/0x6a0
[ 1033.544942]  [<ffffffff810be5fb>] ? task_blocks_on_rt_mutex+0x19b/0x220
[ 1033.544946]  [<ffffffff816058a0>] schedule+0x30/0xa0
[ 1033.544951]  [<ffffffff8160727d>] rt_spin_lock_slowlock+0xbd/0x1f0
[ 1033.544956]  [<ffffffff81607df5>] __rt_spin_lock+0x25/0x30
[ 1033.544959]  [<ffffffff816080b0>] rt_read_lock+0x30/0x40
[ 1033.544962]  [<ffffffff816080ce>] rt_read_lock_irqsave+0xe/0x20
[ 1033.544968]  [<ffffffffa08a9b89>] __stp_tf_get_map_entry+0x19/0xc0 [stap_e40dcb2c46d7c0a2fb8d70ba343e393a_15235]
[ 1033.544975]  [<ffffffffa08afedd>] __stp_utrace_task_finder_target_syscall_exit+0x5d/0x350 [stap_e40dcb2c46d7c0a2fb8d70ba343e393a_15235]
[ 1033.544981]  [<ffffffffa08a91c5>] utrace_report_syscall_exit+0xc5/0x110 [stap_e40dcb2c46d7c0a2fb8d70ba343e393a_15235]
[ 1033.544984]  [<ffffffff81023ce0>] syscall_trace_leave+0x100/0x130
[ 1033.544988]  [<ffffffff8161090b>] int_check_syscall_exit_work+0x34/0x3d

Replacing read_lock_irqsave with RCU suppresses these BUG_ONs in -rt mode.
Likewise, the writer lock is replaced with a raw lock for -rt mode; on a
non-rt kernel these would switch back to normal spinlocks. Also changed the
hlist API to the _rcu variants.

Signed-off-by: Santosh Shukla <sshukla@mvista.com>
---
 runtime/linux/addr-map.c        |   16 ++++++++-------
 runtime/linux/runtime.h         |    2 +-
 runtime/linux/task_finder_map.c |   38 ++++++++++++++++++----------------
 runtime/task_finder_vma.c       |   43 ++++++++++++++++++++++-----------------
 4 files changed, 55 insertions(+), 44 deletions(-)

diff --git a/runtime/linux/addr-map.c b/runtime/linux/addr-map.c
index 3f5aca7..679ea7d 100644
--- a/runtime/linux/addr-map.c
+++ b/runtime/linux/addr-map.c
@@ -28,7 +28,7 @@ struct addr_map
   struct addr_map_entry entries[0];
 };
 
-static DEFINE_RWLOCK(addr_map_lock);
+static DEFINE_RAW_SPINLOCK(addr_map_lock);
 static struct addr_map* blackmap;
 
 /* Find address of entry where we can insert a new one. */
@@ -127,9 +127,11 @@ lookup_bad_addr(unsigned long addr, size_t size)
 #endif
 
   /* Search for the given range in the black-listed map.  */
-  read_lock_irqsave(&addr_map_lock, flags);
+  rcu_read_lock();
+  local_irq_save(flags);
   result = lookup_addr_aux(addr, size, blackmap);
-  read_unlock_irqrestore(&addr_map_lock, flags);
+  local_irq_restore(flags);
+  rcu_read_unlock();
   if (result)
     return 1;
   else
@@ -154,7 +156,7 @@ add_bad_addr_entry(unsigned long min_addr, unsigned long max_addr,
   while (1)
     {
       size_t old_size = 0;
-      write_lock_irqsave(&addr_map_lock, flags);
+      raw_spin_lock_irqsave(&addr_map_lock, flags);
       old_map = blackmap;
       if (old_map)
         old_size = old_map->size;
@@ -163,7 +165,7 @@ add_bad_addr_entry(unsigned long min_addr, unsigned long max_addr,
          added an entry while we were sleeping. */
       if (!new_map || (new_map && new_map->size < old_size + 1))
         {
-          write_unlock_irqrestore(&addr_map_lock, flags);
+          raw_spin_unlock_irqrestore(&addr_map_lock, flags);
           if (new_map)
             {
 	      _stp_kfree(new_map);
@@ -192,7 +194,7 @@ add_bad_addr_entry(unsigned long min_addr, unsigned long max_addr,
             *existing_min = min_entry;
           if (existing_max)
             *existing_max = max_entry;
-          write_unlock_irqrestore(&addr_map_lock, flags);
+          raw_spin_unlock_irqrestore(&addr_map_lock, flags);
           _stp_kfree(new_map);
           return 1;
         }
@@ -210,7 +212,7 @@ add_bad_addr_entry(unsigned long min_addr, unsigned long max_addr,
                (old_map->size - existing) * sizeof(*new_entry));
     }
   blackmap = new_map;
-  write_unlock_irqrestore(&addr_map_lock, flags);
+  raw_spin_unlock_irqrestore(&addr_map_lock, flags);
   if (old_map)
     _stp_kfree(old_map);
   return 0;
diff --git a/runtime/linux/runtime.h b/runtime/linux/runtime.h
index 76dbea4..0267808 100644
--- a/runtime/linux/runtime.h
+++ b/runtime/linux/runtime.h
@@ -74,7 +74,7 @@ static void _stp_exit(void);
 
 
 #ifdef STAPCONF_HLIST_4ARGS
-#define stap_hlist_for_each_entry(a,b,c,d) hlist_for_each_entry(a,b,c,d)
+#define stap_hlist_for_each_entry(a,b,c,d) hlist_for_each_entry_rcu(a,b,c,d)
 #define stap_hlist_for_each_entry_safe(a,b,c,d,e) hlist_for_each_entry_safe(a,b,c,d,e)
 #else
 #define stap_hlist_for_each_entry(a,b,c,d) (void) b; hlist_for_each_entry(a,c,d)
diff --git a/runtime/linux/task_finder_map.c b/runtime/linux/task_finder_map.c
index d515e9f..0256393 100644
--- a/runtime/linux/task_finder_map.c
+++ b/runtime/linux/task_finder_map.c
@@ -13,7 +13,7 @@
 // contents in interrupt context (which should only ever call 
 // stap_find_map_map_info for getting stored info). So we might
 // want to look into that if this seems a bottleneck.
-static DEFINE_RWLOCK(__stp_tf_map_lock);
+static DEFINE_RAW_SPINLOCK(__stp_tf_map_lock);
 
 #define __STP_TF_HASH_BITS 4
 #define __STP_TF_TABLE_SIZE (1 << __STP_TF_HASH_BITS)
@@ -51,11 +51,11 @@ __stp_tf_map_initialize(void)
 	struct hlist_head *head = &__stp_tf_map_free_list[0];
 
 	unsigned long flags;
-	write_lock_irqsave(&__stp_tf_map_lock, flags);
+	raw_spin_lock_irqsave(&__stp_tf_map_lock, flags);
 	for (i = 0; i < TASK_FINDER_MAP_ENTRY_ITEMS; i++) {
-		hlist_add_head(&__stp_tf_map_free_list_items[i].hlist, head);
+		hlist_add_head_rcu(&__stp_tf_map_free_list_items[i].hlist, head);
 	}
-	write_unlock_irqrestore(&__stp_tf_map_lock, flags);
+	raw_spin_unlock_irqrestore(&__stp_tf_map_lock, flags);
 }
 
 
@@ -75,7 +75,7 @@ __stp_tf_map_get_free_entry(void)
 		break;
 	}
 	if (entry != NULL)
-		hlist_del(&entry->hlist);
+		hlist_del_rcu(&entry->hlist);
 	return entry;
 }
 
@@ -87,7 +87,7 @@ static void
 __stp_tf_map_put_free_entry(struct __stp_tf_map_entry *entry)
 {
 	struct hlist_head *head = &__stp_tf_map_free_list[0];
-	hlist_add_head(&entry->hlist, head);
+	hlist_add_head_rcu(&entry->hlist, head);
 }
 
 
@@ -109,15 +109,19 @@ __stp_tf_get_map_entry(struct task_struct *tsk)
 	struct __stp_tf_map_entry *entry;
 
 	unsigned long flags;
-	read_lock_irqsave(&__stp_tf_map_lock, flags);
+	rcu_read_lock();
+	local_irq_save(flags);
 	head = &__stp_tf_map_table[__stp_tf_map_hash(tsk)];
 	stap_hlist_for_each_entry(entry, node, head, hlist) {
 		if (tsk->pid == entry->pid) {
-			read_unlock_irqrestore(&__stp_tf_map_lock, flags);
+			local_irq_restore(flags);
+			rcu_read_unlock();
 			return entry;
 		}
 	}
-	read_unlock_irqrestore(&__stp_tf_map_lock, flags);
+	local_irq_restore(flags);
+	rcu_read_unlock();
+
 	return NULL;
 }
 
@@ -133,14 +137,14 @@ __stp_tf_add_map(struct task_struct *tsk, long syscall_no, unsigned long arg0,
 	struct __stp_tf_map_entry *entry;
 	unsigned long flags;
 
-	write_lock_irqsave(&__stp_tf_map_lock, flags);
+	raw_spin_lock_irqsave(&__stp_tf_map_lock, flags);
 	head = &__stp_tf_map_table[__stp_tf_map_hash(tsk)];
 	stap_hlist_for_each_entry(entry, node, head, hlist) {
 		// If we find an existing entry, just increment the
 		// usage count.
 		if (tsk->pid == entry->pid) {
 			entry->usage++;
-			write_unlock_irqrestore(&__stp_tf_map_lock, flags);
+			raw_spin_unlock_irqrestore(&__stp_tf_map_lock, flags);
 			return 0;
 		}
 	}
@@ -148,7 +152,7 @@ __stp_tf_add_map(struct task_struct *tsk, long syscall_no, unsigned long arg0,
 	// Get an element from the free list.
 	entry = __stp_tf_map_get_free_entry();
 	if (!entry) {
-		write_unlock_irqrestore(&__stp_tf_map_lock, flags);
+		raw_spin_unlock_irqrestore(&__stp_tf_map_lock, flags);
 		return -ENOMEM;
 	}
 	entry->usage = 1;
@@ -157,8 +161,8 @@ __stp_tf_add_map(struct task_struct *tsk, long syscall_no, unsigned long arg0,
 	entry->arg0 = arg0;
 	entry->arg1 = arg1;
 	entry->arg2 = arg2;
-	hlist_add_head(&entry->hlist, head);
-	write_unlock_irqrestore(&__stp_tf_map_lock, flags);
+	hlist_add_head_rcu(&entry->hlist, head);
+	raw_spin_unlock_irqrestore(&__stp_tf_map_lock, flags);
 	return 0;
 }
 
@@ -174,7 +178,7 @@ __stp_tf_remove_map_entry(struct __stp_tf_map_entry *entry)
 
 	if (entry != NULL) {
 		unsigned long flags;
-		write_lock_irqsave(&__stp_tf_map_lock, flags);
+		raw_spin_lock_irqsave(&__stp_tf_map_lock, flags);
 
 		// Decrement the usage count.
 		entry->usage--;
@@ -182,10 +186,10 @@ __stp_tf_remove_map_entry(struct __stp_tf_map_entry *entry)
 		// If the entry is unused, put it back on the free
 		// list.
 		if (entry->usage == 0) {
-			hlist_del(&entry->hlist);
+			hlist_del_rcu(&entry->hlist);
 			__stp_tf_map_put_free_entry(entry);
 		}
-		write_unlock_irqrestore(&__stp_tf_map_lock, flags);
+		raw_spin_unlock_irqrestore(&__stp_tf_map_lock, flags);
 	}
 	return 0;
 }
diff --git a/runtime/task_finder_vma.c b/runtime/task_finder_vma.c
index f826982..ff7f2f3 100644
--- a/runtime/task_finder_vma.c
+++ b/runtime/task_finder_vma.c
@@ -15,7 +15,7 @@
 // contents in interrupt context (which should only ever call 
 // stap_find_vma_map_info for getting stored vma info). So we might
 // want to look into that if this seems a bottleneck.
-static DEFINE_RWLOCK(__stp_tf_vma_lock);
+static DEFINE_RAW_SPINLOCK(__stp_tf_vma_lock);
 
 #define __STP_TF_HASH_BITS 4
 #define __STP_TF_TABLE_SIZE (1 << __STP_TF_HASH_BITS)
@@ -103,7 +103,7 @@ stap_destroy_vma_map(void)
 				continue;
 
 		        stap_hlist_for_each_entry_safe(entry, node, n, head, hlist) {
-				hlist_del(&entry->hlist);
+				hlist_del_rcu(&entry->hlist);
 				__stp_tf_vma_release_entry(entry);
 			}
 		}
@@ -180,17 +180,17 @@ stap_add_vma_map_info(struct task_struct *tsk,
 	// Take a write lock, since we are most likely going to write
 	// after reading. But reserve a new entry first outside the lock.
 	new_entry = __stp_tf_vma_new_entry();
-	write_lock_irqsave(&__stp_tf_vma_lock, flags);
+	raw_spin_lock_irqsave(&__stp_tf_vma_lock, flags);
 	entry = __stp_tf_get_vma_map_entry_internal(tsk, vm_start);
 	if (entry != NULL) {
-		write_unlock_irqrestore(&__stp_tf_vma_lock, flags);
+		raw_spin_unlock_irqrestore(&__stp_tf_vma_lock, flags);
 		if (new_entry)
 			__stp_tf_vma_release_entry(new_entry);
 		return -EBUSY;	/* Already there */
 	}
 
 	if (!new_entry) {
-		write_unlock_irqrestore(&__stp_tf_vma_lock, flags);
+		raw_spin_unlock_irqrestore(&__stp_tf_vma_lock, flags);
 		return -ENOMEM;
 	}
 
@@ -212,8 +212,8 @@ stap_add_vma_map_info(struct task_struct *tsk,
 	entry->user = user;
 
 	head = &__stp_tf_vma_map[__stp_tf_vma_map_hash(tsk)];
-	hlist_add_head(&entry->hlist, head);
-	write_unlock_irqrestore(&__stp_tf_vma_lock, flags);
+	hlist_add_head_rcu(&entry->hlist, head);
+	raw_spin_unlock_irqrestore(&__stp_tf_vma_lock, flags);
 	return 0;
 }
 
@@ -234,13 +234,13 @@ stap_extend_vma_map_info(struct task_struct *tsk,
 
 	// Take a write lock, since we are most likely going to write
 	// to the entry after reading, if its vm_end matches our vm_start.
-	write_lock_irqsave(&__stp_tf_vma_lock, flags);
+	raw_spin_lock_irqsave(&__stp_tf_vma_lock, flags);
 	entry = __stp_tf_get_vma_map_entry_end_internal(tsk, vm_start);
 	if (entry != NULL) {
 		entry->vm_end = vm_end;
 		res = 0;
 	}
-	write_unlock_irqrestore(&__stp_tf_vma_lock, flags);
+	raw_spin_unlock_irqrestore(&__stp_tf_vma_lock, flags);
 	return res;
 }
 
@@ -258,14 +258,14 @@ stap_remove_vma_map_info(struct task_struct *tsk, unsigned long vm_start)
 	// Take a write lock since we are most likely going to delete
 	// after reading.
 	unsigned long flags;
-	write_lock_irqsave(&__stp_tf_vma_lock, flags);
+	raw_spin_lock_irqsave(&__stp_tf_vma_lock, flags);
 	entry = __stp_tf_get_vma_map_entry_internal(tsk, vm_start);
 	if (entry != NULL) {
-		hlist_del(&entry->hlist);
+		hlist_del_rcu(&entry->hlist);
 		__stp_tf_vma_release_entry(entry);
                 rc = 0;
 	}
-	write_unlock_irqrestore(&__stp_tf_vma_lock, flags);
+	raw_spin_unlock_irqrestore(&__stp_tf_vma_lock, flags);
 	return rc;
 }
 
@@ -288,7 +288,8 @@ stap_find_vma_map_info(struct task_struct *tsk, unsigned long addr,
 	if (__stp_tf_vma_map == NULL)
 		return rc;
 
-	read_lock_irqsave(&__stp_tf_vma_lock, flags);
+	rcu_read_lock();
+	local_irq_save(flags);
 	head = &__stp_tf_vma_map[__stp_tf_vma_map_hash(tsk)];
 	stap_hlist_for_each_entry(entry, node, head, hlist) {
 		if (tsk->pid == entry->pid
@@ -309,7 +310,9 @@ stap_find_vma_map_info(struct task_struct *tsk, unsigned long addr,
 			*user = found_entry->user;
 		rc = 0;
 	}
-	read_unlock_irqrestore(&__stp_tf_vma_lock, flags);
+	local_irq_restore(flags);
+	rcu_read_unlock();
+
 	return rc;
 }
 
@@ -332,7 +335,8 @@ stap_find_vma_map_info_user(struct task_struct *tsk, void *user,
 	if (__stp_tf_vma_map == NULL)
 		return rc;
 
-	read_lock_irqsave(&__stp_tf_vma_lock, flags);
+	rcu_read_lock();
+	local_irq_save(flags);
 	head = &__stp_tf_vma_map[__stp_tf_vma_map_hash(tsk)];
 	stap_hlist_for_each_entry(entry, node, head, hlist) {
 		if (tsk->pid == entry->pid
@@ -350,7 +354,8 @@ stap_find_vma_map_info_user(struct task_struct *tsk, void *user,
 			*path = found_entry->path;
 		rc = 0;
 	}
-	read_unlock_irqrestore(&__stp_tf_vma_lock, flags);
+	local_irq_restore(flags);
+	rcu_read_unlock();
 	return rc;
 }
 
@@ -363,15 +368,15 @@ stap_drop_vma_maps(struct task_struct *tsk)
 	struct __stp_tf_vma_entry *entry;
 
 	unsigned long flags;
-	write_lock_irqsave(&__stp_tf_vma_lock, flags);
+	raw_spin_lock_irqsave(&__stp_tf_vma_lock, flags);
 	head = &__stp_tf_vma_map[__stp_tf_vma_map_hash(tsk)];
         stap_hlist_for_each_entry_safe(entry, node, n, head, hlist) {
             if (tsk->pid == entry->pid) {
-		    hlist_del(&entry->hlist);
+		    hlist_del_rcu(&entry->hlist);
 		    __stp_tf_vma_release_entry(entry);
             }
         }
-	write_unlock_irqrestore(&__stp_tf_vma_lock, flags);
+	raw_spin_unlock_irqrestore(&__stp_tf_vma_lock, flags);
 	return 0;
 }
 
-- 
1.7.9.5


* [SYSTEMTAP/PATCH 1/4] rt : replace __stp_tf_task_work_list_lock to raw_
  2014-09-09  7:08 [SYSTEMTAP/PATCH 0/4] RT aware systemtap patch set Santosh Shukla
@ 2014-09-09  7:08 ` Santosh Shukla
  2014-09-09  7:08 ` [SYSTEMTAP/PATCH 3/4] rt : stp_utrace.c : replace utrace->lock with raw_lock Santosh Shukla
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 13+ messages in thread
From: Santosh Shukla @ 2014-09-09  7:08 UTC (permalink / raw)
  To: systemtap; +Cc: sshukla

Fixes this BUG_ON for the -rt case:

[ 2184.284672]  [<ffffffff81602329>] dump_stack+0x4e/0x7a
[ 2184.284679]  [<ffffffff815fcaed>] __schedule_bug+0x9f/0xad
[ 2184.284686]  [<ffffffff816057f7>] __schedule+0x627/0x6a0
[ 2184.284694]  [<ffffffff810be5fb>] ? task_blocks_on_rt_mutex+0x19b/0x220
[ 2184.284699]  [<ffffffff816058a0>] schedule+0x30/0xa0
[ 2184.284707]  [<ffffffff8160727d>] rt_spin_lock_slowlock+0xbd/0x1f0
[ 2184.284714]  [<ffffffff81607e25>] rt_spin_lock+0x25/0x30
[ 2184.284727]  [<ffffffffa08ae573>] __stp_tf_alloc_task_work+0x43/0x90 [stap_63e05c06fe2b0c2d17f8d8e096a4ee8a__1700]
[ 2184.284737]  [<ffffffffa08aff8b>] __stp_utrace_task_finder_target_syscall_exit+0xdb/0x350 [stap_63e05c06fe2b0c2d17f8d8e096a4ee8a__1700]
[ 2184.284747]  [<ffffffffa08a91d5>] utrace_report_syscall_exit+0xc5/0x110 [stap_63e05c06fe2b0c2d17f8d8e096a4ee8a__1700]
[ 2184.284753]  [<ffffffff81023ce0>] syscall_trace_leave+0x100/0x130
[ 2184.284758]  [<ffffffff8161090b>] int_check_syscall_exit_work+0x34/0x3d

Signed-off-by: Santosh Shukla <sshukla@mvista.com>
---
 runtime/linux/task_finder2.c |   14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/runtime/linux/task_finder2.c b/runtime/linux/task_finder2.c
index e88543a..afe8835 100644
--- a/runtime/linux/task_finder2.c
+++ b/runtime/linux/task_finder2.c
@@ -95,7 +95,7 @@ struct stap_task_finder_target {
 };
 
 static LIST_HEAD(__stp_tf_task_work_list);
-static DEFINE_SPINLOCK(__stp_tf_task_work_list_lock);
+static DEFINE_RAW_SPINLOCK(__stp_tf_task_work_list_lock);
 struct __stp_tf_task_work {
 	struct list_head list;
 	struct task_struct *task;
@@ -132,9 +132,9 @@ __stp_tf_alloc_task_work(void *data)
 	// list for easier lookup, but as short as the list should be
 	// (and as short lived as these items are) the extra overhead
 	// probably isn't worth the effort.
-	spin_lock_irqsave(&__stp_tf_task_work_list_lock, flags);
+	raw_spin_lock_irqsave(&__stp_tf_task_work_list_lock, flags);
 	list_add(&tf_work->list, &__stp_tf_task_work_list);
-	spin_unlock_irqrestore(&__stp_tf_task_work_list_lock, flags);
+	raw_spin_unlock_irqrestore(&__stp_tf_task_work_list_lock, flags);
 
 	return &tf_work->work;
 }
@@ -150,14 +150,14 @@ static void __stp_tf_free_task_work(struct task_work *work)
 	tf_work = container_of(work, struct __stp_tf_task_work, work);
 
 	// Remove the item from the list.
-	spin_lock_irqsave(&__stp_tf_task_work_list_lock, flags);
+	raw_spin_lock_irqsave(&__stp_tf_task_work_list_lock, flags);
 	list_for_each_entry(node, &__stp_tf_task_work_list, list) {
 		if (tf_work == node) {
 			list_del(&tf_work->list);
 			break;
 		}
 	}
-	spin_unlock_irqrestore(&__stp_tf_task_work_list_lock, flags);
+	raw_spin_unlock_irqrestore(&__stp_tf_task_work_list_lock, flags);
 
 	// Actually free the data.
 	_stp_kfree(tf_work);
@@ -173,14 +173,14 @@ static void __stp_tf_cancel_task_work(void)
 	unsigned long flags;
 
 	// Cancel all remaining requests.
-	spin_lock_irqsave(&__stp_tf_task_work_list_lock, flags);
+	raw_spin_lock_irqsave(&__stp_tf_task_work_list_lock, flags);
 	list_for_each_entry_safe(node, tmp, &__stp_tf_task_work_list, list) {
 	    // Remove the item from the list, cancel it, then free it.
 	    list_del(&node->list);
 	    stp_task_work_cancel(node->task, node->work.func);
 	    _stp_kfree(node);
 	}
-	spin_unlock_irqrestore(&__stp_tf_task_work_list_lock, flags);
+	raw_spin_unlock_irqrestore(&__stp_tf_task_work_list_lock, flags);
 }
 
 static u32
-- 
1.7.9.5


* [SYSTEMTAP/PATCH 4/4] rt : replace utrace_struct lock to raw lock
  2014-09-09  7:08 [SYSTEMTAP/PATCH 0/4] RT aware systemtap patch set Santosh Shukla
                   ` (2 preceding siblings ...)
  2014-09-09  7:08 ` [SYSTEMTAP/PATCH 2/4] rt : replace read write lock with rcu Santosh Shukla
@ 2014-09-09  7:09 ` Santosh Shukla
  2014-09-15 10:17 ` [SYSTEMTAP/PATCH 0/4] RT aware systemtap patch set Santosh Shukla
  4 siblings, 0 replies; 13+ messages in thread
From: Santosh Shukla @ 2014-09-09  7:09 UTC (permalink / raw)
  To: systemtap; +Cc: sshukla

This patch fixes the BUG_ON below for an -rt kernel:

[ 3375.430085]  [<ffffffff815de469>] __schedule+0x5a9/0x700
[ 3375.430090]  [<ffffffff815da3a4>] dump_stack+0x19/0x1b
[ 3375.430094]  [<ffffffff815de5ea>] schedule+0x2a/0x90
[ 3375.430098]  [<ffffffff815d49d7>] __schedule_bug+0xa0/0xae
[ 3375.430102]  [<ffffffff815df525>] rt_spin_lock_slowlock+0xe5/0x2e0
[ 3375.430107]  [<ffffffff815de469>] __schedule+0x5a9/0x700
[ 3375.430110]  [<ffffffff815df935>] rt_spin_lock+0x25/0x30
[ 3375.430116]  [<ffffffff815de5ea>] schedule+0x2a/0x90
[ 3375.430125]  [<ffffffffa2f5ed5e>] task_utrace_struct+0x1e/0x40 [stap_eb141ade124ccb17a233482e6996651f_15664]
[ 3375.430131]  [<ffffffff815df525>] rt_spin_lock_slowlock+0xe5/0x2e0
[ 3375.430138]  [<ffffffffa2f6204b>] utrace_report_syscall_exit+0x4b/0x110 [stap_eb141ade124ccb17a233482e6996651f_15664]
[ 3375.430143]  [<ffffffff815df935>] rt_spin_lock+0x25/0x30
[ 3375.430148]  [<ffffffff810ea646>] ? __audit_syscall_exit+0x1f6/0x2a0
[ 3375.430156]  [<ffffffffa2f5ed5e>] task_utrace_struct+0x1e/0x40 [stap_eb141ade124ccb17a233482e6996651f_15664]
[ 3375.430161]  [<ffffffff81021d56>] syscall_trace_leave+0xd6/0xf0
[ 3375.430168]  [<ffffffffa2f6204b>] utrace_report_syscall_exit+0x4b/0x110 [stap_eb141ade124ccb17a233482e6996651f_15664]
[ 3375.430173]  [<ffffffff815e7af0>] int_check_syscall_exit_work+0x34/0x3d
[ 3375.430178]  [<ffffffff810ea646>] ? __audit_syscall_exit+0x1f6/0x2a0
[ 3375.430184]  [<ffffffff81021d56>] syscall_trace_leave+0xd6/0xf0
[ 3375.430191]  [<ffffffff815e7af0>] int_check_syscall_exit_work+0x34/0x3d

Signed-off-by: Santosh Shukla <sshukla@mvista.com>
---
 runtime/stp_utrace.c |   18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/runtime/stp_utrace.c b/runtime/stp_utrace.c
index b145baa..fdaa794 100644
--- a/runtime/stp_utrace.c
+++ b/runtime/stp_utrace.c
@@ -84,7 +84,7 @@ struct utrace {
 
 static struct hlist_head task_utrace_table[TASK_UTRACE_TABLE_SIZE];
 //DEFINE_MUTEX(task_utrace_mutex);      /* Protects task_utrace_table */
-static DEFINE_SPINLOCK(task_utrace_lock); /* Protects task_utrace_table */
+static DEFINE_RAW_SPINLOCK(task_utrace_lock); /* Protects task_utrace_table */
 
 static struct kmem_cache *utrace_cachep;
 static struct kmem_cache *utrace_engine_cachep;
@@ -412,7 +412,7 @@ static void utrace_shutdown(void)
 #ifdef STP_TF_DEBUG
 	printk(KERN_ERR "%s:%d - freeing task-specific\n", __FUNCTION__, __LINE__);
 #endif
-	spin_lock(&task_utrace_lock);
+	raw_spin_lock(&task_utrace_lock);
 	for (i = 0; i < TASK_UTRACE_TABLE_SIZE; i++) {
 		head = &task_utrace_table[i];
 		stap_hlist_for_each_entry_safe(utrace, node, node2, head,
@@ -421,7 +421,7 @@ static void utrace_shutdown(void)
 			utrace_cleanup(utrace);
 		}
 	}
-	spin_unlock(&task_utrace_lock);
+	raw_spin_unlock(&task_utrace_lock);
 #ifdef STP_TF_DEBUG
 	printk(KERN_ERR "%s:%d - done\n", __FUNCTION__, __LINE__);
 #endif
@@ -468,7 +468,7 @@ static bool utrace_task_alloc(struct task_struct *task)
 	stp_init_task_work(&utrace->work, &utrace_resume);
 	stp_init_task_work(&utrace->report_work, &utrace_report_work);
 
-	spin_lock(&task_utrace_lock);
+	raw_spin_lock(&task_utrace_lock);
 	u = __task_utrace_struct(task);
 	if (u == NULL) {
 		hlist_add_head(&utrace->hlist,
@@ -477,7 +477,7 @@ static bool utrace_task_alloc(struct task_struct *task)
 	else {
 		kmem_cache_free(utrace_cachep, utrace);
 	}
-	spin_unlock(&task_utrace_lock);
+	raw_spin_unlock(&task_utrace_lock);
 
 	return true;
 }
@@ -496,9 +496,9 @@ static void utrace_free(struct utrace *utrace)
 
 	/* Remove this utrace from the mapping list of tasks to
 	 * struct utrace. */
-	spin_lock(&task_utrace_lock);
+	raw_spin_lock(&task_utrace_lock);
 	hlist_del(&utrace->hlist);
-	spin_unlock(&task_utrace_lock);
+	raw_spin_unlock(&task_utrace_lock);
 
 	/* Free the utrace struct. */
 	raw_spin_lock(&utrace->lock);
@@ -539,9 +539,9 @@ static struct utrace *task_utrace_struct(struct task_struct *task)
 {
 	struct utrace *utrace;
 
-	spin_lock(&task_utrace_lock);
+	raw_spin_lock(&task_utrace_lock);
 	utrace = __task_utrace_struct(task);
-	spin_unlock(&task_utrace_lock);
+	raw_spin_unlock(&task_utrace_lock);
 	return utrace;
 }
 
-- 
1.7.9.5


* Re: [SYSTEMTAP/PATCH 0/4] RT aware systemtap patch set
  2014-09-09  7:08 [SYSTEMTAP/PATCH 0/4] RT aware systemtap patch set Santosh Shukla
                   ` (3 preceding siblings ...)
  2014-09-09  7:09 ` [SYSTEMTAP/PATCH 4/4] rt : replace utrace_struct lock to raw lock Santosh Shukla
@ 2014-09-15 10:17 ` Santosh Shukla
  2014-09-15 14:13   ` David Smith
  4 siblings, 1 reply; 13+ messages in thread
From: Santosh Shukla @ 2014-09-15 10:17 UTC (permalink / raw)
  To: systemtap; +Cc: Santosh Shukla

Ping!

Do we care about making stap -rt aware? Just an FYI that I revisited my
patches, especially the 2nd patch, and found some serious improvements to be
made.. the use of RCU to replace the rd/wr locks is just not right, so I am
respinning the patch series. I am also thinking of coming up with a locking
helper function API for raw locking vs. the existing spinlocks, to avoid
cluttering the code.


On Tue, Sep 9, 2014 at 12:38 PM, Santosh Shukla <sshukla@mvista.com> wrote:
> Hi,
>
> I wanted to run systemtap on -rt kernel version 3.14.12-rt9 and noticed bunch of preemptible
> bug_on.This is initial effort to make systemtap rt-aware. Tested on 3.14.12-rt
> kernel.  Patchset based on stap upstream link [1], build on commit-id [2].
> Patchset can work on master branch with little tweak in patch set.
>
> I have also tested this patch set on the 3.10.40-rt38 kernel and noticed a few
> preemption BUG_ONs, but those were coming from the kernel itself; nothing more
> was observed on the systemtap side.
>
> Change summary -
> - Replaced read locks with rcu_read_lock, such that the substitution for
>   read_lock_irqsave is rcu_read_lock + local_irq_save, and the substitution
>   for read_unlock_irqrestore is local_irq_restore followed by rcu_read_unlock.
>
> - Replaced write_lock_irqsave/irqrestore with raw_spin_lock_irqsave/irqrestore
>   for the -rt kernel. On a non-rt kernel those raw_ locks should be replaced
>   by normal spinlocks.
>
> - Replaced the hlist API with the _rcu variants.
>
> Test scripts used for testing:
> /usr/local/stap/bin/stap -v testsuite/systemtap.examples/network/netdev.stp
> /usr/local/stap/bin/stap -v testsuite/systemtap.examples/network/tcpdumplike.stp
>
> A few other example test scripts used:
> cat ../test-indent.stp
> probe kernel.function("*@net/socket.c").call
> {
>                   printf ("%s -> %s\n", thread_indent(1), probefunc())
> }
> probe kernel.function("*@net/socket.c").return
> {
>                   printf ("%s <- %s\n", thread_indent(-1), probefunc())
> }
>
>
> I'd like to know your feedback and comments on the patch set. Also, does it
> make sense to maintain an -rt version of systemtap upstream.. do we care?
>
> [1] git://sourceware.org/git/systemtap.git
> [2] git checkout 8f0fcd995f7f650a2ee0a94539f90c99e6d19e1d
>
> Santosh Shukla (4):
>   rt : replace __stp_tf_task_work_list_lock to raw_
>   rt : replace read write lock with rcu
>   rt : stp_utrace.c : replace utrace->lock with raw_lock
>   rt : replace utrace_struct lock to raw lock
>
>  runtime/linux/addr-map.c        |   16 ++++---
>  runtime/linux/runtime.h         |    2 +-
>  runtime/linux/task_finder2.c    |   14 +++---
>  runtime/linux/task_finder_map.c |   38 ++++++++-------
>  runtime/stp_utrace.c            |  100 +++++++++++++++++++--------------------
>  runtime/task_finder_vma.c       |   43 +++++++++--------
>  6 files changed, 112 insertions(+), 101 deletions(-)
>
> --
> 1.7.9.5
>


* Re: [SYSTEMTAP/PATCH 0/4] RT aware systemtap patch set
  2014-09-15 10:17 ` [SYSTEMTAP/PATCH 0/4] RT aware systemtap patch set Santosh Shukla
@ 2014-09-15 14:13   ` David Smith
  2014-09-15 15:41     ` Santosh Shukla
  0 siblings, 1 reply; 13+ messages in thread
From: David Smith @ 2014-09-15 14:13 UTC (permalink / raw)
  To: Santosh Shukla, systemtap

On 09/15/2014 05:17 AM, Santosh Shukla wrote:
> Ping!
> 
> Do we care about making stap -rt aware? Just an FYI that I revisited my
> patches, especially the 2nd patch, and found some serious improvements to be
> made.. the use of RCU to replace the rd/wr locks is just not right, so I am
> respinning the patch series. I am also thinking of coming up with a locking
> helper function API for raw locking vs. the existing spinlocks, to avoid
> cluttering the code.

Santosh,

Sorry about not responding sooner. Making systemtap work better on -rt
kernels would be nice, if the changes aren't too disruptive. I meant to
find the -rt kernel and test your patches last week but I ended up not
finding the time. Luckily for me you've got a 2nd patch set coming...

I'll try to review the 2nd patch set when you get it finished. Here's a
question I've got, do you think the -rt patches will cause any
performance changes (positive or negative) on the regular kernel?

-- 
David Smith
dsmith@redhat.com
Red Hat
http://www.redhat.com
256.217.0141 (direct)
256.837.0057 (fax)


* Re: [SYSTEMTAP/PATCH 0/4] RT aware systemtap patch set
  2014-09-15 14:13   ` David Smith
@ 2014-09-15 15:41     ` Santosh Shukla
  2014-09-15 16:22       ` Josh Stone
  0 siblings, 1 reply; 13+ messages in thread
From: Santosh Shukla @ 2014-09-15 15:41 UTC (permalink / raw)
  To: David Smith; +Cc: systemtap

On Mon, Sep 15, 2014 at 7:43 PM, David Smith <dsmith@redhat.com> wrote:
> On 09/15/2014 05:17 AM, Santosh Shukla wrote:
>> Ping!
>>
>> Do we care about making stap -rt aware? Just an FYI that I revisited my
>> patches, especially the 2nd patch, and found some serious improvements to be
>> made.. the use of RCU to replace the rd/wr locks is just not right, so I am
>> respinning the patch series. I am also thinking of coming up with a locking
>> helper function API for raw locking vs. the existing spinlocks, to avoid
>> cluttering the code.
>
> Santosh,
>
> Sorry about not responding sooner. Making systemtap work better on -rt
> kernels would be nice, if the changes aren't too disruptive. I meant to
> find the -rt kernel and test your patches last week but I ended up not
> finding the time. Luckily for me you've got a 2nd patch set coming...
>
> I'll try to review the 2nd patch set when you get it finished. Here's a
> question I've got, do you think the -rt patches will cause any
> performance changes (positive or negative) on the regular kernel?
>

Thanks for responding :-).

I don't think so, as my patch set intends to provide -rt flavor locks only,
so that when I run my stap example scripts under stress/load such as
make -jX.. it should work gracefully.. applicable to virtualization hooks in
stap too.

However, I do have a serious comment on the choice of locking used in general
in systemtap, especially the reader and writer locks used to protect hlists;
I strongly believe that RCU would be a better fit.. As per my reading, some
systemtap modules like stp_utrace.c and others do operations like add,
search/traverse [reading], then remove.. Using RCU with the rcu-variant hlist
API would greatly increase performance overall, as RCU is by design immune to
deadlock and priority inversion, right?
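
A sketch of the single-list design I have in mind (illustrative names only,
not the current systemtap code): writers serialize on a raw spinlock, readers
walk the chain under rcu_read_lock() alone, and freed entries are reclaimed
only after a grace period elapses:

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/rculist.h>

struct tf_entry {
	struct hlist_node hlist;
	pid_t pid;
	unsigned long arg0;
	struct rcu_head rcu;
};

static DEFINE_RAW_SPINLOCK(tf_lock);	/* serializes writers only */
static struct hlist_head tf_table[16];

/* Lockless reader: no _irqsave, no writer exclusion. */
static bool tf_lookup(pid_t pid, unsigned long *arg0)
{
	struct tf_entry *e;
	bool found = false;

	rcu_read_lock();
	hlist_for_each_entry_rcu(e, &tf_table[pid & 15], hlist) {
		if (e->pid == pid) {
			*arg0 = e->arg0;	/* copy out while protected */
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}

static void tf_remove(struct tf_entry *e)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&tf_lock, flags);
	hlist_del_rcu(&e->hlist);
	raw_spin_unlock_irqrestore(&tf_lock, flags);
	kfree_rcu(e, rcu);	/* freed after readers' grace period */
}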

The reason for raising this question is my 2nd patch, which looks dirty and
hardly makes use of RCU features like letting the grace period elapse and
then freeing the entry. My reading of the existing design in modules such as
task_finder_map.c is that it uses two lists, i.e. one __stp_tf_map_list used
by the reader and adder functions, while the remover function removes the
entry from the __stp_tf_map_entry construct and then updates the free list
every time to populate a new map entry for the next iteration of the adder
function. This is awkward; sticking with one list and letting RCU
protect/control the reader and writer contexts would greatly improve things..
In essence it would save some cycles like _irqsave/restore etc..

Keeping the above design limitation in mind, I chose to drop the RCU method
of replacing the reader/writer locks for now and will use it in coming
further patches.. first wanted to get raw_locking specific to get into stap.

Also curious to know - how do we measure stap performance? Is there any tool
or specific steps to follow? Thanks.


> --
> David Smith
> dsmith@redhat.com
> Red Hat
> http://www.redhat.com
> 256.217.0141 (direct)
> 256.837.0057 (fax)


* Re: [SYSTEMTAP/PATCH 0/4] RT aware systemtap patch set
  2014-09-15 15:41     ` Santosh Shukla
@ 2014-09-15 16:22       ` Josh Stone
  2014-09-15 16:35         ` Santosh Shukla
  0 siblings, 1 reply; 13+ messages in thread
From: Josh Stone @ 2014-09-15 16:22 UTC (permalink / raw)
  To: Santosh Shukla; +Cc: systemtap

Hi,

On 09/15/2014 08:41 AM, Santosh Shukla wrote:
> first wanted to get raw_locking specific to get into stap.

One ugly aspect of this is that we try to maintain compatibility with
older platforms too.  It looks like raw_spin_lock didn't come into the
kernel until 2.6.33, which doesn't even cover RHEL6.  If possible we try
to keep things running all the way back to RHEL4 (though see my earlier
RFC about dropping that).

That said, we can get away with new interfaces in some cases.  e.g. I
believe stp_utrace.c and task_finder2.c can only be used on 3.x kernels.
 But anything that's more generic, like addr-map.c, will need some kind
of wrapper or compat #define added to let it work on older kernels too.
 Maybe just #define raw_spin_lock spin_lock, etc.
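
Something along these lines, perhaps (untested, just to illustrate the idea;
the stp_ names here are made up):

#include <linux/version.h>
#include <linux/spinlock.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 33)
#define stp_spinlock_t		raw_spinlock_t
#define STP_DEFINE_SPINLOCK(l)	DEFINE_RAW_SPINLOCK(l)
#define stp_spin_lock(l)	raw_spin_lock(l)
#define stp_spin_unlock(l)	raw_spin_unlock(l)
#else  /* pre-2.6.33: no raw_ spinlock API, fall back to plain spinlocks */
#define stp_spinlock_t		spinlock_t
#define STP_DEFINE_SPINLOCK(l)	DEFINE_SPINLOCK(l)
#define stp_spin_lock(l)	spin_lock(l)
#define stp_spin_unlock(l)	spin_unlock(l)
#endif

Mapping the calls through stap-specific names avoids clashing with the
different raw_spinlock_t type that pre-2.6.33 kernels define internally.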


Josh


* Re: [SYSTEMTAP/PATCH 0/4] RT aware systemtap patch set
  2014-09-15 16:22       ` Josh Stone
@ 2014-09-15 16:35         ` Santosh Shukla
  2014-09-15 16:45           ` Josh Stone
  0 siblings, 1 reply; 13+ messages in thread
From: Santosh Shukla @ 2014-09-15 16:35 UTC (permalink / raw)
  To: Josh Stone; +Cc: systemtap, Corey Minyard

On Mon, Sep 15, 2014 at 9:52 PM, Josh Stone <jistone@redhat.com> wrote:
> Hi,
>
> On 09/15/2014 08:41 AM, Santosh Shukla wrote:
>> first wanted to get raw_locking specific to get into stap.
>
> One ugly aspect of this is that we try to maintain compatibility with
> older platforms too.  It looks like raw_spin_lock didn't come into the
> kernel until 2.6.33, which doesn't even cover RHEL6.  If possible we try
> to keep things running all the way back to RHEL4 (though see my earlier
> RFC about dropping that).

Right, I did see your earlier RFC and noticed a few important things missing
to keep compatibility per se, i.e. some type of locking helper function API
which could switch between PREEMPT and non-PREEMPT kernels.


In the v2 version which I am working on [inspired by Corey's suggestion], I
will add the below type of locking facility; the pseudo code looks like this:

#ifdef CONFIG_PREEMPT_RT
static inline void my_readlock(raw_spinlock_t *lock) { raw_spin_lock(lock); }
static inline void my_readunlock(raw_spinlock_t *lock) { raw_spin_unlock(lock); }
static inline void my_writelock(raw_spinlock_t *lock) { raw_spin_lock(lock); }
static inline void my_writeunlock(raw_spinlock_t *lock) { raw_spin_unlock(lock); }
#else
static inline void my_readlock(rwlock_t *lock) { read_lock(lock); }
static inline void my_readunlock(rwlock_t *lock) { read_unlock(lock); }
static inline void my_writelock(rwlock_t *lock) { write_lock(lock); }
static inline void my_writeunlock(rwlock_t *lock) { write_unlock(lock); }
#endif
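
(The lock definitions themselves would need a matching #ifdef, since the type
switches between raw_spinlock_t and rwlock_t across the two branches.)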

>
> That said, we can get away with new interfaces in some cases.  e.g. I
> believe stp_utrace.c and task_finder2.c can only be used on 3.x kernels.
>  But anything that's more generic, like addr-map.c, will need some kind
> of wrapper or compat #define added to let it work on older kernels too.
>  Maybe just #define raw_spin_lock spin_lock, etc.

The above one, right? It's coming in v2 soon.

>
>
> Josh


* Re: [SYSTEMTAP/PATCH 0/4] RT aware systemtap patch set
  2014-09-15 16:35         ` Santosh Shukla
@ 2014-09-15 16:45           ` Josh Stone
  2014-09-15 16:48             ` Santosh Shukla
  0 siblings, 1 reply; 13+ messages in thread
From: Josh Stone @ 2014-09-15 16:45 UTC (permalink / raw)
  To: Santosh Shukla; +Cc: systemtap, Corey Minyard

On 09/15/2014 09:35 AM, Santosh Shukla wrote:
> On Mon, Sep 15, 2014 at 9:52 PM, Josh Stone <jistone@redhat.com> wrote:
>> Hi,
>>
>> On 09/15/2014 08:41 AM, Santosh Shukla wrote:
>>> first wanted to get raw_locking specific to get into stap.
>>
>> One ugly aspect of this is that we try to maintain compatibility with
>> older platforms too.  It looks like raw_spin_lock didn't come into the
>> kernel until 2.6.33, which doesn't even cover RHEL6.  If possible we try
>> to keep things running all the way back to RHEL4 (though see my earlier
>> RFC about dropping that).
> 
> Right, I did see your earlier RFC and noticed a few important things missing
> to keep compatibility per se, i.e. some type of locking helper function API
> which could switch between PREEMPT and non-PREEMPT kernels.
> 
> 
> In the v2 version which I am working on [inspired by Corey's suggestion], I
> will add the below type of locking facility; the pseudo code looks like this:
> 
> #ifdef CONFIG_PREEMPT_RT
> static inline void my_readlock(raw_spinlock_t *lock) { raw_spin_lock(lock); }
> static inline void my_readunlock(raw_spinlock_t *lock) { raw_spin_unlock(lock); }
> static inline void my_writelock(raw_spinlock_t *lock) { raw_spin_lock(lock); }
> static inline void my_writeunlock(raw_spinlock_t *lock) { raw_spin_unlock(lock); }
> #else
> static inline void my_readlock(rwlock_t *lock) { read_lock(lock); }
> static inline void my_readunlock(rwlock_t *lock) { read_unlock(lock); }
> static inline void my_writelock(rwlock_t *lock) { write_lock(lock); }
> static inline void my_writeunlock(rwlock_t *lock) { write_unlock(lock); }
> #endif

Ok, if the raw interfaces have always existed with PREEMPT_RT, then this
should be fine.

I suggest "stp_" rather than "my_" though.  </bikeshed>


* Re: [SYSTEMTAP/PATCH 0/4] RT aware systemtap patch set
  2014-09-15 16:45           ` Josh Stone
@ 2014-09-15 16:48             ` Santosh Shukla
  2014-09-15 16:51               ` Santosh Shukla
  0 siblings, 1 reply; 13+ messages in thread
From: Santosh Shukla @ 2014-09-15 16:48 UTC (permalink / raw)
  To: Josh Stone; +Cc: systemtap, Corey Minyard

On Mon, Sep 15, 2014 at 10:15 PM, Josh Stone <jistone@redhat.com> wrote:
> On 09/15/2014 09:35 AM, Santosh Shukla wrote:
>> On Mon, Sep 15, 2014 at 9:52 PM, Josh Stone <jistone@redhat.com> wrote:
>>> Hi,
>>>
>>> On 09/15/2014 08:41 AM, Santosh Shukla wrote:
>>>> first wanted to get raw_locking specific to get into stap.
>>>
>>> One ugly aspect of this is that we try to maintain compatibility with
>>> older platforms too.  It looks like raw_spin_lock didn't come into the
>>> kernel until 2.6.33, which doesn't even cover RHEL6.  If possible we try
>>> to keep things running all the way back to RHEL4 (though see my earlier
>>> RFC about dropping that).
>>
>> Right, I did see your earlier RFC and noticed a few important things missing
>> to keep compatibility per se, i.e. some type of locking helper function API
>> which could switch between PREEMPT and non-PREEMPT kernels.
>>
>>
>> In the v2 version which I am working on [inspired by Corey's suggestion], I
>> will add the below type of locking facility; the pseudo code looks like this:
>>
>> #ifdef CONFIG_PREEMPT_RT
>> static inline void my_readlock(raw_spinlock_t *lock) { raw_spin_lock(lock); }
>> static inline void my_readunlock(raw_spinlock_t *lock) { raw_spin_unlock(lock); }
>> static inline void my_writelock(raw_spinlock_t *lock) { raw_spin_lock(lock); }
>> static inline void my_writeunlock(raw_spinlock_t *lock) { raw_spin_unlock(lock); }
>> #else
>> static inline void my_readlock(rwlock_t *lock) { read_lock(lock); }
>> static inline void my_readunlock(rwlock_t *lock) { read_unlock(lock); }
>> static inline void my_writelock(rwlock_t *lock) { write_lock(lock); }
>> static inline void my_writeunlock(rwlock_t *lock) { write_unlock(lock); }
>> #endif
>
> Ok, if the raw interfaces have always existed with PREEMPT_RT, then this
> should be fine.
>
Yes they are.

> I suggest "stp_" rather than "my_" though.  </bikeshed>

Yup.


* Re: [SYSTEMTAP/PATCH 0/4] RT aware systemtap patch set
  2014-09-15 16:48             ` Santosh Shukla
@ 2014-09-15 16:51               ` Santosh Shukla
  0 siblings, 0 replies; 13+ messages in thread
From: Santosh Shukla @ 2014-09-15 16:51 UTC (permalink / raw)
  To: Josh Stone; +Cc: systemtap, Corey Minyard

Also, curious to know of any benchmarking tool to test the performance impact
of the port? Any suggestions? Thanks.

On Mon, Sep 15, 2014 at 10:18 PM, Santosh Shukla <sshukla@mvista.com> wrote:
> On Mon, Sep 15, 2014 at 10:15 PM, Josh Stone <jistone@redhat.com> wrote:
>> On 09/15/2014 09:35 AM, Santosh Shukla wrote:
>>> On Mon, Sep 15, 2014 at 9:52 PM, Josh Stone <jistone@redhat.com> wrote:
>>>> Hi,
>>>>
>>>> On 09/15/2014 08:41 AM, Santosh Shukla wrote:
>>>>> first wanted to get raw_locking specific to get into stap.
>>>>
>>>> One ugly aspect of this is that we try to maintain compatibility with
>>>> older platforms too.  It looks like raw_spin_lock didn't come into the
>>>> kernel until 2.6.33, which doesn't even cover RHEL6.  If possible we try
>>>> to keep things running all the way back to RHEL4 (though see my earlier
>>>> RFC about dropping that).
>>>
>>> Right, I did see your earlier RFC and noticed a few important things missing
>>> to keep compatibility per se, i.e. some type of locking helper function API
>>> which could switch between PREEMPT and non-PREEMPT kernels.
>>>
>>>
>>> In the v2 version which I am working on [inspired by Corey's suggestion], I
>>> will add the below type of locking facility; the pseudo code looks like this:
>>>
>>> #ifdef CONFIG_PREEMPT_RT
>>> static inline void my_readlock(raw_spinlock_t *lock) { raw_spin_lock(lock); }
>>> static inline void my_readunlock(raw_spinlock_t *lock) { raw_spin_unlock(lock); }
>>> static inline void my_writelock(raw_spinlock_t *lock) { raw_spin_lock(lock); }
>>> static inline void my_writeunlock(raw_spinlock_t *lock) { raw_spin_unlock(lock); }
>>> #else
>>> static inline void my_readlock(rwlock_t *lock) { read_lock(lock); }
>>> static inline void my_readunlock(rwlock_t *lock) { read_unlock(lock); }
>>> static inline void my_writelock(rwlock_t *lock) { write_lock(lock); }
>>> static inline void my_writeunlock(rwlock_t *lock) { write_unlock(lock); }
>>> #endif
>>
>> Ok, if the raw interfaces have always existed with PREEMPT_RT, then this
>> should be fine.
>>
> Yes they are.
>
>> I suggest "stp_" rather than "my_" though.  </bikeshed>
>
> Yup.


end of thread, newest: 2014-09-15 16:51 UTC

Thread overview: 13+ messages
2014-09-09  7:08 [SYSTEMTAP/PATCH 0/4] RT aware systemtap patch set Santosh Shukla
2014-09-09  7:08 ` [SYSTEMTAP/PATCH 1/4] rt : replace __stp_tf_task_work_list_lock to raw_ Santosh Shukla
2014-09-09  7:08 ` [SYSTEMTAP/PATCH 3/4] rt : stp_utrace.c : replace utrace->lock with raw_lock Santosh Shukla
2014-09-09  7:08 ` [SYSTEMTAP/PATCH 2/4] rt : replace read write lock with rcu Santosh Shukla
2014-09-09  7:09 ` [SYSTEMTAP/PATCH 4/4] rt : replace utrace_struct lock to raw lock Santosh Shukla
2014-09-15 10:17 ` [SYSTEMTAP/PATCH 0/4] RT aware systemtap patch set Santosh Shukla
2014-09-15 14:13   ` David Smith
2014-09-15 15:41     ` Santosh Shukla
2014-09-15 16:22       ` Josh Stone
2014-09-15 16:35         ` Santosh Shukla
2014-09-15 16:45           ` Josh Stone
2014-09-15 16:48             ` Santosh Shukla
2014-09-15 16:51               ` Santosh Shukla
