This is the mail archive of the
systemtap@sourceware.org
mailing list for the systemtap project.
[SYSTEMTAP/PATCH v3 4/9] stp: rt: replace utrace->lock with stp style raw lock
- From: Santosh Shukla <sshukla at mvista dot com>
- To: fche at redhat dot com, dsmith at redhat dot com, jistone at redhat dot com
- Cc: systemtap at sourceware dot org, Santosh Shukla <sshukla at mvista dot com>
- Date: Mon, 22 Sep 2014 12:56:57 +0530
- Subject: [SYSTEMTAP/PATCH v3 4/9] stp: rt: replace utrace->lock with stp style raw lock
- Authentication-results: sourceware.org; auth=none
- References: <1411370822-8707-1-git-send-email-sshukla at mvista dot com>
In the preempt-rt kernel, the BUG_ON below was observed at the time of launching a stap script.
Replacing the spinlock with a raw spinlock fixes this problem.
[ 159.433464] Preemption disabled at:[<ffffffff810b74d6>] remove_wait_queue+0x36/0x40
[ 159.433466] CPU: 8 PID: 6723 Comm: bash Tainted: GF W O 3.14.12-rt-rt9+ #1
[ 159.433467] Hardware name: Intel Corporation S2600CP/S2600CP, BIOS SE5C600.86B.02.01.0002.082220131453 08/22/2013
[ 159.433471] ffff88042d917d80 ffff88040f623d90 ffffffff81602b13 ffff8804121e3980
[ 159.433474] ffff88040f623da0 ffffffff815fd2fb ffff88040f623df8 ffffffff81606017
[ 159.433478] ffff88040f623fd8 0000000000017d80 0000000000017d80 ffffffff810bcbcb
[ 159.433478] Call Trace:
[ 159.433481] [<ffffffff81602b13>] dump_stack+0x4e/0x7a
[ 159.433484] [<ffffffff815fd2fb>] __schedule_bug+0x9f/0xad
[ 159.433486] [<ffffffff81606017>] __schedule+0x627/0x6a0
[ 159.433489] [<ffffffff810bcbcb>] ? task_blocks_on_rt_mutex+0x19b/0x220
[ 159.433492] [<ffffffff816060c0>] schedule+0x30/0xa0
[ 159.433495] [<ffffffff81607a9d>] rt_spin_lock_slowlock+0xbd/0x1f0
[ 159.433498] [<ffffffff81608645>] rt_spin_lock+0x25/0x30
[ 159.433503] [<ffffffffa076fbf5>] start_report+0x45/0xb0 [stap_c108d00c22143294d42db713b804dbb9_10325]
[ 159.433508] [<ffffffffa0773e38>] utrace_report_syscall_exit+0x88/0x110 [stap_c108d00c22143294d42db713b804dbb9_10325]
[ 159.433511] [<ffffffff81023d30>] syscall_trace_leave+0x100/0x130
[ 159.433514] [<ffffffff8161114b>] int_check_syscall_exit_work+0x34/0x3d
Note: This module, AFAIU, is a prime candidate to benefit from RCU locking primitives
and save some cycles; that should improve overall performance and make it more scalable.
[This is a general design improvement, so keeping those changes out of this
patch. .todo]
Signed-off-by: Santosh Shukla <sshukla@mvista.com>
---
runtime/stp_helper_lock.h | 15 ++++++++-
runtime/stp_utrace.c | 85 ++++++++++++++++++++++++-----------------------
2 files changed, 57 insertions(+), 43 deletions(-)
diff --git a/runtime/stp_helper_lock.h b/runtime/stp_helper_lock.h
index d1a69b4..f95d3c0 100644
--- a/runtime/stp_helper_lock.h
+++ b/runtime/stp_helper_lock.h
@@ -19,11 +19,17 @@
#ifdef CONFIG_PREEMPT_RT_FULL
+#define stp_spinlock_t raw_spinlock_t
+
#define STP_DEFINE_SPINLOCK(lock) DEFINE_RAW_SPINLOCK(lock)
+static inline void stp_spin_lock_init(raw_spinlock_t *lock) { raw_spin_lock_init(lock); }
+
static inline void stp_spin_lock(raw_spinlock_t *lock) { raw_spin_lock(lock); }
static inline void stp_spin_unlock(raw_spinlock_t *lock) { raw_spin_unlock(lock); }
+static inline void stp_spin_unlock_wait(raw_spinlock_t *lock) { raw_spin_unlock_wait(lock); }
+
#define stp_spin_lock_irqsave(lock, flags) raw_spin_lock_irqsave(lock, flags)
#define stp_spin_unlock_irqrestore(lock, flags) raw_spin_unlock_irqrestore(lock, flags)
@@ -33,7 +39,7 @@ static inline void stp_spin_unlock(raw_spinlock_t *lock) { raw_spin_unlock(lock)
static inline void stp_read_lock(raw_spinlock_t *lock) { raw_spin_lock(lock); }
static inline void stp_read_unlock(raw_spinlock_t *lock) { raw_spin_unlock(lock); }
static inline void stp_write_lock(raw_spinlock_t *lock) { raw_spin_lock(lock); }
-static inline void stp_write_unlock(raw_spinlock_t *lock) { raw_spin_unlock(lock); }
+static inline void stp_write_unlock(raw_spinlock_t *lock) { raw_spin_unlock(lock); }
#define stp_read_lock_irqsave(lock, flags) raw_spin_lock_irqsave(lock, flags)
#define stp_read_unlock_irqrestore(lock, flags) raw_spin_unlock_irqrestore(lock, flags)
@@ -41,11 +47,18 @@ static inline void stp_write_unlock(raw_spinlock_t *lock) { raw_spin_unlock(loc
#define stp_write_unlock_irqrestore(lock, flags) raw_spin_unlock_irqrestore(lock, flags)
#else
+
+#define stp_spinlock_t spinlock_t
+
#define STP_DEFINE_SPINLOCK(lock) DEFINE_SPINLOCK(lock)
+static inline void stp_spin_lock_init(spinlock_t *lock) { spin_lock_init(lock); }
+
static inline void stp_spin_lock(spinlock_t *lock) { spin_lock(lock); }
static inline void stp_spin_unlock(spinlock_t *lock) { spin_unlock(lock); }
+static inline void stp_spin_unlock_wait(spinlock_t *lock) { spin_unlock_wait(lock); }
+
#define stp_spin_lock_irqsave(lock, flags) spin_lock_irqsave(lock, flags)
#define stp_spin_unlock_irqrestore(lock, flags) spin_unlock_irqrestore(lock, flags)
diff --git a/runtime/stp_utrace.c b/runtime/stp_utrace.c
index acbe936..e5d6d55 100644
--- a/runtime/stp_utrace.c
+++ b/runtime/stp_utrace.c
@@ -22,12 +22,13 @@
#include <linux/sched.h>
#include <linux/freezer.h>
#include <linux/slab.h>
-#include <linux/spinlock.h>
#include <trace/events/sched.h>
#include <trace/events/syscalls.h>
#include "stp_task_work.c"
#include "linux/stp_tracepoint.h"
+#include "stp_helper_lock.h"
+
/*
* Per-thread structure private to utrace implementation.
* If task_struct.utrace_flags is nonzero, task_struct.utrace
@@ -56,7 +57,7 @@
* in time to have their callbacks seen.
*/
struct utrace {
- spinlock_t lock;
+ stp_spinlock_t lock;
struct list_head attached, attaching;
struct utrace_engine *reporting;
@@ -379,7 +380,7 @@ static void utrace_cleanup(struct utrace *utrace)
/* Free engines associated with the struct utrace, starting
* with the 'attached' list then doing the 'attaching' list. */
- spin_lock(&utrace->lock);
+ stp_spin_lock(&utrace->lock);
list_for_each_entry_safe(engine, next, &utrace->attached, entry) {
#ifdef STP_TF_DEBUG
printk(KERN_ERR "%s:%d - removing engine\n",
@@ -420,7 +421,7 @@ static void utrace_cleanup(struct utrace *utrace)
#endif
utrace->report_work_added = 0;
}
- spin_unlock(&utrace->lock);
+ stp_spin_unlock(&utrace->lock);
/* Free the struct utrace itself. */
kmem_cache_free(utrace_cachep, utrace);
@@ -515,7 +516,7 @@ static bool utrace_task_alloc(struct task_struct *task)
if (unlikely(!utrace))
return false;
- spin_lock_init(&utrace->lock);
+ stp_spin_lock_init(&utrace->lock);
INIT_LIST_HEAD(&utrace->attached);
INIT_LIST_HEAD(&utrace->attaching);
utrace->resume = UTRACE_RESUME;
@@ -556,7 +557,7 @@ static void utrace_free(struct utrace *utrace)
spin_unlock(&task_utrace_lock);
/* Free the utrace struct. */
- spin_lock(&utrace->lock);
+ stp_spin_lock(&utrace->lock);
#ifdef STP_TF_DEBUG
if (unlikely(utrace->reporting)
|| unlikely(!list_empty(&utrace->attached))
@@ -585,7 +586,7 @@ static void utrace_free(struct utrace *utrace)
: "UNKNOWN"));
utrace->report_work_added = 0;
}
- spin_unlock(&utrace->lock);
+ stp_spin_unlock(&utrace->lock);
kmem_cache_free(utrace_cachep, utrace);
}
@@ -661,7 +662,7 @@ static int utrace_add_engine(struct task_struct *target,
{
int ret;
- spin_lock(&utrace->lock);
+ stp_spin_lock(&utrace->lock);
ret = -EEXIST;
if ((flags & UTRACE_ATTACH_EXCLUSIVE) &&
@@ -709,7 +710,7 @@ static int utrace_add_engine(struct task_struct *target,
utrace_engine_get(engine);
ret = 0;
unlock:
- spin_unlock(&utrace->lock);
+ stp_spin_unlock(&utrace->lock);
return ret;
}
@@ -758,11 +759,11 @@ static struct utrace_engine *utrace_attach_task(
if (!(flags & UTRACE_ATTACH_CREATE)) {
if (unlikely(!utrace))
return ERR_PTR(-ENOENT);
- spin_lock(&utrace->lock);
+ stp_spin_lock(&utrace->lock);
engine = find_matching_engine(utrace, flags, ops, data);
if (engine)
utrace_engine_get(engine);
- spin_unlock(&utrace->lock);
+ stp_spin_unlock(&utrace->lock);
return engine ?: ERR_PTR(-ENOENT);
}
@@ -878,14 +879,14 @@ static struct utrace *get_utrace_lock(struct task_struct *target,
}
utrace = task_utrace_struct(target);
- spin_lock(&utrace->lock);
+ stp_spin_lock(&utrace->lock);
if (unlikely(utrace->reap) || unlikely(!engine->ops) ||
unlikely(engine->ops == &utrace_detached_ops)) {
/*
* By the time we got the utrace lock,
* it had been reaped or detached already.
*/
- spin_unlock(&utrace->lock);
+ stp_spin_unlock(&utrace->lock);
utrace = ERR_PTR(-ESRCH);
if (!attached && engine->ops == &utrace_detached_ops)
utrace = ERR_PTR(-ERESTARTSYS);
@@ -1051,7 +1052,7 @@ static int utrace_set_events(struct task_struct *target,
ret = -EINPROGRESS;
}
unlock:
- spin_unlock(&utrace->lock);
+ stp_spin_unlock(&utrace->lock);
return ret;
}
@@ -1181,7 +1182,7 @@ static bool utrace_reset(struct task_struct *task, struct utrace *utrace)
*/
rcu_read_lock();
utrace->utrace_flags = flags;
- spin_unlock(&utrace->lock);
+ stp_spin_unlock(&utrace->lock);
rcu_read_unlock();
put_detached_list(&detached);
@@ -1197,7 +1198,7 @@ static void utrace_finish_stop(void)
*/
if (unlikely(__fatal_signal_pending(current))) {
struct utrace *utrace = task_utrace_struct(current);
- spin_unlock_wait(&utrace->lock);
+ stp_spin_unlock_wait(&utrace->lock);
}
}
@@ -1211,7 +1212,7 @@ static void utrace_stop(struct task_struct *task, struct utrace *utrace,
enum utrace_resume_action action)
{
relock:
- spin_lock(&utrace->lock);
+ stp_spin_lock(&utrace->lock);
if (action < utrace->resume) {
/*
@@ -1247,7 +1248,7 @@ relock:
if (unlikely(__fatal_signal_pending(task))) {
spin_unlock_irq(&task->sighand->siglock);
- spin_unlock(&utrace->lock);
+ stp_spin_unlock(&utrace->lock);
return;
}
@@ -1262,7 +1263,7 @@ relock:
task->signal->flags = SIGNAL_STOP_STOPPED;
spin_unlock_irq(&task->sighand->siglock);
- spin_unlock(&utrace->lock);
+ stp_spin_unlock(&utrace->lock);
schedule();
@@ -1298,7 +1299,7 @@ static void utrace_maybe_reap(struct task_struct *target, struct utrace *utrace,
struct utrace_engine *engine, *next;
struct list_head attached;
- spin_lock(&utrace->lock);
+ stp_spin_lock(&utrace->lock);
if (reap) {
/*
@@ -1312,7 +1313,7 @@ static void utrace_maybe_reap(struct task_struct *target, struct utrace *utrace,
utrace->reap = 1;
if (utrace->utrace_flags & _UTRACE_DEATH_EVENTS) {
- spin_unlock(&utrace->lock);
+ stp_spin_unlock(&utrace->lock);
return;
}
} else {
@@ -1350,7 +1351,7 @@ static void utrace_maybe_reap(struct task_struct *target, struct utrace *utrace,
list_replace_init(&utrace->attached, &attached);
list_splice_tail_init(&utrace->attaching, &attached);
- spin_unlock(&utrace->lock);
+ stp_spin_unlock(&utrace->lock);
list_for_each_entry_safe(engine, next, &attached, entry) {
if (engine->flags & UTRACE_EVENT(REAP))
@@ -1525,7 +1526,7 @@ static int utrace_control(struct task_struct *target,
if (unlikely(target->exit_state)) {
ret = utrace_control_dead(target, utrace, action);
if (ret) {
- spin_unlock(&utrace->lock);
+ stp_spin_unlock(&utrace->lock);
return ret;
}
reset = true;
@@ -1623,7 +1624,7 @@ static int utrace_control(struct task_struct *target,
if (reset)
utrace_reset(target, utrace);
else
- spin_unlock(&utrace->lock);
+ stp_spin_unlock(&utrace->lock);
return ret;
}
@@ -1682,7 +1683,7 @@ static int utrace_barrier(struct task_struct *target,
*/
if (utrace->reporting != engine)
ret = 0;
- spin_unlock(&utrace->lock);
+ stp_spin_unlock(&utrace->lock);
if (!ret)
break;
}
@@ -1720,12 +1721,12 @@ static enum utrace_resume_action start_report(struct utrace *utrace)
enum utrace_resume_action resume = utrace->resume;
if (utrace->pending_attach ||
(resume > UTRACE_STOP && resume < UTRACE_RESUME)) {
- spin_lock(&utrace->lock);
+ stp_spin_lock(&utrace->lock);
splice_attaching(utrace);
resume = utrace->resume;
if (resume > UTRACE_STOP)
utrace->resume = UTRACE_RESUME;
- spin_unlock(&utrace->lock);
+ stp_spin_unlock(&utrace->lock);
}
return resume;
}
@@ -1735,7 +1736,7 @@ static inline void finish_report_reset(struct task_struct *task,
struct utrace_report *report)
{
if (unlikely(report->spurious || report->detaches)) {
- spin_lock(&utrace->lock);
+ stp_spin_lock(&utrace->lock);
if (utrace_reset(task, utrace))
report->action = UTRACE_RESUME;
}
@@ -1758,12 +1759,12 @@ static void finish_report(struct task_struct *task, struct utrace *utrace,
resume = will_not_stop ? UTRACE_REPORT : UTRACE_RESUME;
if (resume < utrace->resume) {
- spin_lock(&utrace->lock);
+ stp_spin_lock(&utrace->lock);
utrace->resume = resume;
stp_task_notify_resume(task, utrace);
if (resume == UTRACE_INTERRUPT)
set_tsk_thread_flag(task, TIF_SIGPENDING);
- spin_unlock(&utrace->lock);
+ stp_spin_unlock(&utrace->lock);
}
finish_report_reset(task, utrace, report);
@@ -1784,9 +1785,9 @@ static void finish_callback_report(struct task_struct *task,
* This way, a 0 return is an unambiguous indicator that any
* callback returning UTRACE_DETACH has indeed caused detach.
*/
- spin_lock(&utrace->lock);
+ stp_spin_lock(&utrace->lock);
engine->ops = &utrace_detached_ops;
- spin_unlock(&utrace->lock);
+ stp_spin_unlock(&utrace->lock);
}
/*
@@ -1805,16 +1806,16 @@ static void finish_callback_report(struct task_struct *task,
report->resume_action = action;
if (engine_wants_stop(engine)) {
- spin_lock(&utrace->lock);
+ stp_spin_lock(&utrace->lock);
clear_engine_wants_stop(engine);
- spin_unlock(&utrace->lock);
+ stp_spin_unlock(&utrace->lock);
}
return;
}
if (!engine_wants_stop(engine)) {
- spin_lock(&utrace->lock);
+ stp_spin_lock(&utrace->lock);
/*
* If utrace_control() came in and detached us
* before we got the lock, we must not stop now.
@@ -1823,7 +1824,7 @@ static void finish_callback_report(struct task_struct *task,
report->detaches = true;
else
mark_engine_wants_stop(utrace, engine);
- spin_unlock(&utrace->lock);
+ stp_spin_unlock(&utrace->lock);
}
}
@@ -2201,9 +2202,9 @@ static void utrace_finish_vfork(struct task_struct *task)
struct utrace *utrace = task_utrace_struct(task);
if (utrace->vfork_stop) {
- spin_lock(&utrace->lock);
+ stp_spin_lock(&utrace->lock);
utrace->vfork_stop = 0;
- spin_unlock(&utrace->lock);
+ stp_spin_unlock(&utrace->lock);
utrace_stop(task, utrace, UTRACE_RESUME); /* XXX */
}
}
@@ -2278,12 +2279,12 @@ static void utrace_report_death(void *cb_data __attribute__ ((unused)),
}
}
else {
- spin_lock(&utrace->lock);
+ stp_spin_lock(&utrace->lock);
BUG_ON(utrace->death);
utrace->death = 1;
utrace->resume = UTRACE_RESUME;
splice_attaching(utrace);
- spin_unlock(&utrace->lock);
+ stp_spin_unlock(&utrace->lock);
REPORT_CALLBACKS(, task, utrace, &report, UTRACE_EVENT(DEATH),
report_death, engine, -1/*group_dead*/,
@@ -2436,12 +2437,12 @@ static void utrace_report_work(struct task_work *work)
might_sleep();
utrace->report_work_added = 0;
- spin_lock(&utrace->lock);
+ stp_spin_lock(&utrace->lock);
BUG_ON(utrace->death);
utrace->death = 1;
utrace->resume = UTRACE_RESUME;
splice_attaching(utrace);
- spin_unlock(&utrace->lock);
+ stp_spin_unlock(&utrace->lock);
REPORT_CALLBACKS(, task, utrace, &report, UTRACE_EVENT(DEATH),
report_death, engine, -1/*group_dead*/,
--
1.8.3.1