This is the mail archive of the
systemtap@sourceware.org
mailing list for the systemtap project.
[PATCH]Kprobes: fix kprobes reentrancy
- From: "Keshavamurthy, Anil S" <anil dot s dot keshavamurthy at intel dot com>
- To: Linux Kernel <linux-kernel at vger dot kernel dot org>
- Cc: akpm at osdl dot org, Ananth N Mavinakayanahalli <ananth at in dot ibm dot com>, Jim Keniston <jkenisto at us dot ibm dot com>, Prasanna S Panchamukhi <prasanna at in dot ibm dot com>, Systemtap <systemtap at sources dot redhat dot com>
- Date: Tue, 12 Dec 2006 14:25:17 -0800
- Subject: [PATCH]Kprobes: fix kprobes reentrancy
- Reply-to: "Keshavamurthy, Anil S" <anil dot s dot keshavamurthy at intel dot com>
[PATCH]Kprobes: fix kprobes reentrancy
From: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
In case of reentrancy i.e when a probe_handler calls a function
which has a probe on it, then the kprobes handles this reentrancy
by skipping the reentrant's (i.e. second) probe_handler function,
and just single steps the original instruction and returns to the point
where reentrancy got triggered. During this reentrancy, kprobes saves
the previous (i.e. first probe's) info in the per-cpu global area called
prev_kprobe. This area was getting corrupted when an interrupt was triggered
and the ISR routine also had a probe in its path; kprobes used
to overwrite the per-cpu global area prev_kprobe, thereby corrupting this
data structure. The patch below resolves this by creating a stack-like
area to store and restore prev_kprobe info. The current patch also resolves
the long pending bug for Systemtap where probes on kernel.function("*"),
i.e. probes on all kernel functions used to crash the system. With this
patch Systemtap can probe kernel.function("*").
Signed-off-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Please apply.
arch/i386/kernel/kprobes.c | 26 ++++++++++++++++++--------
arch/ia64/kernel/kprobes.c | 18 ++++++++++++++----
arch/x86_64/kernel/kprobes.c | 26 ++++++++++++++++++--------
include/asm-i386/kprobes.h | 2 +-
include/asm-ia64/kprobes.h | 2 +-
include/asm-x86_64/kprobes.h | 2 +-
include/linux/kprobes.h | 22 ++++++++++++++++++++++
7 files changed, 75 insertions(+), 23 deletions(-)
Index: 2.6.19-git19/arch/i386/kernel/kprobes.c
===================================================================
--- 2.6.19-git19.orig/arch/i386/kernel/kprobes.c
+++ 2.6.19-git19/arch/i386/kernel/kprobes.c
@@ -40,6 +40,8 @@ void jprobe_return_end(void);
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+DEFINE_PER_CPU(atomic_t, prev_kprobe_index);
+DEFINE_PER_CPU(struct prev_kprobe [PREV_KPROBE_SIZE], prev_kprobe_blk);
/* insert a jmp code */
static __always_inline void set_jmp_op(void *from, void *to)
@@ -190,18 +192,23 @@ void __kprobes arch_remove_kprobe(struct
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
- kcb->prev_kprobe.kp = kprobe_running();
- kcb->prev_kprobe.status = kcb->kprobe_status;
- kcb->prev_kprobe.old_eflags = kcb->kprobe_old_eflags;
- kcb->prev_kprobe.saved_eflags = kcb->kprobe_saved_eflags;
+ kcb->prev_kprobe = get_prev_kprobe();
+ kcb->prev_kprobe->kp = kprobe_running();
+ kcb->prev_kprobe->status = kcb->kprobe_status;
+ kcb->prev_kprobe->old_eflags = kcb->kprobe_old_eflags;
+ kcb->prev_kprobe->saved_eflags = kcb->kprobe_saved_eflags;
}
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
- __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
- kcb->kprobe_status = kcb->prev_kprobe.status;
- kcb->kprobe_old_eflags = kcb->prev_kprobe.old_eflags;
- kcb->kprobe_saved_eflags = kcb->prev_kprobe.saved_eflags;
+ unsigned long flags;
+ local_irq_save(flags);
+ __get_cpu_var(current_kprobe) = kcb->prev_kprobe->kp;
+ kcb->kprobe_status = kcb->prev_kprobe->status;
+ kcb->kprobe_old_eflags = kcb->prev_kprobe->old_eflags;
+ kcb->kprobe_saved_eflags = kcb->prev_kprobe->saved_eflags;
+ kcb->prev_kprobe = restore_prev_kprobe();
+ local_irq_restore(flags);
}
static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
@@ -270,6 +277,7 @@ static int __kprobes kprobe_handler(stru
if (kprobe_running()) {
p = get_kprobe(addr);
if (p) {
+ unsigned long flags;
if (kcb->kprobe_status == KPROBE_HIT_SS &&
*p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
regs->eflags &= ~TF_MASK;
@@ -282,8 +290,10 @@ static int __kprobes kprobe_handler(stru
* just single step on the instruction of the new probe
* without calling any user handlers.
*/
+ local_irq_save(flags);
save_previous_kprobe(kcb);
set_current_kprobe(p, regs, kcb);
+ local_irq_restore(flags);
kprobes_inc_nmissed_count(p);
prepare_singlestep(p, regs);
kcb->kprobe_status = KPROBE_REENTER;
Index: 2.6.19-git19/arch/ia64/kernel/kprobes.c
===================================================================
--- 2.6.19-git19.orig/arch/ia64/kernel/kprobes.c
+++ 2.6.19-git19/arch/ia64/kernel/kprobes.c
@@ -39,6 +39,8 @@ extern void jprobe_inst_return(void);
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+DEFINE_PER_CPU(atomic_t, prev_kprobe_index);
+DEFINE_PER_CPU(struct prev_kprobe [PREV_KPROBE_SIZE], prev_kprobe_blk);
enum instruction_type {A, I, M, F, B, L, X, u};
static enum instruction_type bundle_encoding[32][3] = {
@@ -307,14 +309,19 @@ static int __kprobes valid_kprobe_addr(i
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
- kcb->prev_kprobe.kp = kprobe_running();
- kcb->prev_kprobe.status = kcb->kprobe_status;
+ kcb->prev_kprobe = get_prev_kprobe();
+ kcb->prev_kprobe->kp = kprobe_running();
+ kcb->prev_kprobe->status = kcb->kprobe_status;
}
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
- __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
- kcb->kprobe_status = kcb->prev_kprobe.status;
+ unsigned long flags;
+ local_irq_save(flags);
+ __get_cpu_var(current_kprobe) = kcb->prev_kprobe->kp;
+ kcb->kprobe_status = kcb->prev_kprobe->status;
+ kcb->prev_kprobe = restore_prev_kprobe();
+ local_irq_restore(flags);
}
static void __kprobes set_current_kprobe(struct kprobe *p,
@@ -631,6 +638,7 @@ static int __kprobes pre_kprobes_handler
if (kprobe_running()) {
p = get_kprobe(addr);
if (p) {
+ unsigned long flags;
if ((kcb->kprobe_status == KPROBE_HIT_SS) &&
(p->ainsn.inst_flag == INST_FLAG_BREAK_INST)) {
ia64_psr(regs)->ss = 0;
@@ -642,8 +650,10 @@ static int __kprobes pre_kprobes_handler
* just single step on the instruction of the new probe
* without calling any user handlers.
*/
+ local_irq_save(flags);
save_previous_kprobe(kcb);
set_current_kprobe(p, kcb);
+ local_irq_restore(flags);
kprobes_inc_nmissed_count(p);
prepare_ss(p, regs);
kcb->kprobe_status = KPROBE_REENTER;
Index: 2.6.19-git19/arch/x86_64/kernel/kprobes.c
===================================================================
--- 2.6.19-git19.orig/arch/x86_64/kernel/kprobes.c
+++ 2.6.19-git19/arch/x86_64/kernel/kprobes.c
@@ -48,6 +48,8 @@ static void __kprobes arch_copy_kprobe(s
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+DEFINE_PER_CPU(atomic_t, prev_kprobe_index);
+DEFINE_PER_CPU(struct prev_kprobe [PREV_KPROBE_SIZE], prev_kprobe_blk);
/*
* returns non-zero if opcode modifies the interrupt flag.
@@ -230,18 +232,23 @@ void __kprobes arch_remove_kprobe(struct
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
- kcb->prev_kprobe.kp = kprobe_running();
- kcb->prev_kprobe.status = kcb->kprobe_status;
- kcb->prev_kprobe.old_rflags = kcb->kprobe_old_rflags;
- kcb->prev_kprobe.saved_rflags = kcb->kprobe_saved_rflags;
+ kcb->prev_kprobe = get_prev_kprobe();
+ kcb->prev_kprobe->kp = kprobe_running();
+ kcb->prev_kprobe->status = kcb->kprobe_status;
+ kcb->prev_kprobe->old_rflags = kcb->kprobe_old_rflags;
+ kcb->prev_kprobe->saved_rflags = kcb->kprobe_saved_rflags;
}
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
- __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
- kcb->kprobe_status = kcb->prev_kprobe.status;
- kcb->kprobe_old_rflags = kcb->prev_kprobe.old_rflags;
- kcb->kprobe_saved_rflags = kcb->prev_kprobe.saved_rflags;
+ unsigned long flags;
+ local_irq_save(flags);
+ __get_cpu_var(current_kprobe) = kcb->prev_kprobe->kp;
+ kcb->kprobe_status = kcb->prev_kprobe->status;
+ kcb->kprobe_old_rflags = kcb->prev_kprobe->old_rflags;
+ kcb->kprobe_saved_rflags = kcb->prev_kprobe->saved_rflags;
+ kcb->prev_kprobe = restore_prev_kprobe();
+ local_irq_restore(flags);
}
static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
@@ -326,8 +333,11 @@ int __kprobes kprobe_handler(struct pt_r
* of the new probe without calling any user
* handlers.
*/
+ unsigned long flags;
+ local_irq_save(flags);
save_previous_kprobe(kcb);
set_current_kprobe(p, regs, kcb);
+ local_irq_restore(flags);
kprobes_inc_nmissed_count(p);
prepare_singlestep(p, regs);
kcb->kprobe_status = KPROBE_REENTER;
Index: 2.6.19-git19/include/asm-i386/kprobes.h
===================================================================
--- 2.6.19-git19.orig/include/asm-i386/kprobes.h
+++ 2.6.19-git19/include/asm-i386/kprobes.h
@@ -76,7 +76,7 @@ struct kprobe_ctlblk {
long *jprobe_saved_esp;
struct pt_regs jprobe_saved_regs;
kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
- struct prev_kprobe prev_kprobe;
+ struct prev_kprobe *prev_kprobe;
};
/* trap3/1 are intr gates for kprobes. So, restore the status of IF,
Index: 2.6.19-git19/include/asm-ia64/kprobes.h
===================================================================
--- 2.6.19-git19.orig/include/asm-ia64/kprobes.h
+++ 2.6.19-git19/include/asm-ia64/kprobes.h
@@ -77,7 +77,7 @@ struct kprobe_ctlblk {
unsigned long jprobes_saved_stacked_regs[MAX_PARAM_RSE_SIZE];
unsigned long *bsp;
unsigned long cfm;
- struct prev_kprobe prev_kprobe;
+ struct prev_kprobe *prev_kprobe;
};
#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry
Index: 2.6.19-git19/include/asm-x86_64/kprobes.h
===================================================================
--- 2.6.19-git19.orig/include/asm-x86_64/kprobes.h
+++ 2.6.19-git19/include/asm-x86_64/kprobes.h
@@ -70,7 +70,7 @@ struct kprobe_ctlblk {
long *jprobe_saved_rsp;
struct pt_regs jprobe_saved_regs;
kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
- struct prev_kprobe prev_kprobe;
+ struct prev_kprobe *prev_kprobe;
};
/* trap3/1 are intr gates for kprobes. So, restore the status of IF,
Index: 2.6.19-git19/include/linux/kprobes.h
===================================================================
--- 2.6.19-git19.orig/include/linux/kprobes.h
+++ 2.6.19-git19/include/linux/kprobes.h
@@ -188,6 +188,28 @@ static inline struct kprobe_ctlblk *get_
return (&__get_cpu_var(kprobe_ctlblk));
}
+/* Per-CPU area for storing the previous kprobe information
+ * in case of reentrancy.
+ */
+#define PREV_KPROBE_SIZE 4
+DECLARE_PER_CPU(atomic_t, prev_kprobe_index);
+DECLARE_PER_CPU(struct prev_kprobe [PREV_KPROBE_SIZE], prev_kprobe_blk);
+
+static inline struct prev_kprobe *get_prev_kprobe(void)
+{
+ unsigned int i;
+ i = atomic_add_return(1, &(__get_cpu_var(prev_kprobe_index)));
+ BUG_ON(i > PREV_KPROBE_SIZE);
+ return (&__get_cpu_var(prev_kprobe_blk)[i-1]);
+}
+
+static inline struct prev_kprobe *restore_prev_kprobe(void)
+{
+ unsigned int i;
+ i = atomic_sub_return(1, &(__get_cpu_var(prev_kprobe_index)));
+ return (&__get_cpu_var(prev_kprobe_blk)[i]);
+}
+
int register_kprobe(struct kprobe *p);
void unregister_kprobe(struct kprobe *p);
int setjmp_pre_handler(struct kprobe *, struct pt_regs *);