This is the mail archive of the systemtap@sources.redhat.com mailing list for the systemtap project.


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]
Other format: [Raw text]

[PATCH] Kprobes: Fix to prevent possible race conditions


Hi All,

Below is the patch to avoid possible race conditions in kprobes.
This patch has been tested on i386, x86_64, ppc64 architectures and
also cross compiled on ia64 and sparc64 architectures.
Please let me know if you have any issues.

Thanks
Prasanna

---
There are possible race conditions if probes are placed on routines within the
kprobes files or on routines used by kprobes. For example, if you put a probe on
the get_kprobe() routine, the system can hang while inserting a probe on
any other routine, such as do_fork(). This is because, while inserting the probe
on do_fork(), register_kprobe() grabs the kprobes spin lock and then executes
get_kprobe(); to handle the probe hit in get_kprobe(), kprobe_handler() gets
executed and tries to grab the same kprobes spin lock, spinning forever. This
patch avoids such race conditions by preventing probes on routines within the
kprobes files and on routines used by kprobes.

Signed-off-by: Prasanna S Panchamukhi <prasanna@in.ibm.com>


---

 linux-2.6.13-rc1-prasanna/arch/i386/kernel/kprobes.c    |   52 ++++++++++---
 linux-2.6.13-rc1-prasanna/arch/ia64/kernel/kprobes.c    |   40 +++++++++-
 linux-2.6.13-rc1-prasanna/arch/ppc64/kernel/kprobes.c   |   63 ++++++++++++----
 linux-2.6.13-rc1-prasanna/arch/sparc64/kernel/kprobes.c |   34 ++++++++
 linux-2.6.13-rc1-prasanna/arch/x86_64/kernel/kprobes.c  |   54 ++++++++++---
 linux-2.6.13-rc1-prasanna/include/linux/kprobes.h       |    3 
 linux-2.6.13-rc1-prasanna/kernel/kprobes.c              |   16 ++++
 7 files changed, 221 insertions(+), 41 deletions(-)

diff -puN arch/i386/kernel/kprobes.c~kprobes-exclude-region arch/i386/kernel/kprobes.c
--- linux-2.6.13-rc1/arch/i386/kernel/kprobes.c~kprobes-exclude-region	2005-07-01 18:01:23.000000000 +0530
+++ linux-2.6.13-rc1-prasanna/arch/i386/kernel/kprobes.c	2005-07-01 18:01:23.000000000 +0530
@@ -46,8 +46,27 @@ static long *jprobe_saved_esp;
 /* copy of the kernel stack at the probe fire time */
 static kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
 void jprobe_return_end(void);
+static void kprobes_arch_code_end(void);
 
 /*
+ * For function-return probes, init_kprobes() establishes a probepoint
+ * here. When a retprobed function returns, this probe is hit and
+ * trampoline_probe_handler() runs, calling the kretprobe's handler.
+ */
+void kretprobe_trampoline_holder(void)
+{
+	asm volatile (  ".global kretprobe_trampoline\n"
+ 			"kretprobe_trampoline: \n"
+ 			"nop\n");
+}
+
+/*
+ * Marker at the beginning of the arch/i386/kprobes.c file.
+ */
+static void kprobes_arch_code_start(void)
+{
+}
+/*
  * returns non-zero if opcode modifies the interrupt flag.
  */
 static inline int is_IF_modifier(kprobe_opcode_t opcode)
@@ -67,6 +86,20 @@ int arch_prepare_kprobe(struct kprobe *p
 	return 0;
 }
 
+/*
+ * Avoid probes on routines defined within kprobe files to
+ * prevent possible race conditions.
+ */
+int kprobe_exclude_region(unsigned long addr)
+{
+	if (((addr >= (unsigned long)kprobes_code_start)
+	     && (addr <= (unsigned long)kprobes_code_end))
+	    || ((addr >= (unsigned long)kprobes_arch_code_start)
+		&& (addr <= (unsigned long)kprobes_arch_code_end)))
+		return -EINVAL;
+	return 0;
+}
+
 void arch_copy_kprobe(struct kprobe *p)
 {
 	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
@@ -245,18 +278,6 @@ no_kprobe:
 }
 
 /*
- * For function-return probes, init_kprobes() establishes a probepoint
- * here. When a retprobed function returns, this probe is hit and
- * trampoline_probe_handler() runs, calling the kretprobe's handler.
- */
- void kretprobe_trampoline_holder(void)
- {
- 	asm volatile (  ".global kretprobe_trampoline\n"
- 			"kretprobe_trampoline: \n"
- 			"nop\n");
- }
-
-/*
  * Called when we hit the probe point at kretprobe_trampoline
  */
 int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
@@ -541,3 +562,10 @@ int __init arch_init_kprobes(void)
 {
 	return register_kprobe(&trampoline_p);
 }
+
+/*
+ * Marker at the end of the arch/i386/kprobes.c file.
+ */
+static void kprobes_arch_code_end(void)
+{
+}
diff -puN arch/ia64/kernel/kprobes.c~kprobes-exclude-region arch/ia64/kernel/kprobes.c
--- linux-2.6.13-rc1/arch/ia64/kernel/kprobes.c~kprobes-exclude-region	2005-07-01 18:01:23.000000000 +0530
+++ linux-2.6.13-rc1-prasanna/arch/ia64/kernel/kprobes.c	2005-07-01 18:01:23.000000000 +0530
@@ -82,6 +82,19 @@ static enum instruction_type bundle_enco
   { u, u, u },  			/* 1F */
 };
 
+static void kprobes_arch_code_end(void);
+
+static void kretprobe_trampoline(void)
+{
+}
+
+/*
+ * Marker at the beginning of the arch/ia64/kprobes.c file.
+ */
+static void kprobes_arch_code_start(void)
+{
+}
+
 /*
  * In this function we check to see if the instruction
  * is IP relative instruction and update the kprobe
@@ -121,6 +134,22 @@ static void update_kprobe_inst_flag(uint
 }
 
 /*
+ * Avoid probes on routines defined within kprobe files and
+ * other kernel routines used by kprobes to prevent possible
+ * race conditions.
+ */
+int kprobe_exclude_region(unsigned long addr)
+{
+	if (((addr >= (unsigned long)kprobes_code_start)
+	     && (addr <= (unsigned long)kprobes_code_end))
+	    || ((addr >= (unsigned long)kprobes_arch_code_start)
+		&& (addr <= (unsigned long)kprobes_arch_code_end))
+	    || (addr == (unsigned long)flush_icache_range))
+		return -EINVAL;
+	return 0;
+}
+
+/*
  * In this function we check to see if the instruction
  * on which we are inserting kprobe is supported.
  * Returns 0 if supported
@@ -311,10 +340,6 @@ static inline void set_current_kprobe(st
 	current_kprobe = p;
 }
 
-static void kretprobe_trampoline(void)
-{
-}
-
 /*
  * At this point the target function has been tricked into
  * returning into our trampoline.  Lookup the associated instance
@@ -719,3 +744,10 @@ int __init arch_init_kprobes(void)
 		(kprobe_opcode_t *)((struct fnptr *)kretprobe_trampoline)->ip;
 	return register_kprobe(&trampoline_p);
 }
+
+/*
+ * Marker at the end of the arch/ia64/kprobes.c file.
+ */
+static void kprobes_arch_code_end(void)
+{
+}
diff -puN arch/ppc64/kernel/kprobes.c~kprobes-exclude-region arch/ppc64/kernel/kprobes.c
--- linux-2.6.13-rc1/arch/ppc64/kernel/kprobes.c~kprobes-exclude-region	2005-07-01 18:01:23.000000000 +0530
+++ linux-2.6.13-rc1-prasanna/arch/ppc64/kernel/kprobes.c	2005-07-01 18:01:23.000000000 +0530
@@ -43,6 +43,27 @@ static unsigned long kprobe_status, kpro
 static struct kprobe *kprobe_prev;
 static unsigned long kprobe_status_prev, kprobe_saved_msr_prev;
 static struct pt_regs jprobe_saved_regs;
+static void kprobes_arch_code_end(void);
+
+/*
+ * Function return probe trampoline:
+ * 	- init_kprobes() establishes a probepoint here
+ * 	- When the probed function returns, this probe
+ * 		causes the handlers to fire
+ */
+void kretprobe_trampoline_holder(void)
+{
+	asm volatile(".global kretprobe_trampoline\n"
+			"kretprobe_trampoline:\n"
+			"nop\n");
+}
+
+/*
+ * Marker at the beginning of the arch/ppc64/kprobes.c file.
+ */
+static void kprobes_arch_code_start(void)
+{
+}
 
 int arch_prepare_kprobe(struct kprobe *p)
 {
@@ -74,6 +95,28 @@ void arch_copy_kprobe(struct kprobe *p)
 	p->opcode = *p->addr;
 }
 
+/*
+ * Avoid probes on routines defined within kprobe files and other kernel
+ * routines used by kprobes to prevent possible race conditions.
+ */
+int kprobe_exclude_region(unsigned long addr)
+{
+	unsigned long start = (unsigned long)(((func_descr_t *)
+						&kprobes_code_start)->entry);
+	unsigned long end = (unsigned long)(((func_descr_t *)
+						&kprobes_code_end)->entry);
+	unsigned long arch_start = (unsigned long)(((func_descr_t *)
+					&kprobes_arch_code_start)->entry);
+	unsigned long arch_end = (unsigned long)(((func_descr_t *)
+						&kprobes_arch_code_end)->entry);
+	if (((addr >= start) && (addr <= end))
+	    || ((addr >= arch_start) && (addr <= arch_end))
+	    || (addr == (unsigned long)(((func_descr_t *)
+					& __flush_icache_range)-> entry)))
+		return -EINVAL;
+	return 0;
+}
+
 void arch_arm_kprobe(struct kprobe *p)
 {
 	*p->addr = BREAKPOINT_INSTRUCTION;
@@ -229,19 +272,6 @@ no_kprobe:
 }
 
 /*
- * Function return probe trampoline:
- * 	- init_kprobes() establishes a probepoint here
- * 	- When the probed function returns, this probe
- * 		causes the handlers to fire
- */
-void kretprobe_trampoline_holder(void)
-{
-	asm volatile(".global kretprobe_trampoline\n"
-			"kretprobe_trampoline:\n"
-			"nop\n");
-}
-
-/*
  * Called when the probe at kretprobe trampoline is hit
  */
 int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
@@ -448,3 +478,10 @@ int __init arch_init_kprobes(void)
 {
 	return register_kprobe(&trampoline_p);
 }
+
+/*
+ * Marker at the end of the arch/ppc64/kprobes.c file.
+ */
+static void kprobes_arch_code_end(void)
+{
+}
diff -puN arch/sparc64/kernel/kprobes.c~kprobes-exclude-region arch/sparc64/kernel/kprobes.c
--- linux-2.6.13-rc1/arch/sparc64/kernel/kprobes.c~kprobes-exclude-region	2005-07-01 18:01:23.000000000 +0530
+++ linux-2.6.13-rc1-prasanna/arch/sparc64/kernel/kprobes.c	2005-07-01 18:05:22.000000000 +0530
@@ -8,6 +8,7 @@
 #include <linux/kprobes.h>
 #include <asm/kdebug.h>
 #include <asm/signal.h>
+#include <asm/cacheflush.h>
 
 /* We do not have hardware single-stepping on sparc64.
  * So we implement software single-stepping with breakpoint
@@ -37,11 +38,37 @@
  * - Mark that we are no longer actively in a kprobe.
  */
 
+static void kprobes_arch_code_end(void);
+
+/*
+ * Marker at the beginning of the arch/sparc64/kprobes.c file.
+ */
+static void kprobes_arch_code_start(void)
+{
+}
+
 int arch_prepare_kprobe(struct kprobe *p)
 {
 	return 0;
 }
 
+/*
+ * Avoid probes on routines defined within kprobe files and
+ * other kernel routines used by kprobes to prevent possible
+ * race conditions.
+ */
+int kprobe_exclude_region(unsigned long addr)
+{
+	if (((addr >= (unsigned long)kprobes_code_start)
+	     && (addr <= (unsigned long)kprobes_code_end))
+	    || ((addr >= (unsigned long)kprobes_arch_code_start)
+		&& (addr <= (unsigned long)kprobes_arch_code_end))
+	    || (addr == (unsigned long)flush_icache_range)
+	    || (addr == (unsigned long)__flush_icache_page))
+		return -EINVAL;
+	return 0;
+}
+
 void arch_copy_kprobe(struct kprobe *p)
 {
 	p->ainsn.insn[0] = *p->addr;
@@ -438,3 +465,10 @@ int arch_init_kprobes(void)
 {
 	return 0;
 }
+
+/*
+ * Marker at the end of the arch/sparc64/kprobes.c file.
+ */
+static void kprobes_arch_code_end(void)
+{
+}
diff -puN arch/x86_64/kernel/kprobes.c~kprobes-exclude-region arch/x86_64/kernel/kprobes.c
--- linux-2.6.13-rc1/arch/x86_64/kernel/kprobes.c~kprobes-exclude-region	2005-07-01 18:01:23.000000000 +0530
+++ linux-2.6.13-rc1-prasanna/arch/x86_64/kernel/kprobes.c	2005-07-01 18:01:23.000000000 +0530
@@ -55,6 +55,27 @@ void jprobe_return_end(void);
 
 /* copy of the kernel stack at the probe fire time */
 static kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
+static void kprobes_arch_code_end(void);
+
+/*
+ * For function-return probes, init_kprobes() establishes a probepoint
+ * here. When a retprobed function returns, this probe is hit and
+ * trampoline_probe_handler() runs, calling the kretprobe's handler.
+ */
+void kretprobe_trampoline_holder(void)
+{
+ 	asm volatile (  ".global kretprobe_trampoline\n"
+ 			"kretprobe_trampoline: \n"
+ 			"nop\n");
+}
+
+
+/*
+ * Marker at the beginning of the arch/x86_64/kprobes.c file.
+ */
+static void kprobes_arch_code_start(void)
+{
+}
 
 /*
  * returns non-zero if opcode modifies the interrupt flag.
@@ -87,6 +108,20 @@ int arch_prepare_kprobe(struct kprobe *p
 }
 
 /*
+ * Avoid probes on routines defined within kprobe files to
+ * prevent possible race conditions.
+ */
+int kprobe_exclude_region(unsigned long addr)
+{
+	if (((addr >= (unsigned long)kprobes_code_start)
+	     && (addr <= (unsigned long)kprobes_code_end))
+	    || ((addr >= (unsigned long)kprobes_arch_code_start)
+		&& (addr <= (unsigned long)kprobes_arch_code_end)))
+		return -EINVAL;
+	return 0;
+}
+
+/*
  * Determine if the instruction uses the %rip-relative addressing mode.
  * If it does, return the address of the 32-bit displacement word.
  * If not, return null.
@@ -385,18 +420,6 @@ no_kprobe:
 }
 
 /*
- * For function-return probes, init_kprobes() establishes a probepoint
- * here. When a retprobed function returns, this probe is hit and
- * trampoline_probe_handler() runs, calling the kretprobe's handler.
- */
- void kretprobe_trampoline_holder(void)
- {
- 	asm volatile (  ".global kretprobe_trampoline\n"
- 			"kretprobe_trampoline: \n"
- 			"nop\n");
- }
-
-/*
  * Called when we hit the probe point at kretprobe_trampoline
  */
 int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
@@ -686,3 +709,10 @@ int __init arch_init_kprobes(void)
 {
 	return register_kprobe(&trampoline_p);
 }
+
+/*
+ * Marker at the end of the arch/x86_64/kprobes.c file.
+ */
+static void kprobes_arch_code_end(void)
+{
+}
diff -puN include/linux/kprobes.h~kprobes-exclude-region include/linux/kprobes.h
--- linux-2.6.13-rc1/include/linux/kprobes.h~kprobes-exclude-region	2005-07-01 18:01:23.000000000 +0530
+++ linux-2.6.13-rc1-prasanna/include/linux/kprobes.h	2005-07-01 18:01:23.000000000 +0530
@@ -159,6 +159,9 @@ extern int arch_init_kprobes(void);
 extern void show_registers(struct pt_regs *regs);
 extern kprobe_opcode_t *get_insn_slot(void);
 extern void free_insn_slot(kprobe_opcode_t *slot);
+extern int kprobe_exclude_region(unsigned long addr);
+void kprobes_code_start(void);
+void kprobes_code_end(void);
 
 /* Get the kprobe at this addr (if any).  Must have called lock_kprobes */
 struct kprobe *get_kprobe(void *addr);
diff -puN kernel/kprobes.c~kprobes-exclude-region kernel/kprobes.c
--- linux-2.6.13-rc1/kernel/kprobes.c~kprobes-exclude-region	2005-07-01 18:01:23.000000000 +0530
+++ linux-2.6.13-rc1-prasanna/kernel/kprobes.c	2005-07-01 18:02:06.000000000 +0530
@@ -68,6 +68,13 @@ struct kprobe_insn_page {
 
 static struct hlist_head kprobe_insn_pages;
 
+/*
+ * Marker at the beginning of the kernel/kprobes.c file.
+ */
+void kprobes_code_start(void)
+{
+}
+
 /**
  * get_insn_slot() - Find a slot on an executable page for an instruction.
  * We allocate an executable page if there's no room on existing ones.
@@ -440,6 +447,8 @@ int register_kprobe(struct kprobe *p)
 	unsigned long flags = 0;
 	struct kprobe *old_p;
 
+	if ((ret = kprobe_exclude_region((unsigned long)p->addr)) != 0)
+		return ret;
 	if ((ret = arch_prepare_kprobe(p)) != 0) {
 		goto rm_kprobe;
 	}
@@ -581,6 +590,13 @@ static int __init init_kprobes(void)
 	return err;
 }
 
+/*
+ * Marker at the end of the kernel/kprobes.c file
+ */
+void kprobes_code_end(void)
+{
+}
+
 __initcall(init_kprobes);
 
 EXPORT_SYMBOL_GPL(register_kprobe);

_
-- 

Prasanna S Panchamukhi
Linux Technology Center
India Software Labs, IBM Bangalore
Ph: 91-80-25044636
<prasanna@in.ibm.com>


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]