This is the mail archive of the systemtap@sources.redhat.com mailing list for the systemtap project.
Index Nav: | [Date Index] [Subject Index] [Author Index] [Thread Index] | |
---|---|---|
Message Nav: | [Date Prev] [Date Next] | [Thread Prev] [Thread Next] |
Other format: | [Raw text] |
Hi,
Here is a design to support "Multiple handler sets per address". I have also put in the i386 implementation based on this design.
Some notes:
- The interfaces to register, unregister, define handlers all remain
the same.
- A kprobe and jprobe cannot co-exist at the same location. (Ideas are welcome on how to support this).
I have minimally tested the patch and it works(tm).
Please let me know your thoughts on the design. I'd also appreciate if you could test the patch (diffed against 2.6.12-rc1-mm3) and provide feedback.
Thanks, Ananth
--- linux-2.6.12-rc1-mm3-mprobe/include/linux/kprobes.h 2005-04-01 09:53:17.000000000 -0800 +++ linux-2.6.12-rc1-mm3-mprobe.rprobe/include/linux/kprobes.h 2005-04-01 13:42:28.000000000 -0800 @@ -34,12 +34,15 @@ struct kprobe; struct pt_regs; +struct retprobe_instance; typedef int (*kprobe_pre_handler_t) (struct kprobe *, struct pt_regs *); typedef int (*kprobe_break_handler_t) (struct kprobe *, struct pt_regs *); typedef void (*kprobe_post_handler_t) (struct kprobe *, struct pt_regs *, unsigned long flags); typedef int (*kprobe_fault_handler_t) (struct kprobe *, struct pt_regs *, int trapnr); +typedef int (*retprobe_handler_t) (struct retprobe_instance *, struct pt_regs *); + struct kprobe { struct list_head list; @@ -59,6 +62,9 @@ /* ... called if breakpoint trap occurs in probe handler. * Return 1 if it handled break, otherwise kernel will see it. */ kprobe_break_handler_t break_handler; + + /* point to retprobe */ + struct retprobe *rp; }; /** @@ -89,6 +95,50 @@ kprobe_opcode_t *entry; /* probe handling code to jump to */ }; +#ifdef arch_supports_retprobes +extern int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs); +extern struct task_struct *arch_get_kprobe_task(void *ptr); +#else +#define arch_supports_retprobes 0 +static void retprobe_trampoline(void) +{ +} +static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) +{ + return 0; +} +#define arch_get_kprobe_task(ptr) ((struct task_struct *)NULL) +#endif +/* + * Function-return probe - + * Note: + * User needs to provide a handler function, and initialize maxactive. + * maxactive - The maximum number of instances of the probed function that + * can be active concurrently. + * nmissed - tracks the number of times the probed function's return was + * ignored, due to maxactive being too low. 
+ * + */ +struct retprobe { + retprobe_handler_t handler; + int maxactive; + int nmissed; + int num_ri_running; + int unregistering; + struct kprobe *kprobe; + struct retprobe_instance *instances; /* allocated memory */ + struct list_head free_instances; +}; + +struct retprobe_instance { + struct list_head list; + struct hlist_node hlist; + struct retprobe *rp; + void *ret_addr; + void *stack_addr; +}; + + #ifdef CONFIG_KPROBES /* Locks kprobe: irq must be disabled */ void lock_kprobes(void); @@ -117,6 +167,14 @@ void unregister_jprobe(struct jprobe *p); void jprobe_return(void); +int register_kretprobe(struct kprobe *p, struct retprobe *rp); +int register_jretprobe(struct jprobe *p, struct retprobe *rp); + +struct retprobe_instance *get_free_rp_inst(struct retprobe *rp); +struct retprobe_instance *get_rp_inst(void *sara); +void add_retprobe_inst_to_hash(struct retprobe_instance *ri); +void kprobe_flush_task(struct task_struct *tk); +void recycle_retprobe_instance(struct retprobe_instance *ri); #else static inline int kprobe_running(void) { @@ -139,5 +197,13 @@ static inline void jprobe_return(void) { } +static inline int register_kretprobe(struct kprobe *p, struct retprobe *rp) +{ + return -ENOSYS; +} +static inline int register_jretprobe(struct jprobe *p, struct retprobe *rp) +{ + return -ENOSYS; +} #endif #endif /* _LINUX_KPROBES_H */ --- linux-2.6.12-rc1-mm3-mprobe/kernel/kprobes.c 2005-04-01 09:53:17.000000000 -0800 +++ linux-2.6.12-rc1-mm3-mprobe.rprobe/kernel/kprobes.c 2005-04-01 16:16:02.000000000 -0800 @@ -42,6 +42,10 @@ static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE]; +#define RPROBE_HASH_BITS KPROBE_HASH_BITS +#define RPROBE_INST_TABLE_SIZE KPROBE_TABLE_SIZE +static struct hlist_head retprobe_inst_table[RPROBE_INST_TABLE_SIZE]; + unsigned int kprobe_cpu = NR_CPUS; static DEFINE_SPINLOCK(kprobe_lock); @@ -58,6 +62,26 @@ spin_unlock(&kprobe_lock); } +struct kprobe trampoline_p = { + .addr = (kprobe_opcode_t *) &retprobe_trampoline, + .pre_handler 
= trampoline_probe_handler, + .rp = NULL +}; + +/* + * Called once to register a probe point at the retprobe trampoline. + */ +static void init_retprobes(void) +{ + int i; + + register_kprobe(&trampoline_p); + + /* Allocate retprobe instances hash table */ + for (i = 0; i < RPROBE_INST_TABLE_SIZE; i++) + INIT_HLIST_HEAD(&retprobe_inst_table[i]); +} + /* You have to be holding the kprobe_lock */ struct aggr_kp *get_kprobe(void *addr) { @@ -73,7 +97,84 @@ return NULL; } -int register_kprobe(struct kprobe *p) +struct retprobe_instance * get_free_rp_inst(struct retprobe *rp) +{ + if (list_empty(&rp->free_instances)) { + return NULL; + } + return (struct retprobe_instance *) rp->free_instances.next; +} + +struct retprobe_instance *get_rp_inst(void *sara) +{ + struct hlist_head *head; + struct hlist_node *node; + struct task_struct *tsk; + struct retprobe_instance *ri; + + tsk = arch_get_kprobe_task(sara); + head = &retprobe_inst_table[hash_ptr(tsk, RPROBE_HASH_BITS)]; + hlist_for_each_entry(ri, node, head, hlist) { + if (ri->stack_addr == sara) + return ri; + } + return NULL; +} + +void add_retprobe_inst_to_hash(struct retprobe_instance *ri) +{ + struct task_struct *tsk; + tsk = arch_get_kprobe_task(ri->stack_addr); + hlist_add_head(&ri->hlist, &retprobe_inst_table[hash_ptr(tsk, RPROBE_HASH_BITS)]); +} + +void recycle_retprobe_instance(struct retprobe_instance *ri) +{ + ri->rp->num_ri_running--; + if (ri->rp->num_ri_running == 0 && ri->rp->unregistering == 1) { + /* This is the last running ri during unregister. + * Free memory to complete the unregister. + */ + kfree(ri->rp->instances); + kfree(ri->rp); + } else { + /* put ri obj back to free list */ + list_add(&ri->list, &ri->rp->free_instances); + } +} + +/* + * This function is called from do_exit or do_execv when task tk's stack is + * about to be recycled. Recycle any function-return probe instances + * associated with this task. These represent probed functions that have + * been called but will never return. 
+ */ +void kprobe_flush_task(struct task_struct *tk) +{ + unsigned long flags = 0; + struct retprobe_instance *ri; + struct task_struct *tsk; + struct hlist_head *head; + struct hlist_node *node; + + if (!arch_supports_retprobes) { + return; + } + spin_lock_irqsave(&kprobe_lock, flags); + head = &retprobe_inst_table[hash_ptr(tk, RPROBE_HASH_BITS)]; + hlist_for_each_entry(ri, node, head, hlist) { + tsk = arch_get_kprobe_task(ri->stack_addr); + if (tsk == tk) { + /* Put the original return address back to stack */ + *((unsigned long *)(ri->stack_addr)) = (unsigned long) ri->ret_addr; + hlist_del_rcu(&ri->hlist); + recycle_retprobe_instance(ri); + } + } + spin_unlock_irqrestore(&kprobe_lock, flags); +} + +int _register_kprobe(struct kprobe *p) { int ret = 0; unsigned long flags = 0; @@ -133,6 +234,12 @@ return ret; } +int register_kprobe(struct kprobe *p) +{ + p->rp = NULL; + return _register_kprobe(p); +} + void unregister_kprobe(struct kprobe *p) { unsigned long flags; @@ -144,6 +251,27 @@ spin_lock_irqsave(&kprobe_lock, flags); list_del(&p->list); + if (p->rp) { + if (p->rp->num_ri_running != 0) { + int i; + struct retprobe *rp; + struct retprobe_instance *ri; + /* + * Make a copy of retprobe so we can relinquish + * the user's original. 
+ */ + rp = kmalloc(sizeof(struct retprobe), GFP_KERNEL); + BUG_ON(rp == NULL); + memcpy(rp, p->rp, sizeof(struct retprobe)); + rp->unregistering = 1; + for (i = 0 ; i < p->rp->maxactive; i++) { + ri = p->rp->instances + i; + ri->rp = rp; + } + } else { + kfree(p->rp->instances); + } + } if (list_empty(&kp->handlers)) { /* all handlers unregistered - free aggr_kp */ arch_remove_kprobe(kp); @@ -175,6 +303,67 @@ unregister_kprobe(&jp->kp); } +int register_kretprobe(struct kprobe *p, struct retprobe *rp) +{ + int ret = 0; + static int retprobe_init_setup = 0; + struct retprobe_instance *inst; + int maxinst, i; + + if (!arch_supports_retprobes) { + return -ENOSYS; + } + if (retprobe_init_setup == 0) { + init_retprobes(); + retprobe_init_setup = 1; + } + /* Pre-allocate memory for max retprobe instances */ + if (rp->maxactive > 0) { + maxinst = rp->maxactive; + } else { +#ifdef CONFIG_PREEMPT + maxinst = max(10, 2 * NR_CPUS); +#else + maxinst = NR_CPUS; +#endif + } + rp->instances = kmalloc(maxinst * sizeof(struct retprobe_instance), + GFP_KERNEL); + if (rp->instances == NULL) { + return -ENOMEM; + } + + INIT_LIST_HEAD(&rp->free_instances); + /* Put all retprobe_instance objects on the free list */ + for (i = 0; i < maxinst; i++) { + inst = rp->instances + i; + list_add(&inst->list, &rp->free_instances); + } + rp->num_ri_running = 0; + rp->nmissed = 0; + rp->unregistering = 0; + rp->kprobe = p; + p->rp = rp; + + /* Establish function entry probe point */ + /* todo: we need to deal with probe that has been registered */ + + if((ret = _register_kprobe(p)) != 0) { + kfree(rp->instances); + return ret; + } + return ret; +} + +int register_jretprobe(struct jprobe *jp, struct retprobe *rp) +{ + + jp->kp.pre_handler = setjmp_pre_handler; + jp->kp.break_handler = longjmp_break_handler; + + return register_kretprobe(&jp->kp, rp); +} + static int __init init_kprobes(void) { int i, err = 0; @@ -195,3 +384,5 @@ EXPORT_SYMBOL_GPL(register_jprobe); 
EXPORT_SYMBOL_GPL(unregister_jprobe); EXPORT_SYMBOL_GPL(jprobe_return); +EXPORT_SYMBOL_GPL(register_kretprobe); +EXPORT_SYMBOL_GPL(register_jretprobe); --- linux-2.6.12-rc1-mm3-mprobe/arch/i386/kernel/kprobes.c 2005-04-01 09:53:17.000000000 -0800 +++ linux-2.6.12-rc1-mm3-mprobe.rprobe/arch/i386/kernel/kprobes.c 2005-04-01 13:49:04.000000000 -0800 @@ -61,6 +61,11 @@ return 0; } +struct task_struct *arch_get_kprobe_task(void *ptr) +{ + return ((struct thread_info *) (((unsigned long) ptr) & (~(THREAD_SIZE -1))))->task; +} + int arch_prepare_kprobe(struct kprobe *p) { return 0; @@ -89,6 +94,30 @@ regs->eip = (unsigned long)&kp->ainsn.insn; } +static void prepare_retprobe(struct retprobe *rp, struct pt_regs *regs) +{ + struct retprobe_instance *ri; + unsigned long *sara = (unsigned long *)®s->esp; + + if ((ri = get_free_rp_inst(rp)) != NULL) { + INIT_HLIST_NODE(&ri->hlist); + ri->rp = rp; + ri->stack_addr = sara; + ri->ret_addr = (void *) *sara; + add_retprobe_inst_to_hash(ri); + /* Replace the return addr with trampoline addr */ + *sara = (unsigned long) &retprobe_trampoline; + /* + * Remove obj in free list - + * will add it back when probed function returns + */ + list_del(&ri->list); + rp->num_ri_running++; + } else { + rp->nmissed++; + } +} + /* * Interrupts are disabled on entry as trap3 is an interrupt gate and they * remain disabled thorough out this function. 
@@ -176,12 +205,17 @@ */ if (kp->jprobe) { /* jprobe has just _one_ struct kprobe in the handlers list */ - list_for_each_entry(p, &kp->handlers, list) + list_for_each_entry(p, &kp->handlers, list) { + if (p->rp) + prepare_retprobe(p->rp, regs); if (p->pre_handler && p->pre_handler(p, regs)) return 1; + } } list_for_each_entry(p, &kp->handlers, list) { + if (p->rp) + prepare_retprobe(p->rp, regs); /* we don't care about return values if this isn't a jprobe */ if (p->pre_handler) p->pre_handler(p, regs); @@ -196,6 +230,22 @@ preempt_enable_no_resched(); return ret; } +/* + * Called when we hit the probe point at retprobe_trampoline + */ +int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) +{ + struct retprobe_instance *ri; + unsigned long *sara = ((unsigned long *) ®s->esp) - 1; + + if ((ri = get_rp_inst(sara)) == NULL) { + return 0; + } + if (ri->rp && !ri->rp->unregistering) { + return ri->rp->handler(ri, regs); + } + return 0; +} /* * Called after single-stepping. kp->addr is the address of the @@ -249,6 +299,19 @@ case 0xea: /* jmp absolute -- eip is correct */ next_eip = regs->eip; break; + case 0x90: /* nop */ + /* Check to make sure this is from the trampoline probe */ + if (orig_eip == (unsigned long) retprobe_trampoline) { + struct retprobe_instance *ri; + unsigned long *sara = tos - 1; /* RA already popped */ + ri = get_rp_inst(sara); + if (ri != NULL) { + next_eip = (unsigned long)ri->ret_addr; + hlist_del(&ri->hlist); + recycle_retprobe_instance(ri); + } + } + break; default: break; } --- linux-2.6.12-rc1-mm3-mprobe/arch/i386/kernel/entry.S 2005-04-01 09:52:30.000000000 -0800 +++ linux-2.6.12-rc1-mm3-mprobe.rprobe/arch/i386/kernel/entry.S 2005-04-01 13:43:55.000000000 -0800 @@ -135,6 +135,16 @@ .long 2b,4b; \ .previous +#ifdef CONFIG_KPROBES +/* + * For function-return probes, init_retprobes() establishes a probepoint + * here. 
When a retprobed function returns, this probe is hit and + * trampoline_probe_handler() runs, calling the retprobe's handler. + */ +ENTRY(retprobe_trampoline) + nop +/* NOT REACHED */ +#endif ENTRY(ret_from_fork) pushl %eax --- linux-2.6.12-rc1-mm3-mprobe/arch/i386/kernel/process.c 2005-04-01 09:52:30.000000000 -0800 +++ linux-2.6.12-rc1-mm3-mprobe.rprobe/arch/i386/kernel/process.c 2005-04-01 13:46:09.000000000 -0800 @@ -39,6 +39,8 @@ #include <linux/kallsyms.h> #include <linux/ptrace.h> #include <linux/random.h> +#include <linux/random.h> +#include <linux/kprobes.h> #include <asm/uaccess.h> #include <asm/pgtable.h> @@ -371,7 +373,14 @@ { struct task_struct *tsk = current; struct thread_struct *t = &tsk->thread; - +#ifdef CONFIG_KPROBES + /* + * Remove function-return probe instances associated with this task + * and put them back on the free list. Do not insert an exit probe for + * this function, it will be disabled by kprobe_flush_task if you do. + */ + kprobe_flush_task(tsk); +#endif /* The process may have allocated an io port bitmap... nuke it. */ if (unlikely(NULL != t->io_bitmap_ptr)) { int cpu = get_cpu(); @@ -395,7 +404,14 @@ void flush_thread(void) { struct task_struct *tsk = current; - +#ifdef CONFIG_KPROBES + /* + * Remove function-return probe instances associated with this task + * and put them back on the free list. Do not insert an exit probe for + * this function, it will be disabled by kprobe_flush_task if you do. 
+ */ + kprobe_flush_task(tsk); +#endif memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8); memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array)); /* --- linux-2.6.12-rc1-mm3-mprobe/include/asm-i386/kprobes.h 2005-03-01 23:38:12.000000000 -0800 +++ linux-2.6.12-rc1-mm3-mprobe.rprobe/include/asm-i386/kprobes.h 2005-04-01 13:44:31.000000000 -0800 @@ -39,6 +39,9 @@ : (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) #define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry +#define arch_supports_retprobes 1 + +asmlinkage void retprobe_trampoline(void) __asm__("retprobe_trampoline"); /* Architecture specific copy of original instruction*/ struct arch_specific_insn {
Attachment:
sys_kretprobe.tar.gz
Description: GNU Zip compressed data
Attachment:
sys_kretprobe2.tar.gz
Description: GNU Zip compressed data
Index Nav: | [Date Index] [Subject Index] [Author Index] [Thread Index] | |
---|---|---|
Message Nav: | [Date Prev] [Date Next] | [Thread Prev] [Thread Next] |