From e4a3f541f0b67fdad98b326c851dfe7f4b6b6dad Mon Sep 17 00:00:00 2001
From: Steven Rostedt
Date: Tue, 14 Jun 2011 19:02:29 -0400
Subject: tracing: Still trace filtered irq functions when irq trace is disabled

If a function is set to be traced by the set_graph_function, but the
option funcgraph-irqs is zero, and the traced function happens to be
called from an interrupt, it will not be traced.

The point of funcgraph-irqs is to not trace interrupts when we are
preempted by an irq, not to avoid tracing functions we want to trace
that happen to be *in* an irq.

Luckily, the current->trace_recursion element is the perfect place to
add a flag that lets us trace functions within an interrupt even when
we are not tracing the interrupts that preempt the trace.

Reported-by: Heiko Carstens
Tested-by: Heiko Carstens
Signed-off-by: Steven Rostedt
---
 kernel/trace/trace.h                 | 50 ++++++++++++++++++++++++------------
 kernel/trace/trace_functions_graph.c |  2 +-
 2 files changed, 35 insertions(+), 17 deletions(-)
(limited to 'kernel/trace')

diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index a3e2db708072..651f35be372a 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -278,6 +278,29 @@ struct tracer {
 };
 
+/* Only current can touch trace_recursion */
+#define trace_recursion_inc() do { (current)->trace_recursion++; } while (0)
+#define trace_recursion_dec() do { (current)->trace_recursion--; } while (0)
+
+/* Ring buffer has the 10 LSB bits to count */
+#define trace_recursion_buffer() ((current)->trace_recursion & 0x3ff)
+
+/* for function tracing recursion */
+#define TRACE_INTERNAL_BIT (1<<11)
+#define TRACE_GLOBAL_BIT (1<<12)
+/*
+ * Abuse of the trace_recursion.
+ * As we need a way to maintain state if we are tracing the function
+ * graph in irq because we want to trace a particular function that
+ * was called in irq context but we have irq tracing off. Since this
+ * can only be modified by current, we can reuse trace_recursion.
+ */
+#define TRACE_IRQ_BIT (1<<13)
+
+#define trace_recursion_set(bit) do { (current)->trace_recursion |= (bit); } while (0)
+#define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(bit); } while (0)
+#define trace_recursion_test(bit) ((current)->trace_recursion & (bit))
+
 #define TRACE_PIPE_ALL_CPU -1
 
 int tracer_init(struct tracer *t, struct trace_array *tr);
@@ -516,8 +539,18 @@ static inline int ftrace_graph_addr(unsigned long addr)
 		return 1;
 
 	for (i = 0; i < ftrace_graph_count; i++) {
-		if (addr == ftrace_graph_funcs[i])
+		if (addr == ftrace_graph_funcs[i]) {
+			/*
+			 * If no irqs are to be traced, but a set_graph_function
+			 * is set, and called by an interrupt handler, we still
+			 * want to trace it.
+			 */
+			if (in_irq())
+				trace_recursion_set(TRACE_IRQ_BIT);
+			else
+				trace_recursion_clear(TRACE_IRQ_BIT);
 			return 1;
+		}
 	}
 
 	return 0;
@@ -794,19 +827,4 @@ extern const char *__stop___trace_bprintk_fmt[];
 	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
 #include "trace_entries.h"
 
-/* Only current can touch trace_recursion */
-#define trace_recursion_inc() do { (current)->trace_recursion++; } while (0)
-#define trace_recursion_dec() do { (current)->trace_recursion--; } while (0)
-
-/* Ring buffer has the 10 LSB bits to count */
-#define trace_recursion_buffer() ((current)->trace_recursion & 0x3ff)
-
-/* for function tracing recursion */
-#define TRACE_INTERNAL_BIT (1<<11)
-#define TRACE_GLOBAL_BIT (1<<12)
-
-#define trace_recursion_set(bit) do { (current)->trace_recursion |= (bit); } while (0)
-#define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(bit); } while (0)
-#define trace_recursion_test(bit) ((current)->trace_recursion & (bit))
-
 #endif /* _LINUX_KERNEL_TRACE_H */
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index e8d6bb55d719..a7d2a4c653d8 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -227,7 +227,7 @@ int __trace_graph_entry(struct trace_array *tr,
 
 static inline int ftrace_graph_ignore_irqs(void)
 {
-	if (!ftrace_graph_skip_irqs)
+	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
 		return 0;
 
 	return in_irq();
--
cgit v1.2.3

From 4376cac66778b25e599be3f5d54f33f58ba8ead7 Mon Sep 17 00:00:00 2001
From: Steven Rostedt
Date: Fri, 24 Jun 2011 23:28:13 -0400
Subject: ftrace: Do not disable interrupts for modules in mcount update

When I mounted an NFS directory, it caused several modules to be
loaded. At the time I was running the preemptirqsoff tracer, and it
showed the following output:

# tracer: preemptirqsoff
#
# preemptirqsoff latency trace v1.1.5 on 2.6.33.9-rt30-mrg-test
# --------------------------------------------------------------------
# latency: 1177 us, #4/4, CPU#3 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:4)
#    -----------------
#    | task: modprobe-19370 (uid:0 nice:0 policy:0 rt_prio:0)
#    -----------------
#  => started at: ftrace_module_notify
#  => ended at:   ftrace_module_notify
#
#
#                  _------=> CPU#
#                 / _-----=> irqs-off
#                | / _----=> need-resched
#                || / _---=> hardirq/softirq
#                ||| / _--=> preempt-depth
#                |||| /_--=> lock-depth
#                |||||/     delay
#  cmd     pid   |||||| time  |   caller
#     \   /      ||||||   \   |   /
modprobe-19370   3d....    0us!: ftrace_process_locs <-ftrace_module_notify
modprobe-19370   3d.... 1176us : ftrace_process_locs <-ftrace_module_notify
modprobe-19370   3d.... 1178us : trace_hardirqs_on <-ftrace_module_notify
modprobe-19370   3d.... 1178us :
 => ftrace_process_locs
 => ftrace_module_notify
 => notifier_call_chain
 => __blocking_notifier_call_chain
 => blocking_notifier_call_chain
 => sys_init_module
 => system_call_fastpath

That's over 1ms that interrupts are disabled on a Real-Time kernel!

Looking at the cause (being the ftrace author helped), I found that
interrupts are disabled before the code modification of mcounts into
nops. Interrupts only need to be disabled around this code at start up,
not when modules are being loaded.
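In outline, the fix makes the irq disabling conditional on whether we
are patching core kernel text at boot. A simplified sketch of the
change (the actual diff follows):

	/*
	 * Sketch: disable interrupts only for the boot-time pass, where
	 * the mcount sites being patched may already be reachable from
	 * interrupt context. Module text cannot run yet while we patch
	 * it, so no irq-off window is needed there.
	 */
	if (!mod)
		local_irq_save(flags);
	ftrace_update_code(mod);
	if (!mod)
		local_irq_restore(flags);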
Signed-off-by: Steven Rostedt
---
 kernel/trace/ftrace.c | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)
(limited to 'kernel/trace')

diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index c997f7371c65..df93392aad89 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3318,7 +3318,7 @@ static int ftrace_process_locs(struct module *mod,
 {
 	unsigned long *p;
 	unsigned long addr;
-	unsigned long flags;
+	unsigned long flags = 0; /* Shut up gcc */
 
 	mutex_lock(&ftrace_lock);
 	p = start;
@@ -3336,12 +3336,18 @@ static int ftrace_process_locs(struct module *mod,
 	}
 
 	/*
-	 * Disable interrupts to prevent interrupts from executing
-	 * code that is being modified.
+	 * We only need to disable interrupts on start up
+	 * because we are modifying code that an interrupt
+	 * may execute, and the modification is not atomic.
+	 * But for modules, nothing runs the code we modify
+	 * until we are finished with it, and there's no
+	 * reason to cause large interrupt latencies while we do it.
 	 */
-	local_irq_save(flags);
+	if (!mod)
+		local_irq_save(flags);
 	ftrace_update_code(mod);
-	local_irq_restore(flags);
+	if (!mod)
+		local_irq_restore(flags);
 	mutex_unlock(&ftrace_lock);
 
 	return 0;
--
cgit v1.2.3

From 41fb61c2d08107ce96a5dcb3a6289b2afd3e135c Mon Sep 17 00:00:00 2001
From: Steven Rostedt
Date: Wed, 13 Jul 2011 15:03:44 -0400
Subject: ftrace: Balance records when updating the hash

Whenever the hash of the ftrace_ops is updated, the record counts must
be balanced. This requires disabling the records that are set in the
original hash, and then enabling the records that are set in the
updated hash.

Moving the update into ftrace_hash_move() removes a bug where the hash
was updated but the records were not, which causes ftrace to trigger a
warning and disable itself: the ftrace_ops filter is updated while the
ftrace_ops is registered, and the failure then happens when the
ftrace_ops is unregistered. The current code will not trigger this bug,
but new code will.

Signed-off-by: Steven Rostedt
---
 kernel/trace/ftrace.c | 49 +++++++++++++++++++++++++++++++++----------------
 1 file changed, 33 insertions(+), 16 deletions(-)
(limited to 'kernel/trace')

diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index df93392aad89..853f6f0a4b4a 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1170,8 +1170,14 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
 	return NULL;
 }
 
+static void
+ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
+static void
+ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
+
 static int
-ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
+ftrace_hash_move(struct ftrace_ops *ops, int enable,
+		 struct ftrace_hash **dst, struct ftrace_hash *src)
 {
 	struct ftrace_func_entry *entry;
 	struct hlist_node *tp, *tn;
@@ -1181,8 +1187,15 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
 	unsigned long key;
 	int size = src->count;
 	int bits = 0;
+	int ret;
 	int i;
 
+	/*
+	 * Remove the current set, update the hash and add
+	 * them back.
+	 */
+	ftrace_hash_rec_disable(ops, enable);
+
 	/*
 	 * If the new source is empty, just free dst and assign it
 	 * the empty_hash.
@@ -1203,9 +1216,10 @@
 	if (bits > FTRACE_HASH_MAX_BITS)
 		bits = FTRACE_HASH_MAX_BITS;
 
+	ret = -ENOMEM;
 	new_hash = alloc_ftrace_hash(bits);
 	if (!new_hash)
-		return -ENOMEM;
+		goto out;
 
 	size = 1 << src->size_bits;
 	for (i = 0; i < size; i++) {
@@ -1224,7 +1238,16 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
 	rcu_assign_pointer(*dst, new_hash);
 	free_ftrace_hash_rcu(old_hash);
 
-	return 0;
+	ret = 0;
+ out:
+	/*
+	 * Enable regardless of ret:
+	 *  On success, we enable the new hash.
+	 *  On failure, we re-enable the original hash.
+	 */
+	ftrace_hash_rec_enable(ops, enable);
+
+	return ret;
 }
 
 /*
@@ -2845,7 +2868,7 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
 		ftrace_match_records(hash, buf, len);
 
 	mutex_lock(&ftrace_lock);
-	ret = ftrace_hash_move(orig_hash, hash);
+	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
 	mutex_unlock(&ftrace_lock);
 
 	mutex_unlock(&ftrace_regex_lock);
@@ -3028,18 +3051,12 @@ ftrace_regex_release(struct inode *inode, struct file *file)
 			orig_hash = &iter->ops->notrace_hash;
 
 		mutex_lock(&ftrace_lock);
-		/*
-		 * Remove the current set, update the hash and add
-		 * them back.
-		 */
-		ftrace_hash_rec_disable(iter->ops, filter_hash);
-		ret = ftrace_hash_move(orig_hash, iter->hash);
-		if (!ret) {
-			ftrace_hash_rec_enable(iter->ops, filter_hash);
-			if (iter->ops->flags & FTRACE_OPS_FL_ENABLED
-			    && ftrace_enabled)
-				ftrace_run_update_code(FTRACE_ENABLE_CALLS);
-		}
+		ret = ftrace_hash_move(iter->ops, filter_hash,
+				       orig_hash, iter->hash);
+		if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
+		    && ftrace_enabled)
+			ftrace_run_update_code(FTRACE_ENABLE_CALLS);
+
 		mutex_unlock(&ftrace_lock);
 	}
 	free_ftrace_hash(iter->hash);
--
cgit v1.2.3

From 072126f4529196f71a97960248bca54fd4554c2d Mon Sep 17 00:00:00 2001
From: Steven Rostedt
Date: Wed, 13 Jul 2011 15:08:31 -0400
Subject: ftrace: Update filter when tracing enabled in set_ftrace_filter()

Currently, if set_ftrace_filter() is called when the ftrace_ops is
active, the function filters will not be updated. They will only be
updated when tracing is disabled and re-enabled.

Update the functions immediately during set_ftrace_filter().

Signed-off-by: Steven Rostedt
---
 kernel/trace/ftrace.c | 4 ++++
 1 file changed, 4 insertions(+)
(limited to 'kernel/trace')

diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 853f6f0a4b4a..a0dc0de8d649 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2869,6 +2869,10 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
 
 	mutex_lock(&ftrace_lock);
 	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
+	if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
+	    && ftrace_enabled)
+		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
+
 	mutex_unlock(&ftrace_lock);
 
 	mutex_unlock(&ftrace_regex_lock);
--
cgit v1.2.3

From 6331c28c962561aee59e5a493b7556a4bb585957 Mon Sep 17 00:00:00 2001
From: Steven Rostedt
Date: Wed, 13 Jul 2011 15:11:02 -0400
Subject: ftrace: Fix dynamic selftest failure on some archs

Archs that do not implement CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST will
fail the dynamic ftrace selftest.

The function tracer has a quick 'off' variable that will prevent the
callback functions from being called. This variable is called
function_trace_stop. In x86, this is implemented directly in the mcount
assembly, but for other archs, an intermediate function called
ftrace_test_stop_func() is used.
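For reference, that intermediate helper looks roughly like this (a
simplified sketch of kernel/trace/ftrace.c from this era; it is not
part of the diff below):

	/*
	 * Sketch: archs without CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	 * have mcount call this helper, which checks the quick 'off'
	 * switch before invoking the real callback through
	 * __ftrace_trace_function.
	 */
	static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
	{
		if (function_trace_stop)
			return;

		__ftrace_trace_function(ip, parent_ip);
	}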
In dynamic ftrace, the function pointer variable ftrace_trace_function
is used to update the caller code in the mcount caller. But for archs
that do not have CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST set, it only
calls ftrace_test_stop_func() instead, which in turn calls
__ftrace_trace_function.

When more than one ftrace_ops is registered, the function it calls is
ftrace_ops_list_func(), which will iterate over all registered
ftrace_ops and call the callbacks that have their hash matching.

The issue happens when two ftrace_ops are registered for different
functions and one is then unregistered. The __ftrace_trace_function is
then pointed directly at the remaining ftrace_ops callback function.
This means it will be called for all functions that were registered to
be traced by both ftrace_ops that were registered.

This is not an issue for archs with
CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST, because the update of
ftrace_trace_function doesn't happen until after all functions have
been updated, and only then is the mcount caller updated. But for those
archs that do use ftrace_test_stop_func(), the update is immediate.

The dynamic selftest fails because it hits this situation, and the
ftrace_ops that it registers fails to trace only what it was supposed
to and instead traces all other functions.

The solution is to delay the setting of __ftrace_trace_function until
after all the functions have been updated according to the registered
ftrace_ops. Also, function_trace_stop is set during the update to keep
function tracing from being invoked while the update is in progress.

Signed-off-by: Steven Rostedt
---
 kernel/trace/ftrace.c | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)
(limited to 'kernel/trace')

diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index a0dc0de8d649..62e26d930535 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -88,6 +88,7 @@ static struct ftrace_ops ftrace_list_end __read_mostly = {
 static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
+static ftrace_func_t __ftrace_trace_function_delay __read_mostly = ftrace_stub;
 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 static struct ftrace_ops global_ops;
@@ -146,9 +147,11 @@
 void clear_ftrace_function(void)
 {
 	ftrace_trace_function = ftrace_stub;
 	__ftrace_trace_function = ftrace_stub;
+	__ftrace_trace_function_delay = ftrace_stub;
 	ftrace_pid_function = ftrace_stub;
 }
 
+#undef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 /*
  * For those archs that do not test ftrace_trace_stop in their
@@ -207,8 +210,13 @@ static void update_ftrace_function(void)
 
 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	ftrace_trace_function = func;
+#else
+#ifdef CONFIG_DYNAMIC_FTRACE
+	/* do not update till all functions have been modified */
+	__ftrace_trace_function_delay = func;
 #else
 	__ftrace_trace_function = func;
+#endif
 	ftrace_trace_function = ftrace_test_stop_func;
 #endif
 }
@@ -1607,6 +1615,12 @@
 static int __ftrace_modify_code(void *data)
 {
 	int *command = data;
 
+	/*
+	 * Do not call function tracer while we update the code.
+	 * We are in stop machine, no worrying about races.
+	 */
+	function_trace_stop++;
+
 	if (*command & FTRACE_ENABLE_CALLS)
 		ftrace_replace_code(1);
 	else if (*command & FTRACE_DISABLE_CALLS)
@@ -1620,6 +1634,18 @@ static int __ftrace_modify_code(void *data)
 	else if (*command & FTRACE_STOP_FUNC_RET)
 		ftrace_disable_ftrace_graph_caller();
 
+#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+	/*
+	 * For archs that call ftrace_test_stop_func(), we must
+	 * wait till after we update all the function callers
+	 * before we update the callback. This keeps different
+	 * ops that record different functions from corrupting
+	 * each other.
+	 */
+	__ftrace_trace_function = __ftrace_trace_function_delay;
+#endif
+	function_trace_stop--;
+
 	return 0;
 }
--
cgit v1.2.3

From 4a9bd3f134decd6d16ead8d288342d57aad486be Mon Sep 17 00:00:00 2001
From: Steven Rostedt
Date: Thu, 14 Jul 2011 16:36:53 -0400
Subject: tracing: Have dynamic size event stack traces

Currently the stack trace per event in ftrace is only 8 frames. This
can be quite limiting and sometimes useless. Especially when the
"ignore frames" count is wrong and we also use up stack frames for the
event processing itself.

Change this to be dynamic by adding a percpu buffer that we can write
a large stack frame into and then copy into the ring buffer.

Interrupts and NMIs that come in while another event is being processed
will only get to use the 8 frame stack. That should be enough, as the
task that was interrupted will have the full stack frame anyway.

Requested-by: Thomas Gleixner
Signed-off-by: Steven Rostedt
---
 kernel/trace/trace.c         | 92 +++++++++++++++++++++++++++++++++++++-------
 kernel/trace/trace_entries.h |  3 +-
 kernel/trace/trace_output.c  | 11 +++---
 3 files changed, 87 insertions(+), 19 deletions(-)
(limited to 'kernel/trace')

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d9c16123f6e2..e5df02c69b1d 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1248,6 +1248,15 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data,
 }
 
 #ifdef CONFIG_STACKTRACE
+
+#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
+struct ftrace_stack {
+	unsigned long		calls[FTRACE_STACK_MAX_ENTRIES];
+};
+
+static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
+static DEFINE_PER_CPU(int, ftrace_stack_reserve);
+
 static void __ftrace_trace_stack(struct ring_buffer *buffer,
 				 unsigned long flags,
 				 int skip, int pc, struct pt_regs *regs)
@@ -1256,25 +1265,77 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
 	struct ring_buffer_event *event;
 	struct stack_entry *entry;
 	struct stack_trace trace;
+	int use_stack;
+	int size = FTRACE_STACK_ENTRIES;
+
+	trace.nr_entries	= 0;
+	trace.skip		= skip;
+
+	/*
+	 * Since events can happen in NMIs there's no safe way to
+	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
+	 * or NMI comes in, it will just have to use the default
+	 * FTRACE_STACK_SIZE.
+	 */
+	preempt_disable_notrace();
+
+	use_stack = ++__get_cpu_var(ftrace_stack_reserve);
+	/*
+	 * We don't need any atomic variables, just a barrier.
+	 * If an interrupt comes in, we don't care, because it would
+	 * have exited and put the counter back to what we want.
+	 * We just need a barrier to keep gcc from moving things
+	 * around.
+	 */
+	barrier();
+	if (use_stack == 1) {
+		trace.entries		= &__get_cpu_var(ftrace_stack).calls[0];
+		trace.max_entries	= FTRACE_STACK_MAX_ENTRIES;
+
+		if (regs)
+			save_stack_trace_regs(regs, &trace);
+		else
+			save_stack_trace(&trace);
+
+		if (trace.nr_entries > size)
+			size = trace.nr_entries;
+	} else
+		/* From now on, use_stack is a boolean */
+		use_stack = 0;
+
+	size *= sizeof(unsigned long);
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
-					  sizeof(*entry), flags, pc);
+					  sizeof(*entry) + size, flags, pc);
 	if (!event)
-		return;
-	entry = ring_buffer_event_data(event);
-	memset(&entry->caller, 0, sizeof(entry->caller));
+		goto out;
+	entry = ring_buffer_event_data(event);
 
-	trace.nr_entries	= 0;
-	trace.max_entries	= FTRACE_STACK_ENTRIES;
-	trace.skip		= skip;
-	trace.entries		= entry->caller;
+	memset(&entry->caller, 0, size);
+
+	if (use_stack)
+		memcpy(&entry->caller, trace.entries,
+		       trace.nr_entries * sizeof(unsigned long));
+	else {
+		trace.max_entries	= FTRACE_STACK_ENTRIES;
+		trace.entries		= entry->caller;
+		if (regs)
+			save_stack_trace_regs(regs, &trace);
+		else
+			save_stack_trace(&trace);
+	}
+
+	entry->size = trace.nr_entries;
 
-	if (regs)
-		save_stack_trace_regs(regs, &trace);
-	else
-		save_stack_trace(&trace);
 	if (!filter_check_discard(call, entry, buffer, event))
 		ring_buffer_unlock_commit(buffer, event);
+
+ out:
+	/* Again, don't let gcc optimize things here */
+	barrier();
+	__get_cpu_var(ftrace_stack_reserve)--;
+	preempt_enable_notrace();
+
 }
 
 void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
@@ -1562,7 +1623,12 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
 
 	ftrace_enable_cpu();
 
-	return event ? ring_buffer_event_data(event) : NULL;
+	if (event) {
+		iter->ent_size = ring_buffer_event_length(event);
+		return ring_buffer_event_data(event);
+	}
+	iter->ent_size = 0;
+	return NULL;
 }
 
 static struct trace_entry *
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index e32744c84d94..93365907f219 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -161,7 +161,8 @@ FTRACE_ENTRY(kernel_stack, stack_entry,
 	TRACE_STACK,
 
 	F_STRUCT(
-		__array(	unsigned long,	caller, FTRACE_STACK_ENTRIES	)
+		__field(	int,		size	)
+		__dynamic_array(unsigned long,	caller	)
 	),
 
 	F_printk("\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n"
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index e37de492a9e1..51999309a6cf 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -1107,19 +1107,20 @@ static enum print_line_t trace_stack_print(struct trace_iterator *iter,
 {
 	struct stack_entry *field;
 	struct trace_seq *s = &iter->seq;
-	int i;
+	unsigned long *p;
+	unsigned long *end;
 
 	trace_assign_type(field, iter->ent);
+	end = (unsigned long *)((long)iter->ent + iter->ent_size);
 
 	if (!trace_seq_puts(s, "<stack trace>\n"))
 		goto partial;
-	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
-		if (!field->caller[i] || (field->caller[i] == ULONG_MAX))
-			break;
+
+	for (p = field->caller; p && *p != ULONG_MAX && p < end; p++) {
 		if (!trace_seq_puts(s, " => "))
 			goto partial;
-		if (!seq_print_ip_sym(s, field->caller[i], flags))
+		if (!seq_print_ip_sym(s, *p, flags))
 			goto partial;
 		if (!trace_seq_puts(s, "\n"))
 			goto partial;
--
cgit v1.2.3

From 7143f168e2aa4bc7f8a8bcfe46d8dc52f7be869a Mon Sep 17 00:00:00 2001
From: Masami Hiramatsu
Date: Mon, 27 Jun 2011 16:26:36 +0900
Subject: tracing/kprobes: Rename probe_* to trace_probe_*

Rename probe_* to trace_probe_* to avoid namespace conflicts.
This also renames the improperly named find_probe_event() and
cleanup_all_probes() to find_trace_probe() and
release_all_trace_probes(), respectively.

Signed-off-by: Masami Hiramatsu
Cc: Arnaldo Carvalho de Melo
Cc: Ingo Molnar
Cc: Peter Zijlstra
Cc: Frederic Weisbecker
Link: http://lkml.kernel.org/r/20110627072636.6528.60374.stgit@fedora15
Signed-off-by: Steven Rostedt
---
 kernel/trace/trace_kprobe.c | 43 ++++++++++++++++++++++---------------------
 1 file changed, 22 insertions(+), 21 deletions(-)
(limited to 'kernel/trace')

diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 7db7b68c6c37..14b88ed65bb7 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -555,12 +555,12 @@ struct trace_probe {
 	(sizeof(struct probe_arg) * (n)))
 
 
-static __kprobes int probe_is_return(struct trace_probe *tp)
+static __kprobes int trace_probe_is_return(struct trace_probe *tp)
 {
 	return tp->rp.handler != NULL;
 }
 
-static __kprobes const char *probe_symbol(struct trace_probe *tp)
+static __kprobes const char *trace_probe_symbol(struct trace_probe *tp)
 {
 	return tp->symbol ? tp->symbol : "unknown";
 }
@@ -671,7 +671,7 @@ static void free_trace_probe(struct trace_probe *tp)
 	kfree(tp);
 }
 
-static struct trace_probe *find_probe_event(const char *event,
+static struct trace_probe *find_trace_probe(const char *event,
 					    const char *group)
 {
 	struct trace_probe *tp;
@@ -686,7 +686,7 @@ static struct trace_probe *find_probe_event(const char *event,
 /* Unregister a trace_probe and probe_event: call with locking probe_lock */
 static void unregister_trace_probe(struct trace_probe *tp)
 {
-	if (probe_is_return(tp))
+	if (trace_probe_is_return(tp))
 		unregister_kretprobe(&tp->rp);
 	else
 		unregister_kprobe(&tp->rp.kp);
@@ -703,7 +703,7 @@ static int register_trace_probe(struct trace_probe *tp)
 	mutex_lock(&probe_lock);
 
 	/* register as an event */
-	old_tp = find_probe_event(tp->call.name, tp->call.class->system);
+	old_tp = find_trace_probe(tp->call.name, tp->call.class->system);
 	if (old_tp) {
 		/* delete old event */
 		unregister_trace_probe(old_tp);
@@ -716,7 +716,7 @@ static int register_trace_probe(struct trace_probe *tp)
 	}
 	tp->rp.kp.flags |= KPROBE_FLAG_DISABLED;
 
-	if (probe_is_return(tp))
+	if (trace_probe_is_return(tp))
 		ret = register_kretprobe(&tp->rp);
 	else
 		ret = register_kprobe(&tp->rp.kp);
@@ -1025,7 +1025,7 @@ static int create_trace_probe(int argc, char **argv)
 			return -EINVAL;
 		}
 		mutex_lock(&probe_lock);
-		tp = find_probe_event(event, group);
+		tp = find_trace_probe(event, group);
 		if (!tp) {
 			mutex_unlock(&probe_lock);
 			pr_info("Event %s/%s doesn't exist.\n", group, event);
@@ -1144,7 +1144,7 @@ error:
 	return ret;
 }
 
-static void cleanup_all_probes(void)
+static void release_all_trace_probes(void)
 {
 	struct trace_probe *tp;
 
@@ -1181,15 +1181,16 @@ static int probes_seq_show(struct seq_file *m, void *v)
 	struct trace_probe *tp = v;
 	int i;
 
-	seq_printf(m, "%c", probe_is_return(tp) ? 'r' : 'p');
+	seq_printf(m, "%c", trace_probe_is_return(tp) ? 'r' : 'p');
 	seq_printf(m, ":%s/%s", tp->call.class->system, tp->call.name);
 
 	if (!tp->symbol)
 		seq_printf(m, " 0x%p", tp->rp.kp.addr);
 	else if (tp->rp.kp.offset)
-		seq_printf(m, " %s+%u", probe_symbol(tp), tp->rp.kp.offset);
+		seq_printf(m, " %s+%u", trace_probe_symbol(tp),
+			   tp->rp.kp.offset);
 	else
-		seq_printf(m, " %s", probe_symbol(tp));
+		seq_printf(m, " %s", trace_probe_symbol(tp));
 
 	for (i = 0; i < tp->nr_args; i++)
 		seq_printf(m, " %s=%s", tp->args[i].name, tp->args[i].comm);
@@ -1209,7 +1210,7 @@ static int probes_open(struct inode *inode, struct file *file)
 {
 	if ((file->f_mode & FMODE_WRITE) &&
 	    (file->f_flags & O_TRUNC))
-		cleanup_all_probes();
+		release_all_trace_probes();
 
 	return seq_open(file, &probes_seq_op);
 }
@@ -1518,7 +1519,7 @@ static int probe_event_enable(struct ftrace_event_call *call)
 	struct trace_probe *tp = (struct trace_probe *)call->data;
 
 	tp->flags |= TP_FLAG_TRACE;
-	if (probe_is_return(tp))
+	if (trace_probe_is_return(tp))
 		return enable_kretprobe(&tp->rp);
 	else
 		return enable_kprobe(&tp->rp.kp);
@@ -1530,7 +1531,7 @@ static void probe_event_disable(struct ftrace_event_call *call)
 
 	tp->flags &= ~TP_FLAG_TRACE;
 	if (!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE))) {
-		if (probe_is_return(tp))
+		if (trace_probe_is_return(tp))
 			disable_kretprobe(&tp->rp);
 		else
 			disable_kprobe(&tp->rp.kp);
@@ -1598,7 +1599,7 @@ static int __set_print_fmt(struct trace_probe *tp, char *buf, int len)
 
 	const char *fmt, *arg;
 
-	if (!probe_is_return(tp)) {
+	if (!trace_probe_is_return(tp)) {
 		fmt = "(%lx)";
 		arg = "REC->" FIELD_STRING_IP;
 	} else {
@@ -1722,7 +1723,7 @@ static int probe_perf_enable(struct ftrace_event_call *call)
 
 	tp->flags |= TP_FLAG_PROFILE;
 
-	if (probe_is_return(tp))
+	if (trace_probe_is_return(tp))
 		return enable_kretprobe(&tp->rp);
 	else
 		return enable_kprobe(&tp->rp.kp);
@@ -1735,7 +1736,7 @@ static void probe_perf_disable(struct ftrace_event_call *call)
 
 	tp->flags &= ~TP_FLAG_PROFILE;
 	if (!(tp->flags & TP_FLAG_TRACE)) {
-		if (probe_is_return(tp))
+		if (trace_probe_is_return(tp))
 			disable_kretprobe(&tp->rp);
 		else
 			disable_kprobe(&tp->rp.kp);
@@ -1807,7 +1808,7 @@ static int register_probe_event(struct trace_probe *tp)
 
 	/* Initialize ftrace_event_call */
 	INIT_LIST_HEAD(&call->class->fields);
-	if (probe_is_return(tp)) {
+	if (trace_probe_is_return(tp)) {
 		call->event.funcs = &kretprobe_funcs;
 		call->class->define_fields = kretprobe_event_define_fields;
 	} else {
@@ -1899,7 +1900,7 @@ static __init int kprobe_trace_self_tests_init(void)
 		warn++;
 	} else {
 		/* Enable trace point */
-		tp = find_probe_event("testprobe", KPROBE_EVENT_SYSTEM);
+		tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
 		if (WARN_ON_ONCE(tp == NULL)) {
 			pr_warning("error on getting new probe.\n");
 			warn++;
@@ -1914,7 +1915,7 @@ static __init int kprobe_trace_self_tests_init(void)
 		warn++;
 	} else {
 		/* Enable trace point */
-		tp = find_probe_event("testprobe2", KPROBE_EVENT_SYSTEM);
+		tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
 		if (WARN_ON_ONCE(tp == NULL)) {
 			pr_warning("error on getting new probe.\n");
 			warn++;
@@ -1940,7 +1941,7 @@ static __init int kprobe_trace_self_tests_init(void)
 	}
 
 end:
-	cleanup_all_probes();
+	release_all_trace_probes();
 	if (warn)
 		pr_cont("NG: Some tests are failed. Please check them.\n");
 	else
--
cgit v1.2.3

From 1538f888f1e793de04e0f90372352ac1b05833cf Mon Sep 17 00:00:00 2001
From: Masami Hiramatsu
Date: Mon, 27 Jun 2011 16:26:44 +0900
Subject: tracing/kprobes: Merge trace probe enable/disable functions

Merge redundant enable/disable functions into enable_trace_probe()
and disable_trace_probe().

Signed-off-by: Masami Hiramatsu
Cc: Arnaldo Carvalho de Melo
Cc: Ingo Molnar
Cc: Peter Zijlstra
Cc: yrl.pp-manager.tt@hitachi.com
Cc: Frederic Weisbecker
Cc: Ingo Molnar
Link: http://lkml.kernel.org/r/20110627072644.6528.26910.stgit@fedora15

[ converted kprobe selftest to use enable_trace_probe ]

Signed-off-by: Steven Rostedt
---
 kernel/trace/trace_kprobe.c | 92 ++++++++++++++++++---------------------------
 1 file changed, 36 insertions(+), 56 deletions(-)
(limited to 'kernel/trace')

diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 14b88ed65bb7..3be50c52d080 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -683,6 +683,34 @@ static struct trace_probe *find_trace_probe(const char *event,
 	return NULL;
 }
 
+/* Enable trace_probe - @flag must be TP_FLAG_TRACE or TP_FLAG_PROFILE */
+static int enable_trace_probe(struct trace_probe *tp, int flag)
+{
+	int ret = 0;
+
+	tp->flags |= flag;
+	if (tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE)) {
+		if (trace_probe_is_return(tp))
+			ret = enable_kretprobe(&tp->rp);
+		else
+			ret = enable_kprobe(&tp->rp.kp);
+	}
+
+	return ret;
+}
+
+/* Disable trace_probe - @flag must be TP_FLAG_TRACE or TP_FLAG_PROFILE */
+static void disable_trace_probe(struct trace_probe *tp, int flag)
+{
+	tp->flags &= ~flag;
+	if (!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE))) {
+		if (trace_probe_is_return(tp))
+			disable_kretprobe(&tp->rp);
+		else
+			disable_kprobe(&tp->rp.kp);
+	}
+}
+
 /* Unregister a trace_probe and probe_event: call with locking probe_lock */
 static void unregister_trace_probe(struct trace_probe *tp)
 {
@@ -1514,30 +1542,6 @@ partial:
 	return TRACE_TYPE_PARTIAL_LINE;
 }
 
-static int probe_event_enable(struct ftrace_event_call *call)
-{
-	struct trace_probe *tp = (struct trace_probe *)call->data;
-
-	tp->flags |= TP_FLAG_TRACE;
-	if (trace_probe_is_return(tp))
-		return enable_kretprobe(&tp->rp);
-	else
-		return enable_kprobe(&tp->rp.kp);
-}
-
-static void probe_event_disable(struct ftrace_event_call *call)
-{
-	struct trace_probe *tp = (struct trace_probe *)call->data;
-
-	tp->flags &= ~TP_FLAG_TRACE;
-	if (!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE))) {
-		if (trace_probe_is_return(tp))
-			disable_kretprobe(&tp->rp);
-		else
-			disable_kprobe(&tp->rp.kp);
-	}
-}
-
 #undef DEFINE_FIELD
 #define DEFINE_FIELD(type, item, name, is_signed)			\
 	do {								\
@@ -1716,49 +1720,25 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
 	head = this_cpu_ptr(call->perf_events);
 	perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1, regs, head);
 }
-
-static int probe_perf_enable(struct ftrace_event_call *call)
-{
-	struct trace_probe *tp = (struct trace_probe *)call->data;
-
-	tp->flags |= TP_FLAG_PROFILE;
-
-	if (trace_probe_is_return(tp))
-		return enable_kretprobe(&tp->rp);
-	else
-		return enable_kprobe(&tp->rp.kp);
-}
-
-static void probe_perf_disable(struct ftrace_event_call *call)
-{
-	struct trace_probe *tp = (struct trace_probe *)call->data;
-
-	tp->flags &= ~TP_FLAG_PROFILE;
-
-	if (!(tp->flags & TP_FLAG_TRACE)) {
-		if (trace_probe_is_return(tp))
-			disable_kretprobe(&tp->rp);
-		else
-			disable_kprobe(&tp->rp.kp);
-	}
-}
 #endif	/* CONFIG_PERF_EVENTS */
 
 static __kprobes int
 kprobe_register(struct ftrace_event_call *event, enum trace_reg type)
 {
+	struct trace_probe *tp = (struct trace_probe *)event->data;
+
 	switch (type) {
 	case TRACE_REG_REGISTER:
-		return probe_event_enable(event);
+		return enable_trace_probe(tp, TP_FLAG_TRACE);
 	case TRACE_REG_UNREGISTER:
-		probe_event_disable(event);
+		disable_trace_probe(tp, TP_FLAG_TRACE);
 		return 0;
 
 #ifdef CONFIG_PERF_EVENTS
 	case TRACE_REG_PERF_REGISTER:
-		return probe_perf_enable(event);
+		return enable_trace_probe(tp, TP_FLAG_PROFILE);
 	case TRACE_REG_PERF_UNREGISTER:
-		probe_perf_disable(event);
+		disable_trace_probe(tp, TP_FLAG_PROFILE);
 		return 0;
 #endif
 	}
@@ -1905,7 +1885,7 @@ static __init int kprobe_trace_self_tests_init(void)
 			pr_warning("error on getting new probe.\n");
 			warn++;
 		} else
-			probe_event_enable(&tp->call);
+			enable_trace_probe(tp, TP_FLAG_TRACE);
 	}
 
 	ret = command_trace_probe("r:testprobe2 kprobe_trace_selftest_target "
@@ -1920,7 +1900,7 @@ static __init int kprobe_trace_self_tests_init(void)
 			pr_warning("error on getting new probe.\n");
 			warn++;
 		} else
-			probe_event_enable(&tp->call);
+			enable_trace_probe(tp, TP_FLAG_TRACE);
 	}
 
 	if (warn)
--
cgit v1.2.3

From 614243181050436785f5a621749a7da2336a7916 Mon Sep 17 00:00:00 2001
From: Masami Hiramatsu
Date: Mon, 27 Jun 2011 16:26:56 +0900
Subject: tracing/kprobes: Support module init function probing

To support probing module init functions, kprobe-tracer allows the user
to define a probe on a function that is not present yet, when it is
given together with a module name. This also enables the user to set a
probe on a function in a specific module, even if a different function
of the same name is locally defined in another module.

The module name must come in front of the function name, separated by
a ':'.

e.g. btrfs:btrfs_init_sysfs

Signed-off-by: Masami Hiramatsu
Cc: Ingo Molnar
Cc: Peter Zijlstra
Cc: Frederic Weisbecker
Cc: Arnaldo Carvalho de Melo
Link: http://lkml.kernel.org/r/20110627072656.6528.89970.stgit@fedora15
Signed-off-by: Steven Rostedt
---
 kernel/trace/trace_kprobe.c | 164 +++++++++++++++++++++++++++++++++++++-------
 1 file changed, 138 insertions(+), 26 deletions(-)
(limited to 'kernel/trace')

diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 3be50c52d080..acc6664a6b22 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -536,6 +536,7 @@ struct probe_arg {
 /* Flags for trace_probe */
 #define TP_FLAG_TRACE	1
 #define TP_FLAG_PROFILE	2
+#define TP_FLAG_REGISTERED 4
 
 struct trace_probe {
 	struct list_head	list;
@@ -565,6 +566,39 @@ static __kprobes const char *trace_probe_symbol(struct trace_probe *tp)
 	return tp->symbol ? tp->symbol : "unknown";
 }
 
+static __kprobes unsigned long trace_probe_offset(struct trace_probe *tp)
+{
+	return tp->rp.kp.offset;
+}
+
+static __kprobes bool trace_probe_is_enabled(struct trace_probe *tp)
+{
+	return !!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE));
+}
+
+static __kprobes bool trace_probe_is_registered(struct trace_probe *tp)
+{
+	return !!(tp->flags & TP_FLAG_REGISTERED);
+}
+
+static __kprobes bool trace_probe_has_gone(struct trace_probe *tp)
+{
+	return !!(kprobe_gone(&tp->rp.kp));
+}
+
+static __kprobes bool trace_probe_within_module(struct trace_probe *tp,
+						struct module *mod)
+{
+	int len = strlen(mod->name);
+	const char *name = trace_probe_symbol(tp);
+	return strncmp(mod->name, name, len) == 0 && name[len] == ':';
+}
+
+static __kprobes bool trace_probe_is_on_module(struct trace_probe *tp)
+{
+	return !!strchr(trace_probe_symbol(tp), ':');
+}
+
 static int register_probe_event(struct trace_probe *tp);
 static void unregister_probe_event(struct trace_probe *tp);
 
@@ -689,7 +723,8 @@ static int enable_trace_probe(struct trace_probe *tp, int flag)
 	int ret = 0;
 
 	tp->flags |= flag;
-	if (tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE)) {
+	if (trace_probe_is_enabled(tp) && trace_probe_is_registered(tp) &&
+	    !trace_probe_has_gone(tp)) {
 		if (trace_probe_is_return(tp))
 			ret = enable_kretprobe(&tp->rp);
 		else
@@ -703,7 +738,7 @@ static int enable_trace_probe(struct trace_probe *tp, int flag)
 static void disable_trace_probe(struct trace_probe *tp, int flag)
 {
 	tp->flags &= ~flag;
-	if (!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE))) {
+	if (!trace_probe_is_enabled(tp) && trace_probe_is_registered(tp)) {
 		if (trace_probe_is_return(tp))
 			disable_kretprobe(&tp->rp);
 		else
@@ -711,13 +746,64 @@ static void disable_trace_probe(struct trace_probe *tp, int flag)
 	}
 }
 
-/* Unregister a trace_probe and probe_event: call with locking probe_lock */
-static void unregister_trace_probe(struct trace_probe *tp)
+/* Internal register function - just handle k*probes and flags */
+static int __register_trace_probe(struct trace_probe *tp)
 {
+	int ret;
+
+	if (trace_probe_is_registered(tp))
+		return -EINVAL;
+
+	/* Set/clear disabled flag according to tp->flag */
+	if (trace_probe_is_enabled(tp))
+		tp->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
+	else
+		tp->rp.kp.flags |= KPROBE_FLAG_DISABLED;
+
 	if (trace_probe_is_return(tp))
-		unregister_kretprobe(&tp->rp);
+		ret = register_kretprobe(&tp->rp);
 	else
-		unregister_kprobe(&tp->rp.kp);
+		ret = register_kprobe(&tp->rp.kp);
+
+	if (ret == 0)
+		tp->flags |= TP_FLAG_REGISTERED;
+	else {
+		pr_warning("Could not insert probe at %s+%lu: %d\n",
+			   trace_probe_symbol(tp), trace_probe_offset(tp), ret);
+		if (ret == -ENOENT && trace_probe_is_on_module(tp)) {
+			pr_warning("This probe might be able to register after"
Continue.\n"); + ret = 0; + } else if (ret == -EILSEQ) { + pr_warning("Probing address(0x%p) is not an " + "instruction boundary.\n", + tp->rp.kp.addr); + ret = -EINVAL; + } + } + + return ret; +} + +/* Internal unregister function - just handle k*probes and flags */ +static void __unregister_trace_probe(struct trace_probe *tp) +{ + if (trace_probe_is_registered(tp)) { + if (trace_probe_is_return(tp)) + unregister_kretprobe(&tp->rp); + else + unregister_kprobe(&tp->rp.kp); + tp->flags &= ~TP_FLAG_REGISTERED; + /* Cleanup kprobe for reuse */ + if (tp->rp.kp.symbol_name) + tp->rp.kp.addr = NULL; + } +} + +/* Unregister a trace_probe and probe_event: call with locking probe_lock */ +static void unregister_trace_probe(struct trace_probe *tp) +{ + __unregister_trace_probe(tp); list_del(&tp->list); unregister_probe_event(tp); } @@ -730,41 +816,65 @@ static int register_trace_probe(struct trace_probe *tp) mutex_lock(&probe_lock); - /* register as an event */ + /* Delete old (same name) event if exist */ old_tp = find_trace_probe(tp->call.name, tp->call.class->system); if (old_tp) { - /* delete old event */ unregister_trace_probe(old_tp); free_trace_probe(old_tp); } + + /* Register new event */ ret = register_probe_event(tp); if (ret) { pr_warning("Failed to register probe event(%d)\n", ret); goto end; } - tp->rp.kp.flags |= KPROBE_FLAG_DISABLED; - if (trace_probe_is_return(tp)) - ret = register_kretprobe(&tp->rp); - else - ret = register_kprobe(&tp->rp.kp); - - if (ret) { - pr_warning("Could not insert probe(%d)\n", ret); - if (ret == -EILSEQ) { - pr_warning("Probing address(0x%p) is not an " - "instruction boundary.\n", - tp->rp.kp.addr); - ret = -EINVAL; - } + /* Register k*probe */ + ret = __register_trace_probe(tp); + if (ret < 0) unregister_probe_event(tp); - } else + else list_add_tail(&tp->list, &probe_list); + end: mutex_unlock(&probe_lock); return ret; } +/* Module notifier call back, checking event on the module */ +static int trace_probe_module_callback(struct notifier_block *nb, + unsigned long val, void *data) +{ + struct module *mod = data; + struct trace_probe *tp; + int ret; + + if (val != MODULE_STATE_COMING) + return NOTIFY_DONE; + + /* Update probes on coming module */ + mutex_lock(&probe_lock); + list_for_each_entry(tp, &probe_list, list) { + if (trace_probe_within_module(tp, mod)) { + __unregister_trace_probe(tp); + ret = __register_trace_probe(tp); + if (ret) + pr_warning("Failed to re-register probe %s on" + "%s: %d\n", + tp->call.name, mod->name, ret); + } + } + mutex_unlock(&probe_lock); + + return NOTIFY_DONE; +} + +static struct notifier_block trace_probe_module_nb = { + .notifier_call = trace_probe_module_callback, + .priority = 1 /* Invoked after kprobe module callback */ +}; + /* Split symbol and offset. 
 static int split_symbol_offset(char *symbol, unsigned long *offset)
 {
@@ -990,8 +1100,8 @@ static int create_trace_probe(int argc, char **argv)
 {
 	/*
 	 * Argument syntax:
-	 *  - Add kprobe: p[:[GRP/]EVENT] KSYM[+OFFS]|KADDR [FETCHARGS]
-	 *  - Add kretprobe: r[:[GRP/]EVENT] KSYM[+0] [FETCHARGS]
+	 *  - Add kprobe: p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
+	 *  - Add kretprobe: r[:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
 	 * Fetch args:
 	 *  $retval	: fetch return value
 	 *  $stack	: fetch stack address
@@ -1186,7 +1296,6 @@ static void release_all_trace_probes(void)
 	mutex_unlock(&probe_lock);
 }
 
-
 /* Probes listing interfaces */
 static void *probes_seq_start(struct seq_file *m, loff_t *pos)
 {
@@ -1827,6 +1936,9 @@ static __init int init_kprobe_trace(void)
 	struct dentry *d_tracer;
 	struct dentry *entry;
 
+	if (register_module_notifier(&trace_probe_module_nb))
+		return -EINVAL;
+
 	d_tracer = tracing_init_dentry();
 	if (!d_tracer)
 		return 0;
--
cgit v1.2.3

From 7f6878a3d707b947603e09d95df0c3a98987e3a4 Mon Sep 17 00:00:00 2001
From: Masami Hiramatsu
Date: Mon, 27 Jun 2011 16:27:03 +0900
Subject: tracing/kprobe: Update symbol reference when loading module

Since the address of a module-local variable can only be resolved after
the target module is loaded, the symbol fetch-argument should be
updated when the target module is loaded.

Signed-off-by: Masami Hiramatsu
Cc: Ingo Molnar
Cc: Peter Zijlstra
Cc: Frederic Weisbecker
Cc: Arnaldo Carvalho de Melo
Link: http://lkml.kernel.org/r/20110627072703.6528.75042.stgit@fedora15
Signed-off-by: Steven Rostedt
---
 kernel/trace/trace_kprobe.c | 37 ++++++++++++++++++++++++++++++++++++-
 1 file changed, 36 insertions(+), 1 deletion(-)
(limited to 'kernel/trace')

diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index acc6664a6b22..5fb3697bf0e5 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -343,6 +343,14 @@ DEFINE_BASIC_FETCH_FUNCS(deref)
 DEFINE_FETCH_deref(string)
 DEFINE_FETCH_deref(string_size)
 
+static __kprobes void update_deref_fetch_param(struct deref_fetch_param *data)
+{
+	if (CHECK_FETCH_FUNCS(deref, data->orig.fn))
+		update_deref_fetch_param(data->orig.data);
+	else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn))
+		update_symbol_cache(data->orig.data);
+}
+
 static __kprobes void free_deref_fetch_param(struct deref_fetch_param *data)
 {
 	if (CHECK_FETCH_FUNCS(deref, data->orig.fn))
@@ -376,6 +384,19 @@ DEFINE_BASIC_FETCH_FUNCS(bitfield)
 #define fetch_bitfield_string NULL
 #define fetch_bitfield_string_size NULL
 
+static __kprobes void
+update_bitfield_fetch_param(struct bitfield_fetch_param *data)
+{
+	/*
+	 * Don't check the bitfield itself, because this must be the
+	 * last fetch function.
+	 */
+	if (CHECK_FETCH_FUNCS(deref, data->orig.fn))
+		update_deref_fetch_param(data->orig.data);
+	else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn))
+		update_symbol_cache(data->orig.data);
+}
+
 static __kprobes void
 free_bitfield_fetch_param(struct bitfield_fetch_param *data)
 {
@@ -389,6 +410,7 @@ free_bitfield_fetch_param(struct bitfield_fetch_param *data)
 		free_symbol_cache(data->orig.data);
 	kfree(data);
 }
+
 /* Default (unsigned long) fetch type */
 #define __DEFAULT_FETCH_TYPE(t) u##t
 #define _DEFAULT_FETCH_TYPE(t) __DEFAULT_FETCH_TYPE(t)
@@ -680,6 +702,16 @@ error:
 	return ERR_PTR(ret);
 }
 
+static void update_probe_arg(struct probe_arg *arg)
+{
+	if (CHECK_FETCH_FUNCS(bitfield, arg->fetch.fn))
+		update_bitfield_fetch_param(arg->fetch.data);
+	else if (CHECK_FETCH_FUNCS(deref, arg->fetch.fn))
+		update_deref_fetch_param(arg->fetch.data);
+	else if (CHECK_FETCH_FUNCS(symbol, arg->fetch.fn))
+		update_symbol_cache(arg->fetch.data);
+}
+
 static void free_probe_arg(struct probe_arg *arg)
 {
 	if (CHECK_FETCH_FUNCS(bitfield, arg->fetch.fn))
@@ -749,11 +781,14 @@ static void disable_trace_probe(struct trace_probe *tp, int flag)
 /* Internal register function - just handle k*probes and flags */
 static int __register_trace_probe(struct trace_probe *tp)
 {
-	int ret;
+	int i, ret;
 
 	if (trace_probe_is_registered(tp))
		return -EINVAL;
 
+	for (i = 0; i < tp->nr_args; i++)
+		update_probe_arg(&tp->args[i]);
+
 	/* Set/clear disabled flag according to tp->flag */
 	if (trace_probe_is_enabled(tp))
 		tp->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
--
cgit v1.2.3
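A note on the last patch: update_symbol_cache() itself is not shown in
these hunks. Roughly, it re-runs the symbol lookup so that a
module-local symbol picks up its real address once the module (and its
kallsyms entries) is present. A simplified sketch, assuming the
symbol_cache layout used by trace_kprobe.c at the time:

	/*
	 * Sketch (assumed struct layout): re-resolve the symbol now that
	 * the target module is loaded, then re-apply the recorded offset.
	 */
	static void update_symbol_cache(struct symbol_cache *sc)
	{
		sc->addr = kallsyms_lookup_name(sc->symbol);
		if (sc->addr)
			sc->addr += sc->offset;
	}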