author     Linus Torvalds <torvalds@linux-foundation.org>   2014-08-04 11:50:00 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-08-04 11:50:00 -0700
commit     b8c0aa46b3e86083721b57ed2eec6bd2c29ebfba
tree       45e349bf8a14aa99279d323fdc515e849fd349f3 /include
parent     c7ed326fa7cafb83ced5a8b02517a61672fe9e90
parent     dc6f03f26f570104a2bb03f9d1deb588026d7c75
Merge tag 'trace-3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:
"This pull request has a lot of work done. The main thing is the
changes to the ftrace function callback infrastructure. It's
introducing a way to allow different functions to call directly
different trampolines instead of all calling the same "mcount" one.
The only user of this for now is the function graph tracer, which
always had a different trampoline, but the function tracer trampoline
was called and did basically nothing, and then the function graph
tracer trampoline was called. The difference now, is that the
function graph tracer trampoline can be called directly if a function
is only being traced by the function graph trampoline. If function
tracing is also happening on the same function, the old way is still
done.
The accounting for this takes up more memory when function graph
tracing is activated, as it needs to keep track of which functions it
traces. I have a new approach that won't take as much memory, but it
is not ready for this merge window and will have to wait for the next
one.
Another big change is the removal of the ftrace_start/stop() calls
that the suspend/resume code used to stop function tracing when
entering the suspend and resume paths. Tracing was stopped there
because some function would crash the system if smp_processor_id()
was called from it. The stop/start was a big hammer to solve the
issue at the time, back when ftrace was first introduced into Linux.
Now ftrace has better infrastructure to debug such issues; I found
the problem function and marked it "notrace", so function tracing can
now safely be activated all the way down into the guts of suspend and
resume (see the notrace sketch after this message).
Other changes include cleanups of the uprobe code, cleanup of the
trace_seq() code, and various other small fixes and cleanups to
ftrace and tracing"
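
The per-function trampoline choice described in the message can be
illustrated with a small standalone sketch. This is not the kernel's
code: the struct, the pick_trampoline() helper and the addresses are
invented for illustration. It only mimics the decision described
above, where a call site traced by exactly one tracer that owns a
trampoline is patched to jump to that trampoline directly, and
everything else keeps going through the shared list function.

/*
 * Illustrative sketch only, not the kernel's implementation.
 */
#include <stdio.h>

struct ftrace_ops_sketch {
	const char	*name;
	unsigned long	trampoline;	/* 0 if the ops has no private trampoline */
};

/* generic entry point that walks every registered ops (the "old way") */
#define LIST_FUNC_ADDR	0x1000UL

static unsigned long pick_trampoline(struct ftrace_ops_sketch **tracers,
				     int nr_tracers)
{
	/* exactly one tracer with its own trampoline: jump to it directly */
	if (nr_tracers == 1 && tracers[0]->trampoline)
		return tracers[0]->trampoline;

	/* several tracers (or none with a trampoline): use the list function */
	return LIST_FUNC_ADDR;
}

int main(void)
{
	struct ftrace_ops_sketch graph = { "function_graph", 0x2000UL };
	struct ftrace_ops_sketch func  = { "function",       0 };

	struct ftrace_ops_sketch *only_graph[] = { &graph };
	struct ftrace_ops_sketch *both[]       = { &graph, &func };

	printf("graph only -> 0x%lx\n", pick_trampoline(only_graph, 1));
	printf("graph+func -> 0x%lx\n", pick_trampoline(both, 2));
	return 0;
}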
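The "notrace" fix mentioned for the suspend/resume path relies on a
compiler attribute that keeps gcc from emitting the mcount call when a
file is built with -pg. The define below mirrors how the kernel spells
notrace; the example function is hypothetical.

/* How "notrace" keeps a function out of the tracer's reach. */
#define notrace __attribute__((no_instrument_function))

/*
 * With -pg, gcc normally inserts a call to mcount (the ftrace entry
 * point) at the start of every function. The attribute above
 * suppresses that call, so a function that is unsafe to trace, for
 * example one running so early in resume that smp_processor_id()
 * is not yet valid, is simply never entered from the trampoline.
 * early_resume_helper() is a made-up example.
 */
static notrace int early_resume_helper(void)
{
	return 0;
}

int main(void)
{
	return early_resume_helper();
}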
* tag 'trace-3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (57 commits)
ftrace: Add warning if tramp hash does not match nr_trampolines
ftrace: Fix trampoline hash update check on rec->flags
ring-buffer: Use rb_page_size() instead of open coded head_page size
ftrace: Rename ftrace_ops field from trampolines to nr_trampolines
tracing: Convert local function_graph functions to static
ftrace: Do not copy old hash when resetting
tracing: let user specify tracing_thresh after selecting function_graph
ring-buffer: Always run per-cpu ring buffer resize with schedule_work_on()
tracing: Remove function_trace_stop and HAVE_FUNCTION_TRACE_MCOUNT_TEST
s390/ftrace: remove check of obsolete variable function_trace_stop
arm64, ftrace: Remove check of obsolete variable function_trace_stop
Blackfin: ftrace: Remove check of obsolete variable function_trace_stop
metag: ftrace: Remove check of obsolete variable function_trace_stop
microblaze: ftrace: Remove check of obsolete variable function_trace_stop
MIPS: ftrace: Remove check of obsolete variable function_trace_stop
parisc: ftrace: Remove check of obsolete variable function_trace_stop
sh: ftrace: Remove check of obsolete variable function_trace_stop
sparc64,ftrace: Remove check of obsolete variable function_trace_stop
tile: ftrace: Remove check of obsolete variable function_trace_stop
ftrace: x86: Remove check of obsolete variable function_trace_stop
...
Diffstat (limited to 'include')
-rw-r--r--   include/linux/ftrace.h    | 68
-rw-r--r--   include/linux/trace_seq.h | 36
2 files changed, 54 insertions, 50 deletions
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 404a686a3644..6bb5e3f2a3b4 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -33,8 +33,7 @@
  * features, then it must call an indirect function that
  * does. Or at least does enough to prevent any unwelcomed side effects.
  */
-#if !defined(CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST) || \
-	!ARCH_SUPPORTS_FTRACE_OPS
+#if !ARCH_SUPPORTS_FTRACE_OPS
 # define FTRACE_FORCE_LIST_FUNC 1
 #else
 # define FTRACE_FORCE_LIST_FUNC 0
@@ -118,17 +117,18 @@ struct ftrace_ops {
 	ftrace_func_t			func;
 	struct ftrace_ops		*next;
 	unsigned long			flags;
-	int __percpu			*disabled;
 	void				*private;
+	int __percpu			*disabled;
 #ifdef CONFIG_DYNAMIC_FTRACE
+	int				nr_trampolines;
 	struct ftrace_hash		*notrace_hash;
 	struct ftrace_hash		*filter_hash;
+	struct ftrace_hash		*tramp_hash;
 	struct mutex			regex_lock;
+	unsigned long			trampoline;
 #endif
 };
 
-extern int function_trace_stop;
-
 /*
  * Type of the current tracing.
  */
@@ -140,32 +140,6 @@ enum ftrace_tracing_type_t {
 /* Current tracing type, default is FTRACE_TYPE_ENTER */
 extern enum ftrace_tracing_type_t ftrace_tracing_type;
 
-/**
- * ftrace_stop - stop function tracer.
- *
- * A quick way to stop the function tracer. Note this an on off switch,
- * it is not something that is recursive like preempt_disable.
- * This does not disable the calling of mcount, it only stops the
- * calling of functions from mcount.
- */
-static inline void ftrace_stop(void)
-{
-	function_trace_stop = 1;
-}
-
-/**
- * ftrace_start - start the function tracer.
- *
- * This function is the inverse of ftrace_stop. This does not enable
- * the function tracing if the function tracer is disabled. This only
- * sets the function tracer flag to continue calling the functions
- * from mcount.
- */
-static inline void ftrace_start(void)
-{
-	function_trace_stop = 0;
-}
-
 /*
  * The ftrace_ops must be a static and should also
  * be read_mostly. These functions do modify read_mostly variables
@@ -242,8 +216,6 @@ static inline int ftrace_nr_registered_ops(void)
 }
 static inline void clear_ftrace_function(void) { }
 static inline void ftrace_kill(void) { }
-static inline void ftrace_stop(void) { }
-static inline void ftrace_start(void) { }
 #endif /* CONFIG_FUNCTION_TRACER */
 
 #ifdef CONFIG_STACK_TRACER
@@ -317,13 +289,20 @@ extern int ftrace_nr_registered_ops(void);
  * from tracing that function.
  */
 enum {
-	FTRACE_FL_ENABLED	= (1UL << 29),
+	FTRACE_FL_ENABLED	= (1UL << 31),
 	FTRACE_FL_REGS		= (1UL << 30),
-	FTRACE_FL_REGS_EN	= (1UL << 31)
+	FTRACE_FL_REGS_EN	= (1UL << 29),
+	FTRACE_FL_TRAMP		= (1UL << 28),
+	FTRACE_FL_TRAMP_EN	= (1UL << 27),
 };
 
-#define FTRACE_FL_MASK		(0x7UL << 29)
-#define FTRACE_REF_MAX		((1UL << 29) - 1)
+#define FTRACE_REF_MAX_SHIFT	27
+#define FTRACE_FL_BITS		5
+#define FTRACE_FL_MASKED_BITS	((1UL << FTRACE_FL_BITS) - 1)
+#define FTRACE_FL_MASK		(FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT)
+#define FTRACE_REF_MAX		((1UL << FTRACE_REF_MAX_SHIFT) - 1)
+
+#define ftrace_rec_count(rec)	((rec)->flags & ~FTRACE_FL_MASK)
 
 struct dyn_ftrace {
 	unsigned long		ip; /* address of mcount call-site */
@@ -431,6 +410,10 @@ void ftrace_modify_all_code(int command);
 #define FTRACE_ADDR ((unsigned long)ftrace_caller)
 #endif
 
+#ifndef FTRACE_GRAPH_ADDR
+#define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller)
+#endif
+
 #ifndef FTRACE_REGS_ADDR
 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
 # define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
@@ -439,6 +422,16 @@ void ftrace_modify_all_code(int command);
 #endif
 #endif
 
+/*
+ * If an arch would like functions that are only traced
+ * by the function graph tracer to jump directly to its own
+ * trampoline, then they can define FTRACE_GRAPH_TRAMP_ADDR
+ * to be that address to jump to.
+ */
+#ifndef FTRACE_GRAPH_TRAMP_ADDR
+#define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
+#endif
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 extern void ftrace_graph_caller(void);
 extern int ftrace_enable_ftrace_graph_caller(void);
@@ -736,6 +729,7 @@ extern char __irqentry_text_end[];
 extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
 				trace_func_graph_ent_t entryfunc);
 
+extern bool ftrace_graph_is_dead(void);
 extern void ftrace_graph_stop(void);
 
 /* The current handlers in use */
diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h
index 136116924d8d..ea6c9dea79e3 100644
--- a/include/linux/trace_seq.h
+++ b/include/linux/trace_seq.h
@@ -25,6 +25,21 @@ trace_seq_init(struct trace_seq *s)
 	s->full = 0;
 }
 
+/**
+ * trace_seq_buffer_ptr - return pointer to next location in buffer
+ * @s: trace sequence descriptor
+ *
+ * Returns the pointer to the buffer where the next write to
+ * the buffer will happen. This is useful to save the location
+ * that is about to be written to and then return the result
+ * of that write.
+ */
+static inline unsigned char *
+trace_seq_buffer_ptr(struct trace_seq *s)
+{
+	return s->buffer + s->len;
+}
+
 /*
  * Currently only defined when tracing is enabled.
  */
@@ -36,14 +51,13 @@ int trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args);
 extern int
 trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary);
 extern int trace_print_seq(struct seq_file *m, struct trace_seq *s);
-extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
-				 size_t cnt);
+extern int trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
+			     int cnt);
 extern int trace_seq_puts(struct trace_seq *s, const char *str);
 extern int trace_seq_putc(struct trace_seq *s, unsigned char c);
-extern int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len);
+extern int trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len);
 extern int trace_seq_putmem_hex(struct trace_seq *s, const void *mem,
-				size_t len);
-extern void *trace_seq_reserve(struct trace_seq *s, size_t len);
+				unsigned int len);
 extern int trace_seq_path(struct trace_seq *s, const struct path *path);
 
 extern int trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp,
@@ -71,8 +85,8 @@ static inline int trace_print_seq(struct seq_file *m, struct trace_seq *s)
 {
 	return 0;
 }
-static inline ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
-					size_t cnt)
+static inline int trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
+				    int cnt)
 {
 	return 0;
 }
@@ -85,19 +99,15 @@ static inline int trace_seq_putc(struct trace_seq *s, unsigned char c)
 	return 0;
 }
 static inline int
-trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len)
+trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len)
 {
 	return 0;
 }
 static inline int trace_seq_putmem_hex(struct trace_seq *s, const void *mem,
-				       size_t len)
+				       unsigned int len)
 {
 	return 0;
 }
-static inline void *trace_seq_reserve(struct trace_seq *s, size_t len)
-{
-	return NULL;
-}
 static inline int trace_seq_path(struct trace_seq *s, const struct path *path)
 {
 	return 0;
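
As a usage note for the trace_seq_buffer_ptr() helper added above, the
sketch below shows the pattern its kernel-doc describes: save the
position that is about to be written, perform the write, and hand the
saved pointer back. The struct and the puts helper are simplified
userspace stand-ins, not the kernel's trace_seq implementation, and
print_event() is a made-up caller.

#include <stdio.h>
#include <string.h>

struct trace_seq {
	unsigned char	buffer[4096];
	unsigned int	len;
};

static inline unsigned char *trace_seq_buffer_ptr(struct trace_seq *s)
{
	return s->buffer + s->len;	/* same body as the new helper */
}

/* simplified stand-in for the kernel's trace_seq_puts() */
static void trace_seq_puts(struct trace_seq *s, const char *str)
{
	size_t n = strlen(str);

	memcpy(s->buffer + s->len, str, n);
	s->len += n;
}

/* Typical use: return the location of the text we are about to emit. */
static const unsigned char *print_event(struct trace_seq *s, const char *name)
{
	unsigned char *p = trace_seq_buffer_ptr(s);

	trace_seq_puts(s, name);
	return p;
}

int main(void)
{
	struct trace_seq s = { .len = 0 };
	const unsigned char *where = print_event(&s, "sched_switch");

	printf("event written at offset %ld\n", (long)(where - s.buffer));
	return 0;
}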