
Merge tag 'ftrace-v6.15' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace

Pull ftrace updates from Steven Rostedt:

 - Record function parameters for function and function graph tracers

   An option has been added to the function tracer (func-args) and the
   function graph tracer (funcgraph-args) that, when set, causes the
   tracers to record the registers that hold the arguments into each
   function event. On reading the trace, BTF is used to print those
   arguments. Most architectures support up to 6 arguments (depending
   on the complexity of the arguments), and those are printed.

   If a function has more arguments than were recorded, the output
   will end with " ... )". A usage sketch in C follows this list.

   Example output from the function graph tracer:

	6)              | dummy_xmit [dummy](skb = 0x8887c100, dev = 0x872ca000) {
	6)              |   consume_skb(skb = 0x8887c100) {
	6)              |     skb_release_head_state(skb = 0x8887c100) {
	6)  0.178 us    |       sock_wfree(skb = 0x8887c100)
	6)  0.627 us    |     }

 - The rest of the changes are minor clean ups and fixes
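As referenced above, a minimal usage sketch: the options live in tracefs
(assumed mounted at /sys/kernel/tracing; a shell "echo 1 >
options/funcgraph-args" works equally well). In C:

	/* sketch: select function_graph and turn on argument recording */
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	static int tracefs_write(const char *path, const char *val)
	{
		int fd = open(path, O_WRONLY);

		if (fd < 0 || write(fd, val, strlen(val)) < 0) {
			perror(path);
			if (fd >= 0)
				close(fd);
			return -1;
		}
		return close(fd);
	}

	int main(void)
	{
		if (tracefs_write("/sys/kernel/tracing/current_tracer",
				  "function_graph"))
			return 1;
		if (tracefs_write("/sys/kernel/tracing/options/funcgraph-args",
				  "1"))
			return 1;
		return 0;
	}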

* tag 'ftrace-v6.15' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace:
  tracing: Use hashtable.h for event_hash
  tracing: Fix use-after-free in print_graph_function_flags during tracer switching
  function_graph: Remove the unused variable func
  ftrace: Add arguments to function tracer
  ftrace: Have funcgraph-args take affect during tracing
  ftrace: Add support for function argument to graph tracer
  ftrace: Add print_function_args()
  ftrace: Have ftrace_free_filter() WARN and exit if ops is active
  fgraph: Correct typo in ftrace_return_to_handler comment
Commit 31eb415bf6 by Linus Torvalds, 2025-03-27 15:57:29 -07:00
13 changed files with 351 additions and 74 deletions

include/linux/ftrace_regs.h

@@ -35,4 +35,9 @@ struct ftrace_regs;
 
 #endif /* HAVE_ARCH_FTRACE_REGS */
 
+/* This can be overridden by the architectures */
+#ifndef FTRACE_REGS_MAX_ARGS
+# define FTRACE_REGS_MAX_ARGS 6
+#endif
+
 #endif /* _LINUX_FTRACE_REGS_H */
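The default of 6 is deliberately overridable. For illustration only (a
hypothetical arch header, not part of this series), an architecture with
more argument registers could define the cap before this header applies
the default:

	/* hypothetical: arch/foo/include/asm/ftrace.h */
	#define FTRACE_REGS_MAX_ARGS 8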

kernel/trace/Kconfig

@@ -263,6 +263,18 @@ config FUNCTION_GRAPH_RETADDR
 	  the function is called. This feature is off by default, and you can
 	  enable it via the trace option funcgraph-retaddr.
 
+config FUNCTION_TRACE_ARGS
+	bool
+	depends on HAVE_FUNCTION_ARG_ACCESS_API
+	depends on DEBUG_INFO_BTF
+	default y
+	help
+	  If supported with function argument access API and BTF, then
+	  the function tracer and function graph tracer will support printing
+	  of function arguments. This feature is off by default, and can be
+	  enabled via the trace option func-args (for the function tracer) and
+	  funcgraph-args (for the function graph tracer)
+
 config DYNAMIC_FTRACE
 	bool "enable/disable function tracing dynamically"
 	depends on FUNCTION_TRACER

kernel/trace/fgraph.c

@@ -865,7 +865,7 @@ __ftrace_return_to_handler(struct ftrace_regs *fregs, unsigned long frame_pointe
 }
 
 /*
- * After all architecures have selected HAVE_FUNCTION_GRAPH_FREGS, we can
+ * After all architectures have selected HAVE_FUNCTION_GRAPH_FREGS, we can
  * leave only ftrace_return_to_handler(fregs).
  */
 #ifdef CONFIG_HAVE_FUNCTION_GRAPH_FREGS

kernel/trace/ftrace.c

@@ -1293,6 +1293,8 @@ static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
 void ftrace_free_filter(struct ftrace_ops *ops)
 {
 	ftrace_ops_init(ops);
+	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
+		return;
 	free_ftrace_hash(ops->func_hash->filter_hash);
 	free_ftrace_hash(ops->func_hash->notrace_hash);
 }
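The new WARN catches a common teardown mistake: freeing an ops' filter
hashes while it is still registered (and thus still flagged
FTRACE_OPS_FL_ENABLED). A minimal sketch of the order it enforces, with a
hypothetical module's ops:

	/* my_ops is an illustrative ftrace_ops registered with a filter */
	static void my_module_exit(void)
	{
		/* unregistering clears FTRACE_OPS_FL_ENABLED ... */
		unregister_ftrace_function(&my_ops);
		/* ... so freeing the filter no longer trips the WARN */
		ftrace_free_filter(&my_ops);
	}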

kernel/trace/trace.c

@@ -2878,13 +2878,16 @@ trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
 
 void
 trace_function(struct trace_array *tr, unsigned long ip, unsigned long
-	       parent_ip, unsigned int trace_ctx)
+	       parent_ip, unsigned int trace_ctx, struct ftrace_regs *fregs)
 {
 	struct trace_buffer *buffer = tr->array_buffer.buffer;
 	struct ring_buffer_event *event;
 	struct ftrace_entry *entry;
+	int size = sizeof(*entry);
 
-	event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
+	size += FTRACE_REGS_MAX_ARGS * !!fregs * sizeof(long);
+
+	event = __trace_buffer_lock_reserve(buffer, TRACE_FN, size,
 					    trace_ctx);
 	if (!event)
 		return;
@@ -2892,6 +2895,13 @@ trace_function(struct trace_array *tr, unsigned long ip, unsigned long
 	entry->ip			= ip;
 	entry->parent_ip		= parent_ip;
 
+#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
+	if (fregs) {
+		for (int i = 0; i < FTRACE_REGS_MAX_ARGS; i++)
+			entry->args[i] = ftrace_regs_get_argument(fregs, i);
+	}
+#endif
+
 	if (static_branch_unlikely(&trace_function_exports_enabled))
 		ftrace_exports(event, TRACE_EXPORT_FUNCTION);
 	__buffer_unlock_commit(buffer, event);
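The !!fregs in the size computation above reserves the trailing argument
words only when register state was actually passed in; it is just a
branchless spelling of:

	if (fregs)
		size += FTRACE_REGS_MAX_ARGS * sizeof(long);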

kernel/trace/trace.h

@@ -21,6 +21,7 @@
 #include <linux/workqueue.h>
 #include <linux/ctype.h>
 #include <linux/once_lite.h>
+#include <linux/ftrace_regs.h>
 
 #include "pid_list.h"
@@ -697,7 +698,8 @@ unsigned long trace_total_entries(struct trace_array *tr);
 void trace_function(struct trace_array *tr,
 		    unsigned long ip,
 		    unsigned long parent_ip,
-		    unsigned int trace_ctx);
+		    unsigned int trace_ctx,
+		    struct ftrace_regs *regs);
 void trace_graph_function(struct trace_array *tr,
 		    unsigned long ip,
 		    unsigned long parent_ip,
@@ -897,6 +899,7 @@ static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash)
 #define TRACE_GRAPH_PRINT_RETVAL	0x800
 #define TRACE_GRAPH_PRINT_RETVAL_HEX	0x1000
 #define TRACE_GRAPH_PRINT_RETADDR	0x2000
+#define TRACE_GRAPH_ARGS		0x4000
 
 #define TRACE_GRAPH_PRINT_FILL_SHIFT	28
 #define TRACE_GRAPH_PRINT_FILL_MASK	(0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)

kernel/trace/trace_entries.h

@@ -61,8 +61,9 @@ FTRACE_ENTRY_REG(function, ftrace_entry,
 	TRACE_FN,
 
 	F_STRUCT(
-		__field_fn(	unsigned long,	ip	)
-		__field_fn(	unsigned long,	parent_ip	)
+		__field_fn(	unsigned long,	ip		)
+		__field_fn(	unsigned long,	parent_ip	)
+		__dynamic_array( unsigned long,	args		)
 	),
 
 	F_printk(" %ps <-- %ps",
@@ -72,17 +73,18 @@ FTRACE_ENTRY_REG(function, ftrace_entry,
 );
 
 /* Function call entry */
-FTRACE_ENTRY_PACKED(funcgraph_entry, ftrace_graph_ent_entry,
+FTRACE_ENTRY(funcgraph_entry, ftrace_graph_ent_entry,
 
 	TRACE_GRAPH_ENT,
 
 	F_STRUCT(
 		__field_struct(	struct ftrace_graph_ent,	graph_ent	)
 		__field_packed(	unsigned long,	graph_ent,	func	)
-		__field_packed(	int,		graph_ent,	depth	)
+		__field_packed(	unsigned long,	graph_ent,	depth	)
+		__dynamic_array(unsigned long,	args			)
 	),
 
-	F_printk("--> %ps (%d)", (void *)__entry->func, __entry->depth)
+	F_printk("--> %ps (%lu)", (void *)__entry->func, __entry->depth)
 );
 
 #ifdef CONFIG_FUNCTION_GRAPH_RETADDR

kernel/trace/trace_functions.c

@@ -25,6 +25,9 @@ static void
 function_trace_call(unsigned long ip, unsigned long parent_ip,
 		    struct ftrace_ops *op, struct ftrace_regs *fregs);
 static void
+function_args_trace_call(unsigned long ip, unsigned long parent_ip,
+			 struct ftrace_ops *op, struct ftrace_regs *fregs);
+static void
 function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
 			  struct ftrace_ops *op, struct ftrace_regs *fregs);
 static void
@@ -42,9 +45,10 @@ enum {
 	TRACE_FUNC_NO_OPTS		= 0x0, /* No flags set. */
 	TRACE_FUNC_OPT_STACK		= 0x1,
 	TRACE_FUNC_OPT_NO_REPEATS	= 0x2,
+	TRACE_FUNC_OPT_ARGS		= 0x4,
 
 	/* Update this to next highest bit. */
-	TRACE_FUNC_OPT_HIGHEST_BIT	= 0x4
+	TRACE_FUNC_OPT_HIGHEST_BIT	= 0x8
 };
 
 #define TRACE_FUNC_OPT_MASK	(TRACE_FUNC_OPT_HIGHEST_BIT - 1)
@@ -114,6 +118,8 @@ static ftrace_func_t select_trace_function(u32 flags_val)
 	switch (flags_val & TRACE_FUNC_OPT_MASK) {
 	case TRACE_FUNC_NO_OPTS:
 		return function_trace_call;
+	case TRACE_FUNC_OPT_ARGS:
+		return function_args_trace_call;
 	case TRACE_FUNC_OPT_STACK:
 		return function_stack_trace_call;
 	case TRACE_FUNC_OPT_NO_REPEATS:
@@ -220,7 +226,34 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
 	data = this_cpu_ptr(tr->array_buffer.data);
 	if (!atomic_read(&data->disabled))
-		trace_function(tr, ip, parent_ip, trace_ctx);
+		trace_function(tr, ip, parent_ip, trace_ctx, NULL);
 
 	ftrace_test_recursion_unlock(bit);
 }
 
+static void
+function_args_trace_call(unsigned long ip, unsigned long parent_ip,
+			 struct ftrace_ops *op, struct ftrace_regs *fregs)
+{
+	struct trace_array *tr = op->private;
+	struct trace_array_cpu *data;
+	unsigned int trace_ctx;
+	int bit;
+	int cpu;
+
+	if (unlikely(!tr->function_enabled))
+		return;
+
+	bit = ftrace_test_recursion_trylock(ip, parent_ip);
+	if (bit < 0)
+		return;
+
+	trace_ctx = tracing_gen_ctx();
+
+	cpu = smp_processor_id();
+	data = per_cpu_ptr(tr->array_buffer.data, cpu);
+	if (!atomic_read(&data->disabled))
+		trace_function(tr, ip, parent_ip, trace_ctx, fregs);
+
+	ftrace_test_recursion_unlock(bit);
+}
+
@@ -270,7 +303,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
 	if (likely(disabled == 1)) {
 		trace_ctx = tracing_gen_ctx_flags(flags);
-		trace_function(tr, ip, parent_ip, trace_ctx);
+		trace_function(tr, ip, parent_ip, trace_ctx, NULL);
 #ifdef CONFIG_UNWINDER_FRAME_POINTER
 		if (ftrace_pids_enabled(op))
 			skip++;
@@ -349,7 +382,7 @@ function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
 	trace_ctx = tracing_gen_ctx_dec();
 	process_repeats(tr, ip, parent_ip, last_info, trace_ctx);
 
-	trace_function(tr, ip, parent_ip, trace_ctx);
+	trace_function(tr, ip, parent_ip, trace_ctx, NULL);
 
 out:
 	ftrace_test_recursion_unlock(bit);
@@ -389,7 +422,7 @@ function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
 	trace_ctx = tracing_gen_ctx_flags(flags);
 	process_repeats(tr, ip, parent_ip, last_info, trace_ctx);
 
-	trace_function(tr, ip, parent_ip, trace_ctx);
+	trace_function(tr, ip, parent_ip, trace_ctx, NULL);
 	__trace_stack(tr, trace_ctx, STACK_SKIP);
 }
@@ -403,6 +436,9 @@ static struct tracer_opt func_opts[] = {
 	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
 #endif
 	{ TRACER_OPT(func-no-repeats, TRACE_FUNC_OPT_NO_REPEATS) },
+#ifdef CONFIG_FUNCTION_TRACE_ARGS
+	{ TRACER_OPT(func-args, TRACE_FUNC_OPT_ARGS) },
+#endif
 	{ } /* Always set a last empty entry */
 };

kernel/trace/trace_functions_graph.c

@@ -70,6 +70,10 @@ static struct tracer_opt trace_opts[] = {
 #ifdef CONFIG_FUNCTION_GRAPH_RETADDR
 	/* Display function return address ? */
 	{ TRACER_OPT(funcgraph-retaddr, TRACE_GRAPH_PRINT_RETADDR) },
+#endif
+#ifdef CONFIG_FUNCTION_TRACE_ARGS
+	/* Display function arguments ? */
+	{ TRACER_OPT(funcgraph-args, TRACE_GRAPH_ARGS) },
 #endif
 	/* Include sleep time (scheduled out) between entry and return */
 	{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
@@ -110,25 +114,43 @@ static void
 print_graph_duration(struct trace_array *tr, unsigned long long duration,
 		     struct trace_seq *s, u32 flags);
 
-int __trace_graph_entry(struct trace_array *tr,
-				struct ftrace_graph_ent *trace,
-				unsigned int trace_ctx)
+static int __graph_entry(struct trace_array *tr, struct ftrace_graph_ent *trace,
+			 unsigned int trace_ctx, struct ftrace_regs *fregs)
 {
 	struct ring_buffer_event *event;
 	struct trace_buffer *buffer = tr->array_buffer.buffer;
 	struct ftrace_graph_ent_entry *entry;
+	int size;
 
-	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
-					  sizeof(*entry), trace_ctx);
+	/* If fregs is defined, add FTRACE_REGS_MAX_ARGS long size words */
+	size = sizeof(*entry) + (FTRACE_REGS_MAX_ARGS * !!fregs * sizeof(long));
+
+	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT, size, trace_ctx);
 	if (!event)
 		return 0;
-	entry = ring_buffer_event_data(event);
-	entry->graph_ent = *trace;
+
+	entry = ring_buffer_event_data(event);
+	entry->graph_ent = *trace;
+
+#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
+	if (fregs) {
+		for (int i = 0; i < FTRACE_REGS_MAX_ARGS; i++)
+			entry->args[i] = ftrace_regs_get_argument(fregs, i);
+	}
+#endif
+
 	trace_buffer_unlock_commit_nostack(buffer, event);
 
 	return 1;
 }
+
+int __trace_graph_entry(struct trace_array *tr,
+				struct ftrace_graph_ent *trace,
+				unsigned int trace_ctx)
+{
+	return __graph_entry(tr, trace, trace_ctx, NULL);
+}
 
 #ifdef CONFIG_FUNCTION_GRAPH_RETADDR
 int __trace_graph_retaddr_entry(struct trace_array *tr,
 				struct ftrace_graph_ent *trace,
@@ -174,9 +196,9 @@ struct fgraph_times {
 	unsigned long long		sleeptime; /* may be optional! */
 };
 
-int trace_graph_entry(struct ftrace_graph_ent *trace,
-		      struct fgraph_ops *gops,
-		      struct ftrace_regs *fregs)
+static int graph_entry(struct ftrace_graph_ent *trace,
+		       struct fgraph_ops *gops,
+		       struct ftrace_regs *fregs)
 {
 	unsigned long *task_var = fgraph_get_task_var(gops);
 	struct trace_array *tr = gops->private;
@@ -246,7 +268,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace,
 			unsigned long retaddr = ftrace_graph_top_ret_addr(current);
 			ret = __trace_graph_retaddr_entry(tr, trace, trace_ctx, retaddr);
 		} else {
-			ret = __trace_graph_entry(tr, trace, trace_ctx);
+			ret = __graph_entry(tr, trace, trace_ctx, fregs);
 		}
 	}
 	preempt_enable_notrace();
@@ -254,6 +276,20 @@ int trace_graph_entry(struct ftrace_graph_ent *trace,
 	return ret;
 }
 
+int trace_graph_entry(struct ftrace_graph_ent *trace,
+		      struct fgraph_ops *gops,
+		      struct ftrace_regs *fregs)
+{
+	return graph_entry(trace, gops, NULL);
+}
+
+static int trace_graph_entry_args(struct ftrace_graph_ent *trace,
+				  struct fgraph_ops *gops,
+				  struct ftrace_regs *fregs)
+{
+	return graph_entry(trace, gops, fregs);
+}
+
 static void
 __trace_graph_function(struct trace_array *tr,
 		unsigned long ip, unsigned int trace_ctx)
@@ -418,14 +454,17 @@ static int graph_trace_init(struct trace_array *tr)
 {
 	int ret;
 
-	tr->gops->entryfunc = trace_graph_entry;
+	if (tracer_flags_is_set(TRACE_GRAPH_ARGS))
+		tr->gops->entryfunc = trace_graph_entry_args;
+	else
+		tr->gops->entryfunc = trace_graph_entry;
 
 	if (tracing_thresh)
 		tr->gops->retfunc = trace_graph_thresh_return;
 	else
 		tr->gops->retfunc = trace_graph_return;
 
-	/* Make gops functions are visible before we start tracing */
+	/* Make gops functions visible before we start tracing */
 	smp_mb();
 
 	ret = register_ftrace_graph(tr->gops);
@@ -436,6 +475,28 @@ static int graph_trace_init(struct trace_array *tr)
 	return 0;
 }
 
+static int ftrace_graph_trace_args(struct trace_array *tr, int set)
+{
+	trace_func_graph_ent_t entry;
+
+	if (set)
+		entry = trace_graph_entry_args;
+	else
+		entry = trace_graph_entry;
+
+	/* See if there's any changes */
+	if (tr->gops->entryfunc == entry)
+		return 0;
+
+	unregister_ftrace_graph(tr->gops);
+
+	tr->gops->entryfunc = entry;
+
+	/* Make gops functions visible before we start tracing */
+	smp_mb();
+
+	return register_ftrace_graph(tr->gops);
+}
+
 static void graph_trace_reset(struct trace_array *tr)
 {
 	tracing_stop_cmdline_record();
@@ -775,7 +836,7 @@ static void print_graph_retaddr(struct trace_seq *s, struct fgraph_retaddr_ent_e
 
 static void print_graph_retval(struct trace_seq *s, struct ftrace_graph_ent_entry *entry,
 			       struct ftrace_graph_ret *graph_ret, void *func,
-			       u32 opt_flags, u32 trace_flags)
+			       u32 opt_flags, u32 trace_flags, int args_size)
 {
 	unsigned long err_code = 0;
 	unsigned long retval = 0;
@@ -809,7 +870,14 @@ static void print_graph_retval(struct trace_seq *s, struct ftrace_graph_ent_entr
 	if (entry->ent.type != TRACE_GRAPH_RETADDR_ENT)
 		print_retaddr = false;
 
-	trace_seq_printf(s, "%ps();", func);
+	trace_seq_printf(s, "%ps", func);
+
+	if (args_size >= FTRACE_REGS_MAX_ARGS * sizeof(long)) {
+		print_function_args(s, entry->args, (unsigned long)func);
+		trace_seq_putc(s, ';');
+	} else
+		trace_seq_puts(s, "();");
+
 	if (print_retval || print_retaddr)
 		trace_seq_puts(s, " /*");
 	else
@@ -836,7 +904,8 @@ static void print_graph_retval(struct trace_seq *s, struct ftrace_graph_ent_entr
 
 #else
 
-#define print_graph_retval(_seq, _ent, _ret, _func, _opt_flags, _trace_flags) do {} while (0)
+#define print_graph_retval(_seq, _ent, _ret, _func, _opt_flags, _trace_flags, args_size) \
+	do {} while (0)
 
 #endif
 
@@ -852,16 +921,17 @@ print_graph_entry_leaf(struct trace_iterator *iter,
 	struct ftrace_graph_ret *graph_ret;
 	struct ftrace_graph_ent *call;
 	unsigned long long duration;
-	unsigned long func;
+	unsigned long ret_func;
+	int args_size;
 	int cpu = iter->cpu;
 	int i;
 
+	args_size = iter->ent_size - offsetof(struct ftrace_graph_ent_entry, args);
+
 	graph_ret = &ret_entry->ret;
 	call = &entry->graph_ent;
 	duration = ret_entry->rettime - ret_entry->calltime;
 
-	func = call->func + iter->tr->text_delta;
-
 	if (data) {
 		struct fgraph_cpu_data *cpu_data;
@@ -887,16 +957,25 @@ print_graph_entry_leaf(struct trace_iterator *iter,
 	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
 		trace_seq_putc(s, ' ');
 
+	ret_func = graph_ret->func + iter->tr->text_delta;
+
 	/*
 	 * Write out the function return value or return address
 	 */
 	if (flags & (__TRACE_GRAPH_PRINT_RETVAL | __TRACE_GRAPH_PRINT_RETADDR)) {
 		print_graph_retval(s, entry, graph_ret,
 				   (void *)graph_ret->func + iter->tr->text_delta,
-				   flags, tr->trace_flags);
+				   flags, tr->trace_flags, args_size);
 	} else {
-		trace_seq_printf(s, "%ps();\n", (void *)func);
+		trace_seq_printf(s, "%ps", (void *)ret_func);
+
+		if (args_size >= FTRACE_REGS_MAX_ARGS * sizeof(long)) {
+			print_function_args(s, entry->args, ret_func);
+			trace_seq_putc(s, ';');
+		} else
+			trace_seq_puts(s, "();");
 	}
+	trace_seq_printf(s, "\n");
 
 	print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
 			cpu, iter->ent->pid, flags);
@@ -913,6 +992,7 @@ print_graph_entry_nested(struct trace_iterator *iter,
 	struct fgraph_data *data = iter->private;
 	struct trace_array *tr = iter->tr;
 	unsigned long func;
+	int args_size;
 	int i;
 
 	if (data) {
@@ -937,7 +1017,17 @@ print_graph_entry_nested(struct trace_iterator *iter,
 
 	func = call->func + iter->tr->text_delta;
 
-	trace_seq_printf(s, "%ps() {", (void *)func);
+	trace_seq_printf(s, "%ps", (void *)func);
+
+	args_size = iter->ent_size - offsetof(struct ftrace_graph_ent_entry, args);
+
+	if (args_size >= FTRACE_REGS_MAX_ARGS * sizeof(long))
+		print_function_args(s, entry->args, func);
+	else
+		trace_seq_puts(s, "()");
+
+	trace_seq_puts(s, " {");
+
 	if (flags & __TRACE_GRAPH_PRINT_RETADDR &&
 	    entry->ent.type == TRACE_GRAPH_RETADDR_ENT)
 		print_graph_retaddr(s, (struct fgraph_retaddr_ent_entry *)entry,
@@ -1107,21 +1197,38 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
 		  struct trace_iterator *iter, u32 flags)
 {
 	struct fgraph_data *data = iter->private;
-	struct ftrace_graph_ent *call = &field->graph_ent;
+	struct ftrace_graph_ent *call;
 	struct ftrace_graph_ret_entry *leaf_ret;
 	static enum print_line_t ret;
 	int cpu = iter->cpu;
+	/*
+	 * print_graph_entry() may consume the current event,
+	 * thus @field may become invalid, so we need to save it.
+	 * sizeof(struct ftrace_graph_ent_entry) is very small,
+	 * it can be safely saved at the stack.
+	 */
+	struct ftrace_graph_ent_entry *entry;
+	u8 save_buf[sizeof(*entry) + FTRACE_REGS_MAX_ARGS * sizeof(long)];
+
+	/* The ent_size is expected to be as big as the entry */
+	if (iter->ent_size > sizeof(save_buf))
+		iter->ent_size = sizeof(save_buf);
+
+	entry = (void *)save_buf;
+	memcpy(entry, field, iter->ent_size);
+
+	call = &entry->graph_ent;
 
 	if (check_irq_entry(iter, flags, call->func, call->depth))
 		return TRACE_TYPE_HANDLED;
 
 	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);
 
-	leaf_ret = get_return_for_leaf(iter, field);
+	leaf_ret = get_return_for_leaf(iter, entry);
 	if (leaf_ret)
-		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
+		ret = print_graph_entry_leaf(iter, entry, leaf_ret, s, flags);
 	else
-		ret = print_graph_entry_nested(iter, field, s, cpu, flags);
+		ret = print_graph_entry_nested(iter, entry, s, cpu, flags);
 
 	if (data) {
 		/*
@@ -1195,7 +1302,8 @@ print_graph_return(struct ftrace_graph_ret_entry *retentry, struct trace_seq *s,
 	 * funcgraph-retval option is enabled.
 	 */
 	if (flags & __TRACE_GRAPH_PRINT_RETVAL) {
-		print_graph_retval(s, NULL, trace, (void *)func, flags, tr->trace_flags);
+		print_graph_retval(s, NULL, trace, (void *)func, flags,
+				   tr->trace_flags, 0);
 	} else {
 		/*
 		 * If the return function does not have a matching entry,
@@ -1323,16 +1431,8 @@ print_graph_function_flags(struct trace_iterator *iter, u32 flags)
 	switch (entry->type) {
 	case TRACE_GRAPH_ENT: {
-		/*
-		 * print_graph_entry() may consume the current event,
-		 * thus @field may become invalid, so we need to save it.
-		 * sizeof(struct ftrace_graph_ent_entry) is very small,
-		 * it can be safely saved at the stack.
-		 */
-		struct ftrace_graph_ent_entry saved;
 		trace_assign_type(field, entry);
-		saved = *field;
-		return print_graph_entry(&saved, s, iter, flags);
+		return print_graph_entry(field, s, iter, flags);
 	}
 #ifdef CONFIG_FUNCTION_GRAPH_RETADDR
 	case TRACE_GRAPH_RETADDR_ENT: {
@@ -1511,6 +1611,7 @@ void graph_trace_close(struct trace_iterator *iter)
 	if (data) {
 		free_percpu(data->cpu_data);
 		kfree(data);
+		iter->private = NULL;
 	}
 }
@@ -1526,6 +1627,9 @@ func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 	if (bit == TRACE_GRAPH_GRAPH_TIME)
 		ftrace_graph_graph_time_control(set);
 
+	if (bit == TRACE_GRAPH_ARGS)
+		return ftrace_graph_trace_args(tr, set);
+
 	return 0;
 }
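A pattern repeated throughout this file: whether an event actually
carries arguments is inferred from the ring-buffer entry size rather
than a flag. A condensed sketch of the check (the helper name is
hypothetical, not from the patch):

	/* does this graph-entry event carry recorded arguments? */
	static bool entry_has_args(const struct trace_iterator *iter)
	{
		int args_size = iter->ent_size -
				offsetof(struct ftrace_graph_ent_entry, args);

		return args_size >= FTRACE_REGS_MAX_ARGS * (int)sizeof(long);
	}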

kernel/trace/trace_irqsoff.c

@@ -150,7 +150,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
 
 	trace_ctx = tracing_gen_ctx_flags(flags);
 
-	trace_function(tr, ip, parent_ip, trace_ctx);
+	trace_function(tr, ip, parent_ip, trace_ctx, fregs);
 
 	atomic_dec(&data->disabled);
 }
@@ -250,8 +250,6 @@ static void irqsoff_trace_open(struct trace_iterator *iter)
 {
 	if (is_graph(iter->tr))
 		graph_trace_open(iter);
-	else
-		iter->private = NULL;
 }
 
 static void irqsoff_trace_close(struct trace_iterator *iter)
@@ -295,11 +293,17 @@ __trace_function(struct trace_array *tr,
 	if (is_graph(tr))
 		trace_graph_function(tr, ip, parent_ip, trace_ctx);
 	else
-		trace_function(tr, ip, parent_ip, trace_ctx);
+		trace_function(tr, ip, parent_ip, trace_ctx, NULL);
 }
 
 #else
-#define __trace_function trace_function
+static inline void
+__trace_function(struct trace_array *tr,
+		 unsigned long ip, unsigned long parent_ip,
+		 unsigned int trace_ctx)
+{
+	return trace_function(tr, ip, parent_ip, trace_ctx, NULL);
+}
 
 static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
 {

kernel/trace/trace_output.c

@@ -12,15 +12,19 @@
 #include <linux/sched/clock.h>
 #include <linux/sched/mm.h>
 #include <linux/idr.h>
+#include <linux/btf.h>
+#include <linux/bpf.h>
+#include <linux/hashtable.h>
 
 #include "trace_output.h"
+#include "trace_btf.h"
 
-/* must be a power of 2 */
-#define EVENT_HASHSIZE	128
+/* 2^7 = 128 */
+#define EVENT_HASH_BITS	7
 
 DECLARE_RWSEM(trace_event_sem);
 
-static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;
+static DEFINE_HASHTABLE(event_hash, EVENT_HASH_BITS);
 
 enum print_line_t trace_print_bputs_msg_only(struct trace_iterator *iter)
 {
@@ -684,6 +688,88 @@ int trace_print_lat_context(struct trace_iterator *iter)
 	return !trace_seq_has_overflowed(s);
 }
 
+#ifdef CONFIG_FUNCTION_TRACE_ARGS
+void print_function_args(struct trace_seq *s, unsigned long *args,
+			 unsigned long func)
+{
+	const struct btf_param *param;
+	const struct btf_type *t;
+	const char *param_name;
+	char name[KSYM_NAME_LEN];
+	unsigned long arg;
+	struct btf *btf;
+	s32 tid, nr = 0;
+	int a, p, x;
+
+	trace_seq_printf(s, "(");
+
+	if (!args)
+		goto out;
+
+	if (lookup_symbol_name(func, name))
+		goto out;
+
+	/* TODO: Pass module name here too */
+	t = btf_find_func_proto(name, &btf);
+	if (IS_ERR_OR_NULL(t))
+		goto out;
+
+	param = btf_get_func_param(t, &nr);
+	if (!param)
+		goto out_put;
+
+	for (a = 0, p = 0; p < nr; a++, p++) {
+		if (p)
+			trace_seq_puts(s, ", ");
+
+		/* This only prints what the arch allows (6 args by default) */
+		if (a == FTRACE_REGS_MAX_ARGS) {
+			trace_seq_puts(s, "...");
+			break;
+		}
+
+		arg = args[a];
+
+		param_name = btf_name_by_offset(btf, param[p].name_off);
+		if (param_name)
+			trace_seq_printf(s, "%s=", param_name);
+
+		t = btf_type_skip_modifiers(btf, param[p].type, &tid);
+
+		switch (t ? BTF_INFO_KIND(t->info) : BTF_KIND_UNKN) {
+		case BTF_KIND_UNKN:
+			trace_seq_putc(s, '?');
+			/* Still print unknown type values */
+			fallthrough;
+		case BTF_KIND_PTR:
+			trace_seq_printf(s, "0x%lx", arg);
+			break;
+		case BTF_KIND_INT:
+			trace_seq_printf(s, "%ld", arg);
+			break;
+		case BTF_KIND_ENUM:
+			trace_seq_printf(s, "%ld", arg);
+			break;
+		default:
+			/* This does not handle complex arguments */
+			trace_seq_printf(s, "(%s)[0x%lx", btf_type_str(t), arg);
+			for (x = sizeof(long); x < t->size; x += sizeof(long)) {
+				trace_seq_putc(s, ':');
+				if (++a == FTRACE_REGS_MAX_ARGS) {
+					trace_seq_puts(s, "...]");
+					goto out_put;
+				}
+				trace_seq_printf(s, "0x%lx", args[a]);
+			}
+			trace_seq_putc(s, ']');
+			break;
+		}
+	}
+out_put:
+	btf_put(btf);
+out:
+	trace_seq_printf(s, ")");
+}
+#endif
+
 /**
  * ftrace_find_event - find a registered event
  * @type: the type of event to look for
@@ -694,11 +780,8 @@ int trace_print_lat_context(struct trace_iterator *iter)
 struct trace_event *ftrace_find_event(int type)
 {
 	struct trace_event *event;
-	unsigned key;
-
-	key = type & (EVENT_HASHSIZE - 1);
 
-	hlist_for_each_entry(event, &event_hash[key], node) {
+	hash_for_each_possible(event_hash, event, node, type) {
 		if (event->type == type)
 			return event;
 	}
@@ -753,7 +836,6 @@ void trace_event_read_unlock(void)
  */
 int register_trace_event(struct trace_event *event)
 {
-	unsigned key;
 	int ret = 0;
 
 	down_write(&trace_event_sem);
@@ -786,9 +868,7 @@ int register_trace_event(struct trace_event *event)
 	if (event->funcs->binary == NULL)
 		event->funcs->binary = trace_nop_print;
 
-	key = event->type & (EVENT_HASHSIZE - 1);
-
-	hlist_add_head(&event->node, &event_hash[key]);
+	hash_add(event_hash, &event->node, event->type);
 
 	ret = event->type;
 out:
@@ -803,7 +883,7 @@ EXPORT_SYMBOL_GPL(register_trace_event);
  */
 int __unregister_trace_event(struct trace_event *event)
 {
-	hlist_del(&event->node);
+	hash_del(&event->node);
 	free_trace_event_type(event->type);
 	return 0;
 }
@@ -1005,12 +1085,15 @@ enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags,
 }
 
 static void print_fn_trace(struct trace_seq *s, unsigned long ip,
-			   unsigned long parent_ip, long delta, int flags)
+			   unsigned long parent_ip, long delta,
+			   unsigned long *args, int flags)
 {
 	ip += delta;
 	parent_ip += delta;
 
 	seq_print_ip_sym(s, ip, flags);
+	if (args)
+		print_function_args(s, args, ip);
 
 	if ((flags & TRACE_ITER_PRINT_PARENT) && parent_ip) {
 		trace_seq_puts(s, " <-");
@@ -1024,10 +1107,19 @@ static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags,
 {
 	struct ftrace_entry *field;
 	struct trace_seq *s = &iter->seq;
+	unsigned long *args;
+	int args_size;
 
 	trace_assign_type(field, iter->ent);
 
-	print_fn_trace(s, field->ip, field->parent_ip, iter->tr->text_delta, flags);
+	args_size = iter->ent_size - offsetof(struct ftrace_entry, args);
+	if (args_size >= FTRACE_REGS_MAX_ARGS * sizeof(long))
+		args = field->args;
+	else
+		args = NULL;
+
+	print_fn_trace(s, field->ip, field->parent_ip, iter->tr->text_delta,
+		       args, flags);
 
 	trace_seq_putc(s, '\n');
 
 	return trace_handle_return(s);
@@ -1700,7 +1792,7 @@ trace_func_repeats_print(struct trace_iterator *iter, int flags,
 
 	trace_assign_type(field, iter->ent);
 
-	print_fn_trace(s, field->ip, field->parent_ip, iter->tr->text_delta, flags);
+	print_fn_trace(s, field->ip, field->parent_ip, iter->tr->text_delta, NULL, flags);
 	trace_seq_printf(s, " (repeats: %u, last_ts:", field->count);
 	trace_print_time(s, iter,
 			 iter->ts - FUNC_REPEATS_GET_DELTA_TS(field));
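For context, the event_hash conversion above is the stock
<linux/hashtable.h> pattern; a self-contained sketch with an illustrative
item type (not from the patch):

	#include <linux/hashtable.h>

	struct my_event {
		int type;
		struct hlist_node node;
	};

	static DEFINE_HASHTABLE(my_hash, 7);	/* 2^7 = 128 buckets */

	/* hash_add() hashes the key itself; no manual masking needed */
	static void my_add(struct my_event *e)
	{
		hash_add(my_hash, &e->node, e->type);
	}

	static struct my_event *my_find(int type)
	{
		struct my_event *e;

		hash_for_each_possible(my_hash, e, node, type) {
			if (e->type == type)
				return e;
		}
		return NULL;
	}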

kernel/trace/trace_output.h

@@ -41,5 +41,14 @@ extern struct rw_semaphore trace_event_sem;
 #define SEQ_PUT_HEX_FIELD(s, x)	\
 	trace_seq_putmem_hex(s, &(x), sizeof(x))
 
+#ifdef CONFIG_FUNCTION_TRACE_ARGS
+void print_function_args(struct trace_seq *s, unsigned long *args,
+			 unsigned long func);
+#else
+static inline void print_function_args(struct trace_seq *s, unsigned long *args,
+				       unsigned long func) {
+	trace_seq_puts(s, "()");
+}
+#endif
+
 #endif

kernel/trace/trace_sched_wakeup.c

@@ -188,8 +188,6 @@ static void wakeup_trace_open(struct trace_iterator *iter)
 {
 	if (is_graph(iter->tr))
 		graph_trace_open(iter);
-	else
-		iter->private = NULL;
 }
 
 static void wakeup_trace_close(struct trace_iterator *iter)
@@ -242,7 +240,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
 		return;
 
 	local_irq_save(flags);
-	trace_function(tr, ip, parent_ip, trace_ctx);
+	trace_function(tr, ip, parent_ip, trace_ctx, fregs);
 	local_irq_restore(flags);
 
 	atomic_dec(&data->disabled);
@@ -327,7 +325,7 @@ __trace_function(struct trace_array *tr,
 	if (is_graph(tr))
 		trace_graph_function(tr, ip, parent_ip, trace_ctx);
 	else
-		trace_function(tr, ip, parent_ip, trace_ctx);
+		trace_function(tr, ip, parent_ip, trace_ctx, NULL);
 }
 
 static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)