// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack tracing support
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ftrace.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>

#include <asm/irq.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
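
/*
 * Background: on arm64, code built with frame pointers begins each stack
 * frame with a frame record, a pair of words pointed to by x29:
 *
 *	fp[0]: the caller's frame pointer (the next record in the chain)
 *	fp[1]: the saved link register (the return address)
 *
 * The records form a singly linked list from the innermost frame outwards,
 * which is the chain the unwinder below walks.
 */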

/*
 * Start an unwind from a pt_regs.
 *
 * The unwind will begin at the PC within the regs.
 *
 * The regs must be on a stack currently owned by the calling task.
 */
static inline void unwind_init_from_regs(struct unwind_state *state,
					 struct pt_regs *regs)
{
	unwind_init_common(state, current);

	state->fp = regs->regs[29];
	state->pc = regs->pc;
}
|

/*
 * Start an unwind from a caller.
 *
 * The unwind will begin at the caller of whichever function this is inlined
 * into.
 *
 * The function which invokes this must be noinline.
 */
static __always_inline void unwind_init_from_caller(struct unwind_state *state)
{
	unwind_init_common(state, current);

	state->fp = (unsigned long)__builtin_frame_address(1);
	state->pc = (unsigned long)__builtin_return_address(0);
}
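
/*
 * Note: the two __builtin calls above are evaluated in whichever frame this
 * helper is inlined into, so frame_address(1)/return_address(0) refer to
 * that function's caller; if the invoking function were itself inlined, the
 * "caller" would be ambiguous, hence the noinline requirement. A
 * hypothetical caller:
 *
 *	noinline void start_walk(void)
 *	{
 *		struct unwind_state state;
 *
 *		unwind_init_from_caller(&state);
 *		// state now describes start_walk()'s caller
 *	}
 */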
|

/*
 * Start an unwind from a blocked task.
 *
 * The unwind will begin at the blocked task's saved PC (i.e. the caller of
 * cpu_switch_to()).
 *
 * The caller should ensure the task is blocked in cpu_switch_to() for the
 * duration of the unwind, or the unwind will be bogus. It is never valid to
 * call this for the current task.
 */
static inline void unwind_init_from_task(struct unwind_state *state,
					 struct task_struct *task)
{
	unwind_init_common(state, task);

	state->fp = thread_saved_fp(task);
	state->pc = thread_saved_pc(task);
}
|

/*
 * Unwind from one frame record (A) to the next frame record (B).
 *
 * We terminate early if the location of B indicates a malformed chain of frame
 * records (e.g. a cycle), determined based on the location and fp value of A
 * and the location (but not the fp value) of B.
 */
static int notrace unwind_next(struct unwind_state *state)
{
	struct task_struct *tsk = state->task;
	unsigned long fp = state->fp;
	int err;

	/*
	 * Final frame; nothing to unwind. The entry code sets up a final
	 * frame record in the task's pt_regs, which terminates the chain.
	 */
	if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
		return -ENOENT;

	err = unwind_next_frame_record(state);
	if (err)
		return err;

	/* Strip any pointer authentication code from the unwound PC. */
	state->pc = ptrauth_strip_insn_pac(state->pc);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (tsk->ret_stack &&
	    (state->pc == (unsigned long)return_to_handler)) {
		unsigned long orig_pc;
		/*
		 * This is a case where the function graph tracer has
		 * modified a return address (LR) in a stack frame to hook
		 * a function return, so replace it with the original
		 * return address.
		 */
		orig_pc = ftrace_graph_ret_addr(tsk, NULL, state->pc,
						(void *)state->fp);
		if (WARN_ON_ONCE(state->pc == orig_pc))
			return -EINVAL;
		state->pc = orig_pc;
	}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_KRETPROBES
	if (is_kretprobe_trampoline(state->pc))
		state->pc = kretprobe_find_ret_addr(tsk, (void *)state->fp,
						    &state->kr_cur);
#endif

	return 0;
}
NOKPROBE_SYMBOL(unwind_next);
|

/*
 * Walk the frame records, feeding each unwound PC to consume_entry until
 * the callback returns false or the unwind terminates.
 */
static void notrace unwind(struct unwind_state *state,
			   stack_trace_consume_fn consume_entry, void *cookie)
{
	while (1) {
		int ret;

		if (!consume_entry(cookie, state->pc))
			break;
		ret = unwind_next(state);
		if (ret < 0)
			break;
	}
}
NOKPROBE_SYMBOL(unwind);
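
/*
 * Both functions above are notrace and NOKPROBE because the unwinder can
 * itself run from ftrace and kprobes handlers; allowing it to be traced
 * or probed there would recurse.
 */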
|

static bool dump_backtrace_entry(void *arg, unsigned long where)
{
	char *loglvl = arg;

	/* %pSb prints symbol+offset, appending the module build ID if any */
	printk("%s %pSb\n", loglvl, (void *)where);
	return true;
}
|

void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
		    const char *loglvl)
{
	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

	/* Only kernel-mode register state is meaningful to unwind here */
	if (regs && user_mode(regs))
		return;

	if (!tsk)
		tsk = current;

	/* Pin the task's stack so it cannot be freed while we walk it */
	if (!try_get_task_stack(tsk))
		return;

	printk("%sCall trace:\n", loglvl);
	arch_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs);

	put_task_stack(tsk);
}
|

void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
	dump_backtrace(NULL, tsk, loglvl);
	barrier();
}
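
/*
 * The barrier() above presumably stops the compiler from turning
 * dump_backtrace() into a tail call, so that show_stack() keeps its own
 * frame; the rationale is not documented in this file.
 */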
|

/*
 * Per-cpu stacks are only accessible when unwinding the current task in a
 * non-preemptible context.
 */
#define STACKINFO_CPU(name)					\
	({							\
		((task == current) && !preemptible())		\
			? stackinfo_get_##name()		\
			: stackinfo_get_unknown();		\
	})
|

/*
 * SDEI stacks are only accessible when unwinding the current task in an NMI
 * context.
 */
#define STACKINFO_SDEI(name)					\
	({							\
		((task == current) && in_nmi())			\
			? stackinfo_get_sdei_##name()		\
			: stackinfo_get_unknown();		\
	})
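
/*
 * For example, STACKINFO_CPU(irq) evaluates to stackinfo_get_irq() when
 * unwinding the current task with preemption disabled, and otherwise to
 * stackinfo_get_unknown(), i.e. a stack range the unwinder will never
 * match against.
 */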
|

noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry,
				      void *cookie, struct task_struct *task,
				      struct pt_regs *regs)
{
	struct stack_info stacks[] = {
		stackinfo_get_task(task),
		STACKINFO_CPU(irq),
#if defined(CONFIG_VMAP_STACK)
		STACKINFO_CPU(overflow),
#endif
#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_ARM_SDE_INTERFACE)
		STACKINFO_SDEI(normal),
		STACKINFO_SDEI(critical),
#endif
	};
	struct unwind_state state = {
		.stacks = stacks,
		.nr_stacks = ARRAY_SIZE(stacks),
	};

	/*
	 * An unwind from pt_regs is only valid for the current task, as
	 * unwind_init_from_regs() requires the regs to be on a stack owned
	 * by the calling task.
	 */
	if (regs) {
		if (task != current)
			return;
		unwind_init_from_regs(&state, regs);
	} else if (task == current) {
		unwind_init_from_caller(&state);
	} else {
		unwind_init_from_task(&state, task);
	}

	unwind(&state, consume_entry, cookie);
}
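
/*
 * Illustrative (hypothetical) caller, sketching what generic stacktrace
 * code does with this hook: collect unwound PCs into a buffer through a
 * consume callback. The names below are made up for the example.
 *
 *	struct trace_buf { unsigned long *pcs; unsigned int len, max; };
 *
 *	static bool save_pc(void *cookie, unsigned long pc)
 *	{
 *		struct trace_buf *buf = cookie;
 *
 *		if (buf->len >= buf->max)
 *			return false;	// stop the walk
 *		buf->pcs[buf->len++] = pc;
 *		return true;		// continue unwinding
 *	}
 *
 *	// arch_stack_walk(save_pc, &buf, current, NULL);
 */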