mirror of
https://github.com/torvalds/linux.git
synced 2025-04-09 14:45:27 +00:00

* arm64/for-next/perf: perf: Switch back to struct platform_driver::remove() perf: arm_pmuv3: Add support for Samsung Mongoose PMU dt-bindings: arm: pmu: Add Samsung Mongoose core compatible perf/dwc_pcie: Fix typos in event names perf/dwc_pcie: Add support for Ampere SoCs ARM: pmuv3: Add missing write_pmuacr() perf/marvell: Marvell PEM performance monitor support perf/arm_pmuv3: Add PMUv3.9 per counter EL0 access control perf/dwc_pcie: Convert the events with mixed case to lowercase perf/cxlpmu: Support missing events in 3.1 spec perf: imx_perf: add support for i.MX91 platform dt-bindings: perf: fsl-imx-ddr: Add i.MX91 compatible drivers perf: remove unused field pmu_node * for-next/gcs: (42 commits) : arm64 Guarded Control Stack user-space support kselftest/arm64: Fix missing printf() argument in gcs/gcs-stress.c arm64/gcs: Fix outdated ptrace documentation kselftest/arm64: Ensure stable names for GCS stress test results kselftest/arm64: Validate that GCS push and write permissions work kselftest/arm64: Enable GCS for the FP stress tests kselftest/arm64: Add a GCS stress test kselftest/arm64: Add GCS signal tests kselftest/arm64: Add test coverage for GCS mode locking kselftest/arm64: Add a GCS test program built with the system libc kselftest/arm64: Add very basic GCS test program kselftest/arm64: Always run signals tests with GCS enabled kselftest/arm64: Allow signals tests to specify an expected si_code kselftest/arm64: Add framework support for GCS to signal handling tests kselftest/arm64: Add GCS as a detected feature in the signal tests kselftest/arm64: Verify the GCS hwcap arm64: Add Kconfig for Guarded Control Stack (GCS) arm64/ptrace: Expose GCS via ptrace and core files arm64/signal: Expose GCS state in signal frames arm64/signal: Set up and restore the GCS context for signal handlers arm64/mm: Implement map_shadow_stack() ... 
* for-next/probes: : Various arm64 uprobes/kprobes cleanups arm64: insn: Simulate nop instruction for better uprobe performance arm64: probes: Remove probe_opcode_t arm64: probes: Cleanup kprobes endianness conversions arm64: probes: Move kprobes-specific fields arm64: probes: Fix uprobes for big-endian kernels arm64: probes: Fix simulate_ldr*_literal() arm64: probes: Remove broken LDR (literal) uprobe support * for-next/asm-offsets: : arm64 asm-offsets.c cleanup (remove unused offsets) arm64: asm-offsets: remove PREEMPT_DISABLE_OFFSET arm64: asm-offsets: remove DMA_{TO,FROM}_DEVICE arm64: asm-offsets: remove VM_EXEC and PAGE_SZ arm64: asm-offsets: remove MM_CONTEXT_ID arm64: asm-offsets: remove COMPAT_{RT_,SIGFRAME_REGS_OFFSET arm64: asm-offsets: remove VMA_VM_* arm64: asm-offsets: remove TSK_ACTIVE_MM * for-next/tlb: : TLB flushing optimisations arm64: optimize flush tlb kernel range arm64: tlbflush: add __flush_tlb_range_limit_excess() * for-next/misc: : Miscellaneous patches arm64: tls: Fix context-switching of tpidrro_el0 when kpti is enabled arm64/ptrace: Clarify documentation of VL configuration via ptrace acpi/arm64: remove unnecessary cast arm64/mm: Change protval as 'pteval_t' in map_range() arm64: uprobes: Optimize cache flushes for xol slot acpi/arm64: Adjust error handling procedure in gtdt_parse_timer_block() arm64: fix .data.rel.ro size assertion when CONFIG_LTO_CLANG arm64/ptdump: Test both PTE_TABLE_BIT and PTE_VALID for block mappings arm64/mm: Sanity check PTE address before runtime P4D/PUD folding arm64/mm: Drop setting PTE_TYPE_PAGE in pte_mkcont() ACPI: GTDT: Tighten the check for the array of platform timer structures arm64/fpsimd: Fix a typo arm64: Expose ID_AA64ISAR1_EL1.XS to sanitised feature consumers arm64: Return early when break handler is found on linked-list arm64/mm: Re-organize arch_make_huge_pte() arm64/mm: Drop _PROT_SECT_DEFAULT arm64: Add command-line override for ID_AA64MMFR0_EL1.ECV arm64: head: Drop SWAPPER_TABLE_SHIFT 
arm64: cpufeature: add POE to cpucap_is_possible() arm64/mm: Change pgattr_change_is_safe() arguments as pteval_t * for-next/mte: : Various MTE improvements selftests: arm64: add hugetlb mte tests hugetlb: arm64: add mte support * for-next/sysreg: : arm64 sysreg updates arm64/sysreg: Update ID_AA64MMFR1_EL1 to DDI0601 2024-09 * for-next/stacktrace: : arm64 stacktrace improvements arm64: preserve pt_regs::stackframe during exec*() arm64: stacktrace: unwind exception boundaries arm64: stacktrace: split unwind_consume_stack() arm64: stacktrace: report recovered PCs arm64: stacktrace: report source of unwind data arm64: stacktrace: move dump_backtrace() to kunwind_stack_walk() arm64: use a common struct frame_record arm64: pt_regs: swap 'unused' and 'pmr' fields arm64: pt_regs: rename "pmr_save" -> "pmr" arm64: pt_regs: remove stale big-endian layout arm64: pt_regs: assert pt_regs is a multiple of 16 bytes * for-next/hwcap3: : Add AT_HWCAP3 support for arm64 (also wire up AT_HWCAP4) arm64: Support AT_HWCAP3 binfmt_elf: Wire up AT_HWCAP3 at AT_HWCAP4 * for-next/kselftest: (30 commits) : arm64 kselftest fixes/cleanups kselftest/arm64: Try harder to generate different keys during PAC tests kselftest/arm64: Don't leak pipe fds in pac.exec_sign_all() kselftest/arm64: Corrupt P0 in the irritator when testing SSVE kselftest/arm64: Add FPMR coverage to fp-ptrace kselftest/arm64: Expand the set of ZA writes fp-ptrace does kselftets/arm64: Use flag bits for features in fp-ptrace assembler code kselftest/arm64: Enable build of PAC tests with LLVM=1 kselftest/arm64: Check that SVCR is 0 in signal handlers kselftest/arm64: Fix printf() compiler warnings in the arm64 syscall-abi.c tests kselftest/arm64: Fix printf() warning in the arm64 MTE prctl() test kselftest/arm64: Fix printf() compiler warnings in the arm64 fp tests kselftest/arm64: Fix build with stricter assemblers kselftest/arm64: Test signal handler state modification in fp-stress kselftest/arm64: Provide a SIGUSR1 handler 
in the kernel mode FP stress test kselftest/arm64: Implement irritators for ZA and ZT kselftest/arm64: Remove unused ADRs from irritator handlers kselftest/arm64: Correct misleading comments on fp-stress irritators kselftest/arm64: Poll less often while waiting for fp-stress children kselftest/arm64: Increase frequency of signal delivery in fp-stress kselftest/arm64: Fix encoding for SVE B16B16 test ... * for-next/crc32: : Optimise CRC32 using PMULL instructions arm64/crc32: Implement 4-way interleave using PMULL arm64/crc32: Reorganize bit/byte ordering macros arm64/lib: Handle CRC-32 alternative in C code * for-next/guest-cca: : Support for running Linux as a guest in Arm CCA arm64: Document Arm Confidential Compute virt: arm-cca-guest: TSM_REPORT support for realms arm64: Enable memory encrypt for Realms arm64: mm: Avoid TLBI when marking pages as valid arm64: Enforce bounce buffers for realm DMA efi: arm64: Map Device with Prot Shared arm64: rsi: Map unprotected MMIO as decrypted arm64: rsi: Add support for checking whether an MMIO is protected arm64: realm: Query IPA size from the RMM arm64: Detect if in a realm and set RIPAS RAM arm64: rsi: Add RSI definitions * for-next/haft: : Support for arm64 FEAT_HAFT arm64: pgtable: Warn unexpected pmdp_test_and_clear_young() arm64: Enable ARCH_HAS_NONLEAF_PMD_YOUNG arm64: Add support for FEAT_HAFT arm64: setup: name 'tcr2' register arm64/sysreg: Update ID_AA64MMFR1_EL1 register * for-next/scs: : Dynamic shadow call stack fixes arm64/scs: Drop unused prototype __pi_scs_patch_vmlinux() arm64/scs: Deal with 64-bit relative offsets in FDE frames arm64/scs: Fix handling of DWARF augmentation data in CIE/FDE frames
217 lines
5.0 KiB
C
217 lines
5.0 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
|
|
/*
|
|
* Copyright (C) 2014-2016 Pratyush Anand <panand@redhat.com>
|
|
*/
|
|
#include <linux/highmem.h>
|
|
#include <linux/ptrace.h>
|
|
#include <linux/uprobes.h>
|
|
#include <asm/cacheflush.h>
|
|
|
|
#include "decode-insn.h"
|
|
|
|
#define UPROBE_INV_FAULT_CODE UINT_MAX
|
|
|
|
/*
 * Copy the original instruction into the execute-out-of-line (xol) slot
 * within @page, at the offset implied by @vaddr, and keep the I-cache
 * coherent with the new contents.
 */
void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
		void *src, unsigned long len)
{
	/* Atomic kmap: this runs with preemption disabled until kunmap. */
	void *xol_page_kaddr = kmap_atomic(page);
	void *dst = xol_page_kaddr + (vaddr & ~PAGE_MASK);

	/*
	 * Initial cache maintenance of the xol page done via set_pte_at().
	 * Subsequent CMOs only needed if the xol slot changes.
	 */
	if (!memcmp(dst, src, len))
		goto done;

	/* Initialize the slot */
	memcpy(dst, src, len);

	/* flush caches (dcache/icache) */
	sync_icache_aliases((unsigned long)dst, (unsigned long)dst + len);

done:
	kunmap_atomic(xol_page_kaddr);
}
|
|
|
|
/*
 * Return the address of the breakpoint that fired: on arm64 the PC still
 * points at the BRK instruction itself, so no adjustment is needed.
 */
unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
{
	return instruction_pointer(regs);
}
|
|
|
|
/*
 * Vet the instruction at @addr in @mm for probing and decode it into
 * @auprobe. Returns 0 on success, -EOPNOTSUPP for AArch32 tasks, or
 * -EINVAL for a misaligned address or an instruction we must not probe.
 */
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
		unsigned long addr)
{
	u32 insn;

	/* TODO: Currently we do not support AARCH32 instruction probing */
	if (mm->context.flags & MMCF_AARCH32)
		return -EOPNOTSUPP;
	else if (!IS_ALIGNED(addr, AARCH64_INSN_SIZE))
		return -EINVAL;

	/* The stored copy is little-endian regardless of kernel endianness. */
	insn = le32_to_cpu(auprobe->insn);

	switch (arm_probe_decode_insn(insn, &auprobe->api)) {
	case INSN_REJECTED:
		return -EINVAL;

	case INSN_GOOD_NO_SLOT:
		/* Instruction cannot be stepped out-of-line; simulate it. */
		auprobe->simulate = true;
		break;

	default:
		break;
	}

	return 0;
}
|
|
|
|
/*
 * Prepare to single-step the probed instruction out-of-line: seed a
 * sentinel fault code, redirect the PC to the xol slot and enable
 * hardware single-step. Always returns 0.
 */
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	/* Initialize with an invalid fault code to detect if ol insn trapped */
	current->thread.fault_code = UPROBE_INV_FAULT_CODE;

	/* Instruction points to execute ol */
	instruction_pointer_set(regs, utask->xol_vaddr);

	user_enable_single_step(current);

	return 0;
}
|
|
|
|
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
|
|
{
|
|
struct uprobe_task *utask = current->utask;
|
|
|
|
WARN_ON_ONCE(current->thread.fault_code != UPROBE_INV_FAULT_CODE);
|
|
|
|
/* Instruction points to execute next to breakpoint address */
|
|
instruction_pointer_set(regs, utask->vaddr + 4);
|
|
|
|
user_disable_single_step(current);
|
|
|
|
return 0;
|
|
}
|
|
bool arch_uprobe_xol_was_trapped(struct task_struct *t)
|
|
{
|
|
/*
|
|
* Between arch_uprobe_pre_xol and arch_uprobe_post_xol, if an xol
|
|
* insn itself is trapped, then detect the case with the help of
|
|
* invalid fault code which is being set in arch_uprobe_pre_xol
|
|
*/
|
|
if (t->thread.fault_code != UPROBE_INV_FAULT_CODE)
|
|
return true;
|
|
|
|
return false;
|
|
}
|
|
|
|
/*
 * If the decoder flagged this instruction for simulation, emulate it in
 * the kernel instead of single-stepping it. Returns true when the
 * single-step was skipped, false to fall back to xol stepping.
 */
bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	u32 insn;
	unsigned long addr;

	if (!auprobe->simulate)
		return false;

	/* Stored copy is little-endian; convert before simulating. */
	insn = le32_to_cpu(auprobe->insn);
	addr = instruction_pointer(regs);

	/* The decode step installed a simulation handler for this insn. */
	if (auprobe->api.handler)
		auprobe->api.handler(insn, addr, regs);

	return true;
}
|
|
|
|
/*
 * Abandon an in-progress out-of-line step (e.g. on a fatal signal):
 * restore the PC to the original probed address and stop single-stepping.
 */
void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	/*
	 * Task has received a fatal signal, so reset back to probed
	 * address.
	 */
	instruction_pointer_set(regs, utask->vaddr);

	user_disable_single_step(current);
}
|
|
|
|
bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
|
|
struct pt_regs *regs)
|
|
{
|
|
/*
|
|
* If a simple branch instruction (B) was called for retprobed
|
|
* assembly label then return true even when regs->sp and ret->stack
|
|
* are same. It will ensure that cleanup and reporting of return
|
|
* instances corresponding to callee label is done when
|
|
* handle_trampoline for called function is executed.
|
|
*/
|
|
if (ctx == RP_CHECK_CHAIN_CALL)
|
|
return regs->sp <= ret->stack;
|
|
else
|
|
return regs->sp < ret->stack;
|
|
}
|
|
|
|
/*
 * Redirect the function's return to the uretprobe trampoline by swapping
 * the link register (x30) for @trampoline_vaddr. Returns the original
 * return address so it can be restored later.
 */
unsigned long
arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr,
				  struct pt_regs *regs)
{
	unsigned long orig_ret_vaddr;

	orig_ret_vaddr = procedure_link_pointer(regs);
	/* Replace the return addr with trampoline addr */
	procedure_link_pointer_set(regs, trampoline_vaddr);

	return orig_ret_vaddr;
}
|
|
|
|
/*
 * Notifier stub required by the generic uprobes core; arm64 handles
 * uprobe traps through its own break/step hooks instead, so nothing is
 * consumed here.
 */
int arch_uprobe_exception_notify(struct notifier_block *self,
				 unsigned long val, void *data)
{
	return NOTIFY_DONE;
}
|
|
|
|
static int uprobe_breakpoint_handler(struct pt_regs *regs,
|
|
unsigned long esr)
|
|
{
|
|
if (uprobe_pre_sstep_notifier(regs))
|
|
return DBG_HOOK_HANDLED;
|
|
|
|
return DBG_HOOK_ERROR;
|
|
}
|
|
|
|
static int uprobe_single_step_handler(struct pt_regs *regs,
|
|
unsigned long esr)
|
|
{
|
|
struct uprobe_task *utask = current->utask;
|
|
|
|
WARN_ON(utask && (instruction_pointer(regs) != utask->xol_vaddr + 4));
|
|
if (uprobe_post_sstep_notifier(regs))
|
|
return DBG_HOOK_HANDLED;
|
|
|
|
return DBG_HOOK_ERROR;
|
|
}
|
|
|
|
/* uprobe breakpoint handler hook: matches BRK #UPROBES_BRK_IMM traps */
static struct break_hook uprobes_break_hook = {
	.imm = UPROBES_BRK_IMM,
	.fn = uprobe_breakpoint_handler,
};
|
|
|
|
/* uprobe single step handler hook: runs after each xol instruction */
static struct step_hook uprobes_step_hook = {
	.fn = uprobe_single_step_handler,
};
|
|
|
|
/*
 * Register the uprobe break and step hooks with the arm64 debug-monitors
 * framework at boot. Always returns 0.
 */
static int __init arch_init_uprobes(void)
{
	register_user_break_hook(&uprobes_break_hook);
	register_user_step_hook(&uprobes_step_hook);

	return 0;
}

device_initcall(arch_init_uprobes);
|