// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for TLB flushing.
 * On machines where the MMU does not use a hash table to store virtual to
 * physical translations (ie, SW loaded TLBs or Book3E compliant processors;
 * this does -not- include 603, however, which shares the implementation with
 * hash based processors)
 *
 *  -- BenH
 *
 * Copyright 2008,2009 Ben Herrenschmidt <benh@kernel.crashing.org>
 *                     IBM Corp.
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/hugetlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/text-patching.h>
#include <asm/cputhreads.h>
#include <asm/hugetlb.h>
#include <asm/paca.h>

#include <mm/mmu_decl.h>

/*
 * This struct lists the sw-supported page sizes. The hardware MMU may support
 * other sizes not listed here. The .ind field is only used on MMUs that have
 * indirect page table entries.
 */
#ifdef CONFIG_PPC_E500
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
	},
	[MMU_PAGE_2M] = {
		.shift	= 21,
	},
	[MMU_PAGE_4M] = {
		.shift	= 22,
	},
	[MMU_PAGE_16M] = {
		.shift	= 24,
	},
	[MMU_PAGE_64M] = {
		.shift	= 26,
	},
	[MMU_PAGE_256M] = {
		.shift	= 28,
	},
	[MMU_PAGE_1G] = {
		.shift	= 30,
	},
};

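/*
 * Map a Linux page size index to the TSIZE value passed down to
 * _tlbil_va()/_tlbivax_bcast(): log2 of the page size expressed in KB
 * (e.g. 4K -> 2, 16M -> 14).
 */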
static inline int mmu_get_tsize(int psize)
{
	return mmu_psize_defs[psize].shift - 10;
}
#else
static inline int mmu_get_tsize(int psize)
{
	/* This isn't used on !Book3E for now */
	return 0;
}
#endif

#ifdef CONFIG_PPC_8xx
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
	},
	[MMU_PAGE_16K] = {
		.shift	= 14,
	},
	[MMU_PAGE_512K] = {
		.shift	= 19,
	},
	[MMU_PAGE_8M] = {
		.shift	= 23,
	},
};
#endif

#ifdef CONFIG_PPC_E500
/* next_tlbcam_idx is used to round-robin tlbcam entry assignment */
DEFINE_PER_CPU(int, next_tlbcam_idx);
EXPORT_PER_CPU_SYMBOL(next_tlbcam_idx);
#endif

/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */

#ifndef CONFIG_PPC_8xx
/*
 * These are the base non-SMP variants of page and mm flushing
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_pid(pid);
	preempt_enable();
}
EXPORT_SYMBOL(local_flush_tlb_mm);

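/*
 * Flush one page from the local CPU's TLB. @tsize is the MMU page size
 * encoding for the translation; @ind is non-zero when targeting an
 * indirect (hardware page table) entry.
 */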
void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
			    int tsize, int ind)
{
	unsigned int pid;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_va(vmaddr, pid, tsize, ind);
	preempt_enable();
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	__local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			       mmu_get_tsize(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(local_flush_tlb_page);

void local_flush_tlb_page_psize(struct mm_struct *mm,
				unsigned long vmaddr, int psize)
{
	__local_flush_tlb_page(mm, vmaddr, mmu_get_tsize(psize), 0);
}
EXPORT_SYMBOL(local_flush_tlb_page_psize);

#endif

/*
 * And here are the SMP non-local implementations
 */
#ifdef CONFIG_SMP

static DEFINE_RAW_SPINLOCK(tlbivax_lock);

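/* Argument block marshalled through the flush IPIs below. */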
struct tlb_flush_param {
	unsigned long addr;
	unsigned int pid;
	unsigned int tsize;
	unsigned int ind;
};

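/*
 * IPI handlers. A NULL param means flush PID 0, i.e. the kernel
 * context (see flush_tlb_kernel_range()).
 */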
static void do_flush_tlb_mm_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_pid(p ? p->pid : 0);
}

static void do_flush_tlb_page_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_va(p->addr, p->pid, p->tsize, p->ind);
}

/* Note on invalidations and PID:
 *
 * We snapshot the PID with preempt disabled. At this point, it can still
 * change either because:
 * - our context is being stolen (PID -> NO_CONTEXT) on another CPU
 * - we are invalidating some target that isn't currently running here
 *   and is concurrently acquiring a new PID on another CPU
 * - some other CPU is re-acquiring a lost PID for this mm
 * etc...
 *
 * However, this shouldn't be a problem as we only guarantee
 * invalidation of TLB entries present prior to this call, so we
 * don't care about the PID changing, and invalidating a stale PID
 * is generally harmless.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;
	if (!mm_is_core_local(mm)) {
		struct tlb_flush_param p = { .pid = pid };

		/* Ignores smp_processor_id() even if set. */
		smp_call_function_many(mm_cpumask(mm),
				       do_flush_tlb_mm_ipi, &p, 1);
	}
	_tlbil_pid(pid);
no_context:
	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);

void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
		      int tsize, int ind)
{
	struct cpumask *cpu_mask;
	unsigned int pid;

	/*
	 * This function as well as __local_flush_tlb_page() must only be called
	 * for user contexts.
	 */
	if (WARN_ON(!mm))
		return;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto bail;
	cpu_mask = mm_cpumask(mm);
	if (!mm_is_core_local(mm)) {
		/* If broadcast tlbivax is supported, use it */
		if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
			int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);

			if (lock)
				raw_spin_lock(&tlbivax_lock);
			_tlbivax_bcast(vmaddr, pid, tsize, ind);
			if (lock)
				raw_spin_unlock(&tlbivax_lock);
			goto bail;
		} else {
			struct tlb_flush_param p = {
				.pid = pid,
				.addr = vmaddr,
				.tsize = tsize,
				.ind = ind,
			};

			/* Ignores smp_processor_id() even if set in cpu_mask */
			smp_call_function_many(cpu_mask,
					       do_flush_tlb_page_ipi, &p, 1);
		}
	}
	_tlbil_va(vmaddr, pid, tsize, ind);
bail:
	preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (vma && is_vm_hugetlb_page(vma))
		flush_hugetlb_page(vma, vmaddr);
#endif

	__flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			 mmu_get_tsize(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(flush_tlb_page);

#endif /* CONFIG_SMP */

/*
 * Flush kernel TLB entries in the given range
 */
#ifndef CONFIG_PPC_8xx
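/*
 * The range is not tracked individually: kernel translations live in
 * the PID 0 context, so simply flush that whole context.
 */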
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_SMP
	preempt_disable();
	smp_call_function(do_flush_tlb_mm_ipi, NULL, 1);
	_tlbil_pid(0);
	preempt_enable();
#else
	_tlbil_pid(0);
#endif
}
EXPORT_SYMBOL(flush_tlb_kernel_range);
#endif

/*
 * Currently, for range flushing, we just do a full mm flush. This should
 * be optimized based on a threshold on the size of the range, since some
 * implementations can stack multiple tlbivax before a tlbsync, but
 * for now, we keep it that way
 */
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	if (end - start == PAGE_SIZE && !(start & ~PAGE_MASK))
		flush_tlb_page(vma, start);
	else
		flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);

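/* mmu_gather tear-down: no range tracking here, flush the whole mm */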
void tlb_flush(struct mmu_gather *tlb)
{
	flush_tlb_mm(tlb->mm);
}

#ifndef CONFIG_PPC64
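/*
 * When the device tree marks the platform as a cooperative partition,
 * broadcast tlbivax cannot be used safely on 47x SMP; clear the feature
 * so the IPI-based flush path is taken instead.
 */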
void __init early_init_mmu(void)
{
	unsigned long root = of_get_flat_dt_root();

	if (IS_ENABLED(CONFIG_PPC_47x) && IS_ENABLED(CONFIG_SMP) &&
	    of_get_flat_dt_prop(root, "cooperative-partition", NULL))
		mmu_clear_feature(MMU_FTR_USE_TLBIVAX_BCAST);
}
#endif /* CONFIG_PPC64 */