Merge patch series "riscv: Add runtime constant support"
Charlie Jenkins <charlie@rivosinc.com> says:

Ard brought this to my attention in this patch [1].

I benchmarked this patch on the Nezha D1 (which does not contain Zba or
Zbkb, so it uses the default algorithm) by navigating through a large
directory structure. I created a 1000-deep directory structure and then
cd and ls through it. With this patch there was a 0.57% performance
improvement.

[1] https://lore.kernel.org/lkml/CAMj1kXE4DJnwFejNWQu784GvyJO=aGNrzuLjSxiowX_e7nW8QA@mail.gmail.com/

* patches from https://lore.kernel.org/r/20250319-runtime_const_riscv-v10-0-745b31a11d65@rivosinc.com:
  riscv: Add runtime constant support
  riscv: Move nop definition to insn-def.h

Link: https://lore.kernel.org/linux-riscv/20250319-runtime_const_riscv-v10-0-745b31a11d65@rivosinc.com/
Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
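For context, here is a minimal userspace sketch of the benchmark described in the commit message. It is an assumed reconstruction of the test's shape, not the author's actual script: build a 1000-deep directory chain, then cd and ls through every level, which exercises the per-path-component lookups the patched runtime constants accelerate.

/*
 * Assumed reconstruction of the benchmark shape, not the author's script:
 * a 1000-deep directory chain, walked with cd+ls at each level.
 */
#include <dirent.h>
#include <stdio.h>
#include <sys/stat.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
	struct timespec t0, t1;
	int i;

	for (i = 0; i < 1000; i++) {	/* create the 1000-deep chain */
		mkdir("d", 0755);
		chdir("d");
	}
	for (i = 0; i < 1000; i++)	/* walk back to the top */
		chdir("..");

	clock_gettime(CLOCK_MONOTONIC, &t0);
	for (i = 0; i < 1000; i++) {	/* cd into each level and "ls" it */
		DIR *dir;

		chdir("d");
		dir = opendir(".");
		if (dir) {
			while (readdir(dir))
				;
			closedir(dir);
		}
	}
	clock_gettime(CLOCK_MONOTONIC, &t1);

	printf("%.3f ms\n", (t1.tv_sec - t0.tv_sec) * 1e3 +
			    (t1.tv_nsec - t0.tv_nsec) / 1e6);
	return 0;
}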
This commit is contained in:
commit 74f4bf9d15
arch/riscv/Kconfig
@@ -785,6 +785,28 @@ config RISCV_ISA_ZBC
 
 	   If you don't know what to do here, say Y.
 
+config TOOLCHAIN_HAS_ZBKB
+	bool
+	default y
+	depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64ima_zbkb)
+	depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32ima_zbkb)
+	depends on LLD_VERSION >= 150000 || LD_VERSION >= 23900
+	depends on AS_HAS_OPTION_ARCH
+
+config RISCV_ISA_ZBKB
+	bool "Zbkb extension support for bit manipulation instructions"
+	depends on TOOLCHAIN_HAS_ZBKB
+	depends on RISCV_ALTERNATIVE
+	default y
+	help
+	   Adds support to dynamically detect the presence of the ZBKB
+	   extension (bit manipulation for cryptography) and enable its usage.
+
+	   The Zbkb extension provides instructions to accelerate a number
+	   of common cryptography operations (pack, zip, etc).
+
+	   If you don't know what to do here, say Y.
+
 config RISCV_ISA_ZICBOM
 	bool "Zicbom extension support for non-coherent DMA operation"
 	depends on MMU
arch/riscv/include/asm/asm.h
@@ -27,6 +27,7 @@
 #define REG_ASM		__REG_SEL(.dword, .word)
 #define SZREG		__REG_SEL(8, 4)
 #define LGREG		__REG_SEL(3, 2)
+#define SRLI		__REG_SEL(srliw, srli)
 
 #if __SIZEOF_POINTER__ == 8
 #ifdef __ASSEMBLY__
arch/riscv/include/asm/ftrace.h
@@ -79,7 +79,6 @@ struct dyn_arch_ftrace {
 #define AUIPC_RA	(0x00000097)
 #define JALR_T0		(0x000282e7)
 #define AUIPC_T0	(0x00000297)
-#define NOP4		(0x00000013)
 
 #define to_jalr_t0(offset)					\
 	(((offset & JALR_OFFSET_MASK) << JALR_SHIFT) | JALR_T0)
arch/riscv/include/asm/insn-def.h
@@ -199,5 +199,8 @@
 #define RISCV_PAUSE	".4byte 0x100000f"
 #define ZAWRS_WRS_NTO	".4byte 0x00d00073"
 #define ZAWRS_WRS_STO	".4byte 0x01d00073"
+#define RISCV_NOP4	".4byte 0x00000013"
+
+#define RISCV_INSN_NOP4	_AC(0x00000013, U)
 
 #endif /* __ASM_INSN_DEF_H */
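Both RISCV_NOP4 and RISCV_INSN_NOP4 encode the same instruction word: 0x00000013 is the canonical RISC-V nop, i.e. the I-type instruction addi x0, x0, 0. A standalone sketch (not part of the diff) decoding that constant against the I-type field layout:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t insn = 0x00000013;		/* RISCV_INSN_NOP4 */
	uint32_t opcode = insn & 0x7f;		/* bits 0-6:  0x13 = OP-IMM */
	uint32_t rd     = (insn >> 7) & 0x1f;	/* bits 7-11: destination reg */
	uint32_t funct3 = (insn >> 12) & 0x07;	/* bits 12-14: 0 = addi */
	uint32_t rs1    = (insn >> 15) & 0x1f;	/* bits 15-19: source reg */
	int32_t  imm    = (int32_t)insn >> 20;	/* bits 20-31, sign-extended */

	/* prints: opcode=0x13 rd=x0 funct3=0 rs1=x0 imm=0 -> addi x0,x0,0 */
	printf("opcode=0x%02x rd=x%u funct3=%u rs1=x%u imm=%d\n",
	       opcode, rd, funct3, rs1, imm);
	return 0;
}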
arch/riscv/include/asm/runtime-const.h (new file, 265 lines)
@@ -0,0 +1,265 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_RISCV_RUNTIME_CONST_H
#define _ASM_RISCV_RUNTIME_CONST_H

#include <asm/asm.h>
#include <asm/alternative.h>
#include <asm/cacheflush.h>
#include <asm/insn-def.h>
#include <linux/memory.h>
#include <asm/text-patching.h>

#include <linux/uaccess.h>

#ifdef CONFIG_32BIT
#define runtime_const_ptr(sym)						\
({									\
	typeof(sym) __ret;						\
	asm_inline(".option push\n\t"					\
		   ".option norvc\n\t"					\
		   "1:\t"						\
		   "lui	%[__ret],0x89abd\n\t"				\
		   "addi	%[__ret],%[__ret],-0x211\n\t"		\
		   ".option pop\n\t"					\
		   ".pushsection runtime_ptr_" #sym ",\"a\"\n\t"	\
		   ".long 1b - .\n\t"					\
		   ".popsection"					\
		   : [__ret] "=r" (__ret));				\
	__ret;								\
})
#else
/*
 * Loading 64-bit constants into a register from immediates is a non-trivial
 * task on riscv64. To get it somewhat performant, load 32 bits into two
 * different registers and then combine the results.
 *
 * If the processor supports the Zbkb extension, we can combine the final
 * "slli,slli,srli,add" into the single "pack" instruction. If the processor
 * doesn't support Zbkb but does support the Zba extension, we can
 * combine the final "slli,srli,add" into one instruction "add.uw".
 */
#define RISCV_RUNTIME_CONST_64_PREAMBLE					\
	".option push\n\t"						\
	".option norvc\n\t"						\
	"1:\t"								\
	"lui	%[__ret],0x89abd\n\t"					\
	"lui	%[__tmp],0x1234\n\t"					\
	"addiw	%[__ret],%[__ret],-0x211\n\t"				\
	"addiw	%[__tmp],%[__tmp],0x567\n\t"				\

#define RISCV_RUNTIME_CONST_64_BASE					\
	"slli	%[__tmp],%[__tmp],32\n\t"				\
	"slli	%[__ret],%[__ret],32\n\t"				\
	"srli	%[__ret],%[__ret],32\n\t"				\
	"add	%[__ret],%[__ret],%[__tmp]\n\t"				\

#define RISCV_RUNTIME_CONST_64_ZBA					\
	".option push\n\t"						\
	".option arch,+zba\n\t"						\
	"slli	%[__tmp],%[__tmp],32\n\t"				\
	"add.uw %[__ret],%[__ret],%[__tmp]\n\t"				\
	"nop\n\t"							\
	"nop\n\t"							\
	".option pop\n\t"						\

#define RISCV_RUNTIME_CONST_64_ZBKB					\
	".option push\n\t"						\
	".option arch,+zbkb\n\t"					\
	"pack	%[__ret],%[__ret],%[__tmp]\n\t"				\
	"nop\n\t"							\
	"nop\n\t"							\
	"nop\n\t"							\
	".option pop\n\t"						\

#define RISCV_RUNTIME_CONST_64_POSTAMBLE(sym)				\
	".option pop\n\t"						\
	".pushsection runtime_ptr_" #sym ",\"a\"\n\t"			\
	".long 1b - .\n\t"						\
	".popsection"							\

#if defined(CONFIG_RISCV_ISA_ZBA) && defined(CONFIG_RISCV_ISA_ZBKB)
#define runtime_const_ptr(sym)						\
({									\
	typeof(sym) __ret, __tmp;					\
	asm_inline(RISCV_RUNTIME_CONST_64_PREAMBLE			\
		   ALTERNATIVE_2(					\
			RISCV_RUNTIME_CONST_64_BASE,			\
			RISCV_RUNTIME_CONST_64_ZBA,			\
			0, RISCV_ISA_EXT_ZBA, 1,			\
			RISCV_RUNTIME_CONST_64_ZBKB,			\
			0, RISCV_ISA_EXT_ZBKB, 1			\
		   )							\
		   RISCV_RUNTIME_CONST_64_POSTAMBLE(sym)		\
		   : [__ret] "=r" (__ret), [__tmp] "=r" (__tmp));	\
	__ret;								\
})
#elif defined(CONFIG_RISCV_ISA_ZBA)
#define runtime_const_ptr(sym)						\
({									\
	typeof(sym) __ret, __tmp;					\
	asm_inline(RISCV_RUNTIME_CONST_64_PREAMBLE			\
		   ALTERNATIVE(						\
			RISCV_RUNTIME_CONST_64_BASE,			\
			RISCV_RUNTIME_CONST_64_ZBA,			\
			0, RISCV_ISA_EXT_ZBA, 1			\
		   )							\
		   RISCV_RUNTIME_CONST_64_POSTAMBLE(sym)		\
		   : [__ret] "=r" (__ret), [__tmp] "=r" (__tmp));	\
	__ret;								\
})
#elif defined(CONFIG_RISCV_ISA_ZBKB)
#define runtime_const_ptr(sym)						\
({									\
	typeof(sym) __ret, __tmp;					\
	asm_inline(RISCV_RUNTIME_CONST_64_PREAMBLE			\
		   ALTERNATIVE(						\
			RISCV_RUNTIME_CONST_64_BASE,			\
			RISCV_RUNTIME_CONST_64_ZBKB,			\
			0, RISCV_ISA_EXT_ZBKB, 1			\
		   )							\
		   RISCV_RUNTIME_CONST_64_POSTAMBLE(sym)		\
		   : [__ret] "=r" (__ret), [__tmp] "=r" (__tmp));	\
	__ret;								\
})
#else
#define runtime_const_ptr(sym)						\
({									\
	typeof(sym) __ret, __tmp;					\
	asm_inline(RISCV_RUNTIME_CONST_64_PREAMBLE			\
		   RISCV_RUNTIME_CONST_64_BASE				\
		   RISCV_RUNTIME_CONST_64_POSTAMBLE(sym)		\
		   : [__ret] "=r" (__ret), [__tmp] "=r" (__tmp));	\
	__ret;								\
})
#endif
#endif

#define runtime_const_shift_right_32(val, sym)				\
({									\
	u32 __ret;							\
	asm_inline(".option push\n\t"					\
		   ".option norvc\n\t"					\
		   "1:\t"						\
		   SRLI " %[__ret],%[__val],12\n\t"			\
		   ".option pop\n\t"					\
		   ".pushsection runtime_shift_" #sym ",\"a\"\n\t"	\
		   ".long 1b - .\n\t"					\
		   ".popsection"					\
		   : [__ret] "=r" (__ret)				\
		   : [__val] "r" (val));				\
	__ret;								\
})

#define runtime_const_init(type, sym) do {				\
	extern s32 __start_runtime_##type##_##sym[];			\
	extern s32 __stop_runtime_##type##_##sym[];			\
									\
	runtime_const_fixup(__runtime_fixup_##type,			\
			    (unsigned long)(sym),			\
			    __start_runtime_##type##_##sym,		\
			    __stop_runtime_##type##_##sym);		\
} while (0)

static inline void __runtime_fixup_caches(void *where, unsigned int insns)
{
	/* On riscv there are currently only cache-wide flushes so va is ignored. */
	__always_unused uintptr_t va = (uintptr_t)where;

	flush_icache_range(va, va + 4 * insns);
}

/*
 * The 32-bit immediate is stored in a lui+addi pairing.
 * lui holds the upper 20 bits of the immediate in the first 20 bits of the instruction.
 * addi holds the lower 12 bits of the immediate in the first 12 bits of the instruction.
 */
static inline void __runtime_fixup_32(__le16 *lui_parcel, __le16 *addi_parcel, unsigned int val)
{
	unsigned int lower_immediate, upper_immediate;
	u32 lui_insn, addi_insn, addi_insn_mask;
	__le32 lui_res, addi_res;

	/* Mask out upper 12 bits of addi */
	addi_insn_mask = 0x000fffff;

	lui_insn = (u32)le16_to_cpu(lui_parcel[0]) | (u32)le16_to_cpu(lui_parcel[1]) << 16;
	addi_insn = (u32)le16_to_cpu(addi_parcel[0]) | (u32)le16_to_cpu(addi_parcel[1]) << 16;

	lower_immediate = sign_extend32(val, 11);
	upper_immediate = (val - lower_immediate);

	if (upper_immediate & 0xfffff000) {
		/* replace upper 20 bits of lui with upper immediate */
		lui_insn &= 0x00000fff;
		lui_insn |= upper_immediate & 0xfffff000;
	} else {
		/* replace lui with nop if immediate is small enough to fit in addi */
		lui_insn = RISCV_INSN_NOP4;
		/*
		 * lui is being skipped, so do a load instead of an add. A load
		 * is performed by adding with the x0 register. Setting rs to
		 * zero with the following mask will accomplish this goal.
		 */
		addi_insn_mask &= 0x07fff;
	}

	if (lower_immediate & 0x00000fff) {
		/* replace upper 12 bits of addi with lower 12 bits of val */
		addi_insn &= addi_insn_mask;
		addi_insn |= (lower_immediate & 0x00000fff) << 20;
	} else {
		/* replace addi with nop if lower_immediate is empty */
		addi_insn = RISCV_INSN_NOP4;
	}

	addi_res = cpu_to_le32(addi_insn);
	lui_res = cpu_to_le32(lui_insn);
	mutex_lock(&text_mutex);
	patch_insn_write(addi_parcel, &addi_res, sizeof(addi_res));
	patch_insn_write(lui_parcel, &lui_res, sizeof(lui_res));
	mutex_unlock(&text_mutex);
}

static inline void __runtime_fixup_ptr(void *where, unsigned long val)
{
#ifdef CONFIG_32BIT
	__runtime_fixup_32(where, where + 4, val);
	__runtime_fixup_caches(where, 2);
#else
	__runtime_fixup_32(where, where + 8, val);
	__runtime_fixup_32(where + 4, where + 12, val >> 32);
	__runtime_fixup_caches(where, 4);
#endif
}

/*
 * Replace the least significant 5 bits of the srli/srliw immediate that is
 * located at bits 20-24
 */
static inline void __runtime_fixup_shift(void *where, unsigned long val)
{
	__le16 *parcel = where;
	__le32 res;
	u32 insn;

	insn = (u32)le16_to_cpu(parcel[0]) | (u32)le16_to_cpu(parcel[1]) << 16;

	insn &= 0xfe0fffff;
	insn |= (val & 0b11111) << 20;

	res = cpu_to_le32(insn);
	mutex_lock(&text_mutex);
	patch_text_nosync(where, &res, sizeof(insn));
	mutex_unlock(&text_mutex);
}

static inline void runtime_const_fixup(void (*fn)(void *, unsigned long),
				       unsigned long val, s32 *start, s32 *end)
{
	while (start < end) {
		fn(*start + (void *)start, val);
		start++;
	}
}

#endif /* _ASM_RISCV_RUNTIME_CONST_H */
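One subtlety in __runtime_fixup_32() deserves a worked example: addi sign-extends its 12-bit immediate, so when bit 11 of the target value is set, the lui immediate must be biased upward by 0x1000 to compensate. Computing upper_immediate as val - sign_extend32(val, 11) produces exactly that bias. Below is a standalone userspace sketch (not kernel code) of the arithmetic; note how it reproduces the 0x89abd/-0x211 placeholder pair used in the asm above, which reassembles to 0x89abcdef.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* userspace stand-in for the kernel's sign_extend32() */
static int32_t sign_extend32(uint32_t v, int sb)
{
	uint32_t m = 1u << sb;

	return (int32_t)((v & (2 * m - 1)) ^ m) - (int32_t)m;
}

int main(void)
{
	uint32_t vals[] = { 0x89abcdef, 0x00000fff, 0x12345678, 0x00000000 };

	for (int i = 0; i < 4; i++) {
		uint32_t val = vals[i];
		int32_t lower = sign_extend32(val, 11);	/* addi immediate */
		uint32_t upper = val - (uint32_t)lower;	/* lui immediate << 12 */

		assert((upper & 0xfff) == 0);		/* fits lui exactly */
		assert(upper + (uint32_t)lower == val);	/* lui+addi rebuilds val */
		/* 0x89abcdef -> lui 0x89abd, addi -0x211, as in the asm */
		printf("val=0x%08x  lui imm=0x%05x  addi imm=%d\n",
		       val, upper >> 12, lower);
	}
	return 0;
}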
arch/riscv/kernel/ftrace.c
@@ -36,7 +36,7 @@ static int ftrace_check_current_call(unsigned long hook_pos,
 				     unsigned int *expected)
 {
 	unsigned int replaced[2];
-	unsigned int nops[2] = {NOP4, NOP4};
+	unsigned int nops[2] = {RISCV_INSN_NOP4, RISCV_INSN_NOP4};
 
 	/* we expect nops at the hook position */
 	if (!expected)
@@ -68,7 +68,7 @@ static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
 				bool enable, bool ra)
 {
 	unsigned int call[2];
-	unsigned int nops[2] = {NOP4, NOP4};
+	unsigned int nops[2] = {RISCV_INSN_NOP4, RISCV_INSN_NOP4};
 
 	if (ra)
 		make_call_ra(hook_pos, target, call);
@@ -97,7 +97,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
 		    unsigned long addr)
 {
-	unsigned int nops[2] = {NOP4, NOP4};
+	unsigned int nops[2] = {RISCV_INSN_NOP4, RISCV_INSN_NOP4};
 
 	if (patch_insn_write((void *)rec->ip, nops, MCOUNT_INSN_SIZE))
 		return -EPERM;
arch/riscv/kernel/jump_label.c
@@ -11,8 +11,8 @@
 #include <asm/bug.h>
 #include <asm/cacheflush.h>
 #include <asm/text-patching.h>
+#include <asm/insn-def.h>
 
-#define RISCV_INSN_NOP 0x00000013U
 #define RISCV_INSN_JAL 0x0000006fU
 
 bool arch_jump_label_transform_queue(struct jump_entry *entry,
@@ -33,7 +33,7 @@ bool arch_jump_label_transform_queue(struct jump_entry *entry,
 			(((u32)offset & GENMASK(10, 1)) << (21 - 1)) |
 			(((u32)offset & GENMASK(20, 20)) << (31 - 20));
 	} else {
-		insn = RISCV_INSN_NOP;
+		insn = RISCV_INSN_NOP4;
 	}
 
 	if (early_boot_irqs_disabled) {
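The GENMASK arithmetic in the context lines above is the J-type immediate scramble: a jal offset is stored as imm[20|10:1|11|19:12] in instruction bits [31|30:21|20|19:12]. A standalone sketch of the full encoding follows (not part of the diff; the first two terms are reconstructed from the same pattern, so treat them as an assumption):

#include <stdint.h>
#include <stdio.h>

#define RISCV_INSN_JAL	0x0000006fU	/* jal with rd=x0, i.e. plain "j" */
#define GENMASK(h, l)	((~0u << (l)) & (~0u >> (31 - (h))))

/* place a +/-1 MiB, 2-byte-aligned byte offset into the J-type fields */
static uint32_t riscv_jal_x0(int32_t offset)
{
	return RISCV_INSN_JAL |
	       (((uint32_t)offset & GENMASK(19, 12)) << (12 - 12)) |
	       (((uint32_t)offset & GENMASK(11, 11)) << (20 - 11)) |
	       (((uint32_t)offset & GENMASK(10, 1))  << (21 - 1))  |
	       (((uint32_t)offset & GENMASK(20, 20)) << (31 - 20));
}

int main(void)
{
	/* offset 8: imm[3] lands in bit 23 -> 0x0080006f, i.e. "j .+8" */
	printf("0x%08x\n", riscv_jal_x0(8));
	return 0;
}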
arch/riscv/kernel/vmlinux.lds.S
@@ -97,6 +97,9 @@ SECTIONS
 	{
 		EXIT_DATA
 	}
 
+	RUNTIME_CONST_VARIABLES
+
 	PERCPU_SECTION(L1_CACHE_BYTES)
 
 	.rel.dyn : {
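RUNTIME_CONST_VARIABLES is what keeps the generated runtime_ptr_*/runtime_shift_* sections, and their __start/__stop symbols, available for the fixup pass. For the consumer side, the sketch below is modeled on how fs/dcache.c uses the generic runtime-const machinery; it is a paraphrase for illustration, not part of this diff. The "ptr" and "shift" type tokens in runtime_const_init() select __runtime_fixup_ptr() and __runtime_fixup_shift() from the new header.

#include <asm/runtime-const.h>

static struct hlist_bl_head *dentry_hashtable __ro_after_init;
static unsigned int d_hash_shift __ro_after_init;

static inline struct hlist_bl_head *d_hash(unsigned long hashlen)
{
	/* compiles to the patchable lui/addi (and srli) sequences above */
	return runtime_const_ptr(dentry_hashtable) +
		runtime_const_shift_right_32(hashlen, d_hash_shift);
}

static void __init dcache_init(void)
{
	/* once the final values are known, patch every use site */
	runtime_const_init(ptr, dentry_hashtable);
	runtime_const_init(shift, d_hash_shift);
}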