mirror of
https://github.com/torvalds/linux.git
synced 2025-04-09 14:45:27 +00:00

- Implement arm64 build time sorting of the mcount location table When gcc is used to build arm64, the mcount_loc section is all zeros in the vmlinux elf file. The addresses are stored in the Elf_Rela location. To sort at build time, an array is allocated and the addresses are added to it via the content of the mcount_loc section as well as the Elf_Rela data. After sorting, the information is put back into the Elf_Rela which now has the section sorted. - Make sorting of mcount location table for arm64 work with clang as well When clang is used, the mcount_loc section contains the addresses, unlike the gcc build. An array is still created and the sorting works for both methods. - Remove weak functions from the mcount_loc section Have the sorttable code pass in the data of functions defined via 'nm -S' which shows the functions as well as their sizes. Using this information the sorttable code can determine if a function in the mcount_loc section was weak and overridden. If the function is not found, it is set to be zero. On boot, when the mcount_loc section is read and the ftrace table is created, if the address in the mcount_loc is not in the kernel core text then it is removed and not added to the ftrace_filter_functions (the functions that can be attached by ftrace callbacks). - Update and fix the reporting of how much data is used for ftrace functions On boot, a report of how many pages were used by the ftrace table as well as how they were grouped (the table holds a list of sections that are groups of pages that were able to be allocated).
-----BEGIN PGP SIGNATURE----- iIoEABYIADIWIQRRSw7ePDh/lE+zeZMp5XQQmuv6qgUCZ+MnThQccm9zdGVkdEBn b29kbWlzLm9yZwAKCRAp5XQQmuv6qivsAQDhPOCaONai7rvHX9T1aOHGjdajZ7SI qoZgBOsc2ZUkoQD/U2M/m7Yof9aR4I+VFKtT5NsAwpfqPSOL/t/1j6UEOQ8= =45AV -----END PGP SIGNATURE----- Merge tag 'trace-sorttable-v6.15' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace Pull tracing / sorttable updates from Steven Rostedt: - Implement arm64 build time sorting of the mcount location table When gcc is used to build arm64, the mcount_loc section is all zeros in the vmlinux elf file. The addresses are stored in the Elf_Rela location. To sort at build time, an array is allocated and the addresses are added to it via the content of the mcount_loc section as well as he Elf_Rela data. After sorting, the information is put back into the Elf_Rela which now has the section sorted. - Make sorting of mcount location table for arm64 work with clang as well When clang is used, the mcount_loc section contains the addresses, unlike the gcc build. An array is still created and the sorting works for both methods. - Remove weak functions from the mcount_loc section Have the sorttable code pass in the data of functions defined via 'nm -S' which shows the functions as well as their sizes. Using this information the sorttable code can determine if a function in the mcount_loc section was weak and overridden. If the function is not found, it is set to be zero. On boot, when the mcount_loc section is read and the ftrace table is created, if the address in the mcount_loc is not in the kernel core text then it is removed and not added to the ftrace_filter_functions (the functions that can be attached by ftrace callbacks). - Update and fix the reporting of how much data is used for ftrace functions On boot, a report of how many pages were used by the ftrace table as well as how they were grouped (the table holds a list of sections that are groups of pages that were able to be allocated). 
The removing of the weak functions required the accounting to be updated. * tag 'trace-sorttable-v6.15' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace: scripts/sorttable: Allow matches to functions before function entry scripts/sorttable: Use normal sort if theres no relocs in the mcount section ftrace: Check against is_kernel_text() instead of kaslr_offset() ftrace: Test mcount_loc addr before calling ftrace_call_addr() ftrace: Have ftrace pages output reflect freed pages ftrace: Update the mcount_loc check of skipped entries scripts/sorttable: Zero out weak functions in mcount_loc table scripts/sorttable: Always use an array for the mcount_loc sorting scripts/sorttable: Have mcount rela sort use direct values arm64: scripts/sorttable: Implement sorting mcount_loc at boot for arm64
312 lines
7.6 KiB
Bash
Executable File
312 lines
7.6 KiB
Bash
Executable File
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
#
# link vmlinux
#
# vmlinux is linked from the objects in vmlinux.a and $(KBUILD_VMLINUX_LIBS).
# vmlinux.a contains objects that are linked unconditionally.
# $(KBUILD_VMLINUX_LIBS) are archives which are linked conditionally
# (not within --whole-archive), and do not require symbol indexes added.
#
# vmlinux
#   ^
#   |
#   +--< vmlinux.a
#   |
#   +--< $(KBUILD_VMLINUX_LIBS)
#   |    +--< lib/lib.a + more
#   |
#   +-< ${kallsymso} (see description in KALLSYMS section)
#
# vmlinux version (uname -v) cannot be updated during normal
# descending-into-subdirs phase since we do not yet know if we need to
# update vmlinux.
# Therefore this step is delayed until just before final link of vmlinux.
#
# System.map is generated to document addresses of all kernel symbols
# Abort the script as soon as any command fails.
set -e

# Arguments handed over from the top-level Makefile.
LD=$1			# linker executable
KBUILD_LDFLAGS=$2	# linker flags from Kbuild
LDFLAGS_vmlinux=$3	# extra flags for the vmlinux link
# is_enabled CONFIG_SYMBOL
# Succeed iff the given config symbol is set to 'y' in the generated
# include/config/auto.conf.
is_enabled() {
	grep -q "^${1}=y" include/config/auto.conf
}
# Print one line of kbuild-style status output ("  LD      vmlinux").
# Suppressed by "make -s".
# ${1} - tag, ${2} - message
info() {
	printf " %-7s %s\n" "$1" "$2"
}
# Link of vmlinux
# ${1} - output file
#
# Reads the globals set up by the main script: kallsymso,
# btf_vmlinux_bin_o, arch_vmlinux_o and strip_debug.
vmlinux_link()
{
	local output=${1}
	local objs
	local libs
	local ld
	local ldflags
	local ldlibs
	# Keep wl local so it does not leak into the caller's scope.
	local wl

	info LD "${output}"

	# skip output file argument
	shift

	if is_enabled CONFIG_LTO_CLANG || is_enabled CONFIG_X86_KERNEL_IBT; then
		# Use vmlinux.o instead of performing the slow LTO link again.
		objs=vmlinux.o
		libs=
	else
		objs=vmlinux.a
		libs="${KBUILD_VMLINUX_LIBS}"
	fi

	if is_enabled CONFIG_GENERIC_BUILTIN_DTB; then
		objs="${objs} .builtin-dtbs.o"
	fi

	if is_enabled CONFIG_MODULES; then
		objs="${objs} .vmlinux.export.o"
	fi

	objs="${objs} init/version-timestamp.o"

	if [ "${SRCARCH}" = "um" ]; then
		# UML links with the host compiler driver, so linker options
		# need the -Wl, prefix to be passed through to the linker.
		wl=-Wl,
		ld="${CC}"
		ldflags="${CFLAGS_vmlinux}"
		ldlibs="-lutil -lrt -lpthread"
	else
		wl=
		ld="${LD}"
		ldflags="${KBUILD_LDFLAGS} ${LDFLAGS_vmlinux}"
		ldlibs=
	fi

	ldflags="${ldflags} ${wl}--script=${objtree}/${KBUILD_LDS}"

	# The kallsyms linking does not need debug symbols included.
	if [ -n "${strip_debug}" ] ; then
		ldflags="${ldflags} ${wl}--strip-debug"
	fi

	if is_enabled CONFIG_VMLINUX_MAP; then
		ldflags="${ldflags} ${wl}-Map=${output}.map"
	fi

	# Word splitting of ${ldflags}, ${objs}, ${libs}, ... is intentional:
	# they hold whitespace-separated option lists.
	${ld} ${ldflags} -o ${output} \
		${wl}--whole-archive ${objs} ${wl}--no-whole-archive \
		${wl}--start-group ${libs} ${wl}--end-group \
		${kallsymso} ${btf_vmlinux_bin_o} ${arch_vmlinux_o} ${ldlibs}
}
# generate .BTF typeinfo from DWARF debuginfo
# ${1} - vmlinux image
#
# Side effects: runs pahole on ${1} in place, creates ${1}.btf.o, and
# records its path in the global btf_vmlinux_bin_o for the final link.
# Returns non-zero (and the caller aborts) if any step fails, since the
# script runs under "set -e" semantics via the caller's "if ! gen_btf".
gen_btf()
{
	local btf_data=${1}.btf.o

	info BTF "${btf_data}"
	# pahole -J encodes the BTF typeinfo into ${1} itself; the .BTF
	# section is then extracted below.
	LLVM_OBJCOPY="${OBJCOPY}" ${PAHOLE} -J ${PAHOLE_FLAGS} ${1}

	# Create ${btf_data} which contains just .BTF section but no symbols. Add
	# SHF_ALLOC because .BTF will be part of the vmlinux image. --strip-all
	# deletes all symbols including __start_BTF and __stop_BTF, which will
	# be redefined in the linker script. Add 2>/dev/null to suppress GNU
	# objcopy warnings: "empty loadable segment detected at ..."
	${OBJCOPY} --only-section=.BTF --set-section-flags .BTF=alloc,readonly \
		--strip-all ${1} "${btf_data}" 2>/dev/null
	# Change e_type to ET_REL so that it can be used to link final vmlinux.
	# GNU ld 2.35+ and lld do not allow an ET_EXEC input.
	# ET_REL is 1; the two bytes below are that value in the image's
	# endianness.
	if is_enabled CONFIG_CPU_BIG_ENDIAN; then
		et_rel='\0\1'
	else
		et_rel='\1\0'
	fi
	# e_type sits at byte offset 16 of the ELF header; patch it in place.
	# ${et_rel} is deliberately used as the printf format so its \0/\1
	# escapes are interpreted as raw bytes.
	printf "${et_rel}" | dd of="${btf_data}" conv=notrunc bs=1 seek=16 status=none

	btf_vmlinux_bin_o=${btf_data}
}
# Create ${2}.o file with all symbols from the ${1} object file.
# ${1} - input symbol list (System.map-style)
# ${2} - basename for the generated ${2}.S / ${2}.o
# Sets the global kallsymso to the path of the generated object.
kallsyms()
{
	local symfile=${1}
	local out=${2}
	local kallsymopt=

	if is_enabled CONFIG_KALLSYMS_ALL; then
		kallsymopt="${kallsymopt} --all-symbols"
	fi

	info KSYMS "${out}.S"
	scripts/kallsyms ${kallsymopt} "${symfile}" > "${out}.S"

	# Assemble the generated symbol table into an object file.
	info AS "${out}.o"
	${CC} ${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS} \
		${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} -c -o "${out}.o" "${out}.S"

	kallsymso=${out}.o
}
# Run the System.map + kallsyms generation pass for one temporary vmlinux.
# ${1} - temporary vmlinux image
# Sets the global kallsyms_sysmap to the generated symbol map, and (via
# kallsyms) the global kallsymso to the generated object.
sysmap_and_kallsyms()
{
	local image=${1}

	mksysmap "${image}" "${image}.syms"
	kallsyms "${image}.syms" "${image}.kallsyms"

	kallsyms_sysmap=${image}.syms
}
# Create map file with all symbols from ${1}
# ${1} - object file to read symbols from
# ${2} - output map file
# See scripts/mksysmap for the sed rules that filter the nm output.
mksysmap()
{
	# Quote ${2}: keep it a single argument even if empty/odd, and match
	# the quoted style used by the other info calls in this file.
	info NM "${2}"
	${NM} -n "${1}" | sed -f "${srctree}/scripts/mksysmap" > "${2}"
}
# Sort the kernel tables (exception table, mcount_loc, ...) in ${1} in place.
# ${1} - vmlinux image
# 'nm -S' lists symbols with their sizes; sorttable uses that to detect and
# zero out overridden weak functions in the mcount_loc table.
# .tmp_vmlinux.nm-sort is removed later by cleanup().
sorttable()
{
	# Quote ${1}: pass the image path as a single argument (SC2086).
	${NM} -S "${1}" > .tmp_vmlinux.nm-sort
	${objtree}/scripts/sorttable -s .tmp_vmlinux.nm-sort "${1}"
}
# Remove everything this script may have generated; used by "make clean".
# rm -f ignores files that do not exist, so this is safe on a clean tree.
cleanup()
{
	rm -f .btf.* .tmp_vmlinux.nm-sort System.map vmlinux vmlinux.map
}
# Use "make V=1" to debug this script
|
|
case "${KBUILD_VERBOSE}" in
|
|
*1*)
|
|
set -x
|
|
;;
|
|
esac
|
|
|
|
if [ "$1" = "clean" ]; then
|
|
cleanup
|
|
exit 0
|
|
fi
|
|
|
|
${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init init/version-timestamp.o
|
|
|
|
arch_vmlinux_o=
|
|
if is_enabled CONFIG_ARCH_WANTS_PRE_LINK_VMLINUX; then
|
|
arch_vmlinux_o=arch/${SRCARCH}/tools/vmlinux.arch.o
|
|
fi
|
|
|
|
btf_vmlinux_bin_o=
|
|
kallsymso=
|
|
strip_debug=
|
|
|
|
if is_enabled CONFIG_KALLSYMS; then
|
|
true > .tmp_vmlinux0.syms
|
|
kallsyms .tmp_vmlinux0.syms .tmp_vmlinux0.kallsyms
|
|
fi
|
|
|
|
if is_enabled CONFIG_KALLSYMS || is_enabled CONFIG_DEBUG_INFO_BTF; then
|
|
|
|
# The kallsyms linking does not need debug symbols, but the BTF does.
|
|
if ! is_enabled CONFIG_DEBUG_INFO_BTF; then
|
|
strip_debug=1
|
|
fi
|
|
|
|
vmlinux_link .tmp_vmlinux1
|
|
fi
|
|
|
|
if is_enabled CONFIG_DEBUG_INFO_BTF; then
|
|
if ! gen_btf .tmp_vmlinux1; then
|
|
echo >&2 "Failed to generate BTF for vmlinux"
|
|
echo >&2 "Try to disable CONFIG_DEBUG_INFO_BTF"
|
|
exit 1
|
|
fi
|
|
fi
|
|
|
|
if is_enabled CONFIG_KALLSYMS; then
|
|
|
|
# kallsyms support
|
|
# Generate section listing all symbols and add it into vmlinux
|
|
# It's a four step process:
|
|
# 0) Generate a dummy __kallsyms with empty symbol list.
|
|
# 1) Link .tmp_vmlinux1.kallsyms so it has all symbols and sections,
|
|
# with a dummy __kallsyms.
|
|
# Running kallsyms on that gives us .tmp_vmlinux1.kallsyms.o with
|
|
# the right size
|
|
# 2) Link .tmp_vmlinux2.kallsyms so it now has a __kallsyms section of
|
|
# the right size, but due to the added section, some
|
|
# addresses have shifted.
|
|
# From here, we generate a correct .tmp_vmlinux2.kallsyms.o
|
|
# 3) That link may have expanded the kernel image enough that
|
|
# more linker branch stubs / trampolines had to be added, which
|
|
# introduces new names, which further expands kallsyms. Do another
|
|
# pass if that is the case. In theory it's possible this results
|
|
# in even more stubs, but unlikely.
|
|
# KALLSYMS_EXTRA_PASS=1 may also used to debug or work around
|
|
# other bugs.
|
|
# 4) The correct ${kallsymso} is linked into the final vmlinux.
|
|
#
|
|
# a) Verify that the System.map from vmlinux matches the map from
|
|
# ${kallsymso}.
|
|
|
|
# The kallsyms linking does not need debug symbols included.
|
|
strip_debug=1
|
|
|
|
sysmap_and_kallsyms .tmp_vmlinux1
|
|
size1=$(${CONFIG_SHELL} "${srctree}/scripts/file-size.sh" ${kallsymso})
|
|
|
|
vmlinux_link .tmp_vmlinux2
|
|
sysmap_and_kallsyms .tmp_vmlinux2
|
|
size2=$(${CONFIG_SHELL} "${srctree}/scripts/file-size.sh" ${kallsymso})
|
|
|
|
if [ $size1 -ne $size2 ] || [ -n "${KALLSYMS_EXTRA_PASS}" ]; then
|
|
vmlinux_link .tmp_vmlinux3
|
|
sysmap_and_kallsyms .tmp_vmlinux3
|
|
fi
|
|
fi
|
|
|
|
strip_debug=
|
|
|
|
vmlinux_link vmlinux
|
|
|
|
# fill in BTF IDs
|
|
if is_enabled CONFIG_DEBUG_INFO_BTF; then
|
|
info BTFIDS vmlinux
|
|
RESOLVE_BTFIDS_ARGS=""
|
|
if is_enabled CONFIG_WERROR; then
|
|
RESOLVE_BTFIDS_ARGS=" --fatal_warnings "
|
|
fi
|
|
${RESOLVE_BTFIDS} ${RESOLVE_BTFIDS_ARGS} vmlinux
|
|
fi
|
|
|
|
mksysmap vmlinux System.map
|
|
|
|
if is_enabled CONFIG_BUILDTIME_TABLE_SORT; then
|
|
info SORTTAB vmlinux
|
|
if ! sorttable vmlinux; then
|
|
echo >&2 Failed to sort kernel tables
|
|
exit 1
|
|
fi
|
|
fi
|
|
|
|
# step a (see comment above)
|
|
if is_enabled CONFIG_KALLSYMS; then
|
|
if ! cmp -s System.map "${kallsyms_sysmap}"; then
|
|
echo >&2 Inconsistent kallsyms data
|
|
echo >&2 'Try "make KALLSYMS_EXTRA_PASS=1" as a workaround'
|
|
exit 1
|
|
fi
|
|
fi
|
|
|
|
# For fixdep
|
|
echo "vmlinux: $0" > .vmlinux.d
|