Mirror of https://github.com/torvalds/linux.git, synced 2025-04-09 14:45:27 +00:00.

Backmerge tag 'v6.14-rc6' ("Linux 6.14-rc6") into drm-next

This is a backmerge from Linux 6.14-rc6, needed for the nova PR.

Signed-off-by: Dave Airlie <airlied@redhat.com>

commit 626fb11566
.mailmap | 3

@@ -88,7 +88,6 @@ Antonio Quartulli <antonio@mandelbit.com> <antonio@open-mesh.com>
 Antonio Quartulli <antonio@mandelbit.com> <antonio.quartulli@open-mesh.com>
 Antonio Quartulli <antonio@mandelbit.com> <ordex@autistici.org>
 Antonio Quartulli <antonio@mandelbit.com> <ordex@ritirata.org>
-Antonio Quartulli <antonio@mandelbit.com> <antonio@openvpn.net>
 Antonio Quartulli <antonio@mandelbit.com> <a@unstable.cc>
 Anup Patel <anup@brainfault.org> <anup.patel@wdc.com>
 Archit Taneja <archit@ti.com>
@@ -524,6 +523,7 @@ Nadav Amit <nadav.amit@gmail.com> <namit@cs.technion.ac.il>
 Nadia Yvette Chambers <nyc@holomorphy.com> William Lee Irwin III <wli@holomorphy.com>
 Naoya Horiguchi <nao.horiguchi@gmail.com> <n-horiguchi@ah.jp.nec.com>
 Naoya Horiguchi <nao.horiguchi@gmail.com> <naoya.horiguchi@nec.com>
+Natalie Vock <natalie.vock@gmx.de> <friedrich.vock@gmx.de>
 Nathan Chancellor <nathan@kernel.org> <natechancellor@gmail.com>
 Naveen N Rao <naveen@kernel.org> <naveen.n.rao@linux.ibm.com>
 Naveen N Rao <naveen@kernel.org> <naveen.n.rao@linux.vnet.ibm.com>
@@ -693,6 +693,7 @@ Subbaraman Narayanamurthy <quic_subbaram@quicinc.com> <subbaram@codeaurora.org>
 Subhash Jadavani <subhashj@codeaurora.org>
 Sudarshan Rajagopalan <quic_sudaraja@quicinc.com> <sudaraja@codeaurora.org>
 Sudeep Holla <sudeep.holla@arm.com> Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
+Sumit Garg <sumit.garg@kernel.org> <sumit.garg@linaro.org>
 Sumit Semwal <sumit.semwal@ti.com>
 Surabhi Vishnoi <quic_svishnoi@quicinc.com> <svishnoi@codeaurora.org>
 Sven Eckelmann <sven@narfation.org> <seckelmann@datto.com>
@@ -176,7 +176,7 @@ Configuring the kernel
                      values without prompting.

   "make defconfig"   Create a ./.config file by using the default
-                     symbol values from either arch/$ARCH/defconfig
+                     symbol values from either arch/$ARCH/configs/defconfig
                      or arch/$ARCH/configs/${PLATFORM}_defconfig,
                      depending on the architecture.

@@ -212,6 +212,17 @@ pid>/``).
 This value defaults to 0.

+core_sort_vma
+=============
+
+The default coredump writes VMAs in address order. By setting
+``core_sort_vma`` to 1, VMAs will be written from smallest size
+to largest size. This is known to break at least elfutils, but
+can be handy when dealing with very large (and truncated)
+coredumps where the more useful debugging details are included
+in the smaller VMAs.
+
 core_uses_pid
 =============

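Aside: for anyone who wants to try the new knob, here is a minimal user-space sketch. It assumes the sysctl is exposed as /proc/sys/kernel/core_sort_vma, consistent with the other core_* entries documented in this file; it is illustrative, not part of the patch.

/* toggle_core_sort_vma.c — minimal sketch, not from the kernel tree. */
#include <stdio.h>

int main(void)
{
    /* Assumed path, matching the core_* sysctls above. */
    FILE *f = fopen("/proc/sys/kernel/core_sort_vma", "w");

    if (!f) {
        perror("core_sort_vma");
        return 1;
    }
    fputs("1\n", f);    /* 1 = smallest VMAs first; 0 = address order */
    return fclose(f) ? 1 : 0;
}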
@@ -18,6 +18,7 @@ Introduction
 both access system memory directly and with the same effective
 addresses.

+**This driver is deprecated and will be removed in a future release.**

 Hardware overview
 =================
@@ -453,7 +454,7 @@ Sysfs Class

 A cxl sysfs class is added under /sys/class/cxl to facilitate
 enumeration and tuning of the accelerators. Its layout is
-described in Documentation/ABI/testing/sysfs-class-cxl
+described in Documentation/ABI/obsolete/sysfs-class-cxl

 Udev rules
@@ -146,6 +146,7 @@ properties:
     maxItems: 2

   pwm-names:
+    minItems: 1
     items:
       - const: convst1
       - const: convst2
@@ -63,8 +63,8 @@ what id ``k11000`` corresponds to in the second or third idmapping. The
 straightforward algorithm to use is to apply the inverse of the first idmapping,
 mapping ``k11000`` up to ``u1000``. Afterwards, we can map ``u1000`` down using
 either the second idmapping mapping or third idmapping mapping. The second
-idmapping would map ``u1000`` down to ``21000``. The third idmapping would map
-``u1000`` down to ``u31000``.
+idmapping would map ``u1000`` down to ``k21000``. The third idmapping would map
+``u1000`` down to ``k31000``.

 If we were given the same task for the following three idmappings::

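Aside: the up-then-down translation this corrected passage describes is easy to model outside the kernel. The stand-alone C sketch below is illustrative only — the struct and helpers are hypothetical, not the kernel's idmapping code. With the three idmappings from the text it prints k11000 -> u1000 -> k21000 and k31000.

#include <stdio.h>

/* Hypothetical single-extent idmapping: count ids starting at
 * first_user map to count ids starting at first_kernel. */
struct idmap {
    unsigned long first_user;
    unsigned long first_kernel;
    unsigned long count;
};

/* Inverse of an idmapping: kernel id -> userspace id, or -1. */
static long map_up(const struct idmap *m, unsigned long kid)
{
    if (kid < m->first_kernel || kid >= m->first_kernel + m->count)
        return -1;
    return m->first_user + (kid - m->first_kernel);
}

/* Apply an idmapping: userspace id -> kernel id, or -1. */
static long map_down(const struct idmap *m, unsigned long uid)
{
    if (uid < m->first_user || uid >= m->first_user + m->count)
        return -1;
    return m->first_kernel + (uid - m->first_user);
}

int main(void)
{
    const struct idmap first  = { 0, 10000, 10000 };
    const struct idmap second = { 0, 20000, 10000 };
    const struct idmap third  = { 0, 30000, 10000 };
    long u = map_up(&first, 11000);    /* k11000 -> u1000 */

    printf("k11000 -> u%ld -> k%ld and k%ld\n",
           u, map_down(&second, u), map_down(&third, u));
    return 0;
}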
@@ -102,6 +102,9 @@ The system wide settings are configured under the /proc virtual file system:
 * sched_rt_period_us takes values from 1 to INT_MAX.
 * sched_rt_runtime_us takes values from -1 to sched_rt_period_us.
 * A run time of -1 specifies runtime == period, ie. no limit.
+* sched_rt_runtime_us/sched_rt_period_us > 0.05 inorder to preserve
+  bandwidth for fair dl_server. For accurate value check average of
+  runtime/period in /sys/kernel/debug/sched/fair_server/cpuX/


 2.2 Default behaviour
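Aside: a user-space sketch of how a tool might verify the new 0.05 constraint before tightening the runtime. The /proc/sys/kernel paths are the standard ones named above; the check itself is illustrative, not a kernel interface.

#include <stdio.h>

static long read_long(const char *path)
{
    long v = -1;
    FILE *f = fopen(path, "r");

    if (f) {
        if (fscanf(f, "%ld", &v) != 1)
            v = -1;
        fclose(f);
    }
    return v;
}

int main(void)
{
    long period  = read_long("/proc/sys/kernel/sched_rt_period_us");
    long runtime = read_long("/proc/sys/kernel/sched_rt_runtime_us");

    if (period <= 0)
        return 1;
    if (runtime < 0) {    /* -1 means runtime == period, no limit */
        puts("rt throttling disabled");
        return 0;
    }
    /* The documentation above asks for runtime/period > 0.05 so the
     * fair dl_server keeps some bandwidth. */
    printf("rt ratio %.3f (%s)\n", (double)runtime / period,
           (double)runtime / period > 0.05 ? "ok" : "below 0.05");
    return 0;
}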
@@ -8,7 +8,7 @@ Landlock: unprivileged access control
 =====================================

 :Author: Mickaël Salaün
-:Date: October 2024
+:Date: January 2025

 The goal of Landlock is to enable restriction of ambient rights (e.g. global
 filesystem or network access) for a set of processes. Because Landlock
@@ -329,11 +329,11 @@ non-sandboxed process, we can specify this restriction with
 A sandboxed process can connect to a non-sandboxed process when its domain is
 not scoped. If a process's domain is scoped, it can only connect to sockets
 created by processes in the same scope.
-Moreover, If a process is scoped to send signal to a non-scoped process, it can
+Moreover, if a process is scoped to send signal to a non-scoped process, it can
 only send signals to processes in the same scope.

 A connected datagram socket behaves like a stream socket when its domain is
-scoped, meaning if the domain is scoped after the socket is connected , it can
+scoped, meaning if the domain is scoped after the socket is connected, it can
 still :manpage:`send(2)` data just like a stream socket. However, in the same
 scenario, a non-connected datagram socket cannot send data (with
 :manpage:`sendto(2)`) outside its scope.
MAINTAINERS | 37

@@ -2878,7 +2878,7 @@ F: drivers/pinctrl/nxp/

 ARM/NXP S32G/S32R DWMAC ETHERNET DRIVER
 M: Jan Petrous <jan.petrous@oss.nxp.com>
-L: NXP S32 Linux Team <s32@nxp.com>
+R: s32@nxp.com
 S: Maintained
 F: Documentation/devicetree/bindings/net/nxp,s32-dwmac.yaml
 F: drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c
@@ -5775,6 +5775,7 @@ X: drivers/clk/clkdev.c

 COMMON INTERNET FILE SYSTEM CLIENT (CIFS and SMB3)
 M: Steve French <sfrench@samba.org>
+M: Steve French <smfrench@gmail.com>
 R: Paulo Alcantara <pc@manguebit.com> (DFS, global name space)
 R: Ronnie Sahlberg <ronniesahlberg@gmail.com> (directory leases, sparse files)
 R: Shyam Prasad N <sprasad@microsoft.com> (multichannel)
@@ -5856,7 +5857,6 @@ F: Documentation/security/snp-tdx-threat-model.rst

 CONFIGFS
 M: Joel Becker <jlbec@evilplan.org>
-M: Christoph Hellwig <hch@lst.de>
 S: Supported
 T: git git://git.infradead.org/users/hch/configfs.git
 F: fs/configfs/
@@ -5927,6 +5927,17 @@ F: tools/testing/selftests/cgroup/test_cpuset.c
 F: tools/testing/selftests/cgroup/test_cpuset_prs.sh
 F: tools/testing/selftests/cgroup/test_cpuset_v1_base.sh

+CONTROL GROUP - DEVICE MEMORY CONTROLLER (DMEM)
+M: Maarten Lankhorst <dev@lankhorst.se>
+M: Maxime Ripard <mripard@kernel.org>
+M: Natalie Vock <natalie.vock@gmx.de>
+L: cgroups@vger.kernel.org
+L: dri-devel@lists.freedesktop.org
+S: Maintained
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
+F: include/linux/cgroup_dmem.h
+F: kernel/cgroup/dmem.c
+
 CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG)
 M: Johannes Weiner <hannes@cmpxchg.org>
 M: Michal Hocko <mhocko@kernel.org>
@@ -6879,7 +6890,6 @@ F: kernel/dma/map_benchmark.c
 F: tools/testing/selftests/dma/

 DMA MAPPING HELPERS
-M: Christoph Hellwig <hch@lst.de>
 M: Marek Szyprowski <m.szyprowski@samsung.com>
 R: Robin Murphy <robin.murphy@arm.com>
 L: iommu@lists.linux.dev
@@ -9459,14 +9469,11 @@ F: include/linux/fscrypt.h
 F: include/uapi/linux/fscrypt.h

 FSI SUBSYSTEM
-M: Jeremy Kerr <jk@ozlabs.org>
-M: Joel Stanley <joel@jms.id.au>
-R: Alistar Popple <alistair@popple.id.au>
-R: Eddie James <eajames@linux.ibm.com>
+M: Eddie James <eajames@linux.ibm.com>
+R: Ninad Palsule <ninad@linux.ibm.com>
 L: linux-fsi@lists.ozlabs.org
 S: Supported
 Q: http://patchwork.ozlabs.org/project/linux-fsi/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/joel/fsi.git
 F: drivers/fsi/
 F: include/linux/fsi*.h
 F: include/trace/events/fsi*.h
@@ -12672,7 +12679,9 @@ F: tools/testing/selftests/

 KERNEL SMB3 SERVER (KSMBD)
 M: Namjae Jeon <linkinjeon@kernel.org>
+M: Namjae Jeon <linkinjeon@samba.org>
 M: Steve French <sfrench@samba.org>
+M: Steve French <smfrench@gmail.com>
 R: Sergey Senozhatsky <senozhatsky@chromium.org>
 R: Tom Talpey <tom@talpey.com>
 L: linux-cifs@vger.kernel.org
@@ -12889,7 +12898,7 @@ F: include/keys/trusted_dcp.h
 F: security/keys/trusted-keys/trusted_dcp.c

 KEYS-TRUSTED-TEE
-M: Sumit Garg <sumit.garg@linaro.org>
+M: Sumit Garg <sumit.garg@kernel.org>
 L: linux-integrity@vger.kernel.org
 L: keyrings@vger.kernel.org
 S: Supported
@@ -15708,7 +15717,7 @@ F: include/uapi/linux/cciss*.h

 MICROSOFT MANA RDMA DRIVER
 M: Long Li <longli@microsoft.com>
-M: Ajay Sharma <sharmaajay@microsoft.com>
+M: Konstantin Taranov <kotaranov@microsoft.com>
 L: linux-rdma@vger.kernel.org
 S: Supported
 F: drivers/infiniband/hw/mana/
@@ -17688,7 +17697,7 @@ F: Documentation/ABI/testing/sysfs-bus-optee-devices
 F: drivers/tee/optee/

 OP-TEE RANDOM NUMBER GENERATOR (RNG) DRIVER
-M: Sumit Garg <sumit.garg@linaro.org>
+M: Sumit Garg <sumit.garg@kernel.org>
 L: op-tee@lists.trustedfirmware.org
 S: Maintained
 F: drivers/char/hw_random/optee-rng.c
@@ -20354,6 +20363,7 @@ RISC-V ARCHITECTURE
 M: Paul Walmsley <paul.walmsley@sifive.com>
 M: Palmer Dabbelt <palmer@dabbelt.com>
 M: Albert Ou <aou@eecs.berkeley.edu>
+R: Alexandre Ghiti <alex@ghiti.fr>
 L: linux-riscv@lists.infradead.org
 S: Supported
 Q: https://patchwork.kernel.org/project/linux-riscv/list/
@@ -21947,10 +21957,13 @@ F: sound/soc/uniphier/

 SOCKET TIMESTAMPING
 M: Willem de Bruijn <willemdebruijn.kernel@gmail.com>
+R: Jason Xing <kernelxing@tencent.com>
 S: Maintained
 F: Documentation/networking/timestamping.rst
+F: include/linux/net_tstamp.h
 F: include/uapi/linux/net_tstamp.h
+F: tools/testing/selftests/bpf/*/net_timestamping*
 F: tools/testing/selftests/net/*timestamp*
 F: tools/testing/selftests/net/so_txtime.c

 SOEKRIS NET48XX LED SUPPORT
@@ -23297,7 +23310,7 @@ F: include/media/i2c/tw9910.h

 TEE SUBSYSTEM
 M: Jens Wiklander <jens.wiklander@linaro.org>
-R: Sumit Garg <sumit.garg@linaro.org>
+R: Sumit Garg <sumit.garg@kernel.org>
 L: op-tee@lists.trustedfirmware.org
 S: Maintained
 F: Documentation/ABI/testing/sysfs-class-tee
Makefile | 7

@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 14
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
 NAME = Baby Opossum Posse

 # *DOCUMENTATION*
@@ -1123,6 +1123,11 @@ endif
 KBUILD_USERCFLAGS  += $(filter -m32 -m64 --target=%, $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS))
 KBUILD_USERLDFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS))

+# userspace programs are linked via the compiler, use the correct linker
+ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_LD_IS_LLD),yy)
+KBUILD_USERLDFLAGS += --ld-path=$(LD)
+endif
+
 # make the checker run with the right architecture
 CHECKFLAGS += --arch=$(ARCH)

@@ -62,7 +62,7 @@ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
 }

 static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
-                      unsigned long pfn, struct vm_fault *vmf)
+                      unsigned long pfn, bool need_lock)
 {
     spinlock_t *ptl;
     pgd_t *pgd;
@@ -99,12 +99,11 @@ again:
     if (!pte)
         return 0;

     /*
      * If we are using split PTE locks, then we need to take the page
      * lock here. Otherwise we are using shared mm->page_table_lock
      * which is already locked, thus cannot take it.
      */
-    if (ptl != vmf->ptl) {
+    if (need_lock) {
         /*
          * Use nested version here to indicate that we are already
          * holding one similar spinlock.
          */
         spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
         if (unlikely(!pmd_same(pmdval, pmdp_get_lockless(pmd)))) {
             pte_unmap_unlock(pte, ptl);
@@ -114,7 +113,7 @@ again:

     ret = do_adjust_pte(vma, address, pfn, pte);

-    if (ptl != vmf->ptl)
+    if (need_lock)
         spin_unlock(ptl);
     pte_unmap(pte);

@@ -123,9 +122,10 @@ again:

 static void
 make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
-              unsigned long addr, pte_t *ptep, unsigned long pfn,
-              struct vm_fault *vmf)
+              unsigned long addr, pte_t *ptep, unsigned long pfn)
 {
+    const unsigned long pmd_start_addr = ALIGN_DOWN(addr, PMD_SIZE);
+    const unsigned long pmd_end_addr = pmd_start_addr + PMD_SIZE;
     struct mm_struct *mm = vma->vm_mm;
     struct vm_area_struct *mpnt;
     unsigned long offset;
@@ -141,6 +141,14 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
      */
     flush_dcache_mmap_lock(mapping);
     vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
+        /*
+         * If we are using split PTE locks, then we need to take the pte
+         * lock. Otherwise we are using shared mm->page_table_lock which
+         * is already locked, thus cannot take it.
+         */
+        bool need_lock = IS_ENABLED(CONFIG_SPLIT_PTE_PTLOCKS);
+        unsigned long mpnt_addr;
+
         /*
          * If this VMA is not in our MM, we can ignore it.
          * Note that we intentionally mask out the VMA
@@ -151,7 +159,12 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
         if (!(mpnt->vm_flags & VM_MAYSHARE))
             continue;
         offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
-        aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn, vmf);
+        mpnt_addr = mpnt->vm_start + offset;
+
+        /* Avoid deadlocks by not grabbing the same PTE lock again. */
+        if (mpnt_addr >= pmd_start_addr && mpnt_addr < pmd_end_addr)
+            need_lock = false;
+        aliases += adjust_pte(mpnt, mpnt_addr, pfn, need_lock);
     }
     flush_dcache_mmap_unlock(mapping);
     if (aliases)
@@ -194,7 +207,7 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
         __flush_dcache_folio(mapping, folio);
     if (mapping) {
         if (cache_is_vivt())
-            make_coherent(mapping, vma, addr, ptep, pfn, vmf);
+            make_coherent(mapping, vma, addr, ptep, pfn);
         else if (vma->vm_flags & VM_EXEC)
             __flush_icache_all();
     }
@@ -16,6 +16,32 @@
 #include <asm/sysreg.h>
 #include <linux/irqchip/arm-gic-v3.h>

+.macro init_el2_hcr  val
+    mov_q   x0, \val
+
+    /*
+     * Compliant CPUs advertise their VHE-onlyness with
+     * ID_AA64MMFR4_EL1.E2H0 < 0. On such CPUs HCR_EL2.E2H is RES1, but it
+     * can reset into an UNKNOWN state and might not read as 1 until it has
+     * been initialized explicitly.
+     *
+     * Fruity CPUs seem to have HCR_EL2.E2H set to RAO/WI, but
+     * don't advertise it (they predate this relaxation).
+     *
+     * Initalize HCR_EL2.E2H so that later code can rely upon HCR_EL2.E2H
+     * indicating whether the CPU is running in E2H mode.
+     */
+    mrs_s   x1, SYS_ID_AA64MMFR4_EL1
+    sbfx    x1, x1, #ID_AA64MMFR4_EL1_E2H0_SHIFT, #ID_AA64MMFR4_EL1_E2H0_WIDTH
+    cmp     x1, #0
+    b.ge    .LnVHE_\@
+
+    orr     x0, x0, #HCR_E2H
+.LnVHE_\@:
+    msr     hcr_el2, x0
+    isb
+.endm
+
 .macro __init_el2_sctlr
     mov_q   x0, INIT_SCTLR_EL2_MMU_OFF
     msr     sctlr_el2, x0
@@ -244,11 +270,6 @@
 .Lskip_gcs_\@:
 .endm

-.macro __init_el2_nvhe_prepare_eret
-    mov     x0, #INIT_PSTATE_EL1
-    msr     spsr_el2, x0
-.endm
-
 .macro __init_el2_mpam
     /* Memory Partitioning And Monitoring: disable EL2 traps */
     mrs     x1, id_aa64pfr0_el1
@@ -42,8 +42,8 @@ extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
                                       unsigned long addr, pte_t *ptep,
                                       pte_t pte, int dirty);
 #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
-extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
-                                     unsigned long addr, pte_t *ptep);
+extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+                                     pte_t *ptep, unsigned long sz);
 #define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
 extern void huge_ptep_set_wrprotect(struct mm_struct *mm,
                                     unsigned long addr, pte_t *ptep);
@@ -76,12 +76,22 @@ static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
 {
     unsigned long stride = huge_page_size(hstate_vma(vma));

-    if (stride == PMD_SIZE)
-        __flush_tlb_range(vma, start, end, stride, false, 2);
-    else if (stride == PUD_SIZE)
-        __flush_tlb_range(vma, start, end, stride, false, 1);
-    else
-        __flush_tlb_range(vma, start, end, PAGE_SIZE, false, 0);
+    switch (stride) {
+#ifndef __PAGETABLE_PMD_FOLDED
+    case PUD_SIZE:
+        __flush_tlb_range(vma, start, end, PUD_SIZE, false, 1);
+        break;
+#endif
+    case CONT_PMD_SIZE:
+    case PMD_SIZE:
+        __flush_tlb_range(vma, start, end, PMD_SIZE, false, 2);
+        break;
+    case CONT_PTE_SIZE:
+        __flush_tlb_range(vma, start, end, PAGE_SIZE, false, 3);
+        break;
+    default:
+        __flush_tlb_range(vma, start, end, PAGE_SIZE, false, TLBI_TTL_UNKNOWN);
+    }
 }

 #endif /* __ASM_HUGETLB_H */
@@ -119,7 +119,7 @@
 #define TCR_EL2_IRGN0_MASK  TCR_IRGN0_MASK
 #define TCR_EL2_T0SZ_MASK   0x3f
 #define TCR_EL2_MASK    (TCR_EL2_TG0_MASK | TCR_EL2_SH0_MASK | \
-             TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK | TCR_EL2_T0SZ_MASK)
+             TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK)

 /* VTCR_EL2 Registers bits */
 #define VTCR_EL2_DS     TCR_EL2_DS
@@ -1259,7 +1259,7 @@ int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
 extern unsigned int __ro_after_init kvm_arm_vmid_bits;
 int __init kvm_arm_vmid_alloc_init(void);
 void __init kvm_arm_vmid_alloc_free(void);
-bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
+void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
 void kvm_arm_vmid_clear_active(void);

 static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
@@ -298,25 +298,8 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
     msr sctlr_el2, x0
     isb
 0:
-    mov_q   x0, HCR_HOST_NVHE_FLAGS
-
-    /*
-     * Compliant CPUs advertise their VHE-onlyness with
-     * ID_AA64MMFR4_EL1.E2H0 < 0. HCR_EL2.E2H can be
-     * RES1 in that case. Publish the E2H bit early so that
-     * it can be picked up by the init_el2_state macro.
-     *
-     * Fruity CPUs seem to have HCR_EL2.E2H set to RAO/WI, but
-     * don't advertise it (they predate this relaxation).
-     */
-    mrs_s   x1, SYS_ID_AA64MMFR4_EL1
-    tbz     x1, #(ID_AA64MMFR4_EL1_E2H0_SHIFT + ID_AA64MMFR4_EL1_E2H0_WIDTH - 1), 1f
-
-    orr     x0, x0, #HCR_E2H
-1:
-    msr     hcr_el2, x0
-    isb
-
+    init_el2_hcr    HCR_HOST_NVHE_FLAGS
     init_el2_state

     /* Hypervisor stub */
@@ -339,7 +322,8 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
     msr sctlr_el1, x1
     mov x2, xzr
 3:
-    __init_el2_nvhe_prepare_eret
+    mov x0, #INIT_PSTATE_EL1
+    msr spsr_el2, x0

     mov w0, #BOOT_CPU_MODE_EL2
     orr x0, x0, x2
@@ -559,6 +559,16 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
     mmu = vcpu->arch.hw_mmu;
     last_ran = this_cpu_ptr(mmu->last_vcpu_ran);

+    /*
+     * Ensure a VMID is allocated for the MMU before programming VTTBR_EL2,
+     * which happens eagerly in VHE.
+     *
+     * Also, the VMID allocator only preserves VMIDs that are active at the
+     * time of rollover, so KVM might need to grab a new VMID for the MMU if
+     * this is called from kvm_sched_in().
+     */
+    kvm_arm_vmid_update(&mmu->vmid);
+
     /*
      * We guarantee that both TLBs and I-cache are private to each
      * vcpu. If detecting that a vcpu from the same VM has
@@ -1138,18 +1148,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
      */
     preempt_disable();

-    /*
-     * The VMID allocator only tracks active VMIDs per
-     * physical CPU, and therefore the VMID allocated may not be
-     * preserved on VMID roll-over if the task was preempted,
-     * making a thread's VMID inactive. So we need to call
-     * kvm_arm_vmid_update() in non-premptible context.
-     */
-    if (kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid) &&
-        has_vhe())
-        __load_stage2(vcpu->arch.hw_mmu,
-                      vcpu->arch.hw_mmu->arch);
-
     kvm_pmu_flush_hwstate(vcpu);

     local_irq_disable();
@@ -1980,7 +1978,7 @@ static int kvm_init_vector_slots(void)
 static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
 {
     struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
-    unsigned long tcr, ips;
+    unsigned long tcr;

     /*
      * Calculate the raw per-cpu offset without a translation from the
@@ -1994,19 +1992,18 @@ static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
     params->mair_el2 = read_sysreg(mair_el1);

     tcr = read_sysreg(tcr_el1);
-    ips = FIELD_GET(TCR_IPS_MASK, tcr);
     if (cpus_have_final_cap(ARM64_KVM_HVHE)) {
         tcr &= ~(TCR_HD | TCR_HA | TCR_A1 | TCR_T0SZ_MASK);
         tcr |= TCR_EPD1_MASK;
     } else {
+        unsigned long ips = FIELD_GET(TCR_IPS_MASK, tcr);
+
         tcr &= TCR_EL2_MASK;
-        tcr |= TCR_EL2_RES1;
+        tcr |= TCR_EL2_RES1 | FIELD_PREP(TCR_EL2_PS_MASK, ips);
+        if (lpa2_is_enabled())
+            tcr |= TCR_EL2_DS;
     }
     tcr &= ~TCR_T0SZ_MASK;
     tcr |= TCR_T0SZ(hyp_va_bits);
-    tcr &= ~TCR_EL2_PS_MASK;
-    tcr |= FIELD_PREP(TCR_EL2_PS_MASK, ips);
-    if (lpa2_is_enabled())
-        tcr |= TCR_EL2_DS;
     params->tcr_el2 = tcr;

     params->pgd_pa = kvm_mmu_get_httbr();
|
||||
eret
|
||||
SYM_CODE_END(__kvm_hyp_init)
|
||||
|
||||
/*
|
||||
* Initialize EL2 CPU state to sane values.
|
||||
*
|
||||
* HCR_EL2.E2H must have been initialized already.
|
||||
*/
|
||||
SYM_CODE_START_LOCAL(__kvm_init_el2_state)
|
||||
/* Initialize EL2 CPU state to sane values. */
|
||||
init_el2_state // Clobbers x0..x2
|
||||
finalise_el2_state
|
||||
ret
|
||||
@ -206,9 +210,9 @@ SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu)
|
||||
|
||||
2: msr SPsel, #1 // We want to use SP_EL{1,2}
|
||||
|
||||
bl __kvm_init_el2_state
|
||||
init_el2_hcr 0
|
||||
|
||||
__init_el2_nvhe_prepare_eret
|
||||
bl __kvm_init_el2_state
|
||||
|
||||
/* Enable MMU, set vectors and stack. */
|
||||
mov x0, x28
|
||||
|
@@ -218,6 +218,9 @@ asmlinkage void __noreturn __kvm_host_psci_cpu_entry(bool is_cpu_on)
     if (is_cpu_on)
         release_boot_args(boot_args);

+    write_sysreg_el1(INIT_SCTLR_EL1_MMU_OFF, SYS_SCTLR);
+    write_sysreg(INIT_PSTATE_EL1, SPSR_EL2);
+
     __host_enter(host_ctxt);
 }

@@ -135,11 +135,10 @@ void kvm_arm_vmid_clear_active(void)
     atomic64_set(this_cpu_ptr(&active_vmids), VMID_ACTIVE_INVALID);
 }

-bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
+void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
 {
     unsigned long flags;
     u64 vmid, old_active_vmid;
-    bool updated = false;

     vmid = atomic64_read(&kvm_vmid->id);

@@ -157,21 +156,17 @@ void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
     if (old_active_vmid != 0 && vmid_gen_match(vmid) &&
         0 != atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_vmids),
                                       old_active_vmid, vmid))
-        return false;
+        return;

     raw_spin_lock_irqsave(&cpu_vmid_lock, flags);

     /* Check that our VMID belongs to the current generation. */
     vmid = atomic64_read(&kvm_vmid->id);
-    if (!vmid_gen_match(vmid)) {
+    if (!vmid_gen_match(vmid))
         vmid = new_vmid(kvm_vmid);
-        updated = true;
-    }

     atomic64_set(this_cpu_ptr(&active_vmids), vmid);
     raw_spin_unlock_irqrestore(&cpu_vmid_lock, flags);
-
-    return updated;
 }

 /*
@@ -100,20 +100,11 @@ static int find_num_contig(struct mm_struct *mm, unsigned long addr,

 static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
 {
-    int contig_ptes = 0;
+    int contig_ptes = 1;

     *pgsize = size;

     switch (size) {
-#ifndef __PAGETABLE_PMD_FOLDED
-    case PUD_SIZE:
-        if (pud_sect_supported())
-            contig_ptes = 1;
-        break;
-#endif
-    case PMD_SIZE:
-        contig_ptes = 1;
-        break;
     case CONT_PMD_SIZE:
         *pgsize = PMD_SIZE;
         contig_ptes = CONT_PMDS;
@@ -122,6 +113,8 @@ static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
         *pgsize = PAGE_SIZE;
         contig_ptes = CONT_PTES;
         break;
+    default:
+        WARN_ON(!__hugetlb_valid_size(size));
     }

     return contig_ptes;
@@ -163,24 +156,23 @@ static pte_t get_clear_contig(struct mm_struct *mm,
                               unsigned long pgsize,
                               unsigned long ncontig)
 {
-    pte_t orig_pte = __ptep_get(ptep);
-    unsigned long i;
+    pte_t pte, tmp_pte;
+    bool present;

-    for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) {
-        pte_t pte = __ptep_get_and_clear(mm, addr, ptep);
-
-        /*
-         * If HW_AFDBM is enabled, then the HW could turn on
-         * the dirty or accessed bit for any page in the set,
-         * so check them all.
-         */
-        if (pte_dirty(pte))
-            orig_pte = pte_mkdirty(orig_pte);
-
-        if (pte_young(pte))
-            orig_pte = pte_mkyoung(orig_pte);
+    pte = __ptep_get_and_clear(mm, addr, ptep);
+    present = pte_present(pte);
+    while (--ncontig) {
+        ptep++;
+        addr += pgsize;
+        tmp_pte = __ptep_get_and_clear(mm, addr, ptep);
+        if (present) {
+            if (pte_dirty(tmp_pte))
+                pte = pte_mkdirty(pte);
+            if (pte_young(tmp_pte))
+                pte = pte_mkyoung(pte);
+        }
     }
-    return orig_pte;
+    return pte;
 }

 static pte_t get_clear_contig_flush(struct mm_struct *mm,
@@ -396,18 +388,13 @@ void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
     __pte_clear(mm, addr, ptep);
 }

-pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
-                              unsigned long addr, pte_t *ptep)
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+                              pte_t *ptep, unsigned long sz)
 {
     int ncontig;
     size_t pgsize;
-    pte_t orig_pte = __ptep_get(ptep);
-
-    if (!pte_cont(orig_pte))
-        return __ptep_get_and_clear(mm, addr, ptep);
-
-    ncontig = find_num_contig(mm, addr, ptep, &pgsize);

+    ncontig = num_contig_ptes(sz, &pgsize);
     return get_clear_contig(mm, addr, ptep, pgsize, ncontig);
 }

@@ -549,6 +536,8 @@ bool __init arch_hugetlb_valid_size(unsigned long size)

 pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
+    unsigned long psize = huge_page_size(hstate_vma(vma));
+
     if (alternative_has_cap_unlikely(ARM64_WORKAROUND_2645198)) {
         /*
          * Break-before-make (BBM) is required for all user space mappings
@@ -558,7 +547,7 @@ pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr
         if (pte_user_exec(__ptep_get(ptep)))
             return huge_ptep_clear_flush(vma, addr, ptep);
     }
-    return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+    return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, psize);
 }

 void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
@@ -279,12 +279,7 @@ void __init arm64_memblock_init(void)

     if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
         extern u16 memstart_offset_seed;

-        /*
-         * Use the sanitised version of id_aa64mmfr0_el1 so that linear
-         * map randomization can be enabled by shrinking the IPA space.
-         */
-        u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
+        u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
         int parange = cpuid_feature_extract_unsigned_field(
                     mmfr0, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
         s64 range = linear_region_size -
@@ -36,7 +36,8 @@ static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,

 #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
 static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
-                                            unsigned long addr, pte_t *ptep)
+                                            unsigned long addr, pte_t *ptep,
+                                            unsigned long sz)
 {
     pte_t clear;
     pte_t pte = ptep_get(ptep);
@@ -51,8 +52,9 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
                                           unsigned long addr, pte_t *ptep)
 {
     pte_t pte;
+    unsigned long sz = huge_page_size(hstate_vma(vma));

-    pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+    pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, sz);
     flush_tlb_page(vma, addr);
     return pte;
 }
@@ -249,18 +249,6 @@ static __init int setup_node(int pxm)
     return acpi_map_pxm_to_node(pxm);
 }

-/*
- * Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for
- * I/O localities since SRAT does not list them. I/O localities are
- * not supported at this point.
- */
-unsigned int numa_distance_cnt;
-
-static inline unsigned int get_numa_distances_cnt(struct acpi_table_slit *slit)
-{
-    return slit->locality_count;
-}
-
 void __init numa_set_distance(int from, int to, int distance)
 {
     if ((u8)distance != distance || (from == to && distance != LOCAL_DISTANCE)) {
@@ -126,14 +126,14 @@ void kexec_reboot(void)
     /* All secondary cpus go to kexec_smp_wait */
     if (smp_processor_id() > 0) {
         relocated_kexec_smp_wait(NULL);
-        unreachable();
+        BUG();
     }
 #endif

     do_kexec = (void *)reboot_code_buffer;
     do_kexec(efi_boot, cmdline_ptr, systable_ptr, start_addr, first_ind_entry);

-    unreachable();
+    BUG();
 }

@@ -387,6 +387,9 @@ static void __init check_kernel_sections_mem(void)
  */
 static void __init arch_mem_init(char **cmdline_p)
 {
+    /* Recalculate max_low_pfn for "mem=xxx" */
+    max_pfn = max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());
+
     if (usermem)
         pr_info("User-defined physical RAM map overwrite\n");

@@ -19,6 +19,7 @@
 #include <linux/smp.h>
 #include <linux/threads.h>
 #include <linux/export.h>
+#include <linux/suspend.h>
 #include <linux/syscore_ops.h>
 #include <linux/time.h>
 #include <linux/tracepoint.h>
@@ -423,7 +424,7 @@ void loongson_cpu_die(unsigned int cpu)
     mb();
 }

-void __noreturn arch_cpu_idle_dead(void)
+static void __noreturn idle_play_dead(void)
 {
     register uint64_t addr;
     register void (*init_fn)(void);
@@ -447,6 +448,50 @@ void __noreturn arch_cpu_idle_dead(void)
     BUG();
 }

+#ifdef CONFIG_HIBERNATION
+static void __noreturn poll_play_dead(void)
+{
+    register uint64_t addr;
+    register void (*init_fn)(void);
+
+    idle_task_exit();
+    __this_cpu_write(cpu_state, CPU_DEAD);
+
+    __smp_mb();
+    do {
+        __asm__ __volatile__("nop\n\t");
+        addr = iocsr_read64(LOONGARCH_IOCSR_MBUF0);
+    } while (addr == 0);
+
+    init_fn = (void *)TO_CACHE(addr);
+    iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_CLEAR);
+
+    init_fn();
+    BUG();
+}
+#endif
+
+static void (*play_dead)(void) = idle_play_dead;
+
+void __noreturn arch_cpu_idle_dead(void)
+{
+    play_dead();
+    BUG(); /* play_dead() doesn't return */
+}
+
+#ifdef CONFIG_HIBERNATION
+int hibernate_resume_nonboot_cpu_disable(void)
+{
+    int ret;
+
+    play_dead = poll_play_dead;
+    ret = suspend_disable_secondary_cpus();
+    play_dead = idle_play_dead;
+
+    return ret;
+}
+#endif
+
 #endif

 /*
@@ -669,6 +669,12 @@ static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write)
     struct kvm_run *run = vcpu->run;
     unsigned long badv = vcpu->arch.badv;

+    /* Inject ADE exception if exceed max GPA size */
+    if (unlikely(badv >= vcpu->kvm->arch.gpa_size)) {
+        kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEM);
+        return RESUME_GUEST;
+    }
+
     ret = kvm_handle_mm_fault(vcpu, badv, write);
     if (ret) {
         /* Treat as MMIO */
@@ -317,6 +317,13 @@ int kvm_arch_enable_virtualization_cpu(void)
     kvm_debug("GCFG:%lx GSTAT:%lx GINTC:%lx GTLBC:%lx",
               read_csr_gcfg(), read_csr_gstat(), read_csr_gintc(), read_csr_gtlbc());

+    /*
+     * HW Guest CSR registers are lost after CPU suspend and resume.
+     * Clear last_vcpu so that Guest CSR registers forced to reload
+     * from vCPU SW state.
+     */
+    this_cpu_ptr(vmcs)->last_vcpu = NULL;
+
     return 0;
 }

@@ -311,7 +311,7 @@ static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 {
     int ret = RESUME_GUEST;
     unsigned long estat = vcpu->arch.host_estat;
-    u32 intr = estat & 0x1fff; /* Ignore NMI */
+    u32 intr = estat & CSR_ESTAT_IS;
     u32 ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;

     vcpu->mode = OUTSIDE_GUEST_MODE;
@@ -48,7 +48,11 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
     if (kvm_pvtime_supported())
         kvm->arch.pv_features |= BIT(KVM_FEATURE_STEAL_TIME);

-    kvm->arch.gpa_size = BIT(cpu_vabits - 1);
+    /*
+     * cpu_vabits means user address space only (a half of total).
+     * GPA size of VM is the same with the size of user address space.
+     */
+    kvm->arch.gpa_size = BIT(cpu_vabits);
     kvm->arch.root_level = CONFIG_PGTABLE_LEVELS - 1;
     kvm->arch.invalid_ptes[0] = 0;
     kvm->arch.invalid_ptes[1] = (unsigned long)invalid_pte_table;
@@ -3,6 +3,7 @@
  * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
  */
 #include <linux/export.h>
+#include <linux/hugetlb.h>
 #include <linux/io.h>
 #include <linux/kfence.h>
 #include <linux/memblock.h>
@@ -63,8 +64,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
     }

     info.length = len;
-    info.align_mask = do_color_align ? (PAGE_MASK & SHM_ALIGN_MASK) : 0;
     info.align_offset = pgoff << PAGE_SHIFT;
+    if (filp && is_file_hugepages(filp))
+        info.align_mask = huge_page_mask_align(filp);
+    else
+        info.align_mask = do_color_align ? (PAGE_MASK & SHM_ALIGN_MASK) : 0;

     if (dir == DOWN) {
         info.flags = VM_UNMAPPED_AREA_TOPDOWN;
@@ -44,8 +44,10 @@ static inline pgd_t * pgd_alloc(struct mm_struct *mm)
     pgd_t *new_pgd;

     new_pgd = __pgd_alloc(mm, 0);
-    memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE);
-    memset(new_pgd, 0, (PAGE_OFFSET >> PGDIR_SHIFT));
+    if (likely(new_pgd != NULL)) {
+        memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE);
+        memset(new_pgd, 0, (PAGE_OFFSET >> PGDIR_SHIFT));
+    }
     return new_pgd;
 }

@@ -468,6 +468,8 @@ static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
             Elf_Sym *sym, const char *symname))
 {
     int i;
+    struct section *extab_sec = sec_lookup("__ex_table");
+    int extab_index = extab_sec ? extab_sec - secs : -1;

     /* Walk through the relocations */
     for (i = 0; i < ehdr.e_shnum; i++) {
@@ -480,6 +482,9 @@ static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
         if (sec->shdr.sh_type != SHT_REL_TYPE)
             continue;

+        if (sec->shdr.sh_info == extab_index)
+            continue;
+
         sec_symtab  = sec->link;
         sec_applies = &secs[sec->shdr.sh_info];
         if (!(sec_applies->shdr.sh_flags & SHF_ALLOC))
@@ -27,7 +27,8 @@ static inline int prepare_hugepage_range(struct file *file,

 #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
 static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
-                                            unsigned long addr, pte_t *ptep)
+                                            unsigned long addr, pte_t *ptep,
+                                            unsigned long sz)
 {
     pte_t clear;
     pte_t pte = *ptep;
@@ -42,13 +43,14 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
                                           unsigned long addr, pte_t *ptep)
 {
     pte_t pte;
+    unsigned long sz = huge_page_size(hstate_vma(vma));

     /*
      * clear the huge pte entry firstly, so that the other smp threads will
      * not get old pte entry after finishing flush_tlb_page and before
      * setting new huge pte entry
      */
-    pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+    pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, sz);
     flush_tlb_page(vma, addr);
     return pte;
 }
@@ -10,7 +10,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,

 #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
 pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
-                              pte_t *ptep);
+                              pte_t *ptep, unsigned long sz);

 #define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
 static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
@@ -126,7 +126,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,


 pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
-                              pte_t *ptep)
+                              pte_t *ptep, unsigned long sz)
 {
     pte_t entry;

@@ -45,7 +45,8 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,

 #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
 static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
-                                            unsigned long addr, pte_t *ptep)
+                                            unsigned long addr, pte_t *ptep,
+                                            unsigned long sz)
 {
     return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
 }
@@ -55,8 +56,9 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
                                           unsigned long addr, pte_t *ptep)
 {
     pte_t pte;
+    unsigned long sz = huge_page_size(hstate_vma(vma));

-    pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+    pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, sz);
     flush_hugetlb_page(vma, addr);
     return pte;
 }
@@ -231,7 +231,7 @@
         __arch_cmpxchg(".w", ".w" sc_sfx, ".w" cas_sfx,     \
                 sc_prepend, sc_append,              \
                 cas_prepend, cas_append,            \
-                __ret, __ptr, (long), __old, __new);        \
+                __ret, __ptr, (long)(int)(long), __old, __new); \
         break;                          \
     case 8:                             \
         __arch_cmpxchg(".d", ".d" sc_sfx, ".d" cas_sfx,     \
@@ -93,7 +93,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
         _ASM_EXTABLE_UACCESS_ERR(1b, 3b, %[r])  \
         _ASM_EXTABLE_UACCESS_ERR(2b, 3b, %[r])  \
         : [r] "+r" (ret), [v] "=&r" (val), [u] "+m" (*uaddr), [t] "=&r" (tmp)
-        : [ov] "Jr" (oldval), [nv] "Jr" (newval)
+        : [ov] "Jr" ((long)(int)oldval), [nv] "Jr" (newval)
         : "memory");
     __disable_user_access();

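Aside: both RISC-V hunks fix the same pitfall — RV64 keeps 32-bit values sign-extended in 64-bit registers, so a zero-extended u32 comparison value can never match what the CPU loads. A small, portable C illustration of the cast the patches add (illustrative, not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t old = 0xdeadbeef;
    uint64_t zext = old;            /* 0x00000000deadbeef */
    /* Casting through a signed 32-bit type replicates bit 31,
     * matching RV64's in-register representation of 32-bit values. */
    int64_t sext = (int64_t)(int32_t)old;    /* 0xffffffffdeadbeef */

    printf("zext=%016llx sext=%016llx\n",
           (unsigned long long)zext, (unsigned long long)sext);
    return 0;
}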
@@ -28,7 +28,8 @@ void set_huge_pte_at(struct mm_struct *mm,

 #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
 pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
-                              unsigned long addr, pte_t *ptep);
+                              unsigned long addr, pte_t *ptep,
+                              unsigned long sz);

 #define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
 pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
@@ -108,11 +108,11 @@ int populate_cache_leaves(unsigned int cpu)
     if (!np)
         return -ENOENT;

-    if (of_property_read_bool(np, "cache-size"))
+    if (of_property_present(np, "cache-size"))
         ci_leaf_init(this_leaf++, CACHE_TYPE_UNIFIED, level);
-    if (of_property_read_bool(np, "i-cache-size"))
+    if (of_property_present(np, "i-cache-size"))
         ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level);
-    if (of_property_read_bool(np, "d-cache-size"))
+    if (of_property_present(np, "d-cache-size"))
         ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level);

     prev = np;
@@ -125,11 +125,11 @@ int populate_cache_leaves(unsigned int cpu)
             break;
         if (level <= levels)
             break;
-        if (of_property_read_bool(np, "cache-size"))
+        if (of_property_present(np, "cache-size"))
             ci_leaf_init(this_leaf++, CACHE_TYPE_UNIFIED, level);
-        if (of_property_read_bool(np, "i-cache-size"))
+        if (of_property_present(np, "i-cache-size"))
             ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level);
-        if (of_property_read_bool(np, "d-cache-size"))
+        if (of_property_present(np, "d-cache-size"))
             ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level);
         levels = level;
     }
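Aside: the helper swap matters because of_property_read_bool() is intended for empty, flag-style properties and in recent kernels complains when handed a property that carries a value, such as cache-size; of_property_present() only tests for existence. A rough in-kernel sketch of the intended split (illustrative, not from this patch; both helpers come from <linux/of.h>):

#include <linux/of.h>
#include <linux/printk.h>

static void describe_cache_node(struct device_node *np)
{
    /* Value-carrying property: only ask whether it exists. */
    if (of_property_present(np, "cache-size"))
        pr_info("%pOF: cache-size present\n", np);

    /* Genuine boolean (empty) property: read_bool is appropriate. */
    if (of_property_read_bool(np, "cache-unified"))
        pr_info("%pOF: unified cache\n", np);
}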
@@ -479,7 +479,7 @@ static void __init riscv_resolve_isa(unsigned long *source_isa,
             if (bit < RISCV_ISA_EXT_BASE)
                 *this_hwcap |= isa2hwcap[bit];
         }
-    } while (loop && memcmp(prev_resolved_isa, resolved_isa, sizeof(prev_resolved_isa)));
+    } while (loop && !bitmap_equal(prev_resolved_isa, resolved_isa, RISCV_ISA_EXT_MAX));
 }

 static void __init match_isa_ext(const char *name, const char *name_end, unsigned long *bitmap)
@@ -322,8 +322,8 @@ void __init setup_arch(char **cmdline_p)

     riscv_init_cbo_blocksizes();
     riscv_fill_hwcap();
-    init_rt_signal_env();
     apply_boot_alternatives();
+    init_rt_signal_env();

     if (IS_ENABLED(CONFIG_RISCV_ISA_ZICBOM) &&
         riscv_isa_extension_available(NULL, ZICBOM))
@@ -215,12 +215,6 @@ static size_t get_rt_frame_size(bool cal_all)
         if (cal_all || riscv_v_vstate_query(task_pt_regs(current)))
             total_context_size += riscv_v_sc_size;
     }
-    /*
-     * Preserved a __riscv_ctx_hdr for END signal context header if an
-     * extension uses __riscv_extra_ext_header
-     */
-    if (total_context_size)
-        total_context_size += sizeof(struct __riscv_ctx_hdr);

     frame_size += total_context_size;

@@ -974,7 +974,6 @@ int kvm_riscv_vcpu_aia_imsic_inject(struct kvm_vcpu *vcpu,

     if (imsic->vsfile_cpu >= 0) {
         writel(iid, imsic->vsfile_va + IMSIC_MMIO_SETIPNUM_LE);
-        kvm_vcpu_kick(vcpu);
     } else {
         eix = &imsic->swfile->eix[iid / BITS_PER_TYPE(u64)];
         set_bit(iid & (BITS_PER_TYPE(u64) - 1), eix->eip);
@@ -9,6 +9,7 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/kvm_host.h>
+#include <linux/wordpart.h>
 #include <asm/sbi.h>
 #include <asm/kvm_vcpu_sbi.h>

@@ -79,12 +80,12 @@ static int kvm_sbi_hsm_vcpu_get_status(struct kvm_vcpu *vcpu)
     target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, target_vcpuid);
     if (!target_vcpu)
         return SBI_ERR_INVALID_PARAM;
-    if (!kvm_riscv_vcpu_stopped(target_vcpu))
-        return SBI_HSM_STATE_STARTED;
-    else if (vcpu->stat.generic.blocking)
+    if (kvm_riscv_vcpu_stopped(target_vcpu))
+        return SBI_HSM_STATE_STOPPED;
+    else if (target_vcpu->stat.generic.blocking)
         return SBI_HSM_STATE_SUSPENDED;
     else
-        return SBI_HSM_STATE_STOPPED;
+        return SBI_HSM_STATE_STARTED;
 }

 static int kvm_sbi_ext_hsm_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
@@ -109,7 +110,7 @@ static int kvm_sbi_ext_hsm_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
         }
         return 0;
     case SBI_EXT_HSM_HART_SUSPEND:
-        switch (cp->a0) {
+        switch (lower_32_bits(cp->a0)) {
         case SBI_HSM_SUSPEND_RET_DEFAULT:
             kvm_riscv_vcpu_wfi(vcpu);
             break;
@@ -21,7 +21,7 @@ static int kvm_sbi_ext_time_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
     u64 next_cycle;

     if (cp->a6 != SBI_EXT_TIME_SET_TIMER) {
-        retdata->err_val = SBI_ERR_INVALID_PARAM;
+        retdata->err_val = SBI_ERR_NOT_SUPPORTED;
         return 0;
     }

@@ -51,9 +51,10 @@ static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
     struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
     unsigned long hmask = cp->a0;
     unsigned long hbase = cp->a1;
+    unsigned long hart_bit = 0, sentmask = 0;

     if (cp->a6 != SBI_EXT_IPI_SEND_IPI) {
-        retdata->err_val = SBI_ERR_INVALID_PARAM;
+        retdata->err_val = SBI_ERR_NOT_SUPPORTED;
         return 0;
     }

@@ -62,15 +63,23 @@ static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
         if (hbase != -1UL) {
             if (tmp->vcpu_id < hbase)
                 continue;
-            if (!(hmask & (1UL << (tmp->vcpu_id - hbase))))
+            hart_bit = tmp->vcpu_id - hbase;
+            if (hart_bit >= __riscv_xlen)
+                goto done;
+            if (!(hmask & (1UL << hart_bit)))
                 continue;
         }
         ret = kvm_riscv_vcpu_set_interrupt(tmp, IRQ_VS_SOFT);
         if (ret < 0)
             break;
+        sentmask |= 1UL << hart_bit;
         kvm_riscv_vcpu_pmu_incr_fw(tmp, SBI_PMU_FW_IPI_RCVD);
     }

+done:
+    if (hbase != -1UL && (hmask ^ sentmask))
+        retdata->err_val = SBI_ERR_INVALID_PARAM;
+
     return ret;
 }

@@ -4,6 +4,7 @@
  */

 #include <linux/kvm_host.h>
+#include <linux/wordpart.h>

 #include <asm/kvm_vcpu_sbi.h>
 #include <asm/sbi.h>
@@ -19,7 +20,7 @@ static int kvm_sbi_ext_susp_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,

     switch (funcid) {
     case SBI_EXT_SUSP_SYSTEM_SUSPEND:
-        if (cp->a0 != SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM) {
+        if (lower_32_bits(cp->a0) != SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM) {
             retdata->err_val = SBI_ERR_INVALID_PARAM;
             return 0;
         }
@@ -293,7 +293,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,

 pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
                               unsigned long addr,
-                              pte_t *ptep)
+                              pte_t *ptep, unsigned long sz)
 {
     pte_t orig_pte = ptep_get(ptep);
     int pte_num;
@@ -25,8 +25,16 @@ void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 #define __HAVE_ARCH_HUGE_PTEP_GET
 pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep);

+pte_t __huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+                                pte_t *ptep);
+
 #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
-pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+                                            unsigned long addr, pte_t *ptep,
+                                            unsigned long sz)
+{
+    return __huge_ptep_get_and_clear(mm, addr, ptep);
+}

 static inline void arch_clear_hugetlb_flags(struct folio *folio)
 {
@@ -48,7 +56,7 @@ static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
 static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
                                           unsigned long address, pte_t *ptep)
 {
-    return huge_ptep_get_and_clear(vma->vm_mm, address, ptep);
+    return __huge_ptep_get_and_clear(vma->vm_mm, address, ptep);
 }

 #define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
@@ -59,7 +67,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
     int changed = !pte_same(huge_ptep_get(vma->vm_mm, addr, ptep), pte);

     if (changed) {
-        huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+        __huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
         __set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
     }
     return changed;
@@ -69,7 +77,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
                                            unsigned long addr, pte_t *ptep)
 {
-    pte_t pte = huge_ptep_get_and_clear(mm, addr, ptep);
+    pte_t pte = __huge_ptep_get_and_clear(mm, addr, ptep);

     __set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte));
 }
@@ -266,12 +266,13 @@ void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
                struct ftrace_ops *op, struct ftrace_regs *fregs)
 {
     unsigned long *parent = &arch_ftrace_regs(fregs)->regs.gprs[14];
+    unsigned long sp = arch_ftrace_regs(fregs)->regs.gprs[15];

     if (unlikely(ftrace_graph_is_dead()))
         return;
     if (unlikely(atomic_read(&current->tracing_graph_pause)))
         return;
-    if (!function_graph_enter_regs(*parent, ip, 0, parent, fregs))
+    if (!function_graph_enter_regs(*parent, ip, 0, (unsigned long *)sp, fregs))
         *parent = (unsigned long)&return_to_handler;
 }

@@ -285,10 +285,10 @@ static void __init test_monitor_call(void)
         return;
     asm volatile(
         "   mc  0,0\n"
-        "0: xgr %0,%0\n"
+        "0: lhi %[val],0\n"
         "1:\n"
-        EX_TABLE(0b,1b)
-        : "+d" (val));
+        EX_TABLE(0b, 1b)
+        : [val] "+d" (val));
     if (!val)
         panic("Monitor call doesn't work!\n");
 }
@@ -188,8 +188,8 @@ pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
     return __rste_to_pte(pte_val(*ptep));
 }

-pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
-                              unsigned long addr, pte_t *ptep)
+pte_t __huge_ptep_get_and_clear(struct mm_struct *mm,
+                                unsigned long addr, pte_t *ptep)
 {
     pte_t pte = huge_ptep_get(mm, addr, ptep);
     pmd_t *pmdp = (pmd_t *) ptep;
@@ -20,7 +20,7 @@ void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,

 #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
 pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
-                              pte_t *ptep);
+                              pte_t *ptep, unsigned long sz);

 #define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
 static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
@@ -260,7 +260,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 }

 pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
-                              pte_t *ptep)
+                              pte_t *ptep, unsigned long sz)
 {
     unsigned int i, nptes, orig_shift, shift;
     unsigned long size;
@@ -1341,6 +1341,7 @@ config X86_REBOOTFIXUPS
 config MICROCODE
     def_bool y
     depends on CPU_SUP_AMD || CPU_SUP_INTEL
+    select CRYPTO_LIB_SHA256 if CPU_SUP_AMD

 config MICROCODE_INITRD32
     def_bool y
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 #include "misc.h"
 #include <asm/bootparam.h>
+#include <asm/bootparam_utils.h>
 #include <asm/e820/types.h>
 #include <asm/processor.h>
 #include "pgtable.h"
@@ -107,6 +108,7 @@ asmlinkage void configure_5level_paging(struct boot_params *bp, void *pgtable)
     bool l5_required = false;

     /* Initialize boot_params. Required for cmdline_find_option_bool(). */
+    sanitize_boot_params(bp);
     boot_params_ptr = bp;

     /*
@@ -2853,19 +2853,8 @@ struct snp_msg_desc *snp_msg_alloc(void)
 	if (!mdesc->response)
 		goto e_free_request;
 
-	mdesc->certs_data = alloc_shared_pages(SEV_FW_BLOB_MAX_SIZE);
-	if (!mdesc->certs_data)
-		goto e_free_response;
-
-	/* initial the input address for guest request */
-	mdesc->input.req_gpa = __pa(mdesc->request);
-	mdesc->input.resp_gpa = __pa(mdesc->response);
-	mdesc->input.data_gpa = __pa(mdesc->certs_data);
-
 	return mdesc;
 
-e_free_response:
-	free_shared_pages(mdesc->response, sizeof(struct snp_guest_msg));
 e_free_request:
 	free_shared_pages(mdesc->request, sizeof(struct snp_guest_msg));
 e_unmap:
@@ -2885,7 +2874,6 @@ void snp_msg_free(struct snp_msg_desc *mdesc)
 	kfree(mdesc->ctx);
 	free_shared_pages(mdesc->response, sizeof(struct snp_guest_msg));
 	free_shared_pages(mdesc->request, sizeof(struct snp_guest_msg));
-	free_shared_pages(mdesc->certs_data, SEV_FW_BLOB_MAX_SIZE);
 	iounmap((__force void __iomem *)mdesc->secrets);
 
 	memset(mdesc, 0, sizeof(*mdesc));
@@ -3054,7 +3042,7 @@ retry_request:
 	 * sequence number must be incremented or the VMPCK must be deleted to
 	 * prevent reuse of the IV.
 	 */
-	rc = snp_issue_guest_request(req, &mdesc->input, rio);
+	rc = snp_issue_guest_request(req, &req->input, rio);
 	switch (rc) {
 	case -ENOSPC:
 		/*
@@ -3064,7 +3052,7 @@ retry_request:
 		 * order to increment the sequence number and thus avoid
 		 * IV reuse.
 		 */
-		override_npages = mdesc->input.data_npages;
+		override_npages = req->input.data_npages;
 		req->exit_code	= SVM_VMGEXIT_GUEST_REQUEST;
 
 		/*
@@ -3120,7 +3108,7 @@ retry_request:
 	}
 
 	if (override_npages)
-		mdesc->input.data_npages = override_npages;
+		req->input.data_npages = override_npages;
 
 	return rc;
 }
@@ -3158,6 +3146,11 @@ int snp_send_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req
 	 */
 	memcpy(mdesc->request, &mdesc->secret_request, sizeof(mdesc->secret_request));
 
+	/* Initialize the input address for guest request */
+	req->input.req_gpa = __pa(mdesc->request);
+	req->input.resp_gpa = __pa(mdesc->response);
+	req->input.data_gpa = req->certs_data ? __pa(req->certs_data) : 0;
+
 	rc = __handle_guest_request(mdesc, req, rio);
 	if (rc) {
 		if (rc == -EIO &&
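With input and certs_data now per-request rather than on the shared descriptor, a caller fills them on the snp_guest_req itself; a minimal sketch of the new flow (field names follow the hunks above, while the message values and snp_alloc_certs() helper are hypothetical placeholders, not the real sev-guest driver code):

    /* Sketch only: req.input.{req,resp,data}_gpa are now filled per request
     * by snp_send_guest_request() itself instead of once at snp_msg_alloc()
     * time, so concurrent requests no longer share mdesc->input state.
     */
    static int snp_get_report_sketch(struct snp_msg_desc *mdesc,
                                     struct snp_guest_request_ioctl *rio)
    {
            struct snp_guest_req req = {};

            req.msg_version = 1;                    /* hypothetical value */
            req.certs_data  = snp_alloc_certs();    /* hypothetical; may be NULL */

            return snp_send_guest_request(mdesc, &req, rio);
    }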
@@ -190,6 +190,7 @@ static __always_inline bool int80_is_external(void)
 
 /**
  * do_int80_emulation - 32-bit legacy syscall C entry from asm
  * @regs: syscall arguments in struct pt_args on the stack.
  *
  * This entry point can be used by 32-bit and 64-bit programs to perform
  * 32-bit system calls.  Instances of INT $0x80 can be found inline in
@@ -628,7 +628,7 @@ int x86_pmu_hw_config(struct perf_event *event)
 	if (event->attr.type == event->pmu->type)
 		event->hw.config |= x86_pmu_get_event_config(event);
 
-	if (event->attr.sample_period && x86_pmu.limit_period) {
+	if (!event->attr.freq && x86_pmu.limit_period) {
 		s64 left = event->attr.sample_period;
 		x86_pmu.limit_period(event, &left);
 		if (left > event->attr.sample_period)
@@ -3952,6 +3952,85 @@ static inline bool intel_pmu_has_cap(struct perf_event *event, int idx)
 	return test_bit(idx, (unsigned long *)&intel_cap->capabilities);
 }
 
+static u64 intel_pmu_freq_start_period(struct perf_event *event)
+{
+	int type = event->attr.type;
+	u64 config, factor;
+	s64 start;
+
+	/*
+	 * The 127 is the lowest possible recommended SAV (sample after value)
+	 * for a 4000 freq (default freq), according to the event list JSON file.
+	 * Also, assume the workload is idle 50% time.
+	 */
+	factor = 64 * 4000;
+	if (type != PERF_TYPE_HARDWARE && type != PERF_TYPE_HW_CACHE)
+		goto end;
+
+	/*
+	 * The estimation of the start period in the freq mode is
+	 * based on the below assumption.
+	 *
+	 * For a cycles or an instructions event, 1GHZ of the
+	 * underlying platform, 1 IPC. The workload is idle 50% time.
+	 * The start period = 1,000,000,000 * 1 / freq / 2.
+	 *		    = 500,000,000 / freq
+	 *
+	 * Usually, the branch-related events occur less than the
+	 * instructions event. According to the Intel event list JSON
+	 * file, the SAV (sample after value) of a branch-related event
+	 * is usually 1/4 of an instruction event.
+	 * The start period of branch-related events = 125,000,000 / freq.
+	 *
+	 * The cache-related events occurs even less. The SAV is usually
+	 * 1/20 of an instruction event.
+	 * The start period of cache-related events = 25,000,000 / freq.
+	 */
+	config = event->attr.config & PERF_HW_EVENT_MASK;
+	if (type == PERF_TYPE_HARDWARE) {
+		switch (config) {
+		case PERF_COUNT_HW_CPU_CYCLES:
+		case PERF_COUNT_HW_INSTRUCTIONS:
+		case PERF_COUNT_HW_BUS_CYCLES:
+		case PERF_COUNT_HW_STALLED_CYCLES_FRONTEND:
+		case PERF_COUNT_HW_STALLED_CYCLES_BACKEND:
+		case PERF_COUNT_HW_REF_CPU_CYCLES:
+			factor = 500000000;
+			break;
+		case PERF_COUNT_HW_BRANCH_INSTRUCTIONS:
+		case PERF_COUNT_HW_BRANCH_MISSES:
+			factor = 125000000;
+			break;
+		case PERF_COUNT_HW_CACHE_REFERENCES:
+		case PERF_COUNT_HW_CACHE_MISSES:
+			factor = 25000000;
+			break;
+		default:
+			goto end;
+		}
+	}
+
+	if (type == PERF_TYPE_HW_CACHE)
+		factor = 25000000;
+end:
+	/*
+	 * Usually, a prime or a number with less factors (close to prime)
+	 * is chosen as an SAV, which makes it less likely that the sampling
+	 * period synchronizes with some periodic event in the workload.
+	 * Minus 1 to make it at least avoiding values near power of twos
+	 * for the default freq.
+	 */
+	start = DIV_ROUND_UP_ULL(factor, event->attr.sample_freq) - 1;
+
+	if (start > x86_pmu.max_period)
+		start = x86_pmu.max_period;
+
+	if (x86_pmu.limit_period)
+		x86_pmu.limit_period(event, &start);
+
+	return start;
+}
+
 static int intel_pmu_hw_config(struct perf_event *event)
 {
 	int ret = x86_pmu_hw_config(event);
@@ -3963,6 +4042,12 @@ static int intel_pmu_hw_config(struct perf_event *event)
 	if (ret)
 		return ret;
 
+	if (event->attr.freq && event->attr.sample_freq) {
+		event->hw.sample_period = intel_pmu_freq_start_period(event);
+		event->hw.last_period = event->hw.sample_period;
+		local64_set(&event->hw.period_left, event->hw.sample_period);
+	}
+
 	if (event->attr.precise_ip) {
 		if ((event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_FIXED_VLBR_EVENT)
 			return -EINVAL;
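For a concrete feel for the heuristic above, a minimal standalone sketch of the same arithmetic (the max_period clamp and the x86_pmu.limit_period() callback are kernel-side and omitted here):

    #include <stdio.h>
    #include <stdint.h>

    /* DIV_ROUND_UP_ULL(factor, freq) - 1, as in the hunk above */
    static uint64_t freq_start_period(uint64_t factor, uint64_t freq)
    {
            return (factor + freq - 1) / freq - 1;
    }

    int main(void)
    {
            /* cycles/instructions: 500,000,000; branches: 125,000,000;
             * cache events: 25,000,000; everything else: 64 * 4000.
             */
            printf("cycles  @4000Hz -> %llu\n", (unsigned long long)freq_start_period(500000000ULL, 4000)); /* 124999 */
            printf("branch  @4000Hz -> %llu\n", (unsigned long long)freq_start_period(125000000ULL, 4000)); /*  31249 */
            printf("cache   @4000Hz -> %llu\n", (unsigned long long)freq_start_period(25000000ULL, 4000));  /*   6249 */
            printf("default @4000Hz -> %llu\n", (unsigned long long)freq_start_period(64ULL * 4000, 4000)); /*     63 */
            return 0;
    }

Note how the trailing "- 1" turns round figures such as 125000 and 64 into 124999 and 63, away from powers of two, exactly as the comment intends.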
@@ -879,6 +879,7 @@ static const struct x86_cpu_id rapl_model_match[] __initconst = {
 	X86_MATCH_VFM(INTEL_METEORLAKE_L,	&model_skl),
 	X86_MATCH_VFM(INTEL_ARROWLAKE_H,	&model_skl),
 	X86_MATCH_VFM(INTEL_ARROWLAKE,		&model_skl),
+	X86_MATCH_VFM(INTEL_ARROWLAKE_U,	&model_skl),
 	X86_MATCH_VFM(INTEL_LUNARLAKE_M,	&model_skl),
 	{},
 };
@@ -780,6 +780,7 @@ struct kvm_vcpu_arch {
 	u32 pkru;
 	u32 hflags;
 	u64 efer;
+	u64 host_debugctl;
 	u64 apic_base;
 	struct kvm_lapic *apic;    /* kernel irqchip context */
 	bool load_eoi_exitmap_pending;
@@ -198,9 +198,8 @@
 .endm
 
 /*
- * Equivalent to -mindirect-branch-cs-prefix; emit the 5 byte jmp/call
- * to the retpoline thunk with a CS prefix when the register requires
- * a RAX prefix byte to encode. Also see apply_retpolines().
+ * Emits a conditional CS prefix that is compatible with
+ * -mindirect-branch-cs-prefix.
  */
 .macro __CS_PREFIX reg:req
 	.irp rs,r8,r9,r10,r11,r12,r13,r14,r15
@@ -420,20 +419,27 @@ static inline void call_depth_return_thunk(void) {}
 
 #ifdef CONFIG_X86_64
 
+/*
+ * Emits a conditional CS prefix that is compatible with
+ * -mindirect-branch-cs-prefix.
+ */
+#define __CS_PREFIX(reg)				\
+	".irp rs,r8,r9,r10,r11,r12,r13,r14,r15\n"	\
+	".ifc \\rs," reg "\n"				\
+	".byte 0x2e\n"					\
+	".endif\n"					\
+	".endr\n"
+
 /*
  * Inline asm uses the %V modifier which is only in newer GCC
  * which is ensured when CONFIG_MITIGATION_RETPOLINE is defined.
  */
-# define CALL_NOSPEC						\
-	ALTERNATIVE_2(						\
-	ANNOTATE_RETPOLINE_SAFE					\
-	"call *%[thunk_target]\n",				\
-	"call __x86_indirect_thunk_%V[thunk_target]\n",		\
-	X86_FEATURE_RETPOLINE,					\
-	"lfence;\n"						\
-	ANNOTATE_RETPOLINE_SAFE					\
-	"call *%[thunk_target]\n",				\
-	X86_FEATURE_RETPOLINE_LFENCE)
+#ifdef CONFIG_MITIGATION_RETPOLINE
+#define CALL_NOSPEC	__CS_PREFIX("%V[thunk_target]")	\
+			"call __x86_indirect_thunk_%V[thunk_target]\n"
+#else
+#define CALL_NOSPEC	"call *%[thunk_target]\n"
+#endif
 
 # define THUNK_TARGET(addr) [thunk_target] "r" (addr)
 
@@ -23,17 +23,17 @@ typedef union {
 #define ARCH_PAGE_TABLE_SYNC_MASK	PGTBL_PMD_MODIFIED
 
 /*
- * traditional i386 two-level paging structure:
+ * Traditional i386 two-level paging structure:
  */
 
 #define PGDIR_SHIFT	22
 #define PTRS_PER_PGD	1024
 
-
 /*
- * the i386 is two-level, so we don't really have any
- * PMD directory physically.
+ * The i386 is two-level, so we don't really have any
+ * PMD directory physically:
  */
+
 #define PTRS_PER_PMD	1
 
 #define PTRS_PER_PTE	1024
 
@@ -203,6 +203,9 @@ struct snp_guest_req {
 	unsigned int vmpck_id;
 	u8 msg_version;
 	u8 msg_type;
+
+	struct snp_req_data input;
+	void *certs_data;
 };
 
 /*
@@ -263,9 +266,6 @@ struct snp_msg_desc {
 	struct snp_guest_msg secret_request, secret_response;
 
 	struct snp_secrets_page *secrets;
-	struct snp_req_data input;
-
-	void *certs_data;
 
 	struct aesgcm_ctx *ctx;
 
@@ -143,7 +143,6 @@ bool __init early_is_amd_nb(u32 device)
 
 struct resource *amd_get_mmconfig_range(struct resource *res)
 {
-	u32 address;
 	u64 base, msr;
 	unsigned int segn_busn_bits;
 
@@ -151,13 +150,11 @@ struct resource *amd_get_mmconfig_range(struct resource *res)
 	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
 		return NULL;
 
-	/* assume all cpus from fam10h have mmconfig */
-	if (boot_cpu_data.x86 < 0x10)
+	/* Assume CPUs from Fam10h have mmconfig, although not all VMs do */
+	if (boot_cpu_data.x86 < 0x10 ||
+	    rdmsrl_safe(MSR_FAM10H_MMIO_CONF_BASE, &msr))
 		return NULL;
 
-	address = MSR_FAM10H_MMIO_CONF_BASE;
-	rdmsrl(address, msr);
-
 	/* mmconfig is not enabled */
 	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
 		return NULL;
@@ -808,7 +808,7 @@ void init_intel_cacheinfo(struct cpuinfo_x86 *c)
 		cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
 
 		/* If bit 31 is set, this is an unknown format */
-		for (j = 0 ; j < 3 ; j++)
+		for (j = 0 ; j < 4 ; j++)
 			if (regs[j] & (1 << 31))
 				regs[j] = 0;
 
@@ -153,8 +153,8 @@ static void geode_configure(void)
 	u8 ccr3;
 	local_irq_save(flags);
 
-	/* Suspend on halt power saving and enable #SUSP pin */
-	setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);
+	/* Suspend on halt power saving */
+	setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x08);
 
 	ccr3 = getCx86(CX86_CCR3);
 	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);	/* enable MAPEN */
@@ -635,26 +635,37 @@ static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 }
 #endif
 
-#define TLB_INST_4K	0x01
-#define TLB_INST_4M	0x02
-#define TLB_INST_2M_4M	0x03
+#define TLB_INST_4K		0x01
+#define TLB_INST_4M		0x02
+#define TLB_INST_2M_4M		0x03
 
-#define TLB_INST_ALL	0x05
-#define TLB_INST_1G	0x06
+#define TLB_INST_ALL		0x05
+#define TLB_INST_1G		0x06
 
-#define TLB_DATA_4K	0x11
-#define TLB_DATA_4M	0x12
-#define TLB_DATA_2M_4M	0x13
-#define TLB_DATA_4K_4M	0x14
+#define TLB_DATA_4K		0x11
+#define TLB_DATA_4M		0x12
+#define TLB_DATA_2M_4M		0x13
+#define TLB_DATA_4K_4M		0x14
 
-#define TLB_DATA_1G	0x16
+#define TLB_DATA_1G		0x16
+#define TLB_DATA_1G_2M_4M	0x17
 
-#define TLB_DATA0_4K	0x21
-#define TLB_DATA0_4M	0x22
-#define TLB_DATA0_2M_4M	0x23
+#define TLB_DATA0_4K		0x21
+#define TLB_DATA0_4M		0x22
+#define TLB_DATA0_2M_4M		0x23
 
-#define STLB_4K		0x41
-#define STLB_4K_2M	0x42
+#define STLB_4K			0x41
+#define STLB_4K_2M		0x42
 
+/*
+ * All of leaf 0x2's one-byte TLB descriptors implies the same number of
+ * entries for their respective TLB types. The 0x63 descriptor is an
+ * exception: it implies 4 dTLB entries for 1GB pages 32 dTLB entries
+ * for 2MB or 4MB pages. Encode descriptor 0x63 dTLB entry count for
+ * 2MB/4MB pages here, as its count for dTLB 1GB pages is already at the
+ * intel_tlb_table[] mapping.
+ */
+#define TLB_0x63_2M_4M_ENTRIES	32
+
 static const struct _tlb_table intel_tlb_table[] = {
 	{ 0x01, TLB_INST_4K,		32, " TLB_INST 4 KByte pages, 4-way set associative" },
@@ -676,7 +687,8 @@ static const struct _tlb_table intel_tlb_table[] = {
 	{ 0x5c, TLB_DATA_4K_4M,		128, " TLB_DATA 4 KByte and 4 MByte pages" },
 	{ 0x5d, TLB_DATA_4K_4M,		256, " TLB_DATA 4 KByte and 4 MByte pages" },
 	{ 0x61, TLB_INST_4K,		48, " TLB_INST 4 KByte pages, full associative" },
-	{ 0x63, TLB_DATA_1G,		4, " TLB_DATA 1 GByte pages, 4-way set associative" },
+	{ 0x63, TLB_DATA_1G_2M_4M,	4, " TLB_DATA 1 GByte pages, 4-way set associative"
+	  " (plus 32 entries TLB_DATA 2 MByte or 4 MByte pages, not encoded here)" },
 	{ 0x6b, TLB_DATA_4K,		256, " TLB_DATA 4 KByte pages, 8-way associative" },
 	{ 0x6c, TLB_DATA_2M_4M,		128, " TLB_DATA 2 MByte or 4 MByte pages, 8-way associative" },
 	{ 0x6d, TLB_DATA_1G,		16, " TLB_DATA 1 GByte pages, fully associative" },
@@ -776,6 +788,12 @@ static void intel_tlb_lookup(const unsigned char desc)
 		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
 			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
 		break;
+	case TLB_DATA_1G_2M_4M:
+		if (tlb_lld_2m[ENTRIES] < TLB_0x63_2M_4M_ENTRIES)
+			tlb_lld_2m[ENTRIES] = TLB_0x63_2M_4M_ENTRIES;
+		if (tlb_lld_4m[ENTRIES] < TLB_0x63_2M_4M_ENTRIES)
+			tlb_lld_4m[ENTRIES] = TLB_0x63_2M_4M_ENTRIES;
+		fallthrough;
 	case TLB_DATA_1G:
 		if (tlb_lld_1g[ENTRIES] < intel_tlb_table[k].entries)
 			tlb_lld_1g[ENTRIES] = intel_tlb_table[k].entries;
@@ -799,7 +817,7 @@ static void intel_detect_tlb(struct cpuinfo_x86 *c)
 		cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
 
 		/* If bit 31 is set, this is an unknown format */
-		for (j = 0 ; j < 3 ; j++)
+		for (j = 0 ; j < 4 ; j++)
 			if (regs[j] & (1 << 31))
 				regs[j] = 0;
 
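The effect of the new fallthrough case for descriptor 0x63 can be seen in a small standalone model of the lookup (a sketch of the control flow above, not the kernel code itself):

    #include <stdio.h>

    enum { TLB_DATA_1G = 0x16, TLB_DATA_1G_2M_4M = 0x17 };
    #define TLB_0x63_2M_4M_ENTRIES 32

    static unsigned int lld_2m, lld_4m, lld_1g;

    static void tlb_lookup(int type, unsigned int entries)
    {
            switch (type) {
            case TLB_DATA_1G_2M_4M:
                    /* credit the fixed 2M/4M count for descriptor 0x63 ... */
                    if (lld_2m < TLB_0x63_2M_4M_ENTRIES)
                            lld_2m = TLB_0x63_2M_4M_ENTRIES;
                    if (lld_4m < TLB_0x63_2M_4M_ENTRIES)
                            lld_4m = TLB_0x63_2M_4M_ENTRIES;
                    /* ... then fall through so the table's count hits 1G */
            case TLB_DATA_1G:
                    if (lld_1g < entries)
                            lld_1g = entries;
                    break;
            }
    }

    int main(void)
    {
            tlb_lookup(TLB_DATA_1G_2M_4M, 4);       /* descriptor 0x63 */
            printf("2M:%u 4M:%u 1G:%u\n", lld_2m, lld_4m, lld_1g); /* 32 32 4 */
            return 0;
    }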
@@ -23,14 +23,18 @@
 
 #include <linux/earlycpio.h>
 #include <linux/firmware.h>
+#include <linux/bsearch.h>
 #include <linux/uaccess.h>
 #include <linux/vmalloc.h>
 #include <linux/initrd.h>
 #include <linux/kernel.h>
 #include <linux/pci.h>
 
+#include <crypto/sha2.h>
+
 #include <asm/microcode.h>
 #include <asm/processor.h>
 #include <asm/cmdline.h>
 #include <asm/setup.h>
 #include <asm/cpu.h>
 #include <asm/msr.h>
@@ -145,6 +149,113 @@ ucode_path[] __maybe_unused = "kernel/x86/microcode/AuthenticAMD.bin";
 */
 static u32 bsp_cpuid_1_eax __ro_after_init;
 
+static bool sha_check = true;
+
+struct patch_digest {
+	u32 patch_id;
+	u8 sha256[SHA256_DIGEST_SIZE];
+};
+
+#include "amd_shas.c"
+
+static int cmp_id(const void *key, const void *elem)
+{
+	struct patch_digest *pd = (struct patch_digest *)elem;
+	u32 patch_id = *(u32 *)key;
+
+	if (patch_id == pd->patch_id)
+		return 0;
+	else if (patch_id < pd->patch_id)
+		return -1;
+	else
+		return 1;
+}
+
+static bool need_sha_check(u32 cur_rev)
+{
+	switch (cur_rev >> 8) {
+	case 0x80012: return cur_rev <= 0x800126f; break;
+	case 0x80082: return cur_rev <= 0x800820f; break;
+	case 0x83010: return cur_rev <= 0x830107c; break;
+	case 0x86001: return cur_rev <= 0x860010e; break;
+	case 0x86081: return cur_rev <= 0x8608108; break;
+	case 0x87010: return cur_rev <= 0x8701034; break;
+	case 0x8a000: return cur_rev <= 0x8a0000a; break;
+	case 0xa0010: return cur_rev <= 0xa00107a; break;
+	case 0xa0011: return cur_rev <= 0xa0011da; break;
+	case 0xa0012: return cur_rev <= 0xa001243; break;
+	case 0xa0082: return cur_rev <= 0xa00820e; break;
+	case 0xa1011: return cur_rev <= 0xa101153; break;
+	case 0xa1012: return cur_rev <= 0xa10124e; break;
+	case 0xa1081: return cur_rev <= 0xa108109; break;
+	case 0xa2010: return cur_rev <= 0xa20102f; break;
+	case 0xa2012: return cur_rev <= 0xa201212; break;
+	case 0xa4041: return cur_rev <= 0xa404109; break;
+	case 0xa5000: return cur_rev <= 0xa500013; break;
+	case 0xa6012: return cur_rev <= 0xa60120a; break;
+	case 0xa7041: return cur_rev <= 0xa704109; break;
+	case 0xa7052: return cur_rev <= 0xa705208; break;
+	case 0xa7080: return cur_rev <= 0xa708009; break;
+	case 0xa70c0: return cur_rev <= 0xa70C009; break;
+	case 0xaa001: return cur_rev <= 0xaa00116; break;
+	case 0xaa002: return cur_rev <= 0xaa00218; break;
+	default: break;
+	}
+
+	pr_info("You should not be seeing this. Please send the following couple of lines to x86-<at>-kernel.org\n");
+	pr_info("CPUID(1).EAX: 0x%x, current revision: 0x%x\n", bsp_cpuid_1_eax, cur_rev);
+	return true;
+}
+
+static bool verify_sha256_digest(u32 patch_id, u32 cur_rev, const u8 *data, unsigned int len)
+{
+	struct patch_digest *pd = NULL;
+	u8 digest[SHA256_DIGEST_SIZE];
+	struct sha256_state s;
+	int i;
+
+	if (x86_family(bsp_cpuid_1_eax) < 0x17 ||
+	    x86_family(bsp_cpuid_1_eax) > 0x19)
+		return true;
+
+	if (!need_sha_check(cur_rev))
+		return true;
+
+	if (!sha_check)
+		return true;
+
+	pd = bsearch(&patch_id, phashes, ARRAY_SIZE(phashes), sizeof(struct patch_digest), cmp_id);
+	if (!pd) {
+		pr_err("No sha256 digest for patch ID: 0x%x found\n", patch_id);
+		return false;
+	}
+
+	sha256_init(&s);
+	sha256_update(&s, data, len);
+	sha256_final(&s, digest);
+
+	if (memcmp(digest, pd->sha256, sizeof(digest))) {
+		pr_err("Patch 0x%x SHA256 digest mismatch!\n", patch_id);
+
+		for (i = 0; i < SHA256_DIGEST_SIZE; i++)
+			pr_cont("0x%x ", digest[i]);
+		pr_info("\n");
+
+		return false;
+	}
+
+	return true;
+}
+
+static u32 get_patch_level(void)
+{
+	u32 rev, dummy __always_unused;
+
+	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
+
+	return rev;
+}
+
 static union cpuid_1_eax ucode_rev_to_cpuid(unsigned int val)
 {
 	union zen_patch_rev p;
@@ -246,8 +357,7 @@ static bool verify_equivalence_table(const u8 *buf, size_t buf_size)
 * On success, @sh_psize returns the patch size according to the section header,
 * to the caller.
 */
-static bool
-__verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize)
+static bool __verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize)
 {
 	u32 p_type, p_size;
 	const u32 *hdr;
@@ -484,10 +594,13 @@ static void scan_containers(u8 *ucode, size_t size, struct cont_desc *desc)
 	}
 }
 
-static bool __apply_microcode_amd(struct microcode_amd *mc, unsigned int psize)
+static bool __apply_microcode_amd(struct microcode_amd *mc, u32 *cur_rev,
+				  unsigned int psize)
 {
 	unsigned long p_addr = (unsigned long)&mc->hdr.data_code;
-	u32 rev, dummy;
+
+	if (!verify_sha256_digest(mc->hdr.patch_id, *cur_rev, (const u8 *)p_addr, psize))
+		return -1;
 
 	native_wrmsrl(MSR_AMD64_PATCH_LOADER, p_addr);
 
@@ -505,47 +618,13 @@ static bool __apply_microcode_amd(struct microcode_amd *mc, unsigned int psize)
 	}
 
 	/* verify patch application was successful */
-	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
-
-	if (rev != mc->hdr.patch_id)
+	*cur_rev = get_patch_level();
+	if (*cur_rev != mc->hdr.patch_id)
 		return false;
 
 	return true;
 }
 
-/*
- * Early load occurs before we can vmalloc(). So we look for the microcode
- * patch container file in initrd, traverse equivalent cpu table, look for a
- * matching microcode patch, and update, all in initrd memory in place.
- * When vmalloc() is available for use later -- on 64-bit during first AP load,
- * and on 32-bit during save_microcode_in_initrd_amd() -- we can call
- * load_microcode_amd() to save equivalent cpu table and microcode patches in
- * kernel heap memory.
- *
- * Returns true if container found (sets @desc), false otherwise.
- */
-static bool early_apply_microcode(u32 old_rev, void *ucode, size_t size)
-{
-	struct cont_desc desc = { 0 };
-	struct microcode_amd *mc;
-
-	scan_containers(ucode, size, &desc);
-
-	mc = desc.mc;
-	if (!mc)
-		return false;
-
-	/*
-	 * Allow application of the same revision to pick up SMT-specific
-	 * changes even if the revision of the other SMT thread is already
-	 * up-to-date.
-	 */
-	if (old_rev > mc->hdr.patch_id)
-		return false;
-
-	return __apply_microcode_amd(mc, desc.psize);
-}
 
 static bool get_builtin_microcode(struct cpio_data *cp)
 {
 	char fw_name[36] = "amd-ucode/microcode_amd.bin";
@@ -583,14 +662,35 @@ static bool __init find_blobs_in_containers(struct cpio_data *ret)
 	return found;
 }
 
+/*
+ * Early load occurs before we can vmalloc(). So we look for the microcode
+ * patch container file in initrd, traverse equivalent cpu table, look for a
+ * matching microcode patch, and update, all in initrd memory in place.
+ * When vmalloc() is available for use later -- on 64-bit during first AP load,
+ * and on 32-bit during save_microcode_in_initrd() -- we can call
+ * load_microcode_amd() to save equivalent cpu table and microcode patches in
+ * kernel heap memory.
+ */
 void __init load_ucode_amd_bsp(struct early_load_data *ed, unsigned int cpuid_1_eax)
 {
+	struct cont_desc desc = { };
+	struct microcode_amd *mc;
 	struct cpio_data cp = { };
-	u32 dummy;
+	char buf[4];
+	u32 rev;
+
+	if (cmdline_find_option(boot_command_line, "microcode.amd_sha_check", buf, 4)) {
+		if (!strncmp(buf, "off", 3)) {
+			sha_check = false;
+			pr_warn_once("It is a very very bad idea to disable the blobs SHA check!\n");
+			add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
+		}
+	}
 
 	bsp_cpuid_1_eax = cpuid_1_eax;
 
-	native_rdmsr(MSR_AMD64_PATCH_LEVEL, ed->old_rev, dummy);
+	rev = get_patch_level();
+	ed->old_rev = rev;
 
 	/* Needed in load_microcode_amd() */
 	ucode_cpu_info[0].cpu_sig.sig = cpuid_1_eax;
@@ -598,37 +698,23 @@ void __init load_ucode_amd_bsp(struct early_load_data *ed, unsigned int cpuid_1_
 	if (!find_blobs_in_containers(&cp))
 		return;
 
-	if (early_apply_microcode(ed->old_rev, cp.data, cp.size))
-		native_rdmsr(MSR_AMD64_PATCH_LEVEL, ed->new_rev, dummy);
-}
-
-static enum ucode_state _load_microcode_amd(u8 family, const u8 *data, size_t size);
-
-static int __init save_microcode_in_initrd(void)
-{
-	unsigned int cpuid_1_eax = native_cpuid_eax(1);
-	struct cpuinfo_x86 *c = &boot_cpu_data;
-	struct cont_desc desc = { 0 };
-	enum ucode_state ret;
-	struct cpio_data cp;
-
-	if (dis_ucode_ldr || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10)
-		return 0;
-
-	if (!find_blobs_in_containers(&cp))
-		return -EINVAL;
-
 	scan_containers(cp.data, cp.size, &desc);
-	if (!desc.mc)
-		return -EINVAL;
 
-	ret = _load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size);
-	if (ret > UCODE_UPDATED)
-		return -EINVAL;
+	mc = desc.mc;
+	if (!mc)
+		return;
 
-	return 0;
+	/*
+	 * Allow application of the same revision to pick up SMT-specific
+	 * changes even if the revision of the other SMT thread is already
+	 * up-to-date.
+	 */
+	if (ed->old_rev > mc->hdr.patch_id)
+		return;
+
+	if (__apply_microcode_amd(mc, &rev, desc.psize))
+		ed->new_rev = rev;
 }
-early_initcall(save_microcode_in_initrd);
 
 static inline bool patch_cpus_equivalent(struct ucode_patch *p,
 					 struct ucode_patch *n,
@@ -729,14 +815,9 @@ static void free_cache(void)
 static struct ucode_patch *find_patch(unsigned int cpu)
 {
 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
-	u32 rev, dummy __always_unused;
 	u16 equiv_id = 0;
 
-	/* fetch rev if not populated yet: */
-	if (!uci->cpu_sig.rev) {
-		rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
-		uci->cpu_sig.rev = rev;
-	}
+	uci->cpu_sig.rev = get_patch_level();
 
 	if (x86_family(bsp_cpuid_1_eax) < 0x17) {
 		equiv_id = find_equiv_id(&equiv_table, uci->cpu_sig.sig);
@@ -759,22 +840,20 @@ void reload_ucode_amd(unsigned int cpu)
 
 	mc = p->data;
 
-	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
-
+	rev = get_patch_level();
 	if (rev < mc->hdr.patch_id) {
-		if (__apply_microcode_amd(mc, p->size))
-			pr_info_once("reload revision: 0x%08x\n", mc->hdr.patch_id);
+		if (__apply_microcode_amd(mc, &rev, p->size))
+			pr_info_once("reload revision: 0x%08x\n", rev);
 	}
 }
 
 static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
 {
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	struct ucode_patch *p;
 
 	csig->sig = cpuid_eax(0x00000001);
-	csig->rev = c->microcode;
+	csig->rev = get_patch_level();
 
 	/*
 	 * a patch could have been loaded early, set uci->mc so that
@@ -815,7 +894,7 @@ static enum ucode_state apply_microcode_amd(int cpu)
 		goto out;
 	}
 
-	if (!__apply_microcode_amd(mc_amd, p->size)) {
+	if (!__apply_microcode_amd(mc_amd, &rev, p->size)) {
 		pr_err("CPU%d: update failed for patch_level=0x%08x\n",
 			cpu, mc_amd->hdr.patch_id);
 		return UCODE_ERROR;
@@ -937,8 +1016,7 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover,
 }
 
 /* Scan the blob in @data and add microcode patches to the cache. */
-static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
-					     size_t size)
+static enum ucode_state __load_microcode_amd(u8 family, const u8 *data, size_t size)
 {
 	u8 *fw = (u8 *)data;
 	size_t offset;
@@ -1013,6 +1091,32 @@ static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t siz
 	return ret;
 }
 
+static int __init save_microcode_in_initrd(void)
+{
+	unsigned int cpuid_1_eax = native_cpuid_eax(1);
+	struct cpuinfo_x86 *c = &boot_cpu_data;
+	struct cont_desc desc = { 0 };
+	enum ucode_state ret;
+	struct cpio_data cp;
+
+	if (dis_ucode_ldr || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10)
+		return 0;
+
+	if (!find_blobs_in_containers(&cp))
+		return -EINVAL;
+
+	scan_containers(cp.data, cp.size, &desc);
+	if (!desc.mc)
+		return -EINVAL;
+
+	ret = _load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size);
+	if (ret > UCODE_UPDATED)
+		return -EINVAL;
+
+	return 0;
+}
+early_initcall(save_microcode_in_initrd);
+
 /*
 * AMD microcode firmware naming convention, up to family 15h they are in
 * the legacy file:
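The microcode.amd_sha_check= parameter parsed in load_ucode_amd_bsp() above is a tainting, last-resort escape hatch for the digest check. A hypothetical bootloader entry (only the parameter name and its "off" value come from the hunk; the GRUB placement is illustrative):

    # /etc/default/grub -- illustrative only; disabling the check taints the kernel
    GRUB_CMDLINE_LINUX="microcode.amd_sha_check=off"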
arch/x86/kernel/cpu/microcode/amd_shas.c (new file, 444 lines)
@@ -0,0 +1,444 @@
+/* Keep 'em sorted. */
+static const struct patch_digest phashes[] = {
+	{ 0x8001227, { 0x99,0xc0,0x9b,0x2b,0xcc,0x9f,0x52,0x1b, 0x1a,0x5f,0x1d,0x83,0xa1,0x6c,0xc4,0x46,
+		       0xe2,0x6c,0xda,0x73,0xfb,0x2d,0x23,0xa8, 0x77,0xdc,0x15,0x31,0x33,0x4a,0x46,0x18, } },
+	{ 0x8001250, { 0xc0,0x0b,0x6b,0x19,0xfd,0x5c,0x39,0x60, 0xd5,0xc3,0x57,0x46,0x54,0xe4,0xd1,0xaa,
+		       0xa8,0xf7,0x1f,0xa8,0x6a,0x60,0x3e,0xe3, 0x27,0x39,0x8e,0x53,0x30,0xf8,0x49,0x19, } },
+	{ 0x800126e, { 0xf3,0x8b,0x2b,0xb6,0x34,0xe3,0xc8,0x2c, 0xef,0xec,0x63,0x6d,0xc8,0x76,0x77,0xb3,
+		       0x25,0x5a,0xb7,0x52,0x8c,0x83,0x26,0xe6, 0x4c,0xbe,0xbf,0xe9,0x7d,0x22,0x6a,0x43, } },
+	{ 0x800126f, { 0x2b,0x5a,0xf2,0x9c,0xdd,0xd2,0x7f,0xec, 0xec,0x96,0x09,0x57,0xb0,0x96,0x29,0x8b,
+		       0x2e,0x26,0x91,0xf0,0x49,0x33,0x42,0x18, 0xdd,0x4b,0x65,0x5a,0xd4,0x15,0x3d,0x33, } },
+	{ 0x800820d, { 0x68,0x98,0x83,0xcd,0x22,0x0d,0xdd,0x59, 0x73,0x2c,0x5b,0x37,0x1f,0x84,0x0e,0x67,
+		       0x96,0x43,0x83,0x0c,0x46,0x44,0xab,0x7c, 0x7b,0x65,0x9e,0x57,0xb5,0x90,0x4b,0x0e, } },
+	{ 0x8301025, { 0xe4,0x7d,0xdb,0x1e,0x14,0xb4,0x5e,0x36, 0x8f,0x3e,0x48,0x88,0x3c,0x6d,0x76,0xa1,
+		       0x59,0xc6,0xc0,0x72,0x42,0xdf,0x6c,0x30, 0x6f,0x0b,0x28,0x16,0x61,0xfc,0x79,0x77, } },
+	{ 0x8301055, { 0x81,0x7b,0x99,0x1b,0xae,0x2d,0x4f,0x9a, 0xef,0x13,0xce,0xb5,0x10,0xaf,0x6a,0xea,
+		       0xe5,0xb0,0x64,0x98,0x10,0x68,0x34,0x3b, 0x9d,0x7a,0xd6,0x22,0x77,0x5f,0xb3,0x5b, } },
+	{ 0x8301072, { 0xcf,0x76,0xa7,0x1a,0x49,0xdf,0x2a,0x5e, 0x9e,0x40,0x70,0xe5,0xdd,0x8a,0xa8,0x28,
+		       0x20,0xdc,0x91,0xd8,0x2c,0xa6,0xa0,0xb1, 0x2d,0x22,0x26,0x94,0x4b,0x40,0x85,0x30, } },
+	{ 0x830107a, { 0x2a,0x65,0x8c,0x1a,0x5e,0x07,0x21,0x72, 0xdf,0x90,0xa6,0x51,0x37,0xd3,0x4b,0x34,
+		       0xc4,0xda,0x03,0xe1,0x8a,0x6c,0xfb,0x20, 0x04,0xb2,0x81,0x05,0xd4,0x87,0xf4,0x0a, } },
+	{ 0x830107b, { 0xb3,0x43,0x13,0x63,0x56,0xc1,0x39,0xad, 0x10,0xa6,0x2b,0xcc,0x02,0xe6,0x76,0x2a,
+		       0x1e,0x39,0x58,0x3e,0x23,0x6e,0xa4,0x04, 0x95,0xea,0xf9,0x6d,0xc2,0x8a,0x13,0x19, } },
+	{ 0x830107c, { 0x21,0x64,0xde,0xfb,0x9f,0x68,0x96,0x47, 0x70,0x5c,0xe2,0x8f,0x18,0x52,0x6a,0xac,
+		       0xa4,0xd2,0x2e,0xe0,0xde,0x68,0x66,0xc3, 0xeb,0x1e,0xd3,0x3f,0xbc,0x51,0x1d,0x38, } },
+	{ 0x860010d, { 0x86,0xb6,0x15,0x83,0xbc,0x3b,0x9c,0xe0, 0xb3,0xef,0x1d,0x99,0x84,0x35,0x15,0xf7,
+		       0x7c,0x2a,0xc6,0x42,0xdb,0x73,0x07,0x5c, 0x7d,0xc3,0x02,0xb5,0x43,0x06,0x5e,0xf8, } },
+	{ 0x8608108, { 0x14,0xfe,0x57,0x86,0x49,0xc8,0x68,0xe2, 0x11,0xa3,0xcb,0x6e,0xff,0x6e,0xd5,0x38,
+		       0xfe,0x89,0x1a,0xe0,0x67,0xbf,0xc4,0xcc, 0x1b,0x9f,0x84,0x77,0x2b,0x9f,0xaa,0xbd, } },
+	{ 0x8701034, { 0xc3,0x14,0x09,0xa8,0x9c,0x3f,0x8d,0x83, 0x9b,0x4c,0xa5,0xb7,0x64,0x8b,0x91,0x5d,
+		       0x85,0x6a,0x39,0x26,0x1e,0x14,0x41,0xa8, 0x75,0xea,0xa6,0xf9,0xc9,0xd1,0xea,0x2b, } },
+	{ 0x8a00008, { 0xd7,0x2a,0x93,0xdc,0x05,0x2f,0xa5,0x6e, 0x0c,0x61,0x2c,0x07,0x9f,0x38,0xe9,0x8e,
+		       0xef,0x7d,0x2a,0x05,0x4d,0x56,0xaf,0x72, 0xe7,0x56,0x47,0x6e,0x60,0x27,0xd5,0x8c, } },
+	{ 0x8a0000a, { 0x73,0x31,0x26,0x22,0xd4,0xf9,0xee,0x3c, 0x07,0x06,0xe7,0xb9,0xad,0xd8,0x72,0x44,
+		       0x33,0x31,0xaa,0x7d,0xc3,0x67,0x0e,0xdb, 0x47,0xb5,0xaa,0xbc,0xf5,0xbb,0xd9,0x20, } },
+	{ 0xa00104c, { 0x3c,0x8a,0xfe,0x04,0x62,0xd8,0x6d,0xbe, 0xa7,0x14,0x28,0x64,0x75,0xc0,0xa3,0x76,
+		       0xb7,0x92,0x0b,0x97,0x0a,0x8e,0x9c,0x5b, 0x1b,0xc8,0x9d,0x3a,0x1e,0x81,0x3d,0x3b, } },
+	{ 0xa00104e, { 0xc4,0x35,0x82,0x67,0xd2,0x86,0xe5,0xb2, 0xfd,0x69,0x12,0x38,0xc8,0x77,0xba,0xe0,
+		       0x70,0xf9,0x77,0x89,0x10,0xa6,0x74,0x4e, 0x56,0x58,0x13,0xf5,0x84,0x70,0x28,0x0b, } },
+	{ 0xa001053, { 0x92,0x0e,0xf4,0x69,0x10,0x3b,0xf9,0x9d, 0x31,0x1b,0xa6,0x99,0x08,0x7d,0xd7,0x25,
+		       0x7e,0x1e,0x89,0xba,0x35,0x8d,0xac,0xcb, 0x3a,0xb4,0xdf,0x58,0x12,0xcf,0xc0,0xc3, } },
+	{ 0xa001058, { 0x33,0x7d,0xa9,0xb5,0x4e,0x62,0x13,0x36, 0xef,0x66,0xc9,0xbd,0x0a,0xa6,0x3b,0x19,
+		       0xcb,0xf5,0xc2,0xc3,0x55,0x47,0x20,0xec, 0x1f,0x7b,0xa1,0x44,0x0e,0x8e,0xa4,0xb2, } },
+	{ 0xa001075, { 0x39,0x02,0x82,0xd0,0x7c,0x26,0x43,0xe9, 0x26,0xa3,0xd9,0x96,0xf7,0x30,0x13,0x0a,
+		       0x8a,0x0e,0xac,0xe7,0x1d,0xdc,0xe2,0x0f, 0xcb,0x9e,0x8d,0xbc,0xd2,0xa2,0x44,0xe0, } },
+	{ 0xa001078, { 0x2d,0x67,0xc7,0x35,0xca,0xef,0x2f,0x25, 0x4c,0x45,0x93,0x3f,0x36,0x01,0x8c,0xce,
+		       0xa8,0x5b,0x07,0xd3,0xc1,0x35,0x3c,0x04, 0x20,0xa2,0xfc,0xdc,0xe6,0xce,0x26,0x3e, } },
+	{ 0xa001079, { 0x43,0xe2,0x05,0x9c,0xfd,0xb7,0x5b,0xeb, 0x5b,0xe9,0xeb,0x3b,0x96,0xf4,0xe4,0x93,
+		       0x73,0x45,0x3e,0xac,0x8d,0x3b,0xe4,0xdb, 0x10,0x31,0xc1,0xe4,0xa2,0xd0,0x5a,0x8a, } },
+	{ 0xa00107a, { 0x5f,0x92,0xca,0xff,0xc3,0x59,0x22,0x5f, 0x02,0xa0,0x91,0x3b,0x4a,0x45,0x10,0xfd,
+		       0x19,0xe1,0x8a,0x6d,0x9a,0x92,0xc1,0x3f, 0x75,0x78,0xac,0x78,0x03,0x1d,0xdb,0x18, } },
+	{ 0xa001143, { 0x56,0xca,0xf7,0x43,0x8a,0x4c,0x46,0x80, 0xec,0xde,0xe5,0x9c,0x50,0x84,0x9a,0x42,
+		       0x27,0xe5,0x51,0x84,0x8f,0x19,0xc0,0x8d, 0x0c,0x25,0xb4,0xb0,0x8f,0x10,0xf3,0xf8, } },
+	{ 0xa001144, { 0x42,0xd5,0x9b,0xa7,0xd6,0x15,0x29,0x41, 0x61,0xc4,0x72,0x3f,0xf3,0x06,0x78,0x4b,
+		       0x65,0xf3,0x0e,0xfa,0x9c,0x87,0xde,0x25, 0xbd,0xb3,0x9a,0xf4,0x75,0x13,0x53,0xdc, } },
+	{ 0xa00115d, { 0xd4,0xc4,0x49,0x36,0x89,0x0b,0x47,0xdd, 0xfb,0x2f,0x88,0x3b,0x5f,0xf2,0x8e,0x75,
+		       0xc6,0x6c,0x37,0x5a,0x90,0x25,0x94,0x3e, 0x36,0x9c,0xae,0x02,0x38,0x6c,0xf5,0x05, } },
+	{ 0xa001173, { 0x28,0xbb,0x9b,0xd1,0xa0,0xa0,0x7e,0x3a, 0x59,0x20,0xc0,0xa9,0xb2,0x5c,0xc3,0x35,
+		       0x53,0x89,0xe1,0x4c,0x93,0x2f,0x1d,0xc3, 0xe5,0xf7,0xf3,0xc8,0x9b,0x61,0xaa,0x9e, } },
+	{ 0xa0011a8, { 0x97,0xc6,0x16,0x65,0x99,0xa4,0x85,0x3b, 0xf6,0xce,0xaa,0x49,0x4a,0x3a,0xc5,0xb6,
+		       0x78,0x25,0xbc,0x53,0xaf,0x5d,0xcf,0xf4, 0x23,0x12,0xbb,0xb1,0xbc,0x8a,0x02,0x2e, } },
+	{ 0xa0011ce, { 0xcf,0x1c,0x90,0xa3,0x85,0x0a,0xbf,0x71, 0x94,0x0e,0x80,0x86,0x85,0x4f,0xd7,0x86,
+		       0xae,0x38,0x23,0x28,0x2b,0x35,0x9b,0x4e, 0xfe,0xb8,0xcd,0x3d,0x3d,0x39,0xc9,0x6a, } },
+	{ 0xa0011d1, { 0xdf,0x0e,0xca,0xde,0xf6,0xce,0x5c,0x1e, 0x4c,0xec,0xd7,0x71,0x83,0xcc,0xa8,0x09,
+		       0xc7,0xc5,0xfe,0xb2,0xf7,0x05,0xd2,0xc5, 0x12,0xdd,0xe4,0xf3,0x92,0x1c,0x3d,0xb8, } },
+	{ 0xa0011d3, { 0x91,0xe6,0x10,0xd7,0x57,0xb0,0x95,0x0b, 0x9a,0x24,0xee,0xf7,0xcf,0x56,0xc1,0xa6,
+		       0x4a,0x52,0x7d,0x5f,0x9f,0xdf,0xf6,0x00, 0x65,0xf7,0xea,0xe8,0x2a,0x88,0xe2,0x26, } },
+	{ 0xa0011d5, { 0xed,0x69,0x89,0xf4,0xeb,0x64,0xc2,0x13, 0xe0,0x51,0x1f,0x03,0x26,0x52,0x7d,0xb7,
+		       0x93,0x5d,0x65,0xca,0xb8,0x12,0x1d,0x62, 0x0d,0x5b,0x65,0x34,0x69,0xb2,0x62,0x21, } },
+	{ 0xa001223, { 0xfb,0x32,0x5f,0xc6,0x83,0x4f,0x8c,0xb8, 0xa4,0x05,0xf9,0x71,0x53,0x01,0x16,0xc4,
+		       0x83,0x75,0x94,0xdd,0xeb,0x7e,0xb7,0x15, 0x8e,0x3b,0x50,0x29,0x8a,0x9c,0xcc,0x45, } },
+	{ 0xa001224, { 0x0e,0x0c,0xdf,0xb4,0x89,0xee,0x35,0x25, 0xdd,0x9e,0xdb,0xc0,0x69,0x83,0x0a,0xad,
+		       0x26,0xa9,0xaa,0x9d,0xfc,0x3c,0xea,0xf9, 0x6c,0xdc,0xd5,0x6d,0x8b,0x6e,0x85,0x4a, } },
+	{ 0xa001227, { 0xab,0xc6,0x00,0x69,0x4b,0x50,0x87,0xad, 0x5f,0x0e,0x8b,0xea,0x57,0x38,0xce,0x1d,
+		       0x0f,0x75,0x26,0x02,0xf6,0xd6,0x96,0xe9, 0x87,0xb9,0xd6,0x20,0x27,0x7c,0xd2,0xe0, } },
+	{ 0xa001229, { 0x7f,0x49,0x49,0x48,0x46,0xa5,0x50,0xa6, 0x28,0x89,0x98,0xe2,0x9e,0xb4,0x7f,0x75,
+		       0x33,0xa7,0x04,0x02,0xe4,0x82,0xbf,0xb4, 0xa5,0x3a,0xba,0x24,0x8d,0x31,0x10,0x1d, } },
+	{ 0xa00122e, { 0x56,0x94,0xa9,0x5d,0x06,0x68,0xfe,0xaf, 0xdf,0x7a,0xff,0x2d,0xdf,0x74,0x0f,0x15,
+		       0x66,0xfb,0x00,0xb5,0x51,0x97,0x9b,0xfa, 0xcb,0x79,0x85,0x46,0x25,0xb4,0xd2,0x10, } },
+	{ 0xa001231, { 0x0b,0x46,0xa5,0xfc,0x18,0x15,0xa0,0x9e, 0xa6,0xdc,0xb7,0xff,0x17,0xf7,0x30,0x64,
+		       0xd4,0xda,0x9e,0x1b,0xc3,0xfc,0x02,0x3b, 0xe2,0xc6,0x0e,0x41,0x54,0xb5,0x18,0xdd, } },
+	{ 0xa001234, { 0x88,0x8d,0xed,0xab,0xb5,0xbd,0x4e,0xf7, 0x7f,0xd4,0x0e,0x95,0x34,0x91,0xff,0xcc,
+		       0xfb,0x2a,0xcd,0xf7,0xd5,0xdb,0x4c,0x9b, 0xd6,0x2e,0x73,0x50,0x8f,0x83,0x79,0x1a, } },
+	{ 0xa001236, { 0x3d,0x30,0x00,0xb9,0x71,0xba,0x87,0x78, 0xa8,0x43,0x55,0xc4,0x26,0x59,0xcf,0x9d,
+		       0x93,0xce,0x64,0x0e,0x8b,0x72,0x11,0x8b, 0xa3,0x8f,0x51,0xe9,0xca,0x98,0xaa,0x25, } },
+	{ 0xa001238, { 0x72,0xf7,0x4b,0x0c,0x7d,0x58,0x65,0xcc, 0x00,0xcc,0x57,0x16,0x68,0x16,0xf8,0x2a,
+		       0x1b,0xb3,0x8b,0xe1,0xb6,0x83,0x8c,0x7e, 0xc0,0xcd,0x33,0xf2,0x8d,0xf9,0xef,0x59, } },
+	{ 0xa00820c, { 0xa8,0x0c,0x81,0xc0,0xa6,0x00,0xe7,0xf3, 0x5f,0x65,0xd3,0xb9,0x6f,0xea,0x93,0x63,
+		       0xf1,0x8c,0x88,0x45,0xd7,0x82,0x80,0xd1, 0xe1,0x3b,0x8d,0xb2,0xf8,0x22,0x03,0xe2, } },
+	{ 0xa10113e, { 0x05,0x3c,0x66,0xd7,0xa9,0x5a,0x33,0x10, 0x1b,0xf8,0x9c,0x8f,0xed,0xfc,0xa7,0xa0,
+		       0x15,0xe3,0x3f,0x4b,0x1d,0x0d,0x0a,0xd5, 0xfa,0x90,0xc4,0xed,0x9d,0x90,0xaf,0x53, } },
+	{ 0xa101144, { 0xb3,0x0b,0x26,0x9a,0xf8,0x7c,0x02,0x26, 0x35,0x84,0x53,0xa4,0xd3,0x2c,0x7c,0x09,
+		       0x68,0x7b,0x96,0xb6,0x93,0xef,0xde,0xbc, 0xfd,0x4b,0x15,0xd2,0x81,0xd3,0x51,0x47, } },
+	{ 0xa101148, { 0x20,0xd5,0x6f,0x40,0x4a,0xf6,0x48,0x90, 0xc2,0x93,0x9a,0xc2,0xfd,0xac,0xef,0x4f,
+		       0xfa,0xc0,0x3d,0x92,0x3c,0x6d,0x01,0x08, 0xf1,0x5e,0xb0,0xde,0xb4,0x98,0xae,0xc4, } },
+	{ 0xa10123e, { 0x03,0xb9,0x2c,0x76,0x48,0x93,0xc9,0x18, 0xfb,0x56,0xfd,0xf7,0xe2,0x1d,0xca,0x4d,
+		       0x1d,0x13,0x53,0x63,0xfe,0x42,0x6f,0xfc, 0x19,0x0f,0xf1,0xfc,0xa7,0xdd,0x89,0x1b, } },
+	{ 0xa101244, { 0x71,0x56,0xb5,0x9f,0x21,0xbf,0xb3,0x3c, 0x8c,0xd7,0x36,0xd0,0x34,0x52,0x1b,0xb1,
+		       0x46,0x2f,0x04,0xf0,0x37,0xd8,0x1e,0x72, 0x24,0xa2,0x80,0x84,0x83,0x65,0x84,0xc0, } },
+	{ 0xa101248, { 0xed,0x3b,0x95,0xa6,0x68,0xa7,0x77,0x3e, 0xfc,0x17,0x26,0xe2,0x7b,0xd5,0x56,0x22,
+		       0x2c,0x1d,0xef,0xeb,0x56,0xdd,0xba,0x6e, 0x1b,0x7d,0x64,0x9d,0x4b,0x53,0x13,0x75, } },
+	{ 0xa108108, { 0xed,0xc2,0xec,0xa1,0x15,0xc6,0x65,0xe9, 0xd0,0xef,0x39,0xaa,0x7f,0x55,0x06,0xc6,
+		       0xf5,0xd4,0x3f,0x7b,0x14,0xd5,0x60,0x2c, 0x28,0x1e,0x9c,0x59,0x69,0x99,0x4d,0x16, } },
+	{ 0xa20102d, { 0xf9,0x6e,0xf2,0x32,0xd3,0x0f,0x5f,0x11, 0x59,0xa1,0xfe,0xcc,0xcd,0x9b,0x42,0x89,
+		       0x8b,0x89,0x2f,0xb5,0xbb,0x82,0xef,0x23, 0x8c,0xe9,0x19,0x3e,0xcc,0x3f,0x7b,0xb4, } },
+	{ 0xa201210, { 0xe8,0x6d,0x51,0x6a,0x8e,0x72,0xf3,0xfe, 0x6e,0x16,0xbc,0x62,0x59,0x40,0x17,0xe9,
+		       0x6d,0x3d,0x0e,0x6b,0xa7,0xac,0xe3,0x68, 0xf7,0x55,0xf0,0x13,0xbb,0x22,0xf6,0x41, } },
+	{ 0xa404107, { 0xbb,0x04,0x4e,0x47,0xdd,0x5e,0x26,0x45, 0x1a,0xc9,0x56,0x24,0xa4,0x4c,0x82,0xb0,
+		       0x8b,0x0d,0x9f,0xf9,0x3a,0xdf,0xc6,0x81, 0x13,0xbc,0xc5,0x25,0xe4,0xc5,0xc3,0x99, } },
+	{ 0xa500011, { 0x23,0x3d,0x70,0x7d,0x03,0xc3,0xc4,0xf4, 0x2b,0x82,0xc6,0x05,0xda,0x80,0x0a,0xf1,
+		       0xd7,0x5b,0x65,0x3a,0x7d,0xab,0xdf,0xa2, 0x11,0x5e,0x96,0x7e,0x71,0xe9,0xfc,0x74, } },
+	{ 0xa601209, { 0x66,0x48,0xd4,0x09,0x05,0xcb,0x29,0x32, 0x66,0xb7,0x9a,0x76,0xcd,0x11,0xf3,0x30,
+		       0x15,0x86,0xcc,0x5d,0x97,0x0f,0xc0,0x46, 0xe8,0x73,0xe2,0xd6,0xdb,0xd2,0x77,0x1d, } },
+	{ 0xa704107, { 0xf3,0xc6,0x58,0x26,0xee,0xac,0x3f,0xd6, 0xce,0xa1,0x72,0x47,0x3b,0xba,0x2b,0x93,
+		       0x2a,0xad,0x8e,0x6b,0xea,0x9b,0xb7,0xc2, 0x64,0x39,0x71,0x8c,0xce,0xe7,0x41,0x39, } },
+	{ 0xa705206, { 0x8d,0xc0,0x76,0xbd,0x58,0x9f,0x8f,0xa4, 0x12,0x9d,0x21,0xfb,0x48,0x21,0xbc,0xe7,
+		       0x67,0x6f,0x04,0x18,0xae,0x20,0x87,0x4b, 0x03,0x35,0xe9,0xbe,0xfb,0x06,0xdf,0xfc, } },
+	{ 0xa708007, { 0x6b,0x76,0xcc,0x78,0xc5,0x8a,0xa3,0xe3, 0x32,0x2d,0x79,0xe4,0xc3,0x80,0xdb,0xb2,
+		       0x07,0xaa,0x3a,0xe0,0x57,0x13,0x72,0x80, 0xdf,0x92,0x73,0x84,0x87,0x3c,0x73,0x93, } },
+	{ 0xa70c005, { 0x88,0x5d,0xfb,0x79,0x64,0xd8,0x46,0x3b, 0x4a,0x83,0x8e,0x77,0x7e,0xcf,0xb3,0x0f,
+		       0x1f,0x1f,0xf1,0x97,0xeb,0xfe,0x56,0x55, 0xee,0x49,0xac,0xe1,0x8b,0x13,0xc5,0x13, } },
+	{ 0xaa00116, { 0xe8,0x4c,0x2c,0x88,0xa1,0xac,0x24,0x63, 0x65,0xe5,0xaa,0x2d,0x16,0xa9,0xc3,0xf5,
+		       0xfe,0x1d,0x5e,0x65,0xc7,0xaa,0x92,0x4d, 0x91,0xee,0x76,0xbb,0x4c,0x66,0x78,0xc9, } },
+	{ 0xaa00212, { 0xbd,0x57,0x5d,0x0a,0x0a,0x30,0xc1,0x75, 0x95,0x58,0x5e,0x93,0x02,0x28,0x43,0x71,
+		       0xed,0x42,0x29,0xc8,0xec,0x34,0x2b,0xb2, 0x1a,0x65,0x4b,0xfe,0x07,0x0f,0x34,0xa1, } },
+	{ 0xaa00213, { 0xed,0x58,0xb7,0x76,0x81,0x7f,0xd9,0x3a, 0x1a,0xff,0x8b,0x34,0xb8,0x4a,0x99,0x0f,
+		       0x28,0x49,0x6c,0x56,0x2b,0xdc,0xb7,0xed, 0x96,0xd5,0x9d,0xc1,0x7a,0xd4,0x51,0x9b, } },
+	{ 0xaa00215, { 0x55,0xd3,0x28,0xcb,0x87,0xa9,0x32,0xe9, 0x4e,0x85,0x4b,0x7c,0x6b,0xd5,0x7c,0xd4,
+		       0x1b,0x51,0x71,0x3a,0x0e,0x0b,0xdc,0x9b, 0x68,0x2f,0x46,0xee,0xfe,0xc6,0x6d,0xef, } },
+};
@@ -100,14 +100,12 @@ extern bool force_minrev;
 #ifdef CONFIG_CPU_SUP_AMD
 void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family);
 void load_ucode_amd_ap(unsigned int family);
-int save_microcode_in_initrd_amd(unsigned int family);
 void reload_ucode_amd(unsigned int cpu);
 struct microcode_ops *init_amd_microcode(void);
 void exit_amd_microcode(void);
 #else /* CONFIG_CPU_SUP_AMD */
 static inline void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family) { }
 static inline void load_ucode_amd_ap(unsigned int family) { }
-static inline int save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; }
 static inline void reload_ucode_amd(unsigned int cpu) { }
 static inline struct microcode_ops *init_amd_microcode(void) { return NULL; }
 static inline void exit_amd_microcode(void) { }
@@ -64,6 +64,13 @@ static int sgx_encl_create(struct sgx_encl *encl, struct sgx_secs *secs)
 	struct file *backing;
 	long ret;
 
+	/*
+	 * ECREATE would detect this too, but checking here also ensures
+	 * that the 'encl_size' calculations below can never overflow.
+	 */
+	if (!is_power_of_2(secs->size))
+		return -EINVAL;
+
 	va_page = sgx_encl_grow(encl, true);
 	if (IS_ERR(va_page))
 		return PTR_ERR(va_page);
@@ -2,6 +2,7 @@
 /*
 * Architecture specific OF callbacks.
 */
+#include <linux/acpi.h>
 #include <linux/export.h>
 #include <linux/io.h>
 #include <linux/interrupt.h>
@@ -313,6 +314,6 @@ void __init x86_flattree_get_config(void)
 	if (initial_dtb)
 		early_memunmap(dt, map_len);
 #endif
-	if (of_have_populated_dt())
+	if (acpi_disabled && of_have_populated_dt())
 		x86_init.mpparse.parse_smp_cfg = x86_dtb_parse_smp_config;
 }
@@ -25,8 +25,10 @@
 #include <asm/posted_intr.h>
 #include <asm/irq_remapping.h>
 
+#if defined(CONFIG_X86_LOCAL_APIC) || defined(CONFIG_X86_THERMAL_VECTOR)
 #define CREATE_TRACE_POINTS
 #include <asm/trace/irq_vectors.h>
+#endif
 
 DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 EXPORT_PER_CPU_SYMBOL(irq_stat);
@@ -1763,7 +1763,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
 
 		entry->ecx = entry->edx = 0;
 		if (!enable_pmu || !kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2)) {
-			entry->eax = entry->ebx;
+			entry->eax = entry->ebx = 0;
 			break;
 		}
 
@@ -7460,7 +7460,7 @@ static bool kvm_nx_huge_page_recovery_worker(void *data)
 	return true;
 }
 
-static void kvm_mmu_start_lpage_recovery(struct once *once)
+static int kvm_mmu_start_lpage_recovery(struct once *once)
 {
 	struct kvm_arch *ka = container_of(once, struct kvm_arch, nx_once);
 	struct kvm *kvm = container_of(ka, struct kvm, arch);
@@ -7471,13 +7471,14 @@ static void kvm_mmu_start_lpage_recovery(struct once *once)
 				      kvm_nx_huge_page_recovery_worker_kill,
 				      kvm, "kvm-nx-lpage-recovery");
 
-	if (!nx_thread)
-		return;
+	if (IS_ERR(nx_thread))
+		return PTR_ERR(nx_thread);
 
 	vhost_task_start(nx_thread);
 
 	/* Make the task visible only once it is fully started. */
 	WRITE_ONCE(kvm->arch.nx_huge_page_recovery_thread, nx_thread);
+	return 0;
 }
 
 int kvm_mmu_post_init_vm(struct kvm *kvm)
@@ -7485,10 +7486,7 @@ int kvm_mmu_post_init_vm(struct kvm *kvm)
 	if (nx_hugepage_mitigation_hard_disabled)
 		return 0;
 
-	call_once(&kvm->arch.nx_once, kvm_mmu_start_lpage_recovery);
-	if (!kvm->arch.nx_huge_page_recovery_thread)
-		return -ENOMEM;
-	return 0;
+	return call_once(&kvm->arch.nx_once, kvm_mmu_start_lpage_recovery);
 }
 
 void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
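The hunk above relies on call_once() now propagating the init callback's return value. A single-threaded userspace model of that contract (a sketch of the semantics only, not the kernel's call_once() implementation):

    #include <stdio.h>

    struct once { int done; int ret; };

    /* run cb exactly once; later callers see the recorded result */
    static int call_once(struct once *o, int (*cb)(struct once *))
    {
            if (!o->done) {
                    o->ret = cb(o);
                    o->done = 1;
            }
            return o->ret;
    }

    static int start_recovery(struct once *o)
    {
            (void)o;
            return 0;       /* or a -errno, e.g. from a failed task create */
    }

    int main(void)
    {
            struct once nx_once = {0};

            printf("first:  %d\n", call_once(&nx_once, start_recovery));
            printf("second: %d\n", call_once(&nx_once, start_recovery));
            return 0;
    }

This is why kvm_mmu_post_init_vm() can collapse to a single return statement: the error path of the thread creation now flows out through call_once() itself.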
@@ -4590,6 +4590,8 @@ void sev_es_vcpu_reset(struct vcpu_svm *svm)
 
 void sev_es_prepare_switch_to_guest(struct vcpu_svm *svm, struct sev_es_save_area *hostsa)
 {
+	struct kvm *kvm = svm->vcpu.kvm;
+
 	/*
 	 * All host state for SEV-ES guests is categorized into three swap types
 	 * based on how it is handled by hardware during a world switch:
@@ -4613,14 +4615,22 @@ void sev_es_prepare_switch_to_guest(struct vcpu_svm *svm, struct sev_es_save_are
 
 	/*
 	 * If DebugSwap is enabled, debug registers are loaded but NOT saved by
-	 * the CPU (Type-B). If DebugSwap is disabled/unsupported, the CPU both
-	 * saves and loads debug registers (Type-A).
+	 * the CPU (Type-B). If DebugSwap is disabled/unsupported, the CPU does
+	 * not save or load debug registers. Sadly, KVM can't prevent SNP
+	 * guests from lying about DebugSwap on secondary vCPUs, i.e. the
+	 * SEV_FEATURES provided at "AP Create" isn't guaranteed to match what
+	 * the guest has actually enabled (or not!) in the VMSA.
+	 *
+	 * If DebugSwap is *possible*, save the masks so that they're restored
+	 * if the guest enables DebugSwap. But for the DRs themselves, do NOT
+	 * rely on the CPU to restore the host values; KVM will restore them as
+	 * needed in common code, via hw_breakpoint_restore(). Note, KVM does
+	 * NOT support virtualizing Breakpoint Extensions, i.e. the mask MSRs
+	 * don't need to be restored per se, KVM just needs to ensure they are
+	 * loaded with the correct values *if* the CPU writes the MSRs.
 	 */
-	if (sev_vcpu_has_debug_swap(svm)) {
-		hostsa->dr0 = native_get_debugreg(0);
-		hostsa->dr1 = native_get_debugreg(1);
-		hostsa->dr2 = native_get_debugreg(2);
-		hostsa->dr3 = native_get_debugreg(3);
+	if (sev_vcpu_has_debug_swap(svm) ||
+	    (sev_snp_guest(kvm) && cpu_feature_enabled(X86_FEATURE_DEBUG_SWAP))) {
 		hostsa->dr0_addr_mask = amd_get_dr_addr_mask(0);
 		hostsa->dr1_addr_mask = amd_get_dr_addr_mask(1);
 		hostsa->dr2_addr_mask = amd_get_dr_addr_mask(2);
@@ -3165,6 +3165,27 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 			kvm_pr_unimpl_wrmsr(vcpu, ecx, data);
 			break;
 		}
+
+		/*
+		 * AMD changed the architectural behavior of bits 5:2. On CPUs
+		 * without BusLockTrap, bits 5:2 control "external pins", but
+		 * on CPUs that support BusLockDetect, bit 2 enables BusLockTrap
+		 * and bits 5:3 are reserved-to-zero. Sadly, old KVM allowed
+		 * the guest to set bits 5:2 despite not actually virtualizing
+		 * Performance-Monitoring/Breakpoint external pins. Drop bits
+		 * 5:2 for backwards compatibility.
+		 */
+		data &= ~GENMASK(5, 2);
+
+		/*
+		 * Suppress BTF as KVM doesn't virtualize BTF, but there's no
+		 * way to communicate lack of support to the guest.
+		 */
+		if (data & DEBUGCTLMSR_BTF) {
+			kvm_pr_unimpl_wrmsr(vcpu, MSR_IA32_DEBUGCTLMSR, data);
+			data &= ~DEBUGCTLMSR_BTF;
+		}
+
 		if (data & DEBUGCTL_RESERVED_BITS)
 			return 1;
 
@@ -4189,6 +4210,18 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_in
 
 	guest_state_enter_irqoff();
 
+	/*
+	 * Set RFLAGS.IF prior to VMRUN, as the host's RFLAGS.IF at the time of
+	 * VMRUN controls whether or not physical IRQs are masked (KVM always
+	 * runs with V_INTR_MASKING_MASK). Toggle RFLAGS.IF here to avoid the
+	 * temptation to do STI+VMRUN+CLI, as AMD CPUs bleed the STI shadow
+	 * into guest state if delivery of an event during VMRUN triggers a
+	 * #VMEXIT, and the guest_state transitions already tell lockdep that
+	 * IRQs are being enabled/disabled. Note! GIF=0 for the entirety of
+	 * this path, so IRQs aren't actually unmasked while running host code.
+	 */
+	raw_local_irq_enable();
+
 	amd_clear_divider();
 
 	if (sev_es_guest(vcpu->kvm))
@@ -4197,6 +4230,8 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_in
 	else
 		__svm_vcpu_run(svm, spec_ctrl_intercepted);
 
+	raw_local_irq_disable();
+
 	guest_state_exit_irqoff();
 }
 
@@ -4253,6 +4288,16 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu,
 	clgi();
 	kvm_load_guest_xsave_state(vcpu);
 
+	/*
+	 * Hardware only context switches DEBUGCTL if LBR virtualization is
+	 * enabled. Manually load DEBUGCTL if necessary (and restore it after
+	 * VM-Exit), as running with the host's DEBUGCTL can negatively affect
+	 * guest state and can even be fatal, e.g. due to Bus Lock Detect.
+	 */
+	if (!(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK) &&
+	    vcpu->arch.host_debugctl != svm->vmcb->save.dbgctl)
+		update_debugctlmsr(svm->vmcb->save.dbgctl);
+
 	kvm_wait_lapic_expire(vcpu);
 
 	/*
@@ -4280,6 +4325,10 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu,
 	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
 		kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);
 
+	if (!(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK) &&
+	    vcpu->arch.host_debugctl != svm->vmcb->save.dbgctl)
+		update_debugctlmsr(vcpu->arch.host_debugctl);
+
 	kvm_load_host_xsave_state(vcpu);
 	stgi();
 
@@ -584,7 +584,7 @@ static inline bool is_vnmi_enabled(struct vcpu_svm *svm)
 /* svm.c */
 #define MSR_INVALID				0xffffffffU
 
-#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))
+#define DEBUGCTL_RESERVED_BITS (~DEBUGCTLMSR_LBR)
 
 extern bool dump_invalid_vmcb;
 
@@ -170,12 +170,8 @@ SYM_FUNC_START(__svm_vcpu_run)
 	mov VCPU_RDI(%_ASM_DI), %_ASM_DI
 
 	/* Enter guest mode */
-	sti
-
 3:	vmrun %_ASM_AX
 4:
-	cli
-
 	/* Pop @svm to RAX while it's the only available register. */
 	pop %_ASM_AX
 
@@ -340,12 +336,8 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
 	mov KVM_VMCB_pa(%rax), %rax
 
 	/* Enter guest mode */
-	sti
-
 1:	vmrun %rax
-
-2:	cli
-
+2:
 	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
 	FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT
 
@@ -5084,6 +5084,17 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
 
 		load_vmcs12_host_state(vcpu, vmcs12);
 
+		/*
+		 * Process events if an injectable IRQ or NMI is pending, even
+		 * if the event is blocked (RFLAGS.IF is cleared on VM-Exit).
+		 * If an event became pending while L2 was active, KVM needs to
+		 * either inject the event or request an IRQ/NMI window. SMIs
+		 * don't need to be processed as SMM is mutually exclusive with
+		 * non-root mode. INIT/SIPI don't need to be checked as INIT
+		 * is blocked post-VMXON, and SIPIs are ignored.
+		 */
+		if (kvm_cpu_has_injectable_intr(vcpu) || vcpu->arch.nmi_pending)
+			kvm_make_request(KVM_REQ_EVENT, vcpu);
 		return;
 	}
 
@@ -1514,16 +1514,12 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
 */
 void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
-
 	if (vcpu->scheduled_out && !kvm_pause_in_guest(vcpu->kvm))
 		shrink_ple_window(vcpu);
 
 	vmx_vcpu_load_vmcs(vcpu, cpu, NULL);
 
 	vmx_vcpu_pi_load(vcpu, cpu);
-
-	vmx->host_debugctlmsr = get_debugctlmsr();
 }
 
 void vmx_vcpu_put(struct kvm_vcpu *vcpu)
@@ -7458,8 +7454,8 @@ fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
 	}
 
 	/* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
-	if (vmx->host_debugctlmsr)
-		update_debugctlmsr(vmx->host_debugctlmsr);
+	if (vcpu->arch.host_debugctl)
+		update_debugctlmsr(vcpu->arch.host_debugctl);
 
 #ifndef CONFIG_X86_64
 	/*
@@ -340,8 +340,6 @@ struct vcpu_vmx {
 	/* apic deadline value in host tsc */
 	u64 hv_deadline_tsc;
 
-	unsigned long host_debugctlmsr;
-
 	/*
 	 * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
 	 * msr_ia32_feature_control. FEAT_CTL_LOCKED is always included
@@ -10968,6 +10968,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		set_debugreg(0, 7);
 	}
 
+	vcpu->arch.host_debugctl = get_debugctlmsr();
+
 	guest_timing_enter_irqoff();
 
 	for (;;) {
@@ -12877,11 +12879,11 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 		mutex_unlock(&kvm->slots_lock);
 	}
 	kvm_unload_vcpu_mmus(kvm);
+	kvm_destroy_vcpus(kvm);
 	kvm_x86_call(vm_destroy)(kvm);
 	kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1));
 	kvm_pic_destroy(kvm);
 	kvm_ioapic_destroy(kvm);
-	kvm_destroy_vcpus(kvm);
 	kvfree(rcu_dereference_check(kvm->arch.apic_map, 1));
 	kfree(srcu_dereference_check(kvm->arch.pmu_event_filter, &kvm->srcu, 1));
 	kvm_mmu_uninit_vm(kvm);
@@ -77,7 +77,7 @@ struct bio_slab {
 	struct kmem_cache *slab;
 	unsigned int slab_ref;
 	unsigned int slab_size;
-	char name[8];
+	char name[12];
 };
 static DEFINE_MUTEX(bio_slab_lock);
 static DEFINE_XARRAY(bio_slabs);
@@ -329,7 +329,7 @@ int bio_split_rw_at(struct bio *bio, const struct queue_limits *lim,
 
 		if (nsegs < lim->max_segments &&
 		    bytes + bv.bv_len <= max_bytes &&
-		    bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
+		    bv.bv_offset + bv.bv_len <= lim->min_segment_size) {
 			nsegs++;
 			bytes += bv.bv_len;
 		} else {
@@ -246,6 +246,7 @@ int blk_validate_limits(struct queue_limits *lim)
 {
 	unsigned int max_hw_sectors;
 	unsigned int logical_block_sectors;
+	unsigned long seg_size;
 	int err;
 
 	/*
@@ -303,7 +304,7 @@ int blk_validate_limits(struct queue_limits *lim)
 	max_hw_sectors = min_not_zero(lim->max_hw_sectors,
 				      lim->max_dev_sectors);
 	if (lim->max_user_sectors) {
-		if (lim->max_user_sectors < PAGE_SIZE / SECTOR_SIZE)
+		if (lim->max_user_sectors < BLK_MIN_SEGMENT_SIZE / SECTOR_SIZE)
 			return -EINVAL;
 		lim->max_sectors = min(max_hw_sectors, lim->max_user_sectors);
 	} else if (lim->io_opt > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
@@ -341,7 +342,7 @@ int blk_validate_limits(struct queue_limits *lim)
 	 */
 	if (!lim->seg_boundary_mask)
 		lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
-	if (WARN_ON_ONCE(lim->seg_boundary_mask < PAGE_SIZE - 1))
+	if (WARN_ON_ONCE(lim->seg_boundary_mask < BLK_MIN_SEGMENT_SIZE - 1))
 		return -EINVAL;
 
 	/*
@@ -362,10 +363,17 @@ int blk_validate_limits(struct queue_limits *lim)
 	 */
 	if (!lim->max_segment_size)
 		lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
-	if (WARN_ON_ONCE(lim->max_segment_size < PAGE_SIZE))
+	if (WARN_ON_ONCE(lim->max_segment_size < BLK_MIN_SEGMENT_SIZE))
 		return -EINVAL;
 	}
 
+	/* setup min segment size for building new segment in fast path */
+	if (lim->seg_boundary_mask > lim->max_segment_size - 1)
+		seg_size = lim->max_segment_size;
+	else
+		seg_size = lim->seg_boundary_mask + 1;
+	lim->min_segment_size = min_t(unsigned int, seg_size, PAGE_SIZE);
+
 	/*
 	 * We require drivers to at least do logical block aligned I/O, but
 	 * historically could not check for that due to the separate calls
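The new min_segment_size is the largest length guaranteed not to violate either limit: the smaller of max_segment_size and the segment-boundary window, clamped to PAGE_SIZE. A standalone recomputation of that clamp with sample numbers, mirroring the kernel logic above under the assumption that PAGE_SIZE is 4096:

#include <stdio.h>

#define PAGE_SIZE 4096u

static unsigned int min_segment_size(unsigned long seg_boundary_mask,
				     unsigned int max_segment_size)
{
	unsigned long seg_size;

	/* Same decision as blk_validate_limits() above. */
	if (seg_boundary_mask > max_segment_size - 1)
		seg_size = max_segment_size;
	else
		seg_size = seg_boundary_mask + 1;
	return seg_size < PAGE_SIZE ? seg_size : PAGE_SIZE;
}

int main(void)
{
	/* 64K boundary window, 16K max segment -> 4096 (PAGE_SIZE clamp) */
	printf("%u\n", min_segment_size(0xffff, 16384));
	/* 2K boundary window, 64K max segment -> 2048 */
	printf("%u\n", min_segment_size(0x7ff, 65536));
	return 0;
}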
@@ -410,13 +410,14 @@ static bool disk_insert_zone_wplug(struct gendisk *disk,
 		}
 	}
 	hlist_add_head_rcu(&zwplug->node, &disk->zone_wplugs_hash[idx]);
+	atomic_inc(&disk->nr_zone_wplugs);
 	spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
 
 	return true;
 }
 
-static struct blk_zone_wplug *disk_get_zone_wplug(struct gendisk *disk,
-						  sector_t sector)
+static struct blk_zone_wplug *disk_get_hashed_zone_wplug(struct gendisk *disk,
+							 sector_t sector)
 {
 	unsigned int zno = disk_zone_no(disk, sector);
 	unsigned int idx = hash_32(zno, disk->zone_wplugs_hash_bits);
@@ -437,6 +438,15 @@ static struct blk_zone_wplug *disk_get_zone_wplug(struct gendisk *disk,
 	return NULL;
 }
 
+static inline struct blk_zone_wplug *disk_get_zone_wplug(struct gendisk *disk,
+							 sector_t sector)
+{
+	if (!atomic_read(&disk->nr_zone_wplugs))
+		return NULL;
+
+	return disk_get_hashed_zone_wplug(disk, sector);
+}
+
 static void disk_free_zone_wplug_rcu(struct rcu_head *rcu_head)
 {
 	struct blk_zone_wplug *zwplug =
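The new disk_get_zone_wplug() wrapper is a classic "cheap global check before expensive lookup" pattern: one atomic read skips the RCU hash walk entirely when no plugs exist. A hedged standalone sketch of the pattern using C11 atomics; hash_lookup() is a stand-in for the real RCU hash walk:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int nr_entries;	/* incremented on insert, decremented on remove */

static void *hash_lookup(int key)
{
	/* Stand-in for the real (more expensive) hash-table walk. */
	printf("walking hash for key %d\n", key);
	return NULL;
}

static void *get_entry(int key)
{
	/* Fast path: empty table means no lookup and no locking needed. */
	if (!atomic_load(&nr_entries))
		return NULL;
	return hash_lookup(key);
}

int main(void)
{
	get_entry(7);			/* prints nothing: table is empty */
	atomic_fetch_add(&nr_entries, 1);
	get_entry(7);			/* now falls through to the walk */
	return 0;
}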
@@ -503,6 +513,7 @@ static void disk_remove_zone_wplug(struct gendisk *disk,
 	zwplug->flags |= BLK_ZONE_WPLUG_UNHASHED;
 	spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
 	hlist_del_init_rcu(&zwplug->node);
+	atomic_dec(&disk->nr_zone_wplugs);
 	spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
 	disk_put_zone_wplug(zwplug);
 }
@@ -593,6 +604,11 @@ static void disk_zone_wplug_abort(struct blk_zone_wplug *zwplug)
 {
 	struct bio *bio;
 
+	if (bio_list_empty(&zwplug->bio_list))
+		return;
+
+	pr_warn_ratelimited("%s: zone %u: Aborting plugged BIOs\n",
+			    zwplug->disk->disk_name, zwplug->zone_no);
 	while ((bio = bio_list_pop(&zwplug->bio_list)))
 		blk_zone_wplug_bio_io_error(zwplug, bio);
 }
@@ -1040,6 +1056,47 @@ plug:
 	return true;
 }
 
+static void blk_zone_wplug_handle_native_zone_append(struct bio *bio)
+{
+	struct gendisk *disk = bio->bi_bdev->bd_disk;
+	struct blk_zone_wplug *zwplug;
+	unsigned long flags;
+
+	/*
+	 * We have native support for zone append operations, so we are not
+	 * going to handle @bio through plugging. However, we may already have a
+	 * zone write plug for the target zone if that zone was previously
+	 * partially written using regular writes. In such case, we risk leaving
+	 * the plug in the disk hash table if the zone is fully written using
+	 * zone append operations. Avoid this by removing the zone write plug.
+	 */
+	zwplug = disk_get_zone_wplug(disk, bio->bi_iter.bi_sector);
+	if (likely(!zwplug))
+		return;
+
+	spin_lock_irqsave(&zwplug->lock, flags);
+
+	/*
+	 * We are about to remove the zone write plug. But if the user
+	 * (mistakenly) has issued regular writes together with native zone
+	 * append, we must abort the writes as otherwise the plugged BIOs would
+	 * not be executed by the plug BIO work as disk_get_zone_wplug() will
+	 * return NULL after the plug is removed. Aborting the plugged write
+	 * BIOs is consistent with the fact that these writes will most likely
+	 * fail anyway as there are no ordering guarantees between zone append
+	 * operations and regular write operations.
+	 */
+	if (!bio_list_empty(&zwplug->bio_list)) {
+		pr_warn_ratelimited("%s: zone %u: Invalid mix of zone append and regular writes\n",
+				    disk->disk_name, zwplug->zone_no);
+		disk_zone_wplug_abort(zwplug);
+	}
+	disk_remove_zone_wplug(disk, zwplug);
+	spin_unlock_irqrestore(&zwplug->lock, flags);
+
+	disk_put_zone_wplug(zwplug);
+}
+
 /**
  * blk_zone_plug_bio - Handle a zone write BIO with zone write plugging
  * @bio: The BIO being submitted
@@ -1096,8 +1153,10 @@ bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
 	 */
 	switch (bio_op(bio)) {
 	case REQ_OP_ZONE_APPEND:
-		if (!bdev_emulates_zone_append(bdev))
+		if (!bdev_emulates_zone_append(bdev)) {
+			blk_zone_wplug_handle_native_zone_append(bio);
 			return false;
+		}
 		fallthrough;
 	case REQ_OP_WRITE:
 	case REQ_OP_WRITE_ZEROES:
@@ -1284,6 +1343,7 @@ static int disk_alloc_zone_resources(struct gendisk *disk,
 {
 	unsigned int i;
 
+	atomic_set(&disk->nr_zone_wplugs, 0);
 	disk->zone_wplugs_hash_bits =
 		min(ilog2(pool_size) + 1, BLK_ZONE_WPLUG_MAX_HASH_BITS);
 
@@ -1338,6 +1398,7 @@ static void disk_destroy_zone_wplugs_hash_table(struct gendisk *disk)
 		}
 	}
 
+	WARN_ON_ONCE(atomic_read(&disk->nr_zone_wplugs));
 	kfree(disk->zone_wplugs_hash);
 	disk->zone_wplugs_hash = NULL;
 	disk->zone_wplugs_hash_bits = 0;
@@ -1550,11 +1611,12 @@ static int blk_revalidate_seq_zone(struct blk_zone *zone, unsigned int idx,
 	}
 
 	/*
-	 * We need to track the write pointer of all zones that are not
-	 * empty nor full. So make sure we have a zone write plug for
-	 * such zone if the device has a zone write plug hash table.
+	 * If the device needs zone append emulation, we need to track the
+	 * write pointer of all zones that are not empty nor full. So make sure
+	 * we have a zone write plug for such zone if the device has a zone
+	 * write plug hash table.
 	 */
-	if (!disk->zone_wplugs_hash)
+	if (!queue_emulates_zone_append(disk->queue) || !disk->zone_wplugs_hash)
 		return 0;
 
 	disk_zone_wplug_sync_wp_offset(disk, zone);
@@ -14,6 +14,7 @@
 struct elevator_type;
 
 #define BLK_DEV_MAX_SECTORS	(LLONG_MAX >> 9)
+#define BLK_MIN_SEGMENT_SIZE	4096
 
 /* Max future timer expiry for timeouts */
 #define BLK_MAX_TIMEOUT		(5 * HZ)
@@ -358,8 +359,12 @@ struct bio *bio_split_zone_append(struct bio *bio,
 static inline bool bio_may_need_split(struct bio *bio,
 				      const struct queue_limits *lim)
 {
-	return lim->chunk_sectors || bio->bi_vcnt != 1 ||
-		bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
+	if (lim->chunk_sectors)
+		return true;
+	if (bio->bi_vcnt != 1)
+		return true;
+	return bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset >
+		lim->min_segment_size;
 }
 
 /**
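Rewritten as early returns, bio_may_need_split() now compares the single bvec against min_segment_size instead of PAGE_SIZE, so the no-split fast path stays correct when a device's maximum segment is smaller than a page. A worked example of the final check, with illustrative values:

#include <stdbool.h>
#include <stdio.h>

/* One bvec fits in a single segment iff len + offset stays within the
 * smallest segment length the validated limits guarantee (see above). */
static bool may_need_split(unsigned int bv_len, unsigned int bv_offset,
			   unsigned int min_segment_size)
{
	return bv_len + bv_offset > min_segment_size;
}

int main(void)
{
	/* 2K segment limit: a 4K bvec must be split even though it
	 * would have passed the old PAGE_SIZE (4096) check. */
	printf("%d\n", may_need_split(4096, 0, 2048));	 /* 1 */
	printf("%d\n", may_need_split(1024, 512, 2048)); /* 0 */
	return 0;
}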
@@ -682,7 +682,7 @@ static void utf16_le_to_7bit(const __le16 *in, unsigned int size, u8 *out)
 	out[size] = 0;
 
 	while (i < size) {
-		u8 c = le16_to_cpu(in[i]) & 0xff;
+		u8 c = le16_to_cpu(in[i]) & 0x7f;
 
 		if (c && !isprint(c))
 			c = '!';
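Masking with 0xff keeps bit 7, so UTF-16 code points such as U+00C9 ('É') survive as 8-bit Latin-1 bytes in a buffer that callers treat as 7-bit ASCII; masking with 0x7f guarantees every output byte is ASCII. A small userspace demonstration of the difference, assuming the same mask-then-check flow as the function above:

#include <stdio.h>

int main(void)
{
	unsigned short in = 0x00C9;	/* U+00C9, 'É' */

	unsigned char old_c = in & 0xff;	/* 0xC9: not 7-bit clean */
	unsigned char new_c = in & 0x7f;	/* 0x49: plain ASCII */

	printf("old mask: %#x (bit 7 %s)\n", old_c,
	       old_c & 0x80 ? "set" : "clear");
	printf("new mask: %#x (bit 7 %s)\n", new_c,
	       new_c & 0x80 ? "set" : "clear");
	return 0;
}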
@@ -21,9 +21,15 @@ struct platform_profile_handler {
 	struct device dev;
 	int minor;
 	unsigned long choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
+	unsigned long hidden_choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
 	const struct platform_profile_ops *ops;
 };
 
+struct aggregate_choices_data {
+	unsigned long aggregate[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
+	int count;
+};
+
 static const char * const profile_names[] = {
 	[PLATFORM_PROFILE_LOW_POWER] = "low-power",
 	[PLATFORM_PROFILE_COOL] = "cool",
@@ -73,7 +79,7 @@ static int _store_class_profile(struct device *dev, void *data)
 
 	lockdep_assert_held(&profile_lock);
 	handler = to_pprof_handler(dev);
-	if (!test_bit(*bit, handler->choices))
+	if (!test_bit(*bit, handler->choices) && !test_bit(*bit, handler->hidden_choices))
 		return -EOPNOTSUPP;
 
 	return handler->ops->profile_set(dev, *bit);
@@ -239,21 +245,44 @@ static const struct class platform_profile_class = {
 /**
  * _aggregate_choices - Aggregate the available profile choices
  * @dev: The device
- * @data: The available profile choices
+ * @arg: struct aggregate_choices_data
  *
  * Return: 0 on success, -errno on failure
  */
-static int _aggregate_choices(struct device *dev, void *data)
+static int _aggregate_choices(struct device *dev, void *arg)
 {
+	unsigned long tmp[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
+	struct aggregate_choices_data *data = arg;
 	struct platform_profile_handler *handler;
-	unsigned long *aggregate = data;
 
 	lockdep_assert_held(&profile_lock);
 	handler = to_pprof_handler(dev);
-	if (test_bit(PLATFORM_PROFILE_LAST, aggregate))
-		bitmap_copy(aggregate, handler->choices, PLATFORM_PROFILE_LAST);
+	bitmap_or(tmp, handler->choices, handler->hidden_choices, PLATFORM_PROFILE_LAST);
+	if (test_bit(PLATFORM_PROFILE_LAST, data->aggregate))
+		bitmap_copy(data->aggregate, tmp, PLATFORM_PROFILE_LAST);
 	else
-		bitmap_and(aggregate, handler->choices, aggregate, PLATFORM_PROFILE_LAST);
+		bitmap_and(data->aggregate, tmp, data->aggregate, PLATFORM_PROFILE_LAST);
+	data->count++;
 
 	return 0;
 }
 
+/**
+ * _remove_hidden_choices - Remove hidden choices from aggregate data
+ * @dev: The device
+ * @arg: struct aggregate_choices_data
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int _remove_hidden_choices(struct device *dev, void *arg)
+{
+	struct aggregate_choices_data *data = arg;
+	struct platform_profile_handler *handler;
+
+	lockdep_assert_held(&profile_lock);
+	handler = to_pprof_handler(dev);
+	bitmap_andnot(data->aggregate, handler->choices,
+		      handler->hidden_choices, PLATFORM_PROFILE_LAST);
+
+	return 0;
+}
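The aggregation above uses PLATFORM_PROFILE_LAST as a sentinel bit meaning "no handler visited yet": the first handler's choices replace the aggregate wholesale (dropping the sentinel), and every later handler intersects into it, leaving only the profiles all handlers support. A hedged standalone re-creation with a plain unsigned mask instead of the kernel bitmap API:

#include <stdio.h>

#define SENTINEL (1u << 31)	/* plays the role of PLATFORM_PROFILE_LAST */

/* Fold one handler's choice mask into the running aggregate. */
static void aggregate(unsigned *agg, unsigned choices)
{
	if (*agg & SENTINEL)
		*agg = choices;		/* first handler: copy, drop sentinel */
	else
		*agg &= choices;	/* later handlers: intersect */
}

int main(void)
{
	unsigned agg = SENTINEL;

	aggregate(&agg, 0xb);	/* handler A offers profiles 0, 1 and 3 */
	aggregate(&agg, 0x3);	/* handler B offers profiles 0 and 1 */
	printf("common choices: %#x\n", agg);	/* 0x3 */
	return 0;
}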
@@ -270,22 +299,31 @@ static ssize_t platform_profile_choices_show(struct device *dev,
 					     struct device_attribute *attr,
 					     char *buf)
 {
-	unsigned long aggregate[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
+	struct aggregate_choices_data data = {
+		.aggregate = { [0 ... BITS_TO_LONGS(PLATFORM_PROFILE_LAST) - 1] = ~0UL },
+		.count = 0,
+	};
 	int err;
 
-	set_bit(PLATFORM_PROFILE_LAST, aggregate);
+	set_bit(PLATFORM_PROFILE_LAST, data.aggregate);
 	scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
 		err = class_for_each_device(&platform_profile_class, NULL,
-					    aggregate, _aggregate_choices);
+					    &data, _aggregate_choices);
 		if (err)
 			return err;
+		if (data.count == 1) {
+			err = class_for_each_device(&platform_profile_class, NULL,
+						    &data, _remove_hidden_choices);
+			if (err)
+				return err;
+		}
 	}
 
 	/* no profile handler registered any more */
-	if (bitmap_empty(aggregate, PLATFORM_PROFILE_LAST))
+	if (bitmap_empty(data.aggregate, PLATFORM_PROFILE_LAST))
 		return -EINVAL;
 
-	return _commmon_choices_show(aggregate, buf);
+	return _commmon_choices_show(data.aggregate, buf);
 }
 
 /**
@@ -373,7 +411,10 @@ static ssize_t platform_profile_store(struct device *dev,
 				      struct device_attribute *attr,
 				      const char *buf, size_t count)
 {
-	unsigned long choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
+	struct aggregate_choices_data data = {
+		.aggregate = { [0 ... BITS_TO_LONGS(PLATFORM_PROFILE_LAST) - 1] = ~0UL },
+		.count = 0,
+	};
 	int ret;
 	int i;
 
@@ -381,13 +422,13 @@ static ssize_t platform_profile_store(struct device *dev,
 	i = sysfs_match_string(profile_names, buf);
 	if (i < 0 || i == PLATFORM_PROFILE_CUSTOM)
 		return -EINVAL;
-	set_bit(PLATFORM_PROFILE_LAST, choices);
+	set_bit(PLATFORM_PROFILE_LAST, data.aggregate);
 	scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
 		ret = class_for_each_device(&platform_profile_class, NULL,
-					    choices, _aggregate_choices);
+					    &data, _aggregate_choices);
 		if (ret)
 			return ret;
-		if (!test_bit(i, choices))
+		if (!test_bit(i, data.aggregate))
 			return -EOPNOTSUPP;
 
 		ret = class_for_each_device(&platform_profile_class, NULL, &i,
@@ -453,12 +494,15 @@ EXPORT_SYMBOL_GPL(platform_profile_notify);
 */
 int platform_profile_cycle(void)
 {
+	struct aggregate_choices_data data = {
+		.aggregate = { [0 ... BITS_TO_LONGS(PLATFORM_PROFILE_LAST) - 1] = ~0UL },
+		.count = 0,
+	};
 	enum platform_profile_option next = PLATFORM_PROFILE_LAST;
 	enum platform_profile_option profile = PLATFORM_PROFILE_LAST;
-	unsigned long choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
 	int err;
 
-	set_bit(PLATFORM_PROFILE_LAST, choices);
+	set_bit(PLATFORM_PROFILE_LAST, data.aggregate);
 	scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
 		err = class_for_each_device(&platform_profile_class, NULL,
 					    &profile, _aggregate_profiles);
@@ -470,14 +514,14 @@ int platform_profile_cycle(void)
 			return -EINVAL;
 
 		err = class_for_each_device(&platform_profile_class, NULL,
-					    choices, _aggregate_choices);
+					    &data, _aggregate_choices);
 		if (err)
 			return err;
 
 		/* never iterate into a custom if all drivers supported it */
-		clear_bit(PLATFORM_PROFILE_CUSTOM, choices);
+		clear_bit(PLATFORM_PROFILE_CUSTOM, data.aggregate);
 
-		next = find_next_bit_wrap(choices,
+		next = find_next_bit_wrap(data.aggregate,
 					  PLATFORM_PROFILE_LAST,
 					  profile + 1);
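platform_profile_cycle() picks the next common profile by searching the aggregate bitmap circularly from the bit after the current profile, with "custom" masked out. A hedged re-creation of the wrap-around search over a plain mask; find_next_wrap() here only mimics find_next_bit_wrap() semantics for a single word:

#include <stdio.h>

/* Find the next set bit at or after 'start', wrapping to bit 0;
 * returns 'nbits' if the mask is empty (mirrors find_next_bit_wrap()). */
static unsigned find_next_wrap(unsigned mask, unsigned nbits, unsigned start)
{
	for (unsigned i = 0; i < nbits; i++) {
		unsigned bit = (start + i) % nbits;
		if (mask & (1u << bit))
			return bit;
	}
	return nbits;
}

int main(void)
{
	unsigned choices = 0x0b;	/* profiles 0, 1 and 3 are common */
	unsigned current = 3;

	/* Searching from current+1 wraps past the top back to bit 0. */
	printf("next profile: %u\n",
	       find_next_wrap(choices, 8, (current + 1) % 8));	/* 0 */
	return 0;
}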
@@ -532,6 +576,14 @@ struct device *platform_profile_register(struct device *dev, const char *name,
 		return ERR_PTR(-EINVAL);
 	}
 
+	if (ops->hidden_choices) {
+		err = ops->hidden_choices(drvdata, pprof->hidden_choices);
+		if (err) {
+			dev_err(dev, "platform_profile hidden_choices failed\n");
+			return ERR_PTR(err);
+		}
+	}
+
 	guard(mutex)(&profile_lock);
 
 	/* create class interface for individual handler */
@@ -274,6 +274,7 @@ static void binderfs_evict_inode(struct inode *inode)
 	mutex_unlock(&binderfs_minors_mutex);
 
 	if (refcount_dec_and_test(&device->ref)) {
+		hlist_del_init(&device->hlist);
 		kfree(device->context.name);
 		kfree(device);
 	}
@@ -386,8 +386,12 @@ struct ahci_host_priv {
 static inline bool ahci_ignore_port(struct ahci_host_priv *hpriv,
 				    unsigned int portid)
 {
-	return portid >= hpriv->nports ||
-	       !(hpriv->mask_port_map & (1 << portid));
+	if (portid >= hpriv->nports)
+		return true;
+	/* mask_port_map not set means that all ports are available */
+	if (!hpriv->mask_port_map)
+		return false;
+	return !(hpriv->mask_port_map & (1 << portid));
 }
 
 extern int ahci_ignore_sss;
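The rewritten ahci_ignore_port() makes the zero-mask case explicit: an unset mask_port_map means "no masking", not "mask everything", which the old single expression got wrong. A standalone check of the three cases, with illustrative values:

#include <stdbool.h>
#include <stdio.h>

static bool ignore_port(unsigned nports, unsigned long mask_port_map,
			unsigned portid)
{
	if (portid >= nports)
		return true;		/* out of range: always ignore */
	if (!mask_port_map)
		return false;		/* no mask set: all ports available */
	return !(mask_port_map & (1UL << portid));
}

int main(void)
{
	/* 4 ports, mask selects ports 0 and 2 */
	printf("%d\n", ignore_port(4, 0x5, 1));	/* 1: masked out */
	printf("%d\n", ignore_port(4, 0x0, 1));	/* 0: no mask, keep */
	printf("%d\n", ignore_port(4, 0x5, 7));	/* 1: out of range */
	return 0;
}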
@@ -541,6 +541,7 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
 		hpriv->saved_port_map = port_map;
 	}
 
+	/* mask_port_map not set means that all ports are available */
 	if (hpriv->mask_port_map) {
 		dev_warn(dev, "masking port_map 0x%lx -> 0x%lx\n",
 			 port_map,
@@ -4143,10 +4143,6 @@ static const struct ata_dev_quirks_entry __ata_dev_quirks[] = {
 	{ "Samsung SSD 860*",		NULL,	ATA_QUIRK_NO_NCQ_TRIM |
 						ATA_QUIRK_ZERO_AFTER_TRIM |
 						ATA_QUIRK_NO_NCQ_ON_ATI },
-	{ "Samsung SSD 870 QVO*",	NULL,	ATA_QUIRK_NO_NCQ_TRIM |
-						ATA_QUIRK_ZERO_AFTER_TRIM |
-						ATA_QUIRK_NO_NCQ_ON_ATI |
-						ATA_QUIRK_NOLPM },
 	{ "Samsung SSD 870*",		NULL,	ATA_QUIRK_NO_NCQ_TRIM |
 						ATA_QUIRK_ZERO_AFTER_TRIM |
 						ATA_QUIRK_NO_NCQ_ON_ATI },
@@ -2079,6 +2079,7 @@ static bool __fw_devlink_relax_cycles(struct fwnode_handle *con_handle,
 out:
 	sup_handle->flags &= ~FWNODE_FLAG_VISITED;
 	put_device(sup_dev);
+	put_device(con_dev);
 	put_device(par_dev);
 	return ret;
 }
@@ -2715,9 +2715,12 @@ static int ublk_ctrl_set_params(struct ublk_device *ub,
 	if (ph.len > sizeof(struct ublk_params))
 		ph.len = sizeof(struct ublk_params);
 
-	/* parameters can only be changed when device isn't live */
 	mutex_lock(&ub->mutex);
-	if (ub->dev_info.state == UBLK_S_DEV_LIVE) {
+	if (test_bit(UB_STATE_USED, &ub->state)) {
+		/*
+		 * Parameters can only be changed when device hasn't
+		 * been started yet
+		 */
 		ret = -EACCES;
 	} else if (copy_from_user(&ub->params, argp, ph.len)) {
 		ret = -EFAULT;