
Merge tag 'asoc-fix-v6.14-rc6' of https://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into for-linus

ASoC: Fixes for v6.14

The bulk of this is driver specific fixes, mostly unremarkable.  There's
also one core fix from Charles, fixing up confusion around the limiting
of maximum control values.
Commit de69d56daa, merged by Takashi Iwai, 2025-03-13 07:33:48 +01:00.
1021 changed files with 11852 additions and 6422 deletions; top-level paths
touched: .mailmap, CREDITS, Documentation, MAINTAINERS, Makefile, arch.

@ -226,6 +226,7 @@ Fangrui Song <i@maskray.me> <maskray@google.com>
Felipe W Damasio <felipewd@terra.com.br>
Felix Kuhling <fxkuehl@gmx.de>
Felix Moeller <felix@derklecks.de>
Feng Tang <feng.79.tang@gmail.com> <feng.tang@intel.com>
Fenglin Wu <quic_fenglinw@quicinc.com> <fenglinw@codeaurora.org>
Filipe Lautert <filipe@icewall.org>
Finn Thain <fthain@linux-m68k.org> <fthain@telegraphics.com.au>
@ -317,6 +318,8 @@ Jayachandran C <c.jayachandran@gmail.com> <jnair@caviumnetworks.com>
Jean Tourrilhes <jt@hpl.hp.com>
Jeevan Shriram <quic_jshriram@quicinc.com> <jshriram@codeaurora.org>
Jeff Garzik <jgarzik@pretzel.yyz.us>
Jeff Johnson <jeff.johnson@oss.qualcomm.com> <jjohnson@codeaurora.org>
Jeff Johnson <jeff.johnson@oss.qualcomm.com> <quic_jjohnson@quicinc.com>
Jeff Layton <jlayton@kernel.org> <jlayton@poochiereds.net>
Jeff Layton <jlayton@kernel.org> <jlayton@primarydata.com>
Jeff Layton <jlayton@kernel.org> <jlayton@redhat.com>
@ -376,6 +379,7 @@ Juha Yrjola <juha.yrjola@solidboot.com>
Julien Thierry <julien.thierry.kdev@gmail.com> <julien.thierry@arm.com>
Iskren Chernev <me@iskren.info> <iskren.chernev@gmail.com>
Kalle Valo <kvalo@kernel.org> <kvalo@codeaurora.org>
Kalle Valo <kvalo@kernel.org> <quic_kvalo@quicinc.com>
Kalyan Thota <quic_kalyant@quicinc.com> <kalyan_t@codeaurora.org>
Karthikeyan Periyasamy <quic_periyasa@quicinc.com> <periyasa@codeaurora.org>
Kathiravan T <quic_kathirav@quicinc.com> <kathirav@codeaurora.org>
@ -518,6 +522,7 @@ Nadav Amit <nadav.amit@gmail.com> <namit@cs.technion.ac.il>
Nadia Yvette Chambers <nyc@holomorphy.com> William Lee Irwin III <wli@holomorphy.com>
Naoya Horiguchi <nao.horiguchi@gmail.com> <n-horiguchi@ah.jp.nec.com>
Naoya Horiguchi <nao.horiguchi@gmail.com> <naoya.horiguchi@nec.com>
Natalie Vock <natalie.vock@gmx.de> <friedrich.vock@gmx.de>
Nathan Chancellor <nathan@kernel.org> <natechancellor@gmail.com>
Naveen N Rao <naveen@kernel.org> <naveen.n.rao@linux.ibm.com>
Naveen N Rao <naveen@kernel.org> <naveen.n.rao@linux.vnet.ibm.com>
@ -530,6 +535,7 @@ Nicholas Piggin <npiggin@gmail.com> <npiggin@kernel.dk>
Nicholas Piggin <npiggin@gmail.com> <npiggin@suse.de>
Nicholas Piggin <npiggin@gmail.com> <nickpiggin@yahoo.com.au>
Nicholas Piggin <npiggin@gmail.com> <piggin@cyberone.com.au>
Nick Desaulniers <nick.desaulniers+lkml@gmail.com> <ndesaulniers@google.com>
Nicolas Ferre <nicolas.ferre@microchip.com> <nicolas.ferre@atmel.com>
Nicolas Pitre <nico@fluxnic.net> <nicolas.pitre@linaro.org>
Nicolas Pitre <nico@fluxnic.net> <nico@linaro.org>
@ -608,6 +614,8 @@ Richard Leitner <richard.leitner@linux.dev> <me@g0hl1n.net>
Richard Leitner <richard.leitner@linux.dev> <richard.leitner@skidata.com>
Robert Foss <rfoss@kernel.org> <robert.foss@linaro.org>
Rocky Liao <quic_rjliao@quicinc.com> <rjliao@codeaurora.org>
Rodrigo Siqueira <siqueira@igalia.com> <rodrigosiqueiramelo@gmail.com>
Rodrigo Siqueira <siqueira@igalia.com> <Rodrigo.Siqueira@amd.com>
Roman Gushchin <roman.gushchin@linux.dev> <guro@fb.com>
Roman Gushchin <roman.gushchin@linux.dev> <guroan@gmail.com>
Roman Gushchin <roman.gushchin@linux.dev> <klamm@yandex-team.ru>

@ -2515,11 +2515,9 @@ D: SLS distribution
D: Initial implementation of VC's, pty's and select()
N: Pavel Machek
E: pavel@ucw.cz
E: pavel@kernel.org
P: 4096R/92DFCE96 4FA7 9EEF FCD4 C44F C585 B8C7 C060 2241 92DF CE96
D: Softcursor for vga, hypertech cdrom support, vcsa bugfix, nbd,
D: sun4/330 port, capabilities for elf, speedup for rm on ext2, USB,
D: work on suspend-to-ram/disk, killing duplicates from ioctl32,
D: NBD, Sun4/330 port, USB, work on suspend-to-ram/disk,
D: Altera SoCFPGA and Nokia N900 support.
S: Czech Republic

@ -37,7 +37,7 @@ intended to be exhaustive.
shadow stacks rather than GCS.
* Support for GCS is reported to userspace via HWCAP_GCS in the aux vector
AT_HWCAP2 entry.
AT_HWCAP entry.
* GCS is enabled per thread. While there is support for disabling GCS
at runtime this should be done with great care.
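
For reference, a userspace feature check follows directly from the corrected text (a minimal sketch; it assumes the arm64 uapi header that defines HWCAP_GCS):

/* Sketch: probe for GCS via AT_HWCAP (not AT_HWCAP2). */
#include <stdio.h>
#include <sys/auxv.h>
#include <asm/hwcap.h>          /* assumed to provide HWCAP_GCS on arm64 */

int main(void)
{
        unsigned long hwcaps = getauxval(AT_HWCAP);

        printf("GCS %s\n", (hwcaps & HWCAP_GCS) ? "supported" : "not supported");
        return 0;
}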

@ -18,6 +18,7 @@ Introduction
both access system memory directly and with the same effective
addresses.
**This driver is deprecated and will be removed in a future release.**
Hardware overview
=================
@ -453,7 +454,7 @@ Sysfs Class
A cxl sysfs class is added under /sys/class/cxl to facilitate
enumeration and tuning of the accelerators. Its layout is
described in Documentation/ABI/testing/sysfs-class-cxl
described in Documentation/ABI/obsolete/sysfs-class-cxl
Udev rules

@ -25,7 +25,7 @@ to cache translations for virtual addresses. The IOMMU driver uses the
mmu_notifier() support to keep the device TLB cache and the CPU cache in
sync. When an ATS lookup fails for a virtual address, the device should
use the PRI in order to request the virtual address to be paged into the
CPU page tables. The device must use ATS again in order the fetch the
CPU page tables. The device must use ATS again in order to fetch the
translation before use.
Shared Hardware Workqueues
@ -216,7 +216,7 @@ submitting work and processing completions.
Single Root I/O Virtualization (SR-IOV) focuses on providing independent
hardware interfaces for virtualizing hardware. Hence, it's required to be
almost fully functional interface to software supporting the traditional
an almost fully functional interface to software supporting the traditional
BARs, space for interrupts via MSI-X, its own register layout.
Virtual Functions (VFs) are assisted by the Physical Function (PF)
driver.

@ -53,11 +53,17 @@ properties:
reg:
maxItems: 1
power-controller:
type: object
reboot-mode:
type: object
required:
- compatible
- reg
additionalProperties: true
additionalProperties: false
examples:
- |

@ -8,6 +8,7 @@ title: Qualcomm Graphics Clock & Reset Controller
maintainers:
- Taniya Das <quic_tdas@quicinc.com>
- Imran Shaik <quic_imrashai@quicinc.com>
description: |
Qualcomm graphics clock control module provides the clocks, resets and power
@ -23,10 +24,12 @@ description: |
include/dt-bindings/clock/qcom,gpucc-sm8150.h
include/dt-bindings/clock/qcom,gpucc-sm8250.h
include/dt-bindings/clock/qcom,gpucc-sm8350.h
include/dt-bindings/clock/qcom,qcs8300-gpucc.h
properties:
compatible:
enum:
- qcom,qcs8300-gpucc
- qcom,sdm845-gpucc
- qcom,sa8775p-gpucc
- qcom,sc7180-gpucc

@ -8,16 +8,20 @@ title: Qualcomm Camera Clock & Reset Controller on SA8775P
maintainers:
- Taniya Das <quic_tdas@quicinc.com>
- Imran Shaik <quic_imrashai@quicinc.com>
description: |
Qualcomm camera clock control module provides the clocks, resets and power
domains on SA8775p.
See also: include/dt-bindings/clock/qcom,sa8775p-camcc.h
See also:
include/dt-bindings/clock/qcom,qcs8300-camcc.h
include/dt-bindings/clock/qcom,sa8775p-camcc.h
properties:
compatible:
enum:
- qcom,qcs8300-camcc
- qcom,sa8775p-camcc
clocks:

@ -18,6 +18,7 @@ description: |
properties:
compatible:
enum:
- qcom,qcs8300-videocc
- qcom,sa8775p-videocc
clocks:

@ -0,0 +1,29 @@
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/panel/powertip,hx8238a.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Powertip Electronic Technology Co. 320 x 240 LCD panel
maintainers:
- Lukasz Majewski <lukma@denx.de>
allOf:
- $ref: panel-dpi.yaml#
properties:
compatible:
items:
- const: powertip,hx8238a
- {} # panel-dpi, but not listed here to avoid false select
height-mm: true
panel-timing: true
port: true
power-supply: true
width-mm: true
additionalProperties: false
...

@ -0,0 +1,29 @@
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/panel/powertip,st7272.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Powertip Electronic Technology Co. 320 x 240 LCD panel
maintainers:
- Lukasz Majewski <lukma@denx.de>
allOf:
- $ref: panel-dpi.yaml#
properties:
compatible:
items:
- const: powertip,st7272
- {} # panel-dpi, but not listed here to avoid false select
height-mm: true
panel-timing: true
port: true
power-supply: true
width-mm: true
additionalProperties: false
...

@ -23,7 +23,7 @@ properties:
compatible:
enum:
- ti,am625-dss
- ti,am62a7,dss
- ti,am62a7-dss
- ti,am65x-dss
reg:

@ -14,9 +14,8 @@ allOf:
description: |
The Microchip LAN966x outband interrupt controller (OIC) maps the internal
interrupt sources of the LAN966x device to an external interrupt.
When the LAN966x device is used as a PCI device, the external interrupt is
routed to the PCI interrupt.
interrupt sources of the LAN966x device to a PCI interrupt when the LAN966x
device is used as a PCI device.
properties:
compatible:

@ -33,6 +33,10 @@ properties:
clocks:
maxItems: 1
clock-names:
items:
- const: nf_clk
dmas:
maxItems: 1
@ -51,6 +55,7 @@ required:
- reg-names
- interrupts
- clocks
- clock-names
unevaluatedProperties: false
@ -66,7 +71,8 @@ examples:
#address-cells = <1>;
#size-cells = <0>;
interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&nf_clk>;
clocks = <&clk>;
clock-names = "nf_clk";
cdns,board-delay-ps = <4830>;
nand@0 {

@ -7,7 +7,6 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Qualcomm Technologies ath10k wireless devices
maintainers:
- Kalle Valo <kvalo@kernel.org>
- Jeff Johnson <jjohnson@kernel.org>
description:

@ -8,7 +8,6 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Qualcomm Technologies ath11k wireless devices (PCIe)
maintainers:
- Kalle Valo <kvalo@kernel.org>
- Jeff Johnson <jjohnson@kernel.org>
description: |

@ -8,7 +8,6 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Qualcomm Technologies ath11k wireless devices
maintainers:
- Kalle Valo <kvalo@kernel.org>
- Jeff Johnson <jjohnson@kernel.org>
description: |

@ -9,7 +9,6 @@ title: Qualcomm Technologies ath12k wireless devices (PCIe) with WSI interface
maintainers:
- Jeff Johnson <jjohnson@kernel.org>
- Kalle Valo <kvalo@kernel.org>
description: |
Qualcomm Technologies IEEE 802.11be PCIe devices with WSI interface.

@ -9,7 +9,6 @@ title: Qualcomm Technologies ath12k wireless devices (PCIe)
maintainers:
- Jeff Johnson <quic_jjohnson@quicinc.com>
- Kalle Valo <kvalo@kernel.org>
description:
Qualcomm Technologies IEEE 802.11be PCIe devices.

@ -36,6 +36,7 @@ properties:
- qcom,qcs404-qfprom
- qcom,qcs615-qfprom
- qcom,qcs8300-qfprom
- qcom,sar2130p-qfprom
- qcom,sc7180-qfprom
- qcom,sc7280-qfprom
- qcom,sc8280xp-qfprom

@ -22,7 +22,7 @@ description:
Each sub-node is identified using the node's name, with valid values listed
for each of the pmics below.
For mp5496, s1, s2
For mp5496, s1, s2, l2, l5
For pm2250, s1, s2, s3, s4, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11,
l12, l13, l14, l15, l16, l17, l18, l19, l20, l21, l22

@ -41,6 +41,12 @@ Device Drivers Base
.. kernel-doc:: drivers/base/class.c
:export:
.. kernel-doc:: include/linux/device/faux.h
:internal:
.. kernel-doc:: drivers/base/faux.c
:export:
.. kernel-doc:: drivers/base/node.c
:internal:

@ -0,0 +1,98 @@
Submitting patches to bcachefs:
===============================
Patches must be tested before being submitted, either with the xfstests suite
[0], or the full bcachefs test suite in ktest [1], depending on what's being
touched. Note that ktest wraps xfstests and will be an easier way to run
it for most users; it includes single-command wrappers for all the mainstream
in-kernel local filesystems.
Patches will undergo more testing after being merged (including
lockdep/kasan/preempt/etc. variants); these are not generally required to be
run by the submitter - but do put some thought into what you're changing and
which tests might be relevant. Doing tricky memory layout work? Then kasan.
Locking work? Then lockdep. ktest includes single-command variants for the
debug build types you'll most likely need.
The exception to this rule is incomplete WIP/RFC patches: if you're working on
something nontrivial, it's encouraged to send out a WIP patch to let people
know what you're doing and make sure you're on the right track. Just make sure
it includes a brief note as to what's done and what's incomplete, to avoid
confusion.
Rigorous checkpatch.pl adherence is not required (many of its warnings are
considered out of date), but try not to deviate too much without reason.
Focus on writing code that reads well and is organized well; code should be
aesthetically pleasing.
CI:
===
Instead of running your tests locally, when running the full test suite it's
preferable to let a server farm do it in parallel, and then have the results
in a nice test dashboard (which can tell you which failures are new, and
presents results in a git log view, avoiding the need for most bisecting).
That exists [2], and community members may request an account. If you work for
a big tech company, you'll need to help out with server costs to get access -
but the CI is not restricted to running bcachefs tests: it runs any ktest test
(which generally makes it easy to wrap other tests that can run in qemu).
Other things to think about:
============================
- How will we debug this code? Is there sufficient introspection to diagnose
when something starts acting wonky on a user machine?
We don't necessarily need every single field of every data structure visible
with introspection, but having the important fields of all the core data
types wired up makes debugging drastically easier - a bit of thoughtful
foresight greatly reduces the need to have people build custom kernels with
debug patches.
More broadly, think about all the debug tooling that might be needed.
- Does it make the codebase more or less of a mess? Can we do some
organizing while we're at it?
- Do new tests need to be written? New assertions? How do we know and verify
that the code is correct, and what happens if something goes wrong?
We don't yet have automated code coverage analysis or easy fault injection -
but for now, pretend we did and ask what they might tell us.
Assertions are hugely important, given that we don't yet have a systems
language that can do ergonomic embedded correctness proofs. Hitting an assert
in testing is much better than wandering off into undefined behaviour la-la
land - use them. Use them judiciously, and not as a replacement for proper
error handling, but use them; a sketch follows this list.
- Does it need to be performance tested? Should we add new performance counters?
bcachefs has a set of persistent runtime counters which can be viewed with
the 'bcachefs fs top' command; this should give users a basic idea of what
their filesystem is currently doing. If you're doing a new feature or looking
at old code, think if anything should be added.
- If it's a new on-disk format feature - have upgrades and downgrades been
tested? (Automated tests exist but aren't in the CI, due to the hassle of
disk image management; coordinate to have them run.)
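
A sketch of the assertion guidance above (illustrative only, not bcachefs
code): assert invariants, handle expected failures.

#include <linux/bug.h>
#include <linux/errno.h>

struct table {
        unsigned nr;
        int *entries;
};

static int table_lookup(struct table *t, unsigned idx, int *out)
{
        BUG_ON(!t);             /* invariant: callers always pass a table */

        if (idx >= t->nr)       /* can legitimately happen: handle it */
                return -ERANGE;

        *out = t->entries[idx];
        return 0;
}
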
Mailing list, IRC:
==================
Patches should hit the list [3], but much discussion and code review happens on
IRC as well [4]; many people appreciate the more conversational approach and
quicker feedback.
Additionally, we have a lively user community doing excellent QA work, which
exists primarily on IRC. Please make use of that resource; user feedback is
important for any nontrivial feature, and documenting it in commit messages
would be a good idea.
[0]: git://git.kernel.org/pub/scm/fs/xfs/xfstests-dev.git
[1]: https://evilpiepirate.org/git/ktest.git/
[2]: https://evilpiepirate.org/~testdashboard/ci/
[3]: linux-bcachefs@vger.kernel.org
[4]: irc.oftc.net#bcache, #bcachefs-dev

@ -9,4 +9,5 @@ bcachefs Documentation
:numbered:
CodingStyle
SubmittingPatches
errorcodes

@ -1524,7 +1524,8 @@ attribute-sets:
nested-attributes: bitset
-
name: hwtstamp-flags
type: u32
type: nest
nested-attributes: bitset
operations:
enum-model: directional

@ -369,8 +369,8 @@ to their default.
addr.can_family = AF_CAN;
addr.can_ifindex = if_nametoindex("can0");
addr.tp.tx_id = 0x18DA42F1 | CAN_EFF_FLAG;
addr.tp.rx_id = 0x18DAF142 | CAN_EFF_FLAG;
addr.can_addr.tp.tx_id = 0x18DA42F1 | CAN_EFF_FLAG;
addr.can_addr.tp.rx_id = 0x18DAF142 | CAN_EFF_FLAG;
ret = bind(s, (struct sockaddr *)&addr, sizeof(addr));
if (ret < 0)
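
Putting the corrected field names in context, a minimal sketch of an ISO-TP
socket setup (error handling mostly elided):

#include <net/if.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>
#include <linux/can.h>
#include <linux/can/isotp.h>

int main(void)
{
        struct sockaddr_can addr;
        int s = socket(PF_CAN, SOCK_DGRAM, CAN_ISOTP);

        memset(&addr, 0, sizeof(addr));
        addr.can_family = AF_CAN;
        addr.can_ifindex = if_nametoindex("can0");
        addr.can_addr.tp.tx_id = 0x18DA42F1 | CAN_EFF_FLAG;  /* tp sits inside can_addr */
        addr.can_addr.tp.rx_id = 0x18DAF142 | CAN_EFF_FLAG;

        if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
                return 1;
        close(s);
        return 0;
}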

@ -112,7 +112,7 @@ Functions
Callbacks
=========
There are six callbacks:
There are seven callbacks:
::
@ -182,6 +182,13 @@ There are six callbacks:
the length of the message. skb->len - offset may be greater
than full_len since strparser does not trim the skb.
::
int (*read_sock)(struct strparser *strp, read_descriptor_t *desc,
sk_read_actor_t recv_actor);
The read_sock callback is used by strparser instead of
sock->ops->read_sock, if provided.
::
int (*read_sock_done)(struct strparser *strp, int err);
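
A sketch of how the new callback slots into a callbacks structure (the field
set is as described in this document; the handlers are hypothetical stubs):

#include <net/strparser.h>

static int my_parse_msg(struct strparser *strp, struct sk_buff *skb)
{
        return skb->len;        /* hypothetical: each skb is one whole message */
}

static void my_rcv_msg(struct strparser *strp, struct sk_buff *skb)
{
        kfree_skb(skb);         /* hypothetical sink for complete messages */
}

static int my_read_sock(struct strparser *strp, read_descriptor_t *desc,
                        sk_read_actor_t recv_actor)
{
        return 0;               /* used instead of sock->ops->read_sock */
}

static const struct strp_callbacks my_cbs = {
        .parse_msg = my_parse_msg,      /* mandatory */
        .rcv_msg   = my_rcv_msg,        /* mandatory */
        .read_sock = my_read_sock,      /* the new, optional callback */
};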

@ -308,7 +308,7 @@ an involved disclosed party. The current ambassadors list:
Google Kees Cook <keescook@chromium.org>
LLVM Nick Desaulniers <ndesaulniers@google.com>
LLVM Nick Desaulniers <nick.desaulniers+lkml@gmail.com>
============= ========================================================
If you want your organization to be added to the ambassadors list, please

@ -287,7 +287,7 @@ revelada involucrada. La lista de embajadores actuales:
Google Kees Cook <keescook@chromium.org>
LLVM Nick Desaulniers <ndesaulniers@google.com>
LLVM Nick Desaulniers <nick.desaulniers+lkml@gmail.com>
============= ========================================================
Si quiere que su organización se añada a la lista de embajadores, por

@ -8,7 +8,7 @@ Landlock: unprivileged access control
=====================================
:Author: Mickaël Salaün
:Date: October 2024
:Date: January 2025
The goal of Landlock is to enable restriction of ambient rights (e.g. global
filesystem or network access) for a set of processes. Because Landlock
@ -329,11 +329,11 @@ non-sandboxed process, we can specify this restriction with
A sandboxed process can connect to a non-sandboxed process when its domain is
not scoped. If a process's domain is scoped, it can only connect to sockets
created by processes in the same scope.
Moreover, If a process is scoped to send signal to a non-scoped process, it can
Moreover, if a process is scoped to send signal to a non-scoped process, it can
only send signals to processes in the same scope.
A connected datagram socket behaves like a stream socket when its domain is
scoped, meaning if the domain is scoped after the socket is connected , it can
scoped, meaning if the domain is scoped after the socket is connected, it can
still :manpage:`send(2)` data just like a stream socket. However, in the same
scenario, a non-connected datagram socket cannot send data (with
:manpage:`sendto(2)`) outside its scope.
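
A sketch of restricting both IPC mechanisms described above (assumes a kernel
with Landlock ABI >= 6, which provides the LANDLOCK_SCOPE_* flags):

#include <linux/landlock.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        struct landlock_ruleset_attr attr = {
                .scoped = LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET |
                          LANDLOCK_SCOPE_SIGNAL,
        };
        int fd = syscall(SYS_landlock_create_ruleset, &attr, sizeof(attr), 0);

        if (fd < 0)
                return 1;
        prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);        /* required before enforcing */
        if (syscall(SYS_landlock_restrict_self, fd, 0))
                return 1;
        close(fd);
        return 0;
}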

@ -1419,7 +1419,7 @@ fetch) is injected in the guest.
S390:
^^^^^
Returns -EINVAL if the VM has the KVM_VM_S390_UCONTROL flag set.
Returns -EINVAL or -EEXIST if the VM has the KVM_VM_S390_UCONTROL flag set.
Returns -EINVAL if called on a protected VM.
4.36 KVM_SET_TSS_ADDR

@ -1046,14 +1046,14 @@ F: drivers/crypto/ccp/hsti.*
AMD DISPLAY CORE
M: Harry Wentland <harry.wentland@amd.com>
M: Leo Li <sunpeng.li@amd.com>
M: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
R: Rodrigo Siqueira <siqueira@igalia.com>
L: amd-gfx@lists.freedesktop.org
S: Supported
T: git https://gitlab.freedesktop.org/agd5f/linux.git
F: drivers/gpu/drm/amd/display/
AMD DISPLAY CORE - DML
M: Chaitanya Dhere <chaitanya.dhere@amd.com>
M: Austin Zheng <austin.zheng@amd.com>
M: Jun Lei <jun.lei@amd.com>
S: Supported
F: drivers/gpu/drm/amd/display/dc/dml/
@ -2209,8 +2209,8 @@ F: sound/soc/codecs/cs42l84.*
F: sound/soc/codecs/ssm3515.c
ARM/APPLE MACHINE SUPPORT
M: Hector Martin <marcan@marcan.st>
M: Sven Peter <sven@svenpeter.dev>
M: Janne Grunau <j@jannau.net>
R: Alyssa Rosenzweig <alyssa@rosenzweig.io>
L: asahi@lists.linux.dev
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@ -2285,7 +2285,7 @@ F: drivers/irqchip/irq-aspeed-i2c-ic.c
ARM/ASPEED MACHINE SUPPORT
M: Joel Stanley <joel@jms.id.au>
R: Andrew Jeffery <andrew@codeconstruct.com.au>
M: Andrew Jeffery <andrew@codeconstruct.com.au>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-aspeed@lists.ozlabs.org (moderated for non-subscribers)
S: Supported
@ -2878,7 +2878,7 @@ F: drivers/pinctrl/nxp/
ARM/NXP S32G/S32R DWMAC ETHERNET DRIVER
M: Jan Petrous <jan.petrous@oss.nxp.com>
L: NXP S32 Linux Team <s32@nxp.com>
R: s32@nxp.com
S: Maintained
F: Documentation/devicetree/bindings/net/nxp,s32-dwmac.yaml
F: drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c
@ -3655,7 +3655,6 @@ F: Documentation/devicetree/bindings/phy/phy-ath79-usb.txt
F: drivers/phy/qualcomm/phy-ath79-usb.c
ATHEROS ATH GENERIC UTILITIES
M: Kalle Valo <kvalo@kernel.org>
M: Jeff Johnson <jjohnson@kernel.org>
L: linux-wireless@vger.kernel.org
S: Supported
@ -3860,13 +3859,6 @@ W: https://ez.analog.com/linux-software-drivers
F: Documentation/devicetree/bindings/pwm/adi,axi-pwmgen.yaml
F: drivers/pwm/pwm-axi-pwmgen.c
AXXIA I2C CONTROLLER
M: Krzysztof Adamski <krzysztof.adamski@nokia.com>
L: linux-i2c@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/i2c/i2c-axxia.txt
F: drivers/i2c/busses/i2c-axxia.c
AZ6007 DVB DRIVER
M: Mauro Carvalho Chehab <mchehab@kernel.org>
L: linux-media@vger.kernel.org
@ -3955,6 +3947,7 @@ M: Kent Overstreet <kent.overstreet@linux.dev>
L: linux-bcachefs@vger.kernel.org
S: Supported
C: irc://irc.oftc.net/bcache
P: Documentation/filesystems/bcachefs/SubmittingPatches.rst
T: git https://evilpiepirate.org/git/bcachefs.git
F: fs/bcachefs/
F: Documentation/filesystems/bcachefs/
@ -5663,7 +5656,7 @@ F: .clang-format
CLANG/LLVM BUILD SUPPORT
M: Nathan Chancellor <nathan@kernel.org>
R: Nick Desaulniers <ndesaulniers@google.com>
R: Nick Desaulniers <nick.desaulniers+lkml@gmail.com>
R: Bill Wendling <morbo@google.com>
R: Justin Stitt <justinstitt@google.com>
L: llvm@lists.linux.dev
@ -5863,7 +5856,6 @@ F: Documentation/security/snp-tdx-threat-model.rst
CONFIGFS
M: Joel Becker <jlbec@evilplan.org>
M: Christoph Hellwig <hch@lst.de>
S: Supported
T: git git://git.infradead.org/users/hch/configfs.git
F: fs/configfs/
@ -5934,6 +5926,17 @@ F: tools/testing/selftests/cgroup/test_cpuset.c
F: tools/testing/selftests/cgroup/test_cpuset_prs.sh
F: tools/testing/selftests/cgroup/test_cpuset_v1_base.sh
CONTROL GROUP - DEVICE MEMORY CONTROLLER (DMEM)
M: Maarten Lankhorst <dev@lankhorst.se>
M: Maxime Ripard <mripard@kernel.org>
M: Natalie Vock <natalie.vock@gmx.de>
L: cgroups@vger.kernel.org
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: include/linux/cgroup_dmem.h
F: kernel/cgroup/dmem.c
CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG)
M: Johannes Weiner <hannes@cmpxchg.org>
M: Michal Hocko <mhocko@kernel.org>
@ -6886,7 +6889,6 @@ F: kernel/dma/map_benchmark.c
F: tools/testing/selftests/dma/
DMA MAPPING HELPERS
M: Christoph Hellwig <hch@lst.de>
M: Marek Szyprowski <m.szyprowski@samsung.com>
R: Robin Murphy <robin.murphy@arm.com>
L: iommu@lists.linux.dev
@ -7116,8 +7118,10 @@ F: rust/kernel/device.rs
F: rust/kernel/device_id.rs
F: rust/kernel/devres.rs
F: rust/kernel/driver.rs
F: rust/kernel/faux.rs
F: rust/kernel/platform.rs
F: samples/rust/rust_driver_platform.rs
F: samples/rust/rust_driver_faux.rs
DRIVERS FOR OMAP ADAPTIVE VOLTAGE SCALING (AVS)
M: Nishanth Menon <nm@ti.com>
@ -7431,7 +7435,6 @@ F: Documentation/devicetree/bindings/display/panel/novatek,nt36672a.yaml
F: drivers/gpu/drm/panel/panel-novatek-nt36672a.c
DRM DRIVER FOR NVIDIA GEFORCE/QUADRO GPUS
M: Karol Herbst <kherbst@redhat.com>
M: Lyude Paul <lyude@redhat.com>
M: Danilo Krummrich <dakr@kernel.org>
L: dri-devel@lists.freedesktop.org
@ -9418,7 +9421,7 @@ F: fs/freevxfs/
FREEZER
M: "Rafael J. Wysocki" <rafael@kernel.org>
M: Pavel Machek <pavel@ucw.cz>
M: Pavel Machek <pavel@kernel.org>
L: linux-pm@vger.kernel.org
S: Supported
F: Documentation/power/freezing-of-tasks.rst
@ -9835,8 +9838,7 @@ F: drivers/input/touchscreen/goodix*
GOOGLE ETHERNET DRIVERS
M: Jeroen de Borst <jeroendb@google.com>
M: Praveen Kaligineedi <pkaligineedi@google.com>
R: Shailend Chand <shailend@google.com>
M: Harshitha Ramamurthy <hramamurthy@google.com>
L: netdev@vger.kernel.org
S: Maintained
F: Documentation/networking/device_drivers/ethernet/google/gve.rst
@ -9878,7 +9880,7 @@ S: Maintained
F: drivers/staging/gpib/
GPIO ACPI SUPPORT
M: Mika Westerberg <mika.westerberg@linux.intel.com>
M: Mika Westerberg <westeri@kernel.org>
M: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
L: linux-gpio@vger.kernel.org
L: linux-acpi@vger.kernel.org
@ -10253,7 +10255,7 @@ F: drivers/video/fbdev/hgafb.c
HIBERNATION (aka Software Suspend, aka swsusp)
M: "Rafael J. Wysocki" <rafael@kernel.org>
M: Pavel Machek <pavel@ucw.cz>
M: Pavel Machek <pavel@kernel.org>
L: linux-pm@vger.kernel.org
S: Supported
B: https://bugzilla.kernel.org
@ -10822,7 +10824,7 @@ S: Odd Fixes
F: drivers/tty/hvc/
I2C ACPI SUPPORT
M: Mika Westerberg <mika.westerberg@linux.intel.com>
M: Mika Westerberg <westeri@kernel.org>
L: linux-i2c@vger.kernel.org
L: linux-acpi@vger.kernel.org
S: Maintained
@ -13124,8 +13126,8 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/har
F: scripts/leaking_addresses.pl
LED SUBSYSTEM
M: Pavel Machek <pavel@ucw.cz>
M: Lee Jones <lee@kernel.org>
M: Pavel Machek <pavel@kernel.org>
L: linux-leds@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/lee/leds.git
@ -15689,7 +15691,7 @@ F: include/uapi/linux/cciss*.h
MICROSOFT MANA RDMA DRIVER
M: Long Li <longli@microsoft.com>
M: Ajay Sharma <sharmaajay@microsoft.com>
M: Konstantin Taranov <kotaranov@microsoft.com>
L: linux-rdma@vger.kernel.org
S: Supported
F: drivers/infiniband/hw/mana/
@ -16438,7 +16440,7 @@ X: drivers/net/can/
X: drivers/net/wireless/
NETWORKING DRIVERS (WIRELESS)
M: Kalle Valo <kvalo@kernel.org>
M: Johannes Berg <johannes@sipsolutions.net>
L: linux-wireless@vger.kernel.org
S: Maintained
W: https://wireless.wiki.kernel.org/
@ -16462,6 +16464,28 @@ F: include/net/dsa.h
F: net/dsa/
F: tools/testing/selftests/drivers/net/dsa/
NETWORKING [ETHTOOL]
M: Andrew Lunn <andrew@lunn.ch>
M: Jakub Kicinski <kuba@kernel.org>
F: Documentation/netlink/specs/ethtool.yaml
F: Documentation/networking/ethtool-netlink.rst
F: include/linux/ethtool*
F: include/uapi/linux/ethtool*
F: net/ethtool/
F: tools/testing/selftests/drivers/net/*/ethtool*
NETWORKING [ETHTOOL CABLE TEST]
M: Andrew Lunn <andrew@lunn.ch>
F: net/ethtool/cabletest.c
F: tools/testing/selftests/drivers/net/*/ethtool*
K: cable_test
NETWORKING [ETHTOOL MAC MERGE]
M: Vladimir Oltean <vladimir.oltean@nxp.com>
F: net/ethtool/mm.c
F: tools/testing/selftests/drivers/net/hw/ethtool_mm.sh
K: ethtool_mm
NETWORKING [GENERAL]
M: "David S. Miller" <davem@davemloft.net>
M: Eric Dumazet <edumazet@google.com>
@ -16493,6 +16517,7 @@ F: include/linux/netdev*
F: include/linux/netlink.h
F: include/linux/netpoll.h
F: include/linux/rtnetlink.h
F: include/linux/sctp.h
F: include/linux/seq_file_net.h
F: include/linux/skbuff*
F: include/net/
@ -16509,6 +16534,7 @@ F: include/uapi/linux/netdev*
F: include/uapi/linux/netlink.h
F: include/uapi/linux/netlink_diag.h
F: include/uapi/linux/rtnetlink.h
F: include/uapi/linux/sctp.h
F: lib/net_utils.c
F: lib/random32.c
F: net/
@ -16621,6 +16647,7 @@ F: tools/testing/selftests/net/mptcp/
NETWORKING [TCP]
M: Eric Dumazet <edumazet@google.com>
M: Neal Cardwell <ncardwell@google.com>
R: Kuniyuki Iwashima <kuniyu@amazon.com>
L: netdev@vger.kernel.org
S: Maintained
F: Documentation/networking/net_cachelines/tcp_sock.rst
@ -16648,6 +16675,31 @@ F: include/net/tls.h
F: include/uapi/linux/tls.h
F: net/tls/*
NETWORKING [SOCKETS]
M: Eric Dumazet <edumazet@google.com>
M: Kuniyuki Iwashima <kuniyu@amazon.com>
M: Paolo Abeni <pabeni@redhat.com>
M: Willem de Bruijn <willemb@google.com>
S: Maintained
F: include/linux/sock_diag.h
F: include/linux/socket.h
F: include/linux/sockptr.h
F: include/net/sock.h
F: include/net/sock_reuseport.h
F: include/uapi/linux/socket.h
F: net/core/*sock*
F: net/core/scm.c
F: net/socket.c
NETWORKING [UNIX SOCKETS]
M: Kuniyuki Iwashima <kuniyu@amazon.com>
S: Maintained
F: include/net/af_unix.h
F: include/net/netns/unix.h
F: include/uapi/linux/unix_diag.h
F: net/unix/
F: tools/testing/selftests/net/af_unix/
NETXEN (1/10) GbE SUPPORT
M: Manish Chopra <manishc@marvell.com>
M: Rahul Verma <rahulv@marvell.com>
@ -16781,7 +16833,7 @@ F: include/linux/tick.h
F: kernel/time/tick*.*
NOKIA N900 CAMERA SUPPORT (ET8EK8 SENSOR, AD5820 FOCUS)
M: Pavel Machek <pavel@ucw.cz>
M: Pavel Machek <pavel@kernel.org>
M: Sakari Ailus <sakari.ailus@iki.fi>
L: linux-media@vger.kernel.org
S: Maintained
@ -17713,6 +17765,7 @@ L: netdev@vger.kernel.org
L: dev@openvswitch.org
S: Maintained
W: http://openvswitch.org
F: Documentation/networking/openvswitch.rst
F: include/uapi/linux/openvswitch.h
F: net/openvswitch/
F: tools/testing/selftests/net/openvswitch/
@ -19312,7 +19365,6 @@ Q: http://patchwork.linuxtv.org/project/linux-media/list/
F: drivers/media/tuners/qt1010*
QUALCOMM ATH12K WIRELESS DRIVER
M: Kalle Valo <kvalo@kernel.org>
M: Jeff Johnson <jjohnson@kernel.org>
L: ath12k@lists.infradead.org
S: Supported
@ -19322,7 +19374,6 @@ F: drivers/net/wireless/ath/ath12k/
N: ath12k
QUALCOMM ATHEROS ATH10K WIRELESS DRIVER
M: Kalle Valo <kvalo@kernel.org>
M: Jeff Johnson <jjohnson@kernel.org>
L: ath10k@lists.infradead.org
S: Supported
@ -19332,7 +19383,6 @@ F: drivers/net/wireless/ath/ath10k/
N: ath10k
QUALCOMM ATHEROS ATH11K WIRELESS DRIVER
M: Kalle Valo <kvalo@kernel.org>
M: Jeff Johnson <jjohnson@kernel.org>
L: ath11k@lists.infradead.org
S: Supported
@ -19467,6 +19517,15 @@ L: dmaengine@vger.kernel.org
S: Supported
F: drivers/dma/qcom/hidma*
QUALCOMM I2C QCOM GENI DRIVER
M: Mukesh Kumar Savaliya <quic_msavaliy@quicinc.com>
M: Viken Dadhaniya <quic_vdadhani@quicinc.com>
L: linux-i2c@vger.kernel.org
L: linux-arm-msm@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/i2c/qcom,i2c-geni-qcom.yaml
F: drivers/i2c/busses/i2c-qcom-geni.c
QUALCOMM I2C CCI DRIVER
M: Loic Poulain <loic.poulain@linaro.org>
M: Robert Foss <rfoss@kernel.org>
@ -19607,7 +19666,6 @@ F: drivers/net/wireless/quantenna
RADEON and AMDGPU DRM DRIVERS
M: Alex Deucher <alexander.deucher@amd.com>
M: Christian König <christian.koenig@amd.com>
M: Xinhui Pan <Xinhui.Pan@amd.com>
L: amd-gfx@lists.freedesktop.org
S: Supported
B: https://gitlab.freedesktop.org/drm/amd/-/issues
@ -19829,7 +19887,7 @@ F: net/rds/
F: tools/testing/selftests/net/rds/
RDT - RESOURCE ALLOCATION
M: Fenghua Yu <fenghua.yu@intel.com>
M: Tony Luck <tony.luck@intel.com>
M: Reinette Chatre <reinette.chatre@intel.com>
L: linux-kernel@vger.kernel.org
S: Supported
@ -20280,6 +20338,7 @@ RISC-V ARCHITECTURE
M: Paul Walmsley <paul.walmsley@sifive.com>
M: Palmer Dabbelt <palmer@dabbelt.com>
M: Albert Ou <aou@eecs.berkeley.edu>
R: Alexandre Ghiti <alex@ghiti.fr>
L: linux-riscv@lists.infradead.org
S: Supported
Q: https://patchwork.kernel.org/project/linux-riscv/list/
@ -21873,10 +21932,13 @@ F: sound/soc/uniphier/
SOCKET TIMESTAMPING
M: Willem de Bruijn <willemdebruijn.kernel@gmail.com>
R: Jason Xing <kernelxing@tencent.com>
S: Maintained
F: Documentation/networking/timestamping.rst
F: include/linux/net_tstamp.h
F: include/uapi/linux/net_tstamp.h
F: tools/testing/selftests/bpf/*/net_timestamping*
F: tools/testing/selftests/net/*timestamp*
F: tools/testing/selftests/net/so_txtime.c
SOEKRIS NET48XX LED SUPPORT
@ -22806,7 +22868,7 @@ F: drivers/sh/
SUSPEND TO RAM
M: "Rafael J. Wysocki" <rafael@kernel.org>
M: Len Brown <len.brown@intel.com>
M: Pavel Machek <pavel@ucw.cz>
M: Pavel Machek <pavel@kernel.org>
L: linux-pm@vger.kernel.org
S: Supported
B: https://bugzilla.kernel.org
@ -24019,7 +24081,6 @@ F: tools/testing/selftests/ftrace/
TRACING MMIO ACCESSES (MMIOTRACE)
M: Steven Rostedt <rostedt@goodmis.org>
M: Masami Hiramatsu <mhiramat@kernel.org>
R: Karol Herbst <karolherbst@gmail.com>
R: Pekka Paalanen <ppaalanen@gmail.com>
L: linux-kernel@vger.kernel.org
L: nouveau@lists.freedesktop.org

@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 14
SUBLEVEL = 0
EXTRAVERSION = -rc1
EXTRAVERSION = -rc5
NAME = Baby Opossum Posse
# *DOCUMENTATION*
@ -1120,8 +1120,8 @@ LDFLAGS_vmlinux += --orphan-handling=$(CONFIG_LD_ORPHAN_WARN_LEVEL)
endif
# Align the bit size of userspace programs with the kernel
KBUILD_USERCFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CFLAGS))
KBUILD_USERLDFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CFLAGS))
KBUILD_USERCFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS))
KBUILD_USERLDFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS))
# make the checker run with the right architecture
CHECKFLAGS += --arch=$(ARCH)
@ -1421,18 +1421,13 @@ ifneq ($(wildcard $(resolve_btfids_O)),)
$(Q)$(MAKE) -sC $(srctree)/tools/bpf/resolve_btfids O=$(resolve_btfids_O) clean
endif
# Clear a bunch of variables before executing the submake
ifeq ($(quiet),silent_)
tools_silent=s
endif
tools/: FORCE
$(Q)mkdir -p $(objtree)/tools
$(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/
$(Q)$(MAKE) LDFLAGS= O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/
tools/%: FORCE
$(Q)mkdir -p $(objtree)/tools
$(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/ $*
$(Q)$(MAKE) LDFLAGS= O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/ $*
# ---------------------------------------------------------------------------
# Kernel selftest

@ -74,7 +74,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch(x) ((x)->e_machine == EM_ALPHA)
#define elf_check_arch(x) (((x)->e_machine == EM_ALPHA) && !((x)->e_flags & EF_ALPHA_32BIT))
/*
* These are used to set parameters in the core dumps.
@ -137,10 +137,6 @@ extern int dump_elf_task(elf_greg_t *dest, struct task_struct *task);
: amask (AMASK_CIX) ? "ev6" : "ev67"); \
})
#define SET_PERSONALITY(EX) \
set_personality(((EX).e_flags & EF_ALPHA_32BIT) \
? PER_LINUX_32BIT : PER_LINUX)
extern int alpha_l1i_cacheshape;
extern int alpha_l1d_cacheshape;
extern int alpha_l2_cacheshape;

@ -135,7 +135,7 @@ struct crb_struct {
/* virtual->physical map */
unsigned long map_entries;
unsigned long map_pages;
struct vf_map_struct map[1];
struct vf_map_struct map[];
};
struct memclust_struct {
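
The map[1] to map[] change above is the standard conversion from the old
one-element-array idiom to a C99 flexible array member, which sizes the struct
correctly and lets the compiler bounds-check trailing accesses. A sketch of
the general pattern (names illustrative, not from this file):

#include <stdlib.h>

struct vmap_table {
        unsigned long nr_entries;
        struct { unsigned long va, pa; } map[];  /* flexible array member */
};

/* one allocation covers the header plus n trailing map entries */
static struct vmap_table *vmap_table_alloc(unsigned long n)
{
        struct vmap_table *t = malloc(sizeof(*t) + n * sizeof(t->map[0]));

        if (t)
                t->nr_entries = n;
        return t;
}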

@ -360,7 +360,7 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
extern void paging_init(void);
/* We have our own get_unmapped_area to cope with ADDR_LIMIT_32BIT. */
/* We have our own get_unmapped_area */
#define HAVE_ARCH_UNMAPPED_AREA
#endif /* _ALPHA_PGTABLE_H */

@ -8,23 +8,19 @@
#ifndef __ASM_ALPHA_PROCESSOR_H
#define __ASM_ALPHA_PROCESSOR_H
#include <linux/personality.h> /* for ADDR_LIMIT_32BIT */
/*
* We have a 42-bit user address space: 4TB user VM...
*/
#define TASK_SIZE (0x40000000000UL)
#define STACK_TOP \
(current->personality & ADDR_LIMIT_32BIT ? 0x80000000 : 0x00120000000UL)
#define STACK_TOP (0x00120000000UL)
#define STACK_TOP_MAX 0x00120000000UL
/* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
*/
#define TASK_UNMAPPED_BASE \
((current->personality & ADDR_LIMIT_32BIT) ? 0x40000000 : TASK_SIZE / 2)
#define TASK_UNMAPPED_BASE (TASK_SIZE / 2)
/* This is dead. Everything has been moved to thread_info. */
struct thread_struct { };

@ -42,6 +42,8 @@ struct pt_regs {
unsigned long trap_a0;
unsigned long trap_a1;
unsigned long trap_a2;
/* This makes the stack 16-byte aligned as GCC expects */
unsigned long __pad0;
/* These are saved by PAL-code: */
unsigned long ps;
unsigned long pc;
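
A sketch of the alignment arithmetic behind this padding (offsets inferred
from the surrounding hunks):

/* Before: offsetof(struct pt_regs, ps) == 184, and 184 % 16 == 8, so the
 * stack pointer left by SAVE_ALL was only 8-byte aligned, violating the
 * 16-byte alignment GCC expects. After: __pad0 moves ps to offset 192,
 * and 192 % 16 == 0. The extra r9-r15 save area used by entMM/entUnaUser
 * grows from 56 to 64 bytes to preserve that alignment, with 8 bytes of
 * padding at the top (hence the new -64..-16 offsets and the "padding
 * at -8" note in unauser_reg_offsets below). */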

@ -19,9 +19,13 @@ static void __used foo(void)
DEFINE(TI_STATUS, offsetof(struct thread_info, status));
BLANK();
DEFINE(SP_OFF, offsetof(struct pt_regs, ps));
DEFINE(SIZEOF_PT_REGS, sizeof(struct pt_regs));
BLANK();
DEFINE(SWITCH_STACK_SIZE, sizeof(struct switch_stack));
BLANK();
DEFINE(HAE_CACHE, offsetof(struct alpha_machine_vector, hae_cache));
DEFINE(HAE_REG, offsetof(struct alpha_machine_vector, hae_register));
}

@ -15,10 +15,6 @@
.set noat
.cfi_sections .debug_frame
/* Stack offsets. */
#define SP_OFF 184
#define SWITCH_STACK_SIZE 64
.macro CFI_START_OSF_FRAME func
.align 4
.globl \func
@ -198,8 +194,8 @@ CFI_END_OSF_FRAME entArith
CFI_START_OSF_FRAME entMM
SAVE_ALL
/* save $9 - $15 so the inline exception code can manipulate them. */
subq $sp, 56, $sp
.cfi_adjust_cfa_offset 56
subq $sp, 64, $sp
.cfi_adjust_cfa_offset 64
stq $9, 0($sp)
stq $10, 8($sp)
stq $11, 16($sp)
@ -214,7 +210,7 @@ CFI_START_OSF_FRAME entMM
.cfi_rel_offset $13, 32
.cfi_rel_offset $14, 40
.cfi_rel_offset $15, 48
addq $sp, 56, $19
addq $sp, 64, $19
/* handle the fault */
lda $8, 0x3fff
bic $sp, $8, $8
@ -227,7 +223,7 @@ CFI_START_OSF_FRAME entMM
ldq $13, 32($sp)
ldq $14, 40($sp)
ldq $15, 48($sp)
addq $sp, 56, $sp
addq $sp, 64, $sp
.cfi_restore $9
.cfi_restore $10
.cfi_restore $11
@ -235,7 +231,7 @@ CFI_START_OSF_FRAME entMM
.cfi_restore $13
.cfi_restore $14
.cfi_restore $15
.cfi_adjust_cfa_offset -56
.cfi_adjust_cfa_offset -64
/* finish up the syscall as normal. */
br ret_from_sys_call
CFI_END_OSF_FRAME entMM
@ -382,8 +378,8 @@ entUnaUser:
.cfi_restore $0
.cfi_adjust_cfa_offset -256
SAVE_ALL /* setup normal kernel stack */
lda $sp, -56($sp)
.cfi_adjust_cfa_offset 56
lda $sp, -64($sp)
.cfi_adjust_cfa_offset 64
stq $9, 0($sp)
stq $10, 8($sp)
stq $11, 16($sp)
@ -399,7 +395,7 @@ entUnaUser:
.cfi_rel_offset $14, 40
.cfi_rel_offset $15, 48
lda $8, 0x3fff
addq $sp, 56, $19
addq $sp, 64, $19
bic $sp, $8, $8
jsr $26, do_entUnaUser
ldq $9, 0($sp)
@ -409,7 +405,7 @@ entUnaUser:
ldq $13, 32($sp)
ldq $14, 40($sp)
ldq $15, 48($sp)
lda $sp, 56($sp)
lda $sp, 64($sp)
.cfi_restore $9
.cfi_restore $10
.cfi_restore $11
@ -417,7 +413,7 @@ entUnaUser:
.cfi_restore $13
.cfi_restore $14
.cfi_restore $15
.cfi_adjust_cfa_offset -56
.cfi_adjust_cfa_offset -64
br ret_from_sys_call
CFI_END_OSF_FRAME entUna

@ -1210,8 +1210,7 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
return ret;
}
/* Get an address range which is currently unmapped. Similar to the
generic version except that we know how to honor ADDR_LIMIT_32BIT. */
/* Get an address range which is currently unmapped. */
static unsigned long
arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
@ -1230,13 +1229,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags, vm_flags_t vm_flags)
{
unsigned long limit;
/* "32 bit" actually means 31 bit, since pointers sign extend. */
if (current->personality & ADDR_LIMIT_32BIT)
limit = 0x80000000;
else
limit = TASK_SIZE;
unsigned long limit = TASK_SIZE;
if (len > limit)
return -ENOMEM;

@ -13,6 +13,7 @@
#include <linux/log2.h>
#include <linux/dma-map-ops.h>
#include <linux/iommu-helper.h>
#include <linux/string_choices.h>
#include <asm/io.h>
#include <asm/hwrpb.h>
@ -212,7 +213,7 @@ static int pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
/* If both conditions above are met, we are fine. */
DBGA("pci_dac_dma_supported %s from %ps\n",
ok ? "yes" : "no", __builtin_return_address(0));
str_yes_no(ok), __builtin_return_address(0));
return ok;
}

@ -649,7 +649,7 @@ s_reg_to_mem (unsigned long s_reg)
static int unauser_reg_offsets[32] = {
R(r0), R(r1), R(r2), R(r3), R(r4), R(r5), R(r6), R(r7), R(r8),
/* r9 ... r15 are stored in front of regs. */
-56, -48, -40, -32, -24, -16, -8,
-64, -56, -48, -40, -32, -24, -16, /* padding at -8 */
R(r16), R(r17), R(r18),
R(r19), R(r20), R(r21), R(r22), R(r23), R(r24), R(r25), R(r26),
R(r27), R(r28), R(gp),

@ -78,8 +78,8 @@ __load_new_mm_context(struct mm_struct *next_mm)
/* Macro for exception fixup code to access integer registers. */
#define dpf_reg(r) \
(((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 : \
(r) <= 18 ? (r)+10 : (r)-10])
(((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-17 : \
(r) <= 18 ? (r)+11 : (r)-10])
asmlinkage void
do_page_fault(unsigned long address, unsigned long mmcsr,

@ -225,7 +225,6 @@ config ARM64
select HAVE_FUNCTION_ERROR_INJECTION
select HAVE_FUNCTION_GRAPH_FREGS
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_GRAPH_RETVAL
select HAVE_GCC_PLUGINS
select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && \
HW_PERF_EVENTS && HAVE_PERF_EVENTS_NMI

@ -48,7 +48,11 @@ KBUILD_CFLAGS += $(CC_FLAGS_NO_FPU) \
KBUILD_CFLAGS += $(call cc-disable-warning, psabi)
KBUILD_AFLAGS += $(compat_vdso)
ifeq ($(call test-ge, $(CONFIG_RUSTC_VERSION), 108500),y)
KBUILD_RUSTFLAGS += --target=aarch64-unknown-none-softfloat
else
KBUILD_RUSTFLAGS += --target=aarch64-unknown-none -Ctarget-feature="-neon"
endif
KBUILD_CFLAGS += $(call cc-option,-mabi=lp64)
KBUILD_AFLAGS += $(call cc-option,-mabi=lp64)

@ -226,7 +226,6 @@
};
&uart5 {
pinctrl-0 = <&uart5_xfer>;
rts-gpios = <&gpio0 RK_PB5 GPIO_ACTIVE_HIGH>;
status = "okay";
};

@ -396,6 +396,12 @@
status = "okay";
};
&uart5 {
/delete-property/ dmas;
/delete-property/ dma-names;
pinctrl-0 = <&uart5_xfer>;
};
/* Mule UCAN */
&usb_host0_ehci {
status = "okay";

@ -17,8 +17,7 @@
&gmac2io {
phy-handle = <&yt8531c>;
tx_delay = <0x19>;
rx_delay = <0x05>;
phy-mode = "rgmii-id";
status = "okay";
mdio {

@ -15,6 +15,7 @@
&gmac2io {
phy-handle = <&rtl8211e>;
phy-mode = "rgmii";
tx_delay = <0x24>;
rx_delay = <0x18>;
status = "okay";

@ -109,7 +109,6 @@
assigned-clocks = <&cru SCLK_MAC2IO>, <&cru SCLK_MAC2IO_EXT>;
assigned-clock-parents = <&gmac_clk>, <&gmac_clk>;
clock_in_out = "input";
phy-mode = "rgmii";
phy-supply = <&vcc_io>;
pinctrl-0 = <&rgmiim1_pins>;
pinctrl-names = "default";

@ -22,11 +22,11 @@
};
/* EC turns on w/ pp900_usb_en */
pp900_usb: pp900-ap {
pp900_usb: regulator-pp900-ap {
};
/* EC turns on w/ pp900_pcie_en */
pp900_pcie: pp900-ap {
pp900_pcie: regulator-pp900-ap {
};
pp3000: regulator-pp3000 {
@ -126,7 +126,7 @@
};
/* Always on; plain and simple */
pp3000_ap: pp3000_emmc: pp3000 {
pp3000_ap: pp3000_emmc: regulator-pp3000 {
};
pp1500_ap_io: regulator-pp1500-ap-io {
@ -160,7 +160,7 @@
};
/* EC turns on w/ pp3300_usb_en_l */
pp3300_usb: pp3300 {
pp3300_usb: regulator-pp3300 {
};
/* gpio is shared with pp1800_pcie and pinctrl is set there */

@ -92,7 +92,7 @@
};
/* EC turns on pp1800_s3_en */
pp1800_s3: pp1800 {
pp1800_s3: regulator-pp1800 {
};
/* pp3300 children, sorted by name */
@ -109,11 +109,11 @@
};
/* EC turns on pp3300_s0_en */
pp3300_s0: pp3300 {
pp3300_s0: regulator-pp3300 {
};
/* EC turns on pp3300_s3_en */
pp3300_s3: pp3300 {
pp3300_s3: regulator-pp3300 {
};
/*

@ -189,39 +189,39 @@
};
/* EC turns on w/ pp900_ddrpll_en */
pp900_ddrpll: pp900-ap {
pp900_ddrpll: regulator-pp900-ap {
};
/* EC turns on w/ pp900_pll_en */
pp900_pll: pp900-ap {
pp900_pll: regulator-pp900-ap {
};
/* EC turns on w/ pp900_pmu_en */
pp900_pmu: pp900-ap {
pp900_pmu: regulator-pp900-ap {
};
/* EC turns on w/ pp1800_s0_en_l */
pp1800_ap_io: pp1800_emmc: pp1800_nfc: pp1800_s0: pp1800 {
pp1800_ap_io: pp1800_emmc: pp1800_nfc: pp1800_s0: regulator-pp1800 {
};
/* EC turns on w/ pp1800_avdd_en_l */
pp1800_avdd: pp1800 {
pp1800_avdd: regulator-pp1800 {
};
/* EC turns on w/ pp1800_lid_en_l */
pp1800_lid: pp1800_mic: pp1800 {
pp1800_lid: pp1800_mic: regulator-pp1800 {
};
/* EC turns on w/ lpddr_pwr_en */
pp1800_lpddr: pp1800 {
pp1800_lpddr: regulator-pp1800 {
};
/* EC turns on w/ pp1800_pmu_en_l */
pp1800_pmu: pp1800 {
pp1800_pmu: regulator-pp1800 {
};
/* EC turns on w/ pp1800_usb_en_l */
pp1800_usb: pp1800 {
pp1800_usb: regulator-pp1800 {
};
pp3000_sd_slot: regulator-pp3000-sd-slot {
@ -259,11 +259,11 @@
};
/* EC turns on w/ pp3300_trackpad_en_l */
pp3300_trackpad: pp3300-trackpad {
pp3300_trackpad: regulator-pp3300-trackpad {
};
/* EC turns on w/ usb_a_en */
pp5000_usb_a_vbus: pp5000 {
pp5000_usb_a_vbus: regulator-pp5000 {
};
ap_rtc_clk: ap-rtc-clk {

@ -549,10 +549,10 @@
mmu600_pcie: iommu@fc900000 {
compatible = "arm,smmu-v3";
reg = <0x0 0xfc900000 0x0 0x200000>;
interrupts = <GIC_SPI 369 IRQ_TYPE_LEVEL_HIGH 0>,
<GIC_SPI 371 IRQ_TYPE_LEVEL_HIGH 0>,
<GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH 0>,
<GIC_SPI 367 IRQ_TYPE_LEVEL_HIGH 0>;
interrupts = <GIC_SPI 369 IRQ_TYPE_EDGE_RISING 0>,
<GIC_SPI 371 IRQ_TYPE_EDGE_RISING 0>,
<GIC_SPI 374 IRQ_TYPE_EDGE_RISING 0>,
<GIC_SPI 367 IRQ_TYPE_EDGE_RISING 0>;
interrupt-names = "eventq", "gerror", "priq", "cmdq-sync";
#iommu-cells = <1>;
};
@ -560,10 +560,10 @@
mmu600_php: iommu@fcb00000 {
compatible = "arm,smmu-v3";
reg = <0x0 0xfcb00000 0x0 0x200000>;
interrupts = <GIC_SPI 381 IRQ_TYPE_LEVEL_HIGH 0>,
<GIC_SPI 383 IRQ_TYPE_LEVEL_HIGH 0>,
<GIC_SPI 386 IRQ_TYPE_LEVEL_HIGH 0>,
<GIC_SPI 379 IRQ_TYPE_LEVEL_HIGH 0>;
interrupts = <GIC_SPI 381 IRQ_TYPE_EDGE_RISING 0>,
<GIC_SPI 383 IRQ_TYPE_EDGE_RISING 0>,
<GIC_SPI 386 IRQ_TYPE_EDGE_RISING 0>,
<GIC_SPI 379 IRQ_TYPE_EDGE_RISING 0>;
interrupt-names = "eventq", "gerror", "priq", "cmdq-sync";
#iommu-cells = <1>;
status = "disabled";
@ -2668,9 +2668,9 @@
rockchip,hw-tshut-temp = <120000>;
rockchip,hw-tshut-mode = <0>; /* tshut mode 0:CRU 1:GPIO */
rockchip,hw-tshut-polarity = <0>; /* tshut polarity 0:LOW 1:HIGH */
pinctrl-0 = <&tsadc_gpio_func>;
pinctrl-1 = <&tsadc_shut>;
pinctrl-names = "gpio", "otpout";
pinctrl-0 = <&tsadc_shut_org>;
pinctrl-1 = <&tsadc_gpio_func>;
pinctrl-names = "default", "sleep";
#thermal-sensor-cells = <1>;
status = "disabled";
};

@ -113,7 +113,7 @@
compatible = "regulator-fixed";
regulator-name = "vcc3v3_lcd";
enable-active-high;
gpio = <&gpio1 RK_PC4 GPIO_ACTIVE_HIGH>;
gpio = <&gpio0 RK_PC4 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&lcdpwr_en>;
vin-supply = <&vcc3v3_sys>;
@ -241,7 +241,7 @@
&pinctrl {
lcd {
lcdpwr_en: lcdpwr-en {
rockchip,pins = <1 RK_PC4 RK_FUNC_GPIO &pcfg_pull_down>;
rockchip,pins = <0 RK_PC4 RK_FUNC_GPIO &pcfg_pull_down>;
};
bl_en: bl-en {

@ -213,7 +213,6 @@
interrupt-names = "sys", "pmc", "msg", "legacy", "err",
"dma0", "dma1", "dma2", "dma3";
max-link-speed = <3>;
iommus = <&mmu600_pcie 0x0000>;
num-lanes = <4>;
phys = <&pcie30phy>;
phy-names = "pcie-phy";

@ -23,3 +23,7 @@
vpcie3v3-supply = <&vcc3v3_pcie30>;
status = "okay";
};
&mmu600_pcie {
status = "disabled";
};

@ -1551,6 +1551,8 @@ CONFIG_PWM_VISCONTI=m
CONFIG_SL28CPLD_INTC=y
CONFIG_QCOM_PDC=y
CONFIG_QCOM_MPM=y
CONFIG_TI_SCI_INTR_IRQCHIP=y
CONFIG_TI_SCI_INTA_IRQCHIP=y
CONFIG_RESET_GPIO=m
CONFIG_RESET_IMX7=y
CONFIG_RESET_QCOM_AOSS=y

@ -42,8 +42,8 @@ extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t pte, int dirty);
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep);
extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned long sz);
#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
extern void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep);
@ -76,12 +76,22 @@ static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
{
unsigned long stride = huge_page_size(hstate_vma(vma));
if (stride == PMD_SIZE)
__flush_tlb_range(vma, start, end, stride, false, 2);
else if (stride == PUD_SIZE)
__flush_tlb_range(vma, start, end, stride, false, 1);
else
__flush_tlb_range(vma, start, end, PAGE_SIZE, false, 0);
switch (stride) {
#ifndef __PAGETABLE_PMD_FOLDED
case PUD_SIZE:
__flush_tlb_range(vma, start, end, PUD_SIZE, false, 1);
break;
#endif
case CONT_PMD_SIZE:
case PMD_SIZE:
__flush_tlb_range(vma, start, end, PMD_SIZE, false, 2);
break;
case CONT_PTE_SIZE:
__flush_tlb_range(vma, start, end, PAGE_SIZE, false, 3);
break;
default:
__flush_tlb_range(vma, start, end, PAGE_SIZE, false, TLBI_TTL_UNKNOWN);
}
}
#endif /* __ASM_HUGETLB_H */

@ -119,7 +119,7 @@
#define TCR_EL2_IRGN0_MASK TCR_IRGN0_MASK
#define TCR_EL2_T0SZ_MASK 0x3f
#define TCR_EL2_MASK (TCR_EL2_TG0_MASK | TCR_EL2_SH0_MASK | \
TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK | TCR_EL2_T0SZ_MASK)
TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK)
/* VTCR_EL2 Registers bits */
#define VTCR_EL2_DS TCR_EL2_DS

@ -605,48 +605,6 @@ static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
__cpacr_to_cptr_set(clr, set));\
} while (0)
static __always_inline void kvm_write_cptr_el2(u64 val)
{
if (has_vhe() || has_hvhe())
write_sysreg(val, cpacr_el1);
else
write_sysreg(val, cptr_el2);
}
/* Resets the value of cptr_el2 when returning to the host. */
static __always_inline void __kvm_reset_cptr_el2(struct kvm *kvm)
{
u64 val;
if (has_vhe()) {
val = (CPACR_EL1_FPEN | CPACR_EL1_ZEN_EL1EN);
if (cpus_have_final_cap(ARM64_SME))
val |= CPACR_EL1_SMEN_EL1EN;
} else if (has_hvhe()) {
val = CPACR_EL1_FPEN;
if (!kvm_has_sve(kvm) || !guest_owns_fp_regs())
val |= CPACR_EL1_ZEN;
if (cpus_have_final_cap(ARM64_SME))
val |= CPACR_EL1_SMEN;
} else {
val = CPTR_NVHE_EL2_RES1;
if (kvm_has_sve(kvm) && guest_owns_fp_regs())
val |= CPTR_EL2_TZ;
if (!cpus_have_final_cap(ARM64_SME))
val |= CPTR_EL2_TSM;
}
kvm_write_cptr_el2(val);
}
#ifdef __KVM_NVHE_HYPERVISOR__
#define kvm_reset_cptr_el2(v) __kvm_reset_cptr_el2(kern_hyp_va((v)->kvm))
#else
#define kvm_reset_cptr_el2(v) __kvm_reset_cptr_el2((v)->kvm)
#endif
/*
* Returns a 'sanitised' view of CPTR_EL2, translating from nVHE to the VHE
* format if E2H isn't set.

@ -100,7 +100,7 @@ static inline void push_hyp_memcache(struct kvm_hyp_memcache *mc,
static inline void *pop_hyp_memcache(struct kvm_hyp_memcache *mc,
void *(*to_va)(phys_addr_t phys))
{
phys_addr_t *p = to_va(mc->head);
phys_addr_t *p = to_va(mc->head & PAGE_MASK);
if (!mc->nr_pages)
return NULL;
@ -615,8 +615,6 @@ struct cpu_sve_state {
struct kvm_host_data {
#define KVM_HOST_DATA_FLAG_HAS_SPE 0
#define KVM_HOST_DATA_FLAG_HAS_TRBE 1
#define KVM_HOST_DATA_FLAG_HOST_SVE_ENABLED 2
#define KVM_HOST_DATA_FLAG_HOST_SME_ENABLED 3
#define KVM_HOST_DATA_FLAG_TRBE_ENABLED 4
#define KVM_HOST_DATA_FLAG_EL1_TRACING_CONFIGURED 5
unsigned long flags;
@ -624,23 +622,13 @@ struct kvm_host_data {
struct kvm_cpu_context host_ctxt;
/*
* All pointers in this union are hyp VA.
* Hyp VA.
* sve_state is only used in pKVM and if system_supports_sve().
*/
union {
struct user_fpsimd_state *fpsimd_state;
struct cpu_sve_state *sve_state;
};
struct cpu_sve_state *sve_state;
union {
/* HYP VA pointer to the host storage for FPMR */
u64 *fpmr_ptr;
/*
* Used by pKVM only, as it needs to provide storage
* for the host
*/
u64 fpmr;
};
/* Used by pKVM only. */
u64 fpmr;
/* Ownership of the FP regs */
enum {
@ -1271,7 +1259,7 @@ int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
extern unsigned int __ro_after_init kvm_arm_vmid_bits;
int __init kvm_arm_vmid_alloc_init(void);
void __init kvm_arm_vmid_alloc_free(void);
bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
void kvm_arm_vmid_clear_active(void);
static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)

@ -101,16 +101,18 @@ int populate_cache_leaves(unsigned int cpu)
unsigned int level, idx;
enum cache_type type;
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
struct cacheinfo *this_leaf = this_cpu_ci->info_list;
struct cacheinfo *infos = this_cpu_ci->info_list;
for (idx = 0, level = 1; level <= this_cpu_ci->num_levels &&
idx < this_cpu_ci->num_leaves; idx++, level++) {
idx < this_cpu_ci->num_leaves; level++) {
type = get_cache_type(level);
if (type == CACHE_TYPE_SEPARATE) {
ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level);
ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level);
if (idx + 1 >= this_cpu_ci->num_leaves)
break;
ci_leaf_init(&infos[idx++], CACHE_TYPE_DATA, level);
ci_leaf_init(&infos[idx++], CACHE_TYPE_INST, level);
} else {
ci_leaf_init(this_leaf++, type, level);
ci_leaf_init(&infos[idx++], type, level);
}
}
return 0;

@ -3091,6 +3091,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(ID_AA64ISAR0_EL1, TS, FLAGM, CAP_HWCAP, KERNEL_HWCAP_FLAGM),
HWCAP_CAP(ID_AA64ISAR0_EL1, TS, FLAGM2, CAP_HWCAP, KERNEL_HWCAP_FLAGM2),
HWCAP_CAP(ID_AA64ISAR0_EL1, RNDR, IMP, CAP_HWCAP, KERNEL_HWCAP_RNG),
HWCAP_CAP(ID_AA64ISAR3_EL1, FPRCVT, IMP, CAP_HWCAP, KERNEL_HWCAP_FPRCVT),
HWCAP_CAP(ID_AA64PFR0_EL1, FP, IMP, CAP_HWCAP, KERNEL_HWCAP_FP),
HWCAP_CAP(ID_AA64PFR0_EL1, FP, FP16, CAP_HWCAP, KERNEL_HWCAP_FPHP),
HWCAP_CAP(ID_AA64PFR0_EL1, AdvSIMD, IMP, CAP_HWCAP, KERNEL_HWCAP_ASIMD),
@ -3180,8 +3181,6 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(ID_AA64SMFR0_EL1, SF8FMA, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8FMA),
HWCAP_CAP(ID_AA64SMFR0_EL1, SF8DP4, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8DP4),
HWCAP_CAP(ID_AA64SMFR0_EL1, SF8DP2, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8DP2),
HWCAP_CAP(ID_AA64SMFR0_EL1, SF8MM8, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8MM8),
HWCAP_CAP(ID_AA64SMFR0_EL1, SF8MM4, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8MM4),
HWCAP_CAP(ID_AA64SMFR0_EL1, SBitPerm, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SBITPERM),
HWCAP_CAP(ID_AA64SMFR0_EL1, AES, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_AES),
HWCAP_CAP(ID_AA64SMFR0_EL1, SFEXPA, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SFEXPA),
@ -3192,6 +3191,8 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(ID_AA64FPFR0_EL1, F8FMA, IMP, CAP_HWCAP, KERNEL_HWCAP_F8FMA),
HWCAP_CAP(ID_AA64FPFR0_EL1, F8DP4, IMP, CAP_HWCAP, KERNEL_HWCAP_F8DP4),
HWCAP_CAP(ID_AA64FPFR0_EL1, F8DP2, IMP, CAP_HWCAP, KERNEL_HWCAP_F8DP2),
HWCAP_CAP(ID_AA64FPFR0_EL1, F8MM8, IMP, CAP_HWCAP, KERNEL_HWCAP_F8MM8),
HWCAP_CAP(ID_AA64FPFR0_EL1, F8MM4, IMP, CAP_HWCAP, KERNEL_HWCAP_F8MM4),
HWCAP_CAP(ID_AA64FPFR0_EL1, F8E4M3, IMP, CAP_HWCAP, KERNEL_HWCAP_F8E4M3),
HWCAP_CAP(ID_AA64FPFR0_EL1, F8E5M2, IMP, CAP_HWCAP, KERNEL_HWCAP_F8E5M2),
#ifdef CONFIG_ARM64_POE

@ -1694,31 +1694,6 @@ void fpsimd_signal_preserve_current_state(void)
sve_to_fpsimd(current);
}
/*
* Called by KVM when entering the guest.
*/
void fpsimd_kvm_prepare(void)
{
if (!system_supports_sve())
return;
/*
* KVM does not save host SVE state since we can only enter
* the guest from a syscall so the ABI means that only the
* non-saved SVE state needs to be saved. If we have left
* SVE enabled for performance reasons then update the task
* state to be FPSIMD only.
*/
get_cpu_fpsimd_context();
if (test_and_clear_thread_flag(TIF_SVE)) {
sve_to_fpsimd(current);
current->thread.fp_type = FP_STATE_FPSIMD;
}
put_cpu_fpsimd_context();
}
/*
* Associate current's FPSIMD context with this cpu
* The caller must have ownership of the cpu FPSIMD context before calling

@ -194,12 +194,19 @@ static void amu_fie_setup(const struct cpumask *cpus)
int cpu;
/* We are already set since the last insmod of cpufreq driver */
if (unlikely(cpumask_subset(cpus, amu_fie_cpus)))
if (cpumask_available(amu_fie_cpus) &&
unlikely(cpumask_subset(cpus, amu_fie_cpus)))
return;
for_each_cpu(cpu, cpus) {
for_each_cpu(cpu, cpus)
if (!freq_counters_valid(cpu))
return;
if (!cpumask_available(amu_fie_cpus) &&
!zalloc_cpumask_var(&amu_fie_cpus, GFP_KERNEL)) {
WARN_ONCE(1, "Failed to allocate FIE cpumask for CPUs[%*pbl]\n",
cpumask_pr_args(cpus));
return;
}
cpumask_or(amu_fie_cpus, amu_fie_cpus, cpus);
@ -237,17 +244,8 @@ static struct notifier_block init_amu_fie_notifier = {
static int __init init_amu_fie(void)
{
int ret;
if (!zalloc_cpumask_var(&amu_fie_cpus, GFP_KERNEL))
return -ENOMEM;
ret = cpufreq_register_notifier(&init_amu_fie_notifier,
return cpufreq_register_notifier(&init_amu_fie_notifier,
CPUFREQ_POLICY_NOTIFIER);
if (ret)
free_cpumask_var(amu_fie_cpus);
return ret;
}
core_initcall(init_amu_fie);

@ -41,6 +41,7 @@ SECTIONS
*/
/DISCARD/ : {
*(.note.GNU-stack .note.gnu.property)
*(.ARM.attributes)
}
.note : { *(.note.*) } :text :note

@ -162,6 +162,7 @@ SECTIONS
/DISCARD/ : {
*(.interp .dynamic)
*(.dynsym .dynstr .hash .gnu.hash)
*(.ARM.attributes)
}
. = KIMAGE_VADDR;

@ -447,21 +447,19 @@ static void kvm_timer_update_status(struct arch_timer_context *ctx, bool level)
static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
struct arch_timer_context *timer_ctx)
{
int ret;
kvm_timer_update_status(timer_ctx, new_level);
timer_ctx->irq.level = new_level;
trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_irq(timer_ctx),
timer_ctx->irq.level);
if (!userspace_irqchip(vcpu->kvm)) {
ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu,
timer_irq(timer_ctx),
timer_ctx->irq.level,
timer_ctx);
WARN_ON(ret);
}
if (userspace_irqchip(vcpu->kvm))
return;
kvm_vgic_inject_irq(vcpu->kvm, vcpu,
timer_irq(timer_ctx),
timer_ctx->irq.level,
timer_ctx);
}
/* Only called for a fully emulated timer */
@ -471,10 +469,8 @@ static void timer_emulate(struct arch_timer_context *ctx)
trace_kvm_timer_emulate(ctx, should_fire);
if (should_fire != ctx->irq.level) {
if (should_fire != ctx->irq.level)
kvm_timer_update_irq(ctx->vcpu, should_fire, ctx);
return;
}
kvm_timer_update_status(ctx, should_fire);
@ -761,21 +757,6 @@ static void kvm_timer_vcpu_load_nested_switch(struct kvm_vcpu *vcpu,
timer_irq(map->direct_ptimer),
&arch_timer_irq_ops);
WARN_ON_ONCE(ret);
/*
* The virtual offset behaviour is "interesting", as it
* always applies when HCR_EL2.E2H==0, but only when
* accessed from EL1 when HCR_EL2.E2H==1. So make sure we
* track E2H when putting the HV timer in "direct" mode.
*/
if (map->direct_vtimer == vcpu_hvtimer(vcpu)) {
struct arch_timer_offset *offs = &map->direct_vtimer->offset;
if (vcpu_el2_e2h_is_set(vcpu))
offs->vcpu_offset = NULL;
else
offs->vcpu_offset = &__vcpu_sys_reg(vcpu, CNTVOFF_EL2);
}
}
}
@ -976,31 +957,21 @@ void kvm_timer_sync_nested(struct kvm_vcpu *vcpu)
* which allows trapping of the timer registers even with NV2.
* Still, this is worse than FEAT_NV on its own. Meh.
*/
if (!vcpu_el2_e2h_is_set(vcpu)) {
if (cpus_have_final_cap(ARM64_HAS_ECV))
return;
/*
* A non-VHE guest hypervisor doesn't have any direct access
* to its timers: the EL2 registers trap (and the HW is
* fully emulated), while the EL0 registers access memory
* despite the access being notionally direct. Boo.
*
* We update the hardware timer registers with the
* latest value written by the guest to the VNCR page
* and let the hardware take care of the rest.
*/
write_sysreg_el0(__vcpu_sys_reg(vcpu, CNTV_CTL_EL0), SYS_CNTV_CTL);
write_sysreg_el0(__vcpu_sys_reg(vcpu, CNTV_CVAL_EL0), SYS_CNTV_CVAL);
write_sysreg_el0(__vcpu_sys_reg(vcpu, CNTP_CTL_EL0), SYS_CNTP_CTL);
write_sysreg_el0(__vcpu_sys_reg(vcpu, CNTP_CVAL_EL0), SYS_CNTP_CVAL);
} else {
if (!cpus_have_final_cap(ARM64_HAS_ECV)) {
/*
* For a VHE guest hypervisor, the EL2 state is directly
* stored in the host EL1 timers, while the emulated EL0
* stored in the host EL1 timers, while the emulated EL1
* state is stored in the VNCR page. The latter could have
* been updated behind our back, and we must reset the
* emulation of the timers.
*
* A non-VHE guest hypervisor doesn't have any direct access
* to its timers: the EL2 registers trap despite being
* notionally direct (we use the EL1 HW, as for VHE), while
* the EL1 registers access memory.
*
* In both cases, process the emulated timers on each guest
* exit. Boo.
*/
struct timer_map map;
get_timer_map(vcpu, &map);

@ -559,6 +559,16 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
mmu = vcpu->arch.hw_mmu;
last_ran = this_cpu_ptr(mmu->last_vcpu_ran);
/*
* Ensure a VMID is allocated for the MMU before programming VTTBR_EL2,
* which happens eagerly in VHE.
*
* Also, the VMID allocator only preserves VMIDs that are active at the
* time of rollover, so KVM might need to grab a new VMID for the MMU if
* this is called from kvm_sched_in().
*/
kvm_arm_vmid_update(&mmu->vmid);
/*
* We guarantee that both TLBs and I-cache are private to each
* vcpu. If detecting that a vcpu from the same VM has
@ -1138,18 +1148,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
*/
preempt_disable();
/*
* The VMID allocator only tracks active VMIDs per
* physical CPU, and therefore the VMID allocated may not be
* preserved on VMID roll-over if the task was preempted,
* making a thread's VMID inactive. So we need to call
* kvm_arm_vmid_update() in non-preemptible context.
*/
if (kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid) &&
has_vhe())
__load_stage2(vcpu->arch.hw_mmu,
vcpu->arch.hw_mmu->arch);
kvm_pmu_flush_hwstate(vcpu);
local_irq_disable();
@ -1980,7 +1978,7 @@ static int kvm_init_vector_slots(void)
static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
{
struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
unsigned long tcr, ips;
unsigned long tcr;
/*
* Calculate the raw per-cpu offset without a translation from the
@ -1994,19 +1992,18 @@ static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
params->mair_el2 = read_sysreg(mair_el1);
tcr = read_sysreg(tcr_el1);
ips = FIELD_GET(TCR_IPS_MASK, tcr);
if (cpus_have_final_cap(ARM64_KVM_HVHE)) {
tcr &= ~(TCR_HD | TCR_HA | TCR_A1 | TCR_T0SZ_MASK);
tcr |= TCR_EPD1_MASK;
} else {
unsigned long ips = FIELD_GET(TCR_IPS_MASK, tcr);
tcr &= TCR_EL2_MASK;
tcr |= TCR_EL2_RES1;
tcr |= TCR_EL2_RES1 | FIELD_PREP(TCR_EL2_PS_MASK, ips);
if (lpa2_is_enabled())
tcr |= TCR_EL2_DS;
}
tcr &= ~TCR_T0SZ_MASK;
tcr |= TCR_T0SZ(hyp_va_bits);
tcr &= ~TCR_EL2_PS_MASK;
tcr |= FIELD_PREP(TCR_EL2_PS_MASK, ips);
if (lpa2_is_enabled())
tcr |= TCR_EL2_DS;
params->tcr_el2 = tcr;
params->pgd_pa = kvm_mmu_get_httbr();
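The TCR hunk above carries the IPS field of TCR_EL1 into the PS field of TCR_EL2 using FIELD_GET()/FIELD_PREP() from linux/bitfield.h, which extract and insert a value at the position described by a contiguous bitmask. A self-contained userspace analogue (GCC/Clang builtins; treat the mask values as illustrative):

#include <stdint.h>
#include <stdio.h>

/* Shift implied by the mask's lowest set bit, as linux/bitfield.h derives it. */
#define FIELD_SHIFT(mask)	__builtin_ctzll(mask)
#define FIELD_GET(mask, reg)	(((reg) & (mask)) >> FIELD_SHIFT(mask))
#define FIELD_PREP(mask, val)	(((uint64_t)(val) << FIELD_SHIFT(mask)) & (mask))

#define TCR_IPS_MASK	0x0000000700000000ULL	/* bits [34:32] in this sketch */
#define TCR_EL2_PS_MASK	0x0000000000070000ULL	/* bits [18:16] in this sketch */

int main(void)
{
	uint64_t tcr_el1 = FIELD_PREP(TCR_IPS_MASK, 5);	/* pretend IPS == 5 */
	uint64_t ips = FIELD_GET(TCR_IPS_MASK, tcr_el1);
	uint64_t tcr_el2 = FIELD_PREP(TCR_EL2_PS_MASK, ips);

	printf("ips=%llu tcr_el2=%#llx\n",
	       (unsigned long long)ips, (unsigned long long)tcr_el2);
	return 0;
}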
@ -2290,6 +2287,19 @@ static int __init init_subsystems(void)
break;
case -ENODEV:
case -ENXIO:
/*
* No VGIC? No pKVM for you.
*
* Protected mode assumes that VGICv3 is present, so no point
* in trying to hobble along if vgic initialization fails.
*/
if (is_protected_kvm_enabled())
goto out;
/*
* Otherwise, userspace could choose to implement a GIC for its
* guest on non-cooperative hardware.
*/
vgic_present = false;
err = 0;
break;
@ -2400,6 +2410,13 @@ static void kvm_hyp_init_symbols(void)
kvm_nvhe_sym(id_aa64smfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64SMFR0_EL1);
kvm_nvhe_sym(__icache_flags) = __icache_flags;
kvm_nvhe_sym(kvm_arm_vmid_bits) = kvm_arm_vmid_bits;
/*
* Flush entire BSS since part of its data containing init symbols is read
* while the MMU is off.
*/
kvm_flush_dcache_to_poc(kvm_ksym_ref(__hyp_bss_start),
kvm_ksym_ref(__hyp_bss_end) - kvm_ksym_ref(__hyp_bss_start));
}
static int __init kvm_hyp_init_protection(u32 hyp_va_bits)
@ -2461,14 +2478,6 @@ static void finalize_init_hyp_mode(void)
per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state =
kern_hyp_va(sve_state);
}
} else {
for_each_possible_cpu(cpu) {
struct user_fpsimd_state *fpsimd_state;
fpsimd_state = &per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->host_ctxt.fp_regs;
per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->fpsimd_state =
kern_hyp_va(fpsimd_state);
}
}
}

@ -54,50 +54,18 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
if (!system_supports_fpsimd())
return;
fpsimd_kvm_prepare();
/*
* We will check TIF_FOREIGN_FPSTATE just before entering the
* guest in kvm_arch_vcpu_ctxflush_fp() and override this to
* FP_STATE_FREE if the flag set.
* Ensure that any host FPSIMD/SVE/SME state is saved and unbound such
* that the host kernel is responsible for restoring this state upon
* return to userspace, and the hyp code doesn't need to save anything.
*
* When the host may use SME, fpsimd_save_and_flush_cpu_state() ensures
* that PSTATE.{SM,ZA} == {0,0}.
*/
*host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
*host_data_ptr(fpsimd_state) = kern_hyp_va(&current->thread.uw.fpsimd_state);
*host_data_ptr(fpmr_ptr) = kern_hyp_va(&current->thread.uw.fpmr);
fpsimd_save_and_flush_cpu_state();
*host_data_ptr(fp_owner) = FP_STATE_FREE;
host_data_clear_flag(HOST_SVE_ENABLED);
if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
host_data_set_flag(HOST_SVE_ENABLED);
if (system_supports_sme()) {
host_data_clear_flag(HOST_SME_ENABLED);
if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN)
host_data_set_flag(HOST_SME_ENABLED);
/*
* If PSTATE.SM is enabled then save any pending FP
* state and disable PSTATE.SM. If we leave PSTATE.SM
* enabled and the guest does not enable SME via
* CPACR_EL1.SMEN then operations that should be valid
* may generate SME traps from EL1 to EL1 which we
* can't intercept and which would confuse the guest.
*
* Do the same for PSTATE.ZA in the case where there
* is state in the registers which has not already
* been saved, this is very unlikely to happen.
*/
if (read_sysreg_s(SYS_SVCR) & (SVCR_SM_MASK | SVCR_ZA_MASK)) {
*host_data_ptr(fp_owner) = FP_STATE_FREE;
fpsimd_save_and_flush_cpu_state();
}
}
/*
* If normal guests gain SME support, maintain this behavior for pKVM
* guests, which don't support SME.
*/
WARN_ON(is_protected_kvm_enabled() && system_supports_sme() &&
read_sysreg_s(SYS_SVCR));
WARN_ON_ONCE(system_supports_sme() && read_sysreg_s(SYS_SVCR));
}
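With this change the host's FP/SIMD/SVE/SME state is saved and flushed once, on vcpu_load, so ownership tracking collapses to two interesting states: free, and guest-owned after the first trapped FP access. A toy model of that flow (simplified names; the real transitions span kvm_arch_vcpu_load_fp(), kvm_hyp_handle_fpsimd() and kvm_arch_vcpu_put_fp()):

#include <stdio.h>

enum fp_state { FP_STATE_FREE, FP_STATE_GUEST_OWNED };

static enum fp_state fp_owner;

/* vcpu_load: host state is saved+flushed, so nothing live remains in the CPU. */
static void vcpu_load_fp(void)
{
	fp_owner = FP_STATE_FREE;
}

/* First guest FP access traps; restore guest state and hand over ownership. */
static void handle_fp_trap(void)
{
	if (fp_owner == FP_STATE_FREE)
		fp_owner = FP_STATE_GUEST_OWNED;
}

/* vcpu_put: if the guest owns the regs, save+flush so the host can reuse them. */
static void vcpu_put_fp(void)
{
	if (fp_owner == FP_STATE_GUEST_OWNED)
		fp_owner = FP_STATE_FREE;
}

int main(void)
{
	vcpu_load_fp();
	handle_fp_trap();
	printf("owner after trap: %s\n",
	       fp_owner == FP_STATE_GUEST_OWNED ? "guest" : "free");
	vcpu_put_fp();
	return 0;
}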
/*
@ -162,52 +130,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
local_irq_save(flags);
/*
* If we have VHE then the Hyp code will reset CPACR_EL1 to
* the default value and we need to reenable SME.
*/
if (has_vhe() && system_supports_sme()) {
/* Also restore EL0 state seen on entry */
if (host_data_test_flag(HOST_SME_ENABLED))
sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_SMEN);
else
sysreg_clear_set(CPACR_EL1,
CPACR_EL1_SMEN_EL0EN,
CPACR_EL1_SMEN_EL1EN);
isb();
}
if (guest_owns_fp_regs()) {
if (vcpu_has_sve(vcpu)) {
u64 zcr = read_sysreg_el1(SYS_ZCR);
/*
* If the vCPU is in the hyp context then ZCR_EL1 is
* loaded with its vEL2 counterpart.
*/
__vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)) = zcr;
/*
* Restore the VL that was saved when bound to the CPU,
* which is the maximum VL for the guest. Because the
* layout of the data when saving the sve state depends
* on the VL, we need to use a consistent (i.e., the
* maximum) VL.
* Note that this means that at guest exit ZCR_EL1 is
* not necessarily the same as on guest entry.
*
* ZCR_EL2 holds the guest hypervisor's VL when running
* a nested guest, which could be smaller than the
* max for the vCPU. Similar to above, we first need to
* switch to a VL consistent with the layout of the
* vCPU's SVE state. KVM support for NV implies VHE, so
* using the ZCR_EL1 alias is safe.
*/
if (!has_vhe() || (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)))
sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1,
SYS_ZCR_EL1);
}
/*
* Flush (save and invalidate) the fpsimd/sve state so that if
* the host tries to use fpsimd/sve, it's not using stale data
@ -219,18 +142,6 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
* when needed.
*/
fpsimd_save_and_flush_cpu_state();
} else if (has_vhe() && system_supports_sve()) {
/*
* The FPSIMD/SVE state in the CPU has not been touched, and we
* have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been
* reset by kvm_reset_cptr_el2() in the Hyp code, disabling SVE
* for EL0. To avoid spurious traps, restore the trap state
* seen by kvm_arch_vcpu_load_fp():
*/
if (host_data_test_flag(HOST_SVE_ENABLED))
sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_ZEN_EL0EN);
else
sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);
}
local_irq_restore(flags);

@ -44,6 +44,11 @@ alternative_if ARM64_HAS_RAS_EXTN
alternative_else_nop_endif
mrs x1, isr_el1
cbz x1, 1f
// Ensure that __guest_enter() always provides a context
// synchronization event so that callers don't need ISBs for anything
// that would usually be synchronized by the ERET.
isb
mov x0, #ARM_EXCEPTION_IRQ
ret

@ -326,7 +326,7 @@ static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
return __get_fault_info(vcpu->arch.fault.esr_el2, &vcpu->arch.fault);
}
static bool kvm_hyp_handle_mops(struct kvm_vcpu *vcpu, u64 *exit_code)
static inline bool kvm_hyp_handle_mops(struct kvm_vcpu *vcpu, u64 *exit_code)
{
*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
arm64_mops_reset_regs(vcpu_gp_regs(vcpu), vcpu->arch.fault.esr_el2);
@ -375,7 +375,87 @@ static inline void __hyp_sve_save_host(void)
true);
}
static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu);
static inline void fpsimd_lazy_switch_to_guest(struct kvm_vcpu *vcpu)
{
u64 zcr_el1, zcr_el2;
if (!guest_owns_fp_regs())
return;
if (vcpu_has_sve(vcpu)) {
/* A guest hypervisor may restrict the effective max VL. */
if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu))
zcr_el2 = __vcpu_sys_reg(vcpu, ZCR_EL2);
else
zcr_el2 = vcpu_sve_max_vq(vcpu) - 1;
write_sysreg_el2(zcr_el2, SYS_ZCR);
zcr_el1 = __vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu));
write_sysreg_el1(zcr_el1, SYS_ZCR);
}
}
static inline void fpsimd_lazy_switch_to_host(struct kvm_vcpu *vcpu)
{
u64 zcr_el1, zcr_el2;
if (!guest_owns_fp_regs())
return;
/*
* When the guest owns the FP regs, we know that guest+hyp traps for
* any FPSIMD/SVE/SME features exposed to the guest have been disabled
* by either fpsimd_lazy_switch_to_guest() or kvm_hyp_handle_fpsimd()
* prior to __guest_enter(). As __guest_enter() guarantees a context
* synchronization event, we don't need an ISB here to avoid taking
* traps for anything that was exposed to the guest.
*/
if (vcpu_has_sve(vcpu)) {
zcr_el1 = read_sysreg_el1(SYS_ZCR);
__vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)) = zcr_el1;
/*
* The guest's state is always saved using the guest's max VL.
* Ensure that the host has the guest's max VL active such that
* the host can save the guest's state lazily, but don't
* artificially restrict the host to the guest's max VL.
*/
if (has_vhe()) {
zcr_el2 = vcpu_sve_max_vq(vcpu) - 1;
write_sysreg_el2(zcr_el2, SYS_ZCR);
} else {
zcr_el2 = sve_vq_from_vl(kvm_host_sve_max_vl) - 1;
write_sysreg_el2(zcr_el2, SYS_ZCR);
zcr_el1 = vcpu_sve_max_vq(vcpu) - 1;
write_sysreg_el1(zcr_el1, SYS_ZCR);
}
}
}
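Both branches above program ZCR with vq - 1: SVE measures vector length in 128-bit quadwords, so a vector length of vl bytes gives vq = vl / 16, and the LEN field of ZCR_ELx stores vq - 1. A quick sketch of the arithmetic, mirroring what sve_vq_from_vl() computes:

#include <stdio.h>

/* SVE vector lengths are multiples of 16 bytes (one 128-bit quadword). */
static unsigned int sve_vq_from_vl(unsigned int vl_bytes)
{
	return vl_bytes / 16;
}

int main(void)
{
	unsigned int vl = 256;			/* a 256-byte (2048-bit) VL */
	unsigned int vq = sve_vq_from_vl(vl);
	unsigned int zcr_len = vq - 1;		/* value written to ZCR_ELx.LEN */

	printf("vl=%u bytes -> vq=%u -> ZCR.LEN=%u\n", vl, vq, zcr_len);
	return 0;
}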
static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
{
/*
* Non-protected kvm relies on the host restoring its sve state.
* Protected kvm restores the host's sve state so as not to reveal that
* fpsimd was used by a guest or leak upper sve bits.
*/
if (system_supports_sve()) {
__hyp_sve_save_host();
/* Re-enable SVE traps if not supported for the guest vcpu. */
if (!vcpu_has_sve(vcpu))
cpacr_clear_set(CPACR_EL1_ZEN, 0);
} else {
__fpsimd_save_state(host_data_ptr(host_ctxt.fp_regs));
}
if (kvm_has_fpmr(kern_hyp_va(vcpu->kvm)))
*host_data_ptr(fpmr) = read_sysreg_s(SYS_FPMR);
}
/*
* We trap the first access to the FP/SIMD to save the host context and
@ -383,7 +463,7 @@ static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu);
* If FP/SIMD is not implemented, handle the trap and inject an undefined
* instruction exception to the guest. Similarly for trapped SVE accesses.
*/
static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
static inline bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
{
bool sve_guest;
u8 esr_ec;
@ -425,7 +505,7 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
isb();
/* Write out the host state if it's in the registers */
if (host_owns_fp_regs())
if (is_protected_kvm_enabled() && host_owns_fp_regs())
kvm_hyp_save_fpsimd_host(vcpu);
/* Restore the guest state */
@ -501,9 +581,22 @@ static inline bool handle_tx2_tvm(struct kvm_vcpu *vcpu)
return true;
}
/* Open-coded version of timer_get_offset() to allow for kern_hyp_va() */
static inline u64 hyp_timer_get_offset(struct arch_timer_context *ctxt)
{
u64 offset = 0;
if (ctxt->offset.vm_offset)
offset += *kern_hyp_va(ctxt->offset.vm_offset);
if (ctxt->offset.vcpu_offset)
offset += *kern_hyp_va(ctxt->offset.vcpu_offset);
return offset;
}
static inline u64 compute_counter_value(struct arch_timer_context *ctxt)
{
return arch_timer_read_cntpct_el0() - timer_get_offset(ctxt);
return arch_timer_read_cntpct_el0() - hyp_timer_get_offset(ctxt);
}
static bool kvm_handle_cntxct(struct kvm_vcpu *vcpu)
@ -587,7 +680,7 @@ static bool handle_ampere1_tcr(struct kvm_vcpu *vcpu)
return true;
}
static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
static inline bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
{
if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
handle_tx2_tvm(vcpu))
@ -607,7 +700,7 @@ static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
return false;
}
static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
static inline bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
{
if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
__vgic_v3_perform_cpuif_access(vcpu) == 1)
@ -616,19 +709,18 @@ static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
return false;
}
static bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu, u64 *exit_code)
static inline bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu,
u64 *exit_code)
{
if (!__populate_fault_info(vcpu))
return true;
return false;
}
static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
__alias(kvm_hyp_handle_memory_fault);
static bool kvm_hyp_handle_watchpt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
__alias(kvm_hyp_handle_memory_fault);
#define kvm_hyp_handle_iabt_low kvm_hyp_handle_memory_fault
#define kvm_hyp_handle_watchpt_low kvm_hyp_handle_memory_fault
static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
static inline bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
{
if (kvm_hyp_handle_memory_fault(vcpu, exit_code))
return true;
@ -658,23 +750,16 @@ static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *);
static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu);
static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code);
/*
* Allow the hypervisor to handle the exit with an exit handler if it has one.
*
* Returns true if the hypervisor handled the exit, and control should go back
* to the guest, or false if it hasn't.
*/
static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code,
const exit_handler_fn *handlers)
{
const exit_handler_fn *handlers = kvm_get_exit_handler_array(vcpu);
exit_handler_fn fn;
fn = handlers[kvm_vcpu_trap_get_class(vcpu)];
exit_handler_fn fn = handlers[kvm_vcpu_trap_get_class(vcpu)];
if (fn)
return fn(vcpu, exit_code);
@ -704,20 +789,9 @@ static inline void synchronize_vcpu_pstate(struct kvm_vcpu *vcpu, u64 *exit_code
* the guest, false when we should restore the host state and return to the
* main run loop.
*/
static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
static inline bool __fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code,
const exit_handler_fn *handlers)
{
/*
* Save PSTATE early so that we can evaluate the vcpu mode
* early on.
*/
synchronize_vcpu_pstate(vcpu, exit_code);
/*
* Check whether we want to repaint the state one way or
* another.
*/
early_exit_filter(vcpu, exit_code);
if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);
@ -747,7 +821,7 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
goto exit;
/* Check if there's an exit handler and allow it to handle the exit. */
if (kvm_hyp_handle_exit(vcpu, exit_code))
if (kvm_hyp_handle_exit(vcpu, exit_code, handlers))
goto guest;
exit:
/* Return to the host kernel and handle the exit */

@ -5,6 +5,7 @@
*/
#include <hyp/adjust_pc.h>
#include <hyp/switch.h>
#include <asm/pgtable-types.h>
#include <asm/kvm_asm.h>
@ -83,7 +84,7 @@ static void fpsimd_sve_sync(struct kvm_vcpu *vcpu)
if (system_supports_sve())
__hyp_sve_restore_host();
else
__fpsimd_restore_state(*host_data_ptr(fpsimd_state));
__fpsimd_restore_state(host_data_ptr(host_ctxt.fp_regs));
if (has_fpmr)
write_sysreg_s(*host_data_ptr(fpmr), SYS_FPMR);
@ -91,11 +92,34 @@ static void fpsimd_sve_sync(struct kvm_vcpu *vcpu)
*host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
}
static void flush_debug_state(struct pkvm_hyp_vcpu *hyp_vcpu)
{
struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
hyp_vcpu->vcpu.arch.debug_owner = host_vcpu->arch.debug_owner;
if (kvm_guest_owns_debug_regs(&hyp_vcpu->vcpu))
hyp_vcpu->vcpu.arch.vcpu_debug_state = host_vcpu->arch.vcpu_debug_state;
else if (kvm_host_owns_debug_regs(&hyp_vcpu->vcpu))
hyp_vcpu->vcpu.arch.external_debug_state = host_vcpu->arch.external_debug_state;
}
static void sync_debug_state(struct pkvm_hyp_vcpu *hyp_vcpu)
{
struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
if (kvm_guest_owns_debug_regs(&hyp_vcpu->vcpu))
host_vcpu->arch.vcpu_debug_state = hyp_vcpu->vcpu.arch.vcpu_debug_state;
else if (kvm_host_owns_debug_regs(&hyp_vcpu->vcpu))
host_vcpu->arch.external_debug_state = hyp_vcpu->vcpu.arch.external_debug_state;
}
static void flush_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
{
struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
fpsimd_sve_flush();
flush_debug_state(hyp_vcpu);
hyp_vcpu->vcpu.arch.ctxt = host_vcpu->arch.ctxt;
@ -123,6 +147,7 @@ static void sync_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
unsigned int i;
fpsimd_sve_sync(&hyp_vcpu->vcpu);
sync_debug_state(hyp_vcpu);
host_vcpu->arch.ctxt = hyp_vcpu->vcpu.arch.ctxt;
@ -200,8 +225,12 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
sync_hyp_vcpu(hyp_vcpu);
} else {
struct kvm_vcpu *vcpu = kern_hyp_va(host_vcpu);
/* The host is fully trusted, run its vCPU directly. */
ret = __kvm_vcpu_run(kern_hyp_va(host_vcpu));
fpsimd_lazy_switch_to_guest(vcpu);
ret = __kvm_vcpu_run(vcpu);
fpsimd_lazy_switch_to_host(vcpu);
}
out:
cpu_reg(host_ctxt, 1) = ret;
@ -651,12 +680,6 @@ void handle_trap(struct kvm_cpu_context *host_ctxt)
case ESR_ELx_EC_SMC64:
handle_host_smc(host_ctxt);
break;
case ESR_ELx_EC_SVE:
cpacr_clear_set(0, CPACR_EL1_ZEN);
isb();
sve_cond_update_zcr_vq(sve_vq_from_vl(kvm_host_sve_max_vl) - 1,
SYS_ZCR_EL2);
break;
case ESR_ELx_EC_IABT_LOW:
case ESR_ELx_EC_DABT_LOW:
handle_host_mem_abort(host_ctxt);

@ -943,10 +943,10 @@ static int __check_host_shared_guest(struct pkvm_hyp_vm *vm, u64 *__phys, u64 ip
ret = kvm_pgtable_get_leaf(&vm->pgt, ipa, &pte, &level);
if (ret)
return ret;
if (level != KVM_PGTABLE_LAST_LEVEL)
return -E2BIG;
if (!kvm_pte_valid(pte))
return -ENOENT;
if (level != KVM_PGTABLE_LAST_LEVEL)
return -E2BIG;
state = guest_get_page_state(pte, ipa);
if (state != PKVM_PAGE_SHARED_BORROWED)
@ -998,44 +998,57 @@ unlock:
return ret;
}
int __pkvm_host_relax_perms_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu, enum kvm_pgtable_prot prot)
static void assert_host_shared_guest(struct pkvm_hyp_vm *vm, u64 ipa)
{
struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu);
u64 ipa = hyp_pfn_to_phys(gfn);
u64 phys;
int ret;
if (prot & ~KVM_PGTABLE_PROT_RWX)
return -EINVAL;
if (!IS_ENABLED(CONFIG_NVHE_EL2_DEBUG))
return;
host_lock_component();
guest_lock_component(vm);
ret = __check_host_shared_guest(vm, &phys, ipa);
if (!ret)
ret = kvm_pgtable_stage2_relax_perms(&vm->pgt, ipa, prot, 0);
guest_unlock_component(vm);
host_unlock_component();
WARN_ON(ret && ret != -ENOENT);
}
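assert_host_shared_guest() is a debug-only check: when CONFIG_NVHE_EL2_DEBUG is off it returns before taking any locks, and the compiler drops the page-table walk entirely. A userspace sketch of the same compile-time-gated assertion (the DEBUG_CHECKS macro is a stand-in for the kernel config option):

#include <stdio.h>

/* Stand-in for CONFIG_NVHE_EL2_DEBUG; flip to 0 and the check vanishes. */
#define DEBUG_CHECKS 1

static int check_invariant(int value)
{
	return value >= 0 ? 0 : -1;	/* placeholder for the real walk */
}

static void assert_invariant(int value)
{
	if (!DEBUG_CHECKS)
		return;		/* dead code below is eliminated when disabled */

	if (check_invariant(value))
		fprintf(stderr, "WARN: invariant violated for %d\n", value);
}

int main(void)
{
	assert_invariant(42);
	assert_invariant(-1);
	return 0;
}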
int __pkvm_host_relax_perms_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu, enum kvm_pgtable_prot prot)
{
struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu);
u64 ipa = hyp_pfn_to_phys(gfn);
int ret;
if (pkvm_hyp_vm_is_protected(vm))
return -EPERM;
if (prot & ~KVM_PGTABLE_PROT_RWX)
return -EINVAL;
assert_host_shared_guest(vm, ipa);
guest_lock_component(vm);
ret = kvm_pgtable_stage2_relax_perms(&vm->pgt, ipa, prot, 0);
guest_unlock_component(vm);
return ret;
}
int __pkvm_host_wrprotect_guest(u64 gfn, struct pkvm_hyp_vm *vm)
{
u64 ipa = hyp_pfn_to_phys(gfn);
u64 phys;
int ret;
host_lock_component();
if (pkvm_hyp_vm_is_protected(vm))
return -EPERM;
assert_host_shared_guest(vm, ipa);
guest_lock_component(vm);
ret = __check_host_shared_guest(vm, &phys, ipa);
if (!ret)
ret = kvm_pgtable_stage2_wrprotect(&vm->pgt, ipa, PAGE_SIZE);
ret = kvm_pgtable_stage2_wrprotect(&vm->pgt, ipa, PAGE_SIZE);
guest_unlock_component(vm);
host_unlock_component();
return ret;
}
@ -1043,18 +1056,15 @@ int __pkvm_host_wrprotect_guest(u64 gfn, struct pkvm_hyp_vm *vm)
int __pkvm_host_test_clear_young_guest(u64 gfn, bool mkold, struct pkvm_hyp_vm *vm)
{
u64 ipa = hyp_pfn_to_phys(gfn);
u64 phys;
int ret;
host_lock_component();
if (pkvm_hyp_vm_is_protected(vm))
return -EPERM;
assert_host_shared_guest(vm, ipa);
guest_lock_component(vm);
ret = __check_host_shared_guest(vm, &phys, ipa);
if (!ret)
ret = kvm_pgtable_stage2_test_clear_young(&vm->pgt, ipa, PAGE_SIZE, mkold);
ret = kvm_pgtable_stage2_test_clear_young(&vm->pgt, ipa, PAGE_SIZE, mkold);
guest_unlock_component(vm);
host_unlock_component();
return ret;
}
@ -1063,18 +1073,14 @@ int __pkvm_host_mkyoung_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu)
{
struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu);
u64 ipa = hyp_pfn_to_phys(gfn);
u64 phys;
int ret;
host_lock_component();
if (pkvm_hyp_vm_is_protected(vm))
return -EPERM;
assert_host_shared_guest(vm, ipa);
guest_lock_component(vm);
ret = __check_host_shared_guest(vm, &phys, ipa);
if (!ret)
kvm_pgtable_stage2_mkyoung(&vm->pgt, ipa, 0);
kvm_pgtable_stage2_mkyoung(&vm->pgt, ipa, 0);
guest_unlock_component(vm);
host_unlock_component();
return ret;
return 0;
}

@ -39,6 +39,9 @@ static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
{
u64 val = CPTR_EL2_TAM; /* Same bit irrespective of E2H */
if (!guest_owns_fp_regs())
__activate_traps_fpsimd32(vcpu);
if (has_hvhe()) {
val |= CPACR_EL1_TTA;
@ -47,6 +50,8 @@ static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
if (vcpu_has_sve(vcpu))
val |= CPACR_EL1_ZEN;
}
write_sysreg(val, cpacr_el1);
} else {
val |= CPTR_EL2_TTA | CPTR_NVHE_EL2_RES1;
@ -61,12 +66,32 @@ static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
if (!guest_owns_fp_regs())
val |= CPTR_EL2_TFP;
write_sysreg(val, cptr_el2);
}
}
if (!guest_owns_fp_regs())
__activate_traps_fpsimd32(vcpu);
static void __deactivate_cptr_traps(struct kvm_vcpu *vcpu)
{
if (has_hvhe()) {
u64 val = CPACR_EL1_FPEN;
kvm_write_cptr_el2(val);
if (cpus_have_final_cap(ARM64_SVE))
val |= CPACR_EL1_ZEN;
if (cpus_have_final_cap(ARM64_SME))
val |= CPACR_EL1_SMEN;
write_sysreg(val, cpacr_el1);
} else {
u64 val = CPTR_NVHE_EL2_RES1;
if (!cpus_have_final_cap(ARM64_SVE))
val |= CPTR_EL2_TZ;
if (!cpus_have_final_cap(ARM64_SME))
val |= CPTR_EL2_TSM;
write_sysreg(val, cptr_el2);
}
}
static void __activate_traps(struct kvm_vcpu *vcpu)
@ -119,7 +144,7 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);
kvm_reset_cptr_el2(vcpu);
__deactivate_cptr_traps(vcpu);
write_sysreg(__kvm_hyp_host_vector, vbar_el2);
}
@ -192,34 +217,6 @@ static bool kvm_handle_pvm_sys64(struct kvm_vcpu *vcpu, u64 *exit_code)
kvm_handle_pvm_sysreg(vcpu, exit_code));
}
static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
{
/*
* Non-protected kvm relies on the host restoring its sve state.
* Protected kvm restores the host's sve state so as not to reveal that
* fpsimd was used by a guest or leak upper sve bits.
*/
if (unlikely(is_protected_kvm_enabled() && system_supports_sve())) {
__hyp_sve_save_host();
/* Re-enable SVE traps if not supported for the guest vcpu. */
if (!vcpu_has_sve(vcpu))
cpacr_clear_set(CPACR_EL1_ZEN, 0);
} else {
__fpsimd_save_state(*host_data_ptr(fpsimd_state));
}
if (kvm_has_fpmr(kern_hyp_va(vcpu->kvm))) {
u64 val = read_sysreg_s(SYS_FPMR);
if (unlikely(is_protected_kvm_enabled()))
*host_data_ptr(fpmr) = val;
else
**host_data_ptr(fpmr_ptr) = val;
}
}
static const exit_handler_fn hyp_exit_handlers[] = {
[0 ... ESR_ELx_EC_MAX] = NULL,
[ESR_ELx_EC_CP15_32] = kvm_hyp_handle_cp15_32,
@ -251,19 +248,21 @@ static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
return hyp_exit_handlers;
}
/*
* Some guests (e.g., protected VMs) are not allowed to run in AArch32.
* The ARMv8 architecture does not give the hypervisor a mechanism to prevent a
* guest from dropping to AArch32 EL0 if implemented by the CPU. If the
* hypervisor spots a guest in such a state, ensure it is handled, and don't
* trust the host to spot or fix it. The check below is based on the one in
* kvm_arch_vcpu_ioctl_run().
*
* Returns false if the guest ran in AArch32 when it shouldn't have, and
* thus should exit to the host, or true if the guest run loop can continue.
*/
static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
const exit_handler_fn *handlers = kvm_get_exit_handler_array(vcpu);
synchronize_vcpu_pstate(vcpu, exit_code);
/*
* Some guests (e.g., protected VMs) are not allowed to run in
* AArch32. The ARMv8 architecture does not give the hypervisor a
* mechanism to prevent a guest from dropping to AArch32 EL0 if
* implemented by the CPU. If the hypervisor spots a guest in such a
* state, ensure it is handled, and don't trust the host to spot or fix
* it. The check below is based on the one in
* kvm_arch_vcpu_ioctl_run().
*/
if (unlikely(vcpu_is_protected(vcpu) && vcpu_mode_is_32bit(vcpu))) {
/*
* As we have caught the guest red-handed, decide that it isn't
@ -276,6 +275,8 @@ static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
*exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT);
*exit_code |= ARM_EXCEPTION_IL;
}
return __fixup_guest_exit(vcpu, exit_code, handlers);
}
/* Switch to the guest for legacy non-VHE systems */

@ -136,6 +136,16 @@ write:
write_sysreg(val, cpacr_el1);
}
static void __deactivate_cptr_traps(struct kvm_vcpu *vcpu)
{
u64 val = CPACR_EL1_FPEN | CPACR_EL1_ZEN_EL1EN;
if (cpus_have_final_cap(ARM64_SME))
val |= CPACR_EL1_SMEN_EL1EN;
write_sysreg(val, cpacr_el1);
}
static void __activate_traps(struct kvm_vcpu *vcpu)
{
u64 val;
@ -207,7 +217,7 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
*/
asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
kvm_reset_cptr_el2(vcpu);
__deactivate_cptr_traps(vcpu);
if (!arm64_kernel_unmapped_at_el0())
host_vectors = __this_cpu_read(this_cpu_vector);
@ -413,14 +423,6 @@ static bool kvm_hyp_handle_eret(struct kvm_vcpu *vcpu, u64 *exit_code)
return true;
}
static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
{
__fpsimd_save_state(*host_data_ptr(fpsimd_state));
if (kvm_has_fpmr(vcpu->kvm))
**host_data_ptr(fpmr_ptr) = read_sysreg_s(SYS_FPMR);
}
static bool kvm_hyp_handle_tlbi_el2(struct kvm_vcpu *vcpu, u64 *exit_code)
{
int ret = -EINVAL;
@ -538,13 +540,10 @@ static const exit_handler_fn hyp_exit_handlers[] = {
[ESR_ELx_EC_MOPS] = kvm_hyp_handle_mops,
};
static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
return hyp_exit_handlers;
}
synchronize_vcpu_pstate(vcpu, exit_code);
static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
{
/*
* If we were in HYP context on entry, adjust the PSTATE view
* so that the usual helpers work correctly.
@ -564,6 +563,8 @@ static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
*vcpu_cpsr(vcpu) &= ~(PSR_MODE_MASK | PSR_MODE32_BIT);
*vcpu_cpsr(vcpu) |= mode;
}
return __fixup_guest_exit(vcpu, exit_code, hyp_exit_handlers);
}
/* Switch to the guest for VHE systems running in EL2 */
@ -578,6 +579,8 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
sysreg_save_host_state_vhe(host_ctxt);
fpsimd_lazy_switch_to_guest(vcpu);
/*
* Note that ARM erratum 1165522 requires us to configure both stage 1
* and stage 2 translation for the guest context before we clear
@ -602,6 +605,8 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
__deactivate_traps(vcpu);
fpsimd_lazy_switch_to_host(vcpu);
sysreg_restore_host_state_vhe(host_ctxt);
if (guest_owns_fp_regs())

@ -67,26 +67,27 @@ int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu)
if (!tmp)
return -ENOMEM;
swap(kvm->arch.nested_mmus, tmp);
/*
* If we went through a reallocation, adjust the MMU back-pointers in
* the previously initialised kvm_pgtable structures.
*/
if (kvm->arch.nested_mmus != tmp)
for (int i = 0; i < kvm->arch.nested_mmus_size; i++)
tmp[i].pgt->mmu = &tmp[i];
kvm->arch.nested_mmus[i].pgt->mmu = &kvm->arch.nested_mmus[i];
for (int i = kvm->arch.nested_mmus_size; !ret && i < num_mmus; i++)
ret = init_nested_s2_mmu(kvm, &tmp[i]);
ret = init_nested_s2_mmu(kvm, &kvm->arch.nested_mmus[i]);
if (ret) {
for (int i = kvm->arch.nested_mmus_size; i < num_mmus; i++)
kvm_free_stage2_pgd(&tmp[i]);
kvm_free_stage2_pgd(&kvm->arch.nested_mmus[i]);
return ret;
}
kvm->arch.nested_mmus_size = num_mmus;
kvm->arch.nested_mmus = tmp;
return 0;
}
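The rewrite above addresses a classic realloc() hazard: the nested_mmus array can move, and every previously initialised kvm_pgtable keeps a back-pointer into it, so those pointers must be re-aimed at the new storage before anything dereferences them. A self-contained illustration of the fixup:

#include <stdio.h>
#include <stdlib.h>

struct pgt;
struct mmu { struct pgt *pgt; int id; };
struct pgt { struct mmu *mmu; };	/* back-pointer into the array */

int main(void)
{
	size_t old_n = 1, new_n = 64;
	struct mmu *mmus = calloc(old_n, sizeof(*mmus));
	struct pgt *pgt = calloc(1, sizeof(*pgt));

	if (!mmus || !pgt)
		return 1;
	mmus[0].pgt = pgt;
	pgt->mmu = &mmus[0];

	struct mmu *tmp = realloc(mmus, new_n * sizeof(*mmus));
	if (!tmp)
		return 1;
	mmus = tmp;

	/* The block may have moved: re-aim every initialised back-pointer. */
	for (size_t i = 0; i < old_n; i++)
		mmus[i].pgt->mmu = &mmus[i];

	printf("back-pointer ok: %d\n", mmus[0].pgt->mmu == &mmus[0]);
	free(pgt);
	free(mmus);
	return 0;
}

The kernel version only does the fixup when the array actually moved; doing it unconditionally, as here, is equally correct.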

@ -1452,6 +1452,16 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu,
return true;
}
static bool access_hv_timer(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
if (!vcpu_el2_e2h_is_set(vcpu))
return undef_access(vcpu, p, r);
return access_arch_timer(vcpu, p, r);
}
static s64 kvm_arm64_ftr_safe_value(u32 id, const struct arm64_ftr_bits *ftrp,
s64 new, s64 cur)
{
@ -3103,9 +3113,9 @@ static const struct sys_reg_desc sys_reg_descs[] = {
EL2_REG(CNTHP_CTL_EL2, access_arch_timer, reset_val, 0),
EL2_REG(CNTHP_CVAL_EL2, access_arch_timer, reset_val, 0),
{ SYS_DESC(SYS_CNTHV_TVAL_EL2), access_arch_timer },
EL2_REG(CNTHV_CTL_EL2, access_arch_timer, reset_val, 0),
EL2_REG(CNTHV_CVAL_EL2, access_arch_timer, reset_val, 0),
{ SYS_DESC(SYS_CNTHV_TVAL_EL2), access_hv_timer },
EL2_REG(CNTHV_CTL_EL2, access_hv_timer, reset_val, 0),
EL2_REG(CNTHV_CVAL_EL2, access_hv_timer, reset_val, 0),
{ SYS_DESC(SYS_CNTKCTL_EL12), access_cntkctl_el12 },

@ -34,9 +34,9 @@
*
* CPU Interface:
*
* - kvm_vgic_vcpu_init(): initialization of static data that
* doesn't depend on any sizing information or emulation type. No
* allocation is allowed there.
* - kvm_vgic_vcpu_init(): initialization of static data that doesn't depend
* on any sizing information. Private interrupts are allocated if not
* already allocated at vgic-creation time.
*/
/* EARLY INIT */
@ -58,6 +58,8 @@ void kvm_vgic_early_init(struct kvm *kvm)
/* CREATION */
static int vgic_allocate_private_irqs_locked(struct kvm_vcpu *vcpu, u32 type);
/**
* kvm_vgic_create: triggered by the instantiation of the VGIC device by
* user space, either through the legacy KVM_CREATE_IRQCHIP ioctl (v2 only)
@ -112,6 +114,22 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
goto out_unlock;
}
kvm_for_each_vcpu(i, vcpu, kvm) {
ret = vgic_allocate_private_irqs_locked(vcpu, type);
if (ret)
break;
}
if (ret) {
kvm_for_each_vcpu(i, vcpu, kvm) {
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
kfree(vgic_cpu->private_irqs);
vgic_cpu->private_irqs = NULL;
}
goto out_unlock;
}
kvm->arch.vgic.in_kernel = true;
kvm->arch.vgic.vgic_model = type;
@ -180,7 +198,7 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
return 0;
}
static int vgic_allocate_private_irqs_locked(struct kvm_vcpu *vcpu)
static int vgic_allocate_private_irqs_locked(struct kvm_vcpu *vcpu, u32 type)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
int i;
@ -218,17 +236,28 @@ static int vgic_allocate_private_irqs_locked(struct kvm_vcpu *vcpu)
/* PPIs */
irq->config = VGIC_CONFIG_LEVEL;
}
switch (type) {
case KVM_DEV_TYPE_ARM_VGIC_V3:
irq->group = 1;
irq->mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
break;
case KVM_DEV_TYPE_ARM_VGIC_V2:
irq->group = 0;
irq->targets = BIT(vcpu->vcpu_id);
break;
}
}
return 0;
}
static int vgic_allocate_private_irqs(struct kvm_vcpu *vcpu)
static int vgic_allocate_private_irqs(struct kvm_vcpu *vcpu, u32 type)
{
int ret;
mutex_lock(&vcpu->kvm->arch.config_lock);
ret = vgic_allocate_private_irqs_locked(vcpu);
ret = vgic_allocate_private_irqs_locked(vcpu, type);
mutex_unlock(&vcpu->kvm->arch.config_lock);
return ret;
@ -258,7 +287,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
if (!irqchip_in_kernel(vcpu->kvm))
return 0;
ret = vgic_allocate_private_irqs(vcpu);
ret = vgic_allocate_private_irqs(vcpu, dist->vgic_model);
if (ret)
return ret;
@ -295,7 +324,7 @@ int vgic_init(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
struct kvm_vcpu *vcpu;
int ret = 0, i;
int ret = 0;
unsigned long idx;
lockdep_assert_held(&kvm->arch.config_lock);
@ -315,35 +344,6 @@ int vgic_init(struct kvm *kvm)
if (ret)
goto out;
/* Initialize groups on CPUs created before the VGIC type was known */
kvm_for_each_vcpu(idx, vcpu, kvm) {
ret = vgic_allocate_private_irqs_locked(vcpu);
if (ret)
goto out;
for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, i);
switch (dist->vgic_model) {
case KVM_DEV_TYPE_ARM_VGIC_V3:
irq->group = 1;
irq->mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
break;
case KVM_DEV_TYPE_ARM_VGIC_V2:
irq->group = 0;
irq->targets = 1U << idx;
break;
default:
ret = -EINVAL;
}
vgic_put_irq(kvm, irq);
if (ret)
goto out;
}
}
/*
* If we have GICv4.1 enabled, unconditionally request enable the
* v4 support so that we get HW-accelerated vSGIs. Otherwise, only

@ -135,11 +135,10 @@ void kvm_arm_vmid_clear_active(void)
atomic64_set(this_cpu_ptr(&active_vmids), VMID_ACTIVE_INVALID);
}
bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
{
unsigned long flags;
u64 vmid, old_active_vmid;
bool updated = false;
vmid = atomic64_read(&kvm_vmid->id);
@ -157,21 +156,17 @@ bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
if (old_active_vmid != 0 && vmid_gen_match(vmid) &&
0 != atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_vmids),
old_active_vmid, vmid))
return false;
return;
raw_spin_lock_irqsave(&cpu_vmid_lock, flags);
/* Check that our VMID belongs to the current generation. */
vmid = atomic64_read(&kvm_vmid->id);
if (!vmid_gen_match(vmid)) {
if (!vmid_gen_match(vmid))
vmid = new_vmid(kvm_vmid);
updated = true;
}
atomic64_set(this_cpu_ptr(&active_vmids), vmid);
raw_spin_unlock_irqrestore(&cpu_vmid_lock, flags);
return updated;
}
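kvm_arm_vmid_update() now returns void since its caller no longer cares whether a fresh VMID was allocated, but the core test is unchanged: the 64-bit id packs a rollover generation above the VMID bits, and a stale generation forces re-allocation. A simplified sketch of that generation-match test (bit widths and names are illustrative):

#include <stdint.h>
#include <stdio.h>

#define VMID_BITS	16ULL			/* illustrative width */
#define VMID_MASK	((1ULL << VMID_BITS) - 1)

static uint64_t current_gen = 3;		/* bumped on rollover */

/* Upper bits carry the generation, lower bits the VMID proper. */
static int vmid_gen_match(uint64_t id)
{
	return (id >> VMID_BITS) == current_gen;
}

static uint64_t new_vmid(uint64_t vmid)
{
	return (current_gen << VMID_BITS) | (vmid & VMID_MASK);
}

int main(void)
{
	uint64_t id = (2ULL << VMID_BITS) | 7;	/* stale generation 2 */

	if (!vmid_gen_match(id))
		id = new_vmid(7);		/* re-tag for generation 3 */

	printf("id=%#llx gen-match=%d\n",
	       (unsigned long long)id, vmid_gen_match(id));
	return 0;
}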
/*

@ -100,20 +100,11 @@ static int find_num_contig(struct mm_struct *mm, unsigned long addr,
static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
{
int contig_ptes = 0;
int contig_ptes = 1;
*pgsize = size;
switch (size) {
#ifndef __PAGETABLE_PMD_FOLDED
case PUD_SIZE:
if (pud_sect_supported())
contig_ptes = 1;
break;
#endif
case PMD_SIZE:
contig_ptes = 1;
break;
case CONT_PMD_SIZE:
*pgsize = PMD_SIZE;
contig_ptes = CONT_PMDS;
@ -122,6 +113,8 @@ static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
*pgsize = PAGE_SIZE;
contig_ptes = CONT_PTES;
break;
default:
WARN_ON(!__hugetlb_valid_size(size));
}
return contig_ptes;
@ -163,24 +156,23 @@ static pte_t get_clear_contig(struct mm_struct *mm,
unsigned long pgsize,
unsigned long ncontig)
{
pte_t orig_pte = __ptep_get(ptep);
unsigned long i;
pte_t pte, tmp_pte;
bool present;
for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) {
pte_t pte = __ptep_get_and_clear(mm, addr, ptep);
/*
* If HW_AFDBM is enabled, then the HW could turn on
* the dirty or accessed bit for any page in the set,
* so check them all.
*/
if (pte_dirty(pte))
orig_pte = pte_mkdirty(orig_pte);
if (pte_young(pte))
orig_pte = pte_mkyoung(orig_pte);
pte = __ptep_get_and_clear(mm, addr, ptep);
present = pte_present(pte);
while (--ncontig) {
ptep++;
addr += pgsize;
tmp_pte = __ptep_get_and_clear(mm, addr, ptep);
if (present) {
if (pte_dirty(tmp_pte))
pte = pte_mkdirty(pte);
if (pte_young(tmp_pte))
pte = pte_mkyoung(pte);
}
}
return orig_pte;
return pte;
}
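get_clear_contig() now clears the first PTE, remembers it, and folds the dirty/young bits of the remaining entries into it; with HW_AFDBM the hardware may have set those bits on any entry of the contiguous range, and only a present mapping should contribute them. A userspace sketch of the fold, with PTEs reduced to plain flag words:

#include <stdio.h>

#define PTE_PRESENT	0x1
#define PTE_DIRTY	0x2
#define PTE_YOUNG	0x4

/* Clear a range of "PTEs", folding dirty/young from all entries into one. */
static unsigned int get_clear_contig(unsigned int *ptes, int ncontig)
{
	unsigned int pte = ptes[0];
	int present = pte & PTE_PRESENT;

	ptes[0] = 0;
	for (int i = 1; i < ncontig; i++) {
		unsigned int tmp = ptes[i];

		ptes[i] = 0;
		if (present)
			pte |= tmp & (PTE_DIRTY | PTE_YOUNG);
	}
	return pte;
}

int main(void)
{
	unsigned int ptes[4] = {
		PTE_PRESENT, PTE_PRESENT | PTE_DIRTY,
		PTE_PRESENT, PTE_PRESENT | PTE_YOUNG,
	};
	unsigned int pte = get_clear_contig(ptes, 4);

	printf("dirty=%d young=%d\n", !!(pte & PTE_DIRTY), !!(pte & PTE_YOUNG));
	return 0;
}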
static pte_t get_clear_contig_flush(struct mm_struct *mm,
@ -396,18 +388,13 @@ void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
__pte_clear(mm, addr, ptep);
}
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned long sz)
{
int ncontig;
size_t pgsize;
pte_t orig_pte = __ptep_get(ptep);
if (!pte_cont(orig_pte))
return __ptep_get_and_clear(mm, addr, ptep);
ncontig = find_num_contig(mm, addr, ptep, &pgsize);
ncontig = num_contig_ptes(sz, &pgsize);
return get_clear_contig(mm, addr, ptep, pgsize, ncontig);
}
@ -549,6 +536,8 @@ bool __init arch_hugetlb_valid_size(unsigned long size)
pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
unsigned long psize = huge_page_size(hstate_vma(vma));
if (alternative_has_cap_unlikely(ARM64_WORKAROUND_2645198)) {
/*
* Break-before-make (BBM) is required for all user space mappings
@ -558,7 +547,7 @@ pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr
if (pte_user_exec(__ptep_get(ptep)))
return huge_ptep_clear_flush(vma, addr, ptep);
}
return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, psize);
}
void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,

@ -279,12 +279,7 @@ void __init arm64_memblock_init(void)
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
extern u16 memstart_offset_seed;
/*
* Use the sanitised version of id_aa64mmfr0_el1 so that linear
* map randomization can be enabled by shrinking the IPA space.
*/
u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
int parange = cpuid_feature_extract_unsigned_field(
mmfr0, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
s64 range = linear_region_size -

@ -162,6 +162,13 @@ static int copy_p4d(struct trans_pgd_info *info, pgd_t *dst_pgdp,
unsigned long next;
unsigned long addr = start;
if (pgd_none(READ_ONCE(*dst_pgdp))) {
dst_p4dp = trans_alloc(info);
if (!dst_p4dp)
return -ENOMEM;
pgd_populate(NULL, dst_pgdp, dst_p4dp);
}
dst_p4dp = p4d_offset(dst_pgdp, start);
src_p4dp = p4d_offset(src_pgdp, start);
do {

@ -76,27 +76,6 @@ extern const char *__cpu_full_name[];
#define cpu_family_string() __cpu_family[raw_smp_processor_id()]
#define cpu_full_name_string() __cpu_full_name[raw_smp_processor_id()]
struct seq_file;
struct notifier_block;
extern int register_proc_cpuinfo_notifier(struct notifier_block *nb);
extern int proc_cpuinfo_notifier_call_chain(unsigned long val, void *v);
#define proc_cpuinfo_notifier(fn, pri) \
({ \
static struct notifier_block fn##_nb = { \
.notifier_call = fn, \
.priority = pri \
}; \
\
register_proc_cpuinfo_notifier(&fn##_nb); \
})
struct proc_cpuinfo_notifier_args {
struct seq_file *m;
unsigned long n;
};
static inline bool cpus_are_siblings(int cpua, int cpub)
{
struct cpuinfo_loongarch *infoa = &cpu_data[cpua];

@ -36,7 +36,8 @@ static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
unsigned long addr, pte_t *ptep,
unsigned long sz)
{
pte_t clear;
pte_t pte = ptep_get(ptep);
@ -51,8 +52,9 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
pte_t pte;
unsigned long sz = huge_page_size(hstate_vma(vma));
pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, sz);
flush_tlb_page(vma, addr);
return pte;
}
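This hunk (and the matching MIPS one further down) is part of a tree-wide signature change: huge_ptep_get_and_clear() now takes the huge page size explicitly instead of re-deriving it from a PTE that may already be cleared or invalid. A minimal sketch of threading such a size argument through a wrapper (names are illustrative, not the kernel API):

#include <stddef.h>
#include <stdio.h>

typedef unsigned long pte_t;

/* Callee now receives the mapping size instead of inferring it from *ptep. */
static pte_t huge_get_and_clear(pte_t *ptep, size_t sz)
{
	pte_t pte = *ptep;

	*ptep = 0;
	printf("cleared %zu-byte mapping\n", sz);
	return pte;
}

/* Wrapper derives the size from its own context and passes it down. */
static pte_t huge_clear_flush(pte_t *ptep, size_t huge_page_size)
{
	pte_t pte = huge_get_and_clear(ptep, huge_page_size);
	/* flush_tlb_page() would go here in the kernel */
	return pte;
}

int main(void)
{
	pte_t pte = 0x1234;

	huge_clear_flush(&pte, 2UL << 20);	/* e.g. a 2 MiB huge page */
	return 0;
}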

@ -77,6 +77,8 @@ extern int __cpu_logical_map[NR_CPUS];
#define SMP_IRQ_WORK BIT(ACTION_IRQ_WORK)
#define SMP_CLEAR_VECTOR BIT(ACTION_CLEAR_VECTOR)
struct seq_file;
struct secondary_data {
unsigned long stack;
unsigned long thread_info;

@ -18,16 +18,19 @@
.align 5
SYM_FUNC_START(__arch_cpu_idle)
/* start of rollback region */
LONG_L t0, tp, TI_FLAGS
nop
andi t0, t0, _TIF_NEED_RESCHED
bnez t0, 1f
nop
nop
nop
/* start of idle interrupt region */
ori t0, zero, CSR_CRMD_IE
/* idle instruction needs irq enabled */
csrxchg t0, t0, LOONGARCH_CSR_CRMD
/*
* If an interrupt lands here, between enabling interrupts above and
* going idle on the next instruction, we must *NOT* go idle since the
* interrupt could have set TIF_NEED_RESCHED or caused a timer to need
* reprogramming. Fall through -- see handle_vint() below -- and have
* the idle loop take care of things.
*/
idle 0
/* end of rollback region */
/* end of idle interrupt region */
1: jr ra
SYM_FUNC_END(__arch_cpu_idle)
@ -35,11 +38,10 @@ SYM_CODE_START(handle_vint)
UNWIND_HINT_UNDEFINED
BACKUP_T0T1
SAVE_ALL
la_abs t1, __arch_cpu_idle
la_abs t1, 1b
LONG_L t0, sp, PT_ERA
/* 32 byte rollback region */
ori t0, t0, 0x1f
xori t0, t0, 0x1f
/* 3 instructions idle interrupt region */
ori t0, t0, 0b1100
bne t0, t1, 1f
LONG_S t0, sp, PT_ERA
1: move a0, sp

@ -11,7 +11,6 @@
void __cpuidle arch_cpu_idle(void)
{
raw_local_irq_enable();
__arch_cpu_idle(); /* idle instruction needs irq enabled */
__arch_cpu_idle();
raw_local_irq_disable();
}

@ -13,28 +13,12 @@
#include <asm/processor.h>
#include <asm/time.h>
/*
* No lock; only written during early bootup by CPU 0.
*/
static RAW_NOTIFIER_HEAD(proc_cpuinfo_chain);
int __ref register_proc_cpuinfo_notifier(struct notifier_block *nb)
{
return raw_notifier_chain_register(&proc_cpuinfo_chain, nb);
}
int proc_cpuinfo_notifier_call_chain(unsigned long val, void *v)
{
return raw_notifier_call_chain(&proc_cpuinfo_chain, val, v);
}
static int show_cpuinfo(struct seq_file *m, void *v)
{
unsigned long n = (unsigned long) v - 1;
unsigned int isa = cpu_data[n].isa_level;
unsigned int version = cpu_data[n].processor_id & 0xff;
unsigned int fp_version = cpu_data[n].fpu_vers;
struct proc_cpuinfo_notifier_args proc_cpuinfo_notifier_args;
#ifdef CONFIG_SMP
if (!cpu_online(n))
@ -91,20 +75,13 @@ static int show_cpuinfo(struct seq_file *m, void *v)
if (cpu_has_lbt_mips) seq_printf(m, " lbt_mips");
seq_printf(m, "\n");
seq_printf(m, "Hardware Watchpoint\t: %s",
cpu_has_watch ? "yes, " : "no\n");
seq_printf(m, "Hardware Watchpoint\t: %s", str_yes_no(cpu_has_watch));
if (cpu_has_watch) {
seq_printf(m, "iwatch count: %d, dwatch count: %d\n",
seq_printf(m, ", iwatch count: %d, dwatch count: %d",
cpu_data[n].watch_ireg_count, cpu_data[n].watch_dreg_count);
}
proc_cpuinfo_notifier_args.m = m;
proc_cpuinfo_notifier_args.n = n;
raw_notifier_call_chain(&proc_cpuinfo_chain, 0,
&proc_cpuinfo_notifier_args);
seq_printf(m, "\n");
seq_printf(m, "\n\n");
return 0;
}

@ -33,7 +33,7 @@ void machine_halt(void)
console_flush_on_panic(CONSOLE_FLUSH_PENDING);
while (true) {
__arch_cpu_idle();
__asm__ __volatile__("idle 0" : : : "memory");
}
}
@ -53,7 +53,7 @@ void machine_power_off(void)
#endif
while (true) {
__arch_cpu_idle();
__asm__ __volatile__("idle 0" : : : "memory");
}
}
@ -74,6 +74,6 @@ void machine_restart(char *command)
acpi_reboot();
while (true) {
__arch_cpu_idle();
__asm__ __volatile__("idle 0" : : : "memory");
}
}

@ -303,9 +303,9 @@ int kvm_arch_enable_virtualization_cpu(void)
* TOE=0: Trap on Exception.
* TIT=0: Trap on Timer.
*/
if (env & CSR_GCFG_GCIP_ALL)
if (env & CSR_GCFG_GCIP_SECURE)
gcfg |= CSR_GCFG_GCI_SECURE;
if (env & CSR_GCFG_MATC_ROOT)
if (env & CSR_GCFG_MATP_ROOT)
gcfg |= CSR_GCFG_MATC_ROOT;
write_csr_gcfg(gcfg);

@ -85,7 +85,7 @@
* Guest CRMD comes from separate GCSR_CRMD register
*/
ori t0, zero, CSR_PRMD_PIE
csrxchg t0, t0, LOONGARCH_CSR_PRMD
csrwr t0, LOONGARCH_CSR_PRMD
/* Set PVM bit to setup ertn to guest context */
ori t0, zero, CSR_GSTAT_PVM

@ -1548,9 +1548,6 @@ static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
/* Restore timer state regardless */
kvm_restore_timer(vcpu);
/* Control guest page CCA attribute */
change_csr_gcfg(CSR_GCFG_MATC_MASK, CSR_GCFG_MATC_ROOT);
kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
/* Restore hardware PMU CSRs */

@ -25,7 +25,7 @@ unsigned int __no_sanitize_address do_csum(const unsigned char *buff, int len)
const u64 *ptr;
u64 data, sum64 = 0;
if (unlikely(len == 0))
if (unlikely(len <= 0))
return 0;
offset = (unsigned long)buff & 7;
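The do_csum() guard widens from len == 0 to len <= 0 because len is a signed int: a negative value would later be converted to a huge unsigned count and walk far past the buffer. A tiny demonstration of the conversion hazard:

#include <stdio.h>

static unsigned int checksum(const unsigned char *buf, int len)
{
	unsigned int sum = 0;

	if (len <= 0)		/* a plain `len == 0` misses negative lengths */
		return 0;

	for (unsigned int i = 0; i < (unsigned int)len; i++)
		sum += buf[i];
	return sum;
}

int main(void)
{
	unsigned char buf[4] = { 1, 2, 3, 4 };

	/* (unsigned int)-1 is 4294967295: without the guard the loop runs away. */
	printf("%u %u\n", checksum(buf, 4), checksum(buf, -1));
	return 0;
}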

@ -3,6 +3,7 @@
* Copyright (C) 2024 Loongson Technology Corporation Limited
*/
#include <linux/memblock.h>
#include <linux/pagewalk.h>
#include <linux/pgtable.h>
#include <asm/set_memory.h>
@ -167,7 +168,7 @@ bool kernel_page_present(struct page *page)
unsigned long addr = (unsigned long)page_address(page);
if (addr < vm_map_base)
return true;
return memblock_is_memory(__pa(addr));
pgd = pgd_offset_k(addr);
if (pgd_none(pgdp_get(pgd)))

@ -468,6 +468,8 @@ static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
Elf_Sym *sym, const char *symname))
{
int i;
struct section *extab_sec = sec_lookup("__ex_table");
int extab_index = extab_sec ? extab_sec - secs : -1;
/* Walk through the relocations */
for (i = 0; i < ehdr.e_shnum; i++) {
@ -480,6 +482,9 @@ static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
if (sec->shdr.sh_type != SHT_REL_TYPE)
continue;
if (sec->shdr.sh_info == extab_index)
continue;
sec_symtab = sec->link;
sec_applies = &secs[sec->shdr.sh_info];
if (!(sec_applies->shdr.sh_flags & SHF_ALLOC))
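The relocs-tool change above caches the __ex_table section index once: sec_lookup() returns a pointer into the secs array, and subtracting the array base yields the index later compared against sh_info. A small sketch of that pointer-arithmetic idiom (sec_lookup() here is a stand-in reimplementation):

#include <stdio.h>
#include <string.h>

struct section { const char *name; };

static struct section secs[] = {
	{ ".text" }, { "__ex_table" }, { ".data" },
};

static struct section *sec_lookup(const char *name)
{
	for (size_t i = 0; i < sizeof(secs) / sizeof(secs[0]); i++)
		if (!strcmp(secs[i].name, name))
			return &secs[i];
	return NULL;
}

int main(void)
{
	struct section *extab_sec = sec_lookup("__ex_table");
	/* Pointer difference from the array base gives the section index. */
	long extab_index = extab_sec ? extab_sec - secs : -1;

	printf("__ex_table index: %ld\n", extab_index);
	return 0;
}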

@ -27,7 +27,8 @@ static inline int prepare_hugepage_range(struct file *file,
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
unsigned long addr, pte_t *ptep,
unsigned long sz)
{
pte_t clear;
pte_t pte = *ptep;
@ -42,13 +43,14 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
pte_t pte;
unsigned long sz = huge_page_size(hstate_vma(vma));
/*
* clear the huge pte entry firstly, so that the other smp threads will
* not get old pte entry after finishing flush_tlb_page and before
* setting new huge pte entry
*/
pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, sz);
flush_tlb_page(vma, addr);
return pte;
}

@ -27,8 +27,8 @@
*/
struct pt_regs {
#ifdef CONFIG_32BIT
/* Pad bytes for argument save space on the stack. */
unsigned long pad0[8];
/* Saved syscall stack arguments; entries 0-3 unused. */
unsigned long args[8];
#endif
/* Saved main processor registers. */

Some files were not shown because too many files have changed in this diff.