commit 9c0fc36ec4
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR (net-6.12-rc3).
No conflicts and no adjacent changes.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
.mailmap (1 change)

@@ -203,6 +203,7 @@ Ezequiel Garcia <ezequiel@vanguardiasur.com.ar> <ezequiel@collabora.com>
Faith Ekstrand <faith.ekstrand@collabora.com> <jason@jlekstrand.net>
Faith Ekstrand <faith.ekstrand@collabora.com> <jason.ekstrand@intel.com>
Faith Ekstrand <faith.ekstrand@collabora.com> <jason.ekstrand@collabora.com>
Fangrui Song <i@maskray.me> <maskray@google.com>
Felipe W Damasio <felipewd@terra.com.br>
Felix Kuhling <fxkuehl@gmx.de>
Felix Moeller <felix@derklecks.de>
CREDITS (54 changes)

@@ -1358,10 +1358,6 @@ D: Major kbuild rework during the 2.5 cycle
D: ISDN Maintainer
S: USA

N: Gerrit Renker
E: gerrit@erg.abdn.ac.uk
D: DCCP protocol support.

N: Philip Gladstone
E: philip@gladstonefamily.net
D: Kernel / timekeeping stuff

@@ -1677,11 +1673,6 @@ W: http://www.carumba.com/
D: bug toaster (A1 sauce makes all the difference)
D: Random linux hacker

N: James Hogan
E: jhogan@kernel.org
D: Metag architecture maintainer
D: TZ1090 SoC maintainer

N: Tim Hockin
E: thockin@hockin.org
W: http://www.hockin.org/~thockin

@@ -1697,6 +1688,11 @@ D: hwmon subsystem maintainer
D: i2c-sis96x and i2c-stub SMBus drivers
S: USA

N: James Hogan
E: jhogan@kernel.org
D: Metag architecture maintainer
D: TZ1090 SoC maintainer

N: Dirk Hohndel
E: hohndel@suse.de
D: The XFree86[tm] Project

@@ -1872,6 +1868,10 @@ S: K osmidomkum 723
S: 160 00 Praha 6
S: Czech Republic

N: Seth Jennings
E: sjenning@redhat.com
D: Creation and maintenance of zswap

N: Jeremy Kerr
D: Maintainer of SPU File System

@@ -2188,19 +2188,6 @@ N: Mike Kravetz
E: mike.kravetz@oracle.com
D: Maintenance and development of the hugetlb subsystem

N: Seth Jennings
E: sjenning@redhat.com
D: Creation and maintenance of zswap

N: Dan Streetman
E: ddstreet@ieee.org
D: Maintenance and development of zswap
D: Creation and maintenance of the zpool API

N: Vitaly Wool
E: vitaly.wool@konsulko.com
D: Maintenance and development of zswap

N: Andreas S. Krebs
E: akrebs@altavista.net
D: CYPRESS CY82C693 chipset IDE, Digital's PC-Alpha 164SX boards

@@ -3191,6 +3178,11 @@ N: Ken Pizzini
E: ken@halcyon.com
D: CDROM driver "sonycd535" (Sony CDU-535/531)

N: Mathieu Poirier
E: mathieu.poirier@linaro.org
D: CoreSight kernel subsystem, Maintainer 2014-2022
D: Perf tool support for CoreSight

N: Stelian Pop
E: stelian@popies.net
P: 1024D/EDBB6147 7B36 0E07 04BC 11DC A7A0 D3F7 7185 9E7A EDBB 6147

@@ -3300,6 +3292,10 @@ S: Schlossbergring 9
S: 79098 Freiburg
S: Germany

N: Gerrit Renker
E: gerrit@erg.abdn.ac.uk
D: DCCP protocol support.

N: Thomas Renninger
E: trenn@suse.de
D: cpupowerutils

@@ -3576,11 +3572,6 @@ D: several improvements to system programs
S: Oldenburg
S: Germany

N: Mathieu Poirier
E: mathieu.poirier@linaro.org
D: CoreSight kernel subsystem, Maintainer 2014-2022
D: Perf tool support for CoreSight

N: Robert Schwebel
E: robert@schwebel.de
W: https://www.schwebel.de

@@ -3771,6 +3762,11 @@ S: Chr. Winthersvej 1 B, st.th.
S: DK-1860 Frederiksberg C
S: Denmark

N: Dan Streetman
E: ddstreet@ieee.org
D: Maintenance and development of zswap
D: Creation and maintenance of the zpool API

N: Drew Sullivan
E: drew@ss.org
W: http://www.ss.org/

@@ -4286,6 +4282,10 @@ S: Pipers Way
S: Swindon. SN3 1RJ
S: England

N: Vitaly Wool
E: vitaly.wool@konsulko.com
D: Maintenance and development of zswap

N: Chris Wright
E: chrisw@sous-sol.org
D: hacking on LSM framework and security modules.
@@ -146,6 +146,8 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A715 | #2645198 | ARM64_ERRATUM_2645198 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A715 | #3456084 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A720 | #3456091 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A725 | #3456106 | ARM64_ERRATUM_3194386 |

@@ -186,6 +188,8 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-N2 | #3324339 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-N3 | #3456111 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-V1 | #1619801 | N/A |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-V1 | #3324341 | ARM64_ERRATUM_3194386 |

@@ -289,3 +293,5 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| Microsoft | Azure Cobalt 100| #2253138 | ARM64_ERRATUM_2253138 |
+----------------+-----------------+-----------------+-----------------------------+
| Microsoft | Azure Cobalt 100| #3324339 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
@@ -102,7 +102,7 @@ properties:
default: 2

interrupts:
anyOf:
oneOf:
- minItems: 1
items:
- description: TX interrupt

@@ -30,6 +30,7 @@ properties:
- qcom,apq8096-sndcard
- qcom,qcm6490-idp-sndcard
- qcom,qcs6490-rb3gen2-sndcard
- qcom,qrb4210-rb2-sndcard
- qcom,qrb5165-rb5-sndcard
- qcom,sc7180-qdsp6-sndcard
- qcom,sc8280xp-sndcard

@@ -302,7 +302,7 @@ allOf:
reg-names:
items:
enum:
- scu
- sru
- ssi
- adg
# for Gen2/Gen3
@@ -7,12 +7,11 @@ WMI Driver API

The WMI driver core supports a more modern bus-based interface for interacting
with WMI devices, and an older GUID-based interface. The latter interface is
considered to be deprecated, so new WMI drivers should generally avoid it since
it has some issues with multiple WMI devices and events sharing the same GUIDs
and/or notification IDs. The modern bus-based interface instead maps each
WMI device to a :c:type:`struct wmi_device <wmi_device>`, so it supports
WMI devices sharing GUIDs and/or notification IDs. Drivers can then register
a :c:type:`struct wmi_driver <wmi_driver>`, which will be bound to compatible
WMI devices by the driver core.
it has some issues with multiple WMI devices sharing the same GUID.
The modern bus-based interface instead maps each WMI device to a
:c:type:`struct wmi_device <wmi_device>`, so it supports WMI devices sharing the
same GUID. Drivers can then register a :c:type:`struct wmi_driver <wmi_driver>`
which will be bound to compatible WMI devices by the driver core.

.. kernel-doc:: include/linux/wmi.h
   :internal:
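As a rough illustration of the bus-based interface described in the hunk above, a minimal WMI driver registration might look like the sketch below. The GUID string, driver name, and callback bodies are invented placeholders; struct wmi_driver, struct wmi_device_id and module_wmi_driver() come from include/linux/wmi.h, but the exact callback signatures should be checked against the kernel version in use.

/* Hedged sketch of a bus-based WMI driver (hypothetical GUID and names). */
#include <linux/module.h>
#include <linux/wmi.h>

#define EXAMPLE_WMI_GUID "12345678-1234-1234-1234-123456789012" /* placeholder */

static int example_wmi_probe(struct wmi_device *wdev, const void *context)
{
	dev_info(&wdev->dev, "example WMI device bound\n");
	return 0;
}

static void example_wmi_remove(struct wmi_device *wdev)
{
	dev_info(&wdev->dev, "example WMI device unbound\n");
}

static const struct wmi_device_id example_wmi_id_table[] = {
	{ .guid_string = EXAMPLE_WMI_GUID },
	{ }
};
MODULE_DEVICE_TABLE(wmi, example_wmi_id_table);

static struct wmi_driver example_wmi_driver = {
	.driver = {
		.name = "example-wmi",
	},
	.id_table = example_wmi_id_table,
	.probe = example_wmi_probe,
	.remove = example_wmi_remove,
};
module_wmi_driver(example_wmi_driver);

MODULE_LICENSE("GPL");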
@@ -181,7 +181,7 @@ Bridge Operations
Bridge Connector Helper
-----------------------

.. kernel-doc:: drivers/gpu/drm/drm_bridge_connector.c
.. kernel-doc:: drivers/gpu/drm/display/drm_bridge_connector.c
   :doc: overview


@@ -204,7 +204,7 @@ MIPI-DSI bridge operation
Bridge Connector Helper Reference
---------------------------------

.. kernel-doc:: drivers/gpu/drm/drm_bridge_connector.c
.. kernel-doc:: drivers/gpu/drm/display/drm_bridge_connector.c
   :export:

Panel-Bridge Helper Reference
@@ -9,7 +9,7 @@ segments between trusted peers. It adds a new TCP header option with
a Message Authentication Code (MAC). MACs are produced from the content
of a TCP segment using a hashing function with a password known to both peers.
The intent of TCP-AO is to deprecate TCP-MD5 providing better security,
key rotation and support for variety of hashing algorithms.
key rotation and support for a variety of hashing algorithms.

1. Introduction
===============

@@ -164,9 +164,9 @@ A: It should not, no action needs to be performed [7.5.2.e]::
   is not available, no action is required (RNextKeyID of a received
   segment needs to match the MKT's SendID).

Q: How current_key is set and when does it change? It is a user-triggered
change, or is it by a request from the remote peer? Is it set by the user
explicitly, or by a matching rule?
Q: How is current_key set, and when does it change? Is it a user-triggered
change, or is it triggered by a request from the remote peer? Is it set by the
user explicitly, or by a matching rule?

A: current_key is set by RNextKeyID [6.1]::

@@ -233,8 +233,8 @@ always have one current_key [3.3]::

Q: Can a non-TCP-AO connection become a TCP-AO-enabled one?

A: No: for already established non-TCP-AO connection it would be impossible
to switch using TCP-AO as the traffic key generation requires the initial
A: No: for an already established non-TCP-AO connection it would be impossible
to switch to using TCP-AO, as the traffic key generation requires the initial
sequence numbers. Paraphrasing, starting using TCP-AO would require
re-establishing the TCP connection.

@@ -292,7 +292,7 @@ no transparency is really needed and modern BGP daemons already have

Linux provides a set of ``setsockopt()s`` and ``getsockopt()s`` that let
userspace manage TCP-AO on a per-socket basis. In order to add/delete MKTs
``TCP_AO_ADD_KEY`` and ``TCP_AO_DEL_KEY`` TCP socket options must be used
``TCP_AO_ADD_KEY`` and ``TCP_AO_DEL_KEY`` TCP socket options must be used.
It is not allowed to add a key on an established non-TCP-AO connection
as well as to remove the last key from TCP-AO connection.
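To make the ``setsockopt()`` flow above concrete, here is a minimal sketch of installing one MKT before ``connect()``. The peer address, IDs, algorithm name and key bytes are placeholders, and the ``struct tcp_ao_add`` field names are quoted from memory, so they should be double-checked against include/uapi/linux/tcp.h.

/* Hedged sketch: add a single TCP-AO key (MKT) on a not-yet-connected socket. */
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/tcp.h>	/* TCP_AO_ADD_KEY, struct tcp_ao_add (uapi) */

static int add_ao_key(int sk, const struct sockaddr_in *peer)
{
	struct tcp_ao_add key = {};

	memcpy(&key.addr, peer, sizeof(*peer));	/* peer this MKT matches */
	key.prefix = 32;			/* exact IPv4 address match */
	strcpy(key.alg_name, "hmac(sha256)");	/* kernel crypto algorithm name */
	key.sndid = 100;			/* placeholder SendID */
	key.rcvid = 100;			/* placeholder RecvID */
	key.keylen = 16;
	memcpy(key.key, "sixteen byte key", 16);	/* placeholder secret */

	/* Keys must be added before the connection is established. */
	return setsockopt(sk, IPPROTO_TCP, TCP_AO_ADD_KEY, &key, sizeof(key));
}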
@@ -361,7 +361,7 @@ not implemented.
4. ``setsockopt()`` vs ``accept()`` race
========================================

In contrast with TCP-MD5 established connection which has just one key,
In contrast with an established TCP-MD5 connection which has just one key,
TCP-AO connections may have many keys, which means that accepted connections
on a listen socket may have any amount of keys as well. As copying all those
keys on a first properly signed SYN would make the request socket bigger, that

@@ -374,7 +374,7 @@ keys from sockets that were already established, but not yet ``accept()``'ed,
hanging in the accept queue.

The reverse is valid as well: if userspace adds a new key for a peer on
a listener socket, the established sockets in accept queue won't
a listener socket, the established sockets in the accept queue won't
have the new keys.

At this moment, the resolution for the two races:

@@ -382,7 +382,7 @@ At this moment, the resolution for the two races:
and ``setsockopt(TCP_AO_DEL_KEY)`` vs ``accept()`` is delegated to userspace.
This means that it's expected that userspace would check the MKTs on the socket
that was returned by ``accept()`` to verify that any key rotation that
happened on listen socket is reflected on the newly established connection.
happened on the listen socket is reflected on the newly established connection.

This is a similar "do-nothing" approach to TCP-MD5 from the kernel side and
may be changed later by introducing new flags to ``tcp_ao_add``
@@ -355,6 +355,8 @@ just do it. As a result, a sequence of smaller series gets merged quicker and
with better review coverage. Re-posting large series also increases the mailing
list traffic.

.. _rcs:

Local variable ordering ("reverse xmas tree", "RCS")
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -391,6 +393,21 @@ APIs and helpers, especially scoped iterators. However, direct use of
``__free()`` within networking core and drivers is discouraged.
Similar guidance applies to declaring variables mid-function.

Clean-up patches
~~~~~~~~~~~~~~~~

Netdev discourages patches which perform simple clean-ups, which are not in
the context of other work. For example:

* Addressing ``checkpatch.pl`` warnings
* Addressing :ref:`Local variable ordering<rcs>` issues
* Conversions to device-managed APIs (``devm_`` helpers)

This is because it is felt that the churn that such changes produce comes
at a greater cost than the value of such clean-ups.

Conversely, spelling and grammar fixes are not discouraged.

Resending after review
~~~~~~~~~~~~~~~~~~~~~~
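Since the "reverse xmas tree" / RCS rule referenced in the hunks above trips up many first-time contributors, a small illustrative sketch follows (the structure, function and variable names are invented for the example): local variable declaration lines are ordered longest first, shortest last.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct example_priv {	/* hypothetical driver-private data */
	unsigned long rx_packets;
};

/* Longest declaration line first, shortest last ("reverse xmas tree"). */
static void example_rx_handler(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);
	unsigned int budget = 64;
	struct sk_buff *skb;
	int err;

	/* ... RX processing would use priv, budget, skb and err here ... */
}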
@@ -66,7 +66,7 @@ BPF scheduler and reverts all tasks back to CFS.

.. code-block:: none

    # make -j16 -C tools/sched_ext
    # tools/sched_ext/scx_simple
    # tools/sched_ext/build/bin/scx_simple
    local=0 global=3
    local=5 global=24
    local=9 global=44
@@ -8,7 +8,7 @@ Introduction
============

Many Dell notebooks made after ~2020 support a WMI-based interface for
retrieving various system data like battery temperature, ePPID, diagostic data
retrieving various system data like battery temperature, ePPID, diagnostic data
and fan/thermal sensor data.

This interface is likely used by the `Dell Data Vault` software on Windows,

@@ -277,7 +277,7 @@ Reverse-Engineering the DDV WMI interface
4. Try to deduce the meaning of a certain WMI method by comparing the control
   flow with other ACPI methods (_BIX or _BIF for battery related methods
   for example).
5. Use the built-in UEFI diagostics to view sensor types/values for fan/thermal
5. Use the built-in UEFI diagnostics to view sensor types/values for fan/thermal
   related methods (sometimes overwriting static ACPI data fields can be used
   to test different sensor type values, since on some machines this data is
   not reinitialized upon a warm reset).
115
MAINTAINERS
115
MAINTAINERS
@ -860,7 +860,7 @@ F: drivers/crypto/allwinner/
|
||||
|
||||
ALLWINNER DMIC DRIVERS
|
||||
M: Ban Tao <fengzheng923@gmail.com>
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
L: linux-sound@vger.kernel.org
|
||||
S: Maintained
|
||||
F: Documentation/devicetree/bindings/sound/allwinner,sun50i-h6-dmic.yaml
|
||||
F: sound/soc/sunxi/sun50i-dmic.c
|
||||
@ -1517,7 +1517,7 @@ F: drivers/iio/gyro/adxrs290.c
|
||||
ANALOG DEVICES INC ASOC CODEC DRIVERS
|
||||
M: Lars-Peter Clausen <lars@metafoo.de>
|
||||
M: Nuno Sá <nuno.sa@analog.com>
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
L: linux-sound@vger.kernel.org
|
||||
S: Supported
|
||||
W: http://wiki.analog.com/
|
||||
W: https://ez.analog.com/linux-software-drivers
|
||||
@ -1594,7 +1594,7 @@ F: drivers/rtc/rtc-goldfish.c
|
||||
AOA (Apple Onboard Audio) ALSA DRIVER
|
||||
M: Johannes Berg <johannes@sipsolutions.net>
|
||||
L: linuxppc-dev@lists.ozlabs.org
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
L: linux-sound@vger.kernel.org
|
||||
S: Maintained
|
||||
F: sound/aoa/
|
||||
|
||||
@ -2091,7 +2091,7 @@ F: drivers/crypto/amlogic/
|
||||
|
||||
ARM/Amlogic Meson SoC Sound Drivers
|
||||
M: Jerome Brunet <jbrunet@baylibre.com>
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
L: linux-sound@vger.kernel.org
|
||||
S: Maintained
|
||||
F: Documentation/devicetree/bindings/sound/amlogic*
|
||||
F: sound/soc/meson/
|
||||
@ -2129,7 +2129,7 @@ F: drivers/*/*alpine*
|
||||
ARM/APPLE MACHINE SOUND DRIVERS
|
||||
M: Martin Povišer <povik+lin@cutebit.org>
|
||||
L: asahi@lists.linux.dev
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
L: linux-sound@vger.kernel.org
|
||||
S: Maintained
|
||||
F: Documentation/devicetree/bindings/sound/adi,ssm3515.yaml
|
||||
F: Documentation/devicetree/bindings/sound/apple,*
|
||||
@ -3732,7 +3732,7 @@ F: arch/arm/boot/dts/microchip/at91-tse850-3.dts
|
||||
|
||||
AXENTIA ASOC DRIVERS
|
||||
M: Peter Rosin <peda@axentia.se>
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
L: linux-sound@vger.kernel.org
|
||||
S: Maintained
|
||||
F: Documentation/devicetree/bindings/sound/axentia,*
|
||||
F: sound/soc/atmel/tse850-pcm5142.c
|
||||
@ -4851,7 +4851,7 @@ F: include/uapi/linux/bsg.h
|
||||
|
||||
BT87X AUDIO DRIVER
|
||||
M: Clemens Ladisch <clemens@ladisch.de>
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
L: linux-sound@vger.kernel.org
|
||||
S: Maintained
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git
|
||||
F: Documentation/sound/cards/bt87x.rst
|
||||
@ -4913,7 +4913,7 @@ F: drivers/net/can/bxcan.c
|
||||
|
||||
C-MEDIA CMI8788 DRIVER
|
||||
M: Clemens Ladisch <clemens@ladisch.de>
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
L: linux-sound@vger.kernel.org
|
||||
S: Maintained
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git
|
||||
F: sound/pci/oxygen/
|
||||
@ -7832,6 +7832,8 @@ F: drivers/gpu/drm/xlnx/
|
||||
DRM GPU SCHEDULER
|
||||
M: Luben Tuikov <ltuikov89@gmail.com>
|
||||
M: Matthew Brost <matthew.brost@intel.com>
|
||||
M: Danilo Krummrich <dakr@kernel.org>
|
||||
M: Philipp Stanner <pstanner@redhat.com>
|
||||
L: dri-devel@lists.freedesktop.org
|
||||
S: Maintained
|
||||
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
|
||||
@ -8252,7 +8254,7 @@ F: drivers/edac/ti_edac.c
|
||||
|
||||
EDIROL UA-101/UA-1000 DRIVER
|
||||
M: Clemens Ladisch <clemens@ladisch.de>
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
L: linux-sound@vger.kernel.org
|
||||
S: Maintained
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git
|
||||
F: sound/usb/misc/ua101.c
|
||||
@ -8814,7 +8816,7 @@ F: drivers/net/can/usb/f81604.c
|
||||
FIREWIRE AUDIO DRIVERS and IEC 61883-1/6 PACKET STREAMING ENGINE
|
||||
M: Clemens Ladisch <clemens@ladisch.de>
|
||||
M: Takashi Sakamoto <o-takashi@sakamocchi.jp>
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
L: linux-sound@vger.kernel.org
|
||||
S: Maintained
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git
|
||||
F: include/uapi/sound/firewire.h
|
||||
@ -8888,7 +8890,7 @@ F: drivers/input/joystick/fsia6b.c
|
||||
|
||||
FOCUSRITE SCARLETT2 MIXER DRIVER (Scarlett Gen 2+ and Clarett)
|
||||
M: Geoffrey D. Bennett <g@b4.vu>
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
L: linux-sound@vger.kernel.org
|
||||
S: Maintained
|
||||
W: https://github.com/geoffreybennett/scarlett-gen2
|
||||
B: https://github.com/geoffreybennett/scarlett-gen2/issues
|
||||
@ -8912,6 +8914,7 @@ F: include/linux/fortify-string.h
|
||||
F: lib/fortify_kunit.c
|
||||
F: lib/memcpy_kunit.c
|
||||
F: lib/test_fortify/*
|
||||
K: \bunsafe_memcpy\b
|
||||
K: \b__NO_FORTIFY\b
|
||||
|
||||
FPGA DFL DRIVERS
|
||||
@ -9209,7 +9212,7 @@ M: Shengjiu Wang <shengjiu.wang@gmail.com>
|
||||
M: Xiubo Li <Xiubo.Lee@gmail.com>
|
||||
R: Fabio Estevam <festevam@gmail.com>
|
||||
R: Nicolin Chen <nicoleotsuka@gmail.com>
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
L: linux-sound@vger.kernel.org
|
||||
L: linuxppc-dev@lists.ozlabs.org
|
||||
S: Maintained
|
||||
F: sound/soc/fsl/fsl*
|
||||
@ -9219,7 +9222,7 @@ FREESCALE SOC LPC32XX SOUND DRIVERS
|
||||
M: J.M.B. Downing <jonathan.downing@nautel.com>
|
||||
M: Piotr Wojtaszczyk <piotr.wojtaszczyk@timesys.com>
|
||||
R: Vladimir Zapolskiy <vz@mleia.com>
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
L: linux-sound@vger.kernel.org
|
||||
L: linuxppc-dev@lists.ozlabs.org
|
||||
S: Maintained
|
||||
F: Documentation/devicetree/bindings/sound/nxp,lpc3220-i2s.yaml
|
||||
@ -9227,7 +9230,7 @@ F: sound/soc/fsl/lpc3xxx-*
|
||||
|
||||
FREESCALE SOC SOUND QMC DRIVER
|
||||
M: Herve Codina <herve.codina@bootlin.com>
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
L: linux-sound@vger.kernel.org
|
||||
L: linuxppc-dev@lists.ozlabs.org
|
||||
S: Maintained
|
||||
F: Documentation/devicetree/bindings/sound/fsl,qmc-audio.yaml
|
||||
@ -10267,7 +10270,7 @@ F: Documentation/devicetree/bindings/arm/hisilicon/low-pin-count.yaml
|
||||
F: drivers/bus/hisi_lpc.c
|
||||
|
||||
HISILICON NETWORK SUBSYSTEM 3 DRIVER (HNS3)
|
||||
M: Yisen Zhuang <yisen.zhuang@huawei.com>
|
||||
M: Jian Shen <shenjian15@huawei.com>
|
||||
M: Salil Mehta <salil.mehta@huawei.com>
|
||||
M: Jijie Shao <shaojijie@huawei.com>
|
||||
L: netdev@vger.kernel.org
|
||||
@ -10276,7 +10279,7 @@ W: http://www.hisilicon.com
|
||||
F: drivers/net/ethernet/hisilicon/hns3/
|
||||
|
||||
HISILICON NETWORK SUBSYSTEM DRIVER
|
||||
M: Yisen Zhuang <yisen.zhuang@huawei.com>
|
||||
M: Jian Shen <shenjian15@huawei.com>
|
||||
M: Salil Mehta <salil.mehta@huawei.com>
|
||||
L: netdev@vger.kernel.org
|
||||
S: Maintained
|
||||
@ -11154,7 +11157,7 @@ F: drivers/iio/pressure/dps310.c
|
||||
|
||||
INFINEON PEB2466 ASoC CODEC
|
||||
M: Herve Codina <herve.codina@bootlin.com>
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
L: linux-sound@vger.kernel.org
|
||||
S: Maintained
|
||||
F: Documentation/devicetree/bindings/sound/infineon,peb2466.yaml
|
||||
F: sound/soc/codecs/peb2466.c
|
||||
@ -11317,7 +11320,7 @@ M: Bard Liao <yung-chuan.liao@linux.intel.com>
|
||||
M: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
|
||||
M: Kai Vehmanen <kai.vehmanen@linux.intel.com>
|
||||
R: Pierre-Louis Bossart <pierre-louis.bossart@linux.dev>
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
L: linux-sound@vger.kernel.org
|
||||
S: Supported
|
||||
F: sound/soc/intel/
|
||||
|
||||
@ -11496,7 +11499,7 @@ F: include/uapi/linux/idxd.h
|
||||
|
||||
INTEL IN FIELD SCAN (IFS) DEVICE
|
||||
M: Jithu Joseph <jithu.joseph@intel.com>
|
||||
R: Ashok Raj <ashok.raj@intel.com>
|
||||
R: Ashok Raj <ashok.raj.linux@gmail.com>
|
||||
R: Tony Luck <tony.luck@intel.com>
|
||||
S: Maintained
|
||||
F: drivers/platform/x86/intel/ifs
|
||||
@ -12001,7 +12004,7 @@ F: drivers/tty/ipwireless/
|
||||
|
||||
IRON DEVICE AUDIO CODEC DRIVERS
|
||||
M: Kiseok Jo <kiseok.jo@irondevice.com>
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
L: linux-sound@vger.kernel.org
|
||||
S: Maintained
|
||||
F: Documentation/devicetree/bindings/sound/irondevice,*
|
||||
F: sound/soc/codecs/sma*
|
||||
@ -12343,6 +12346,7 @@ F: include/linux/randomize_kstack.h
|
||||
F: kernel/configs/hardening.config
|
||||
F: lib/usercopy_kunit.c
|
||||
F: mm/usercopy.c
|
||||
F: security/Kconfig.hardening
|
||||
K: \b(add|choose)_random_kstack_offset\b
|
||||
K: \b__check_(object_size|heap_object)\b
|
||||
K: \b__counted_by\b
|
||||
@ -12459,7 +12463,7 @@ F: virt/kvm/*
|
||||
KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64)
|
||||
M: Marc Zyngier <maz@kernel.org>
|
||||
M: Oliver Upton <oliver.upton@linux.dev>
|
||||
R: James Morse <james.morse@arm.com>
|
||||
R: Joey Gouly <joey.gouly@arm.com>
|
||||
R: Suzuki K Poulose <suzuki.poulose@arm.com>
|
||||
R: Zenghui Yu <yuzenghui@huawei.com>
|
||||
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
|
||||
@ -13952,7 +13956,7 @@ F: drivers/media/i2c/max96717.c
|
||||
|
||||
MAX9860 MONO AUDIO VOICE CODEC DRIVER
|
||||
M: Peter Rosin <peda@axentia.se>
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
L: linux-sound@vger.kernel.org
|
||||
S: Maintained
|
||||
F: Documentation/devicetree/bindings/sound/max9860.txt
|
||||
F: sound/soc/codecs/max9860.*
|
||||
@ -15085,7 +15089,7 @@ F: drivers/spi/spi-at91-usart.c
|
||||
|
||||
MICROCHIP AUDIO ASOC DRIVERS
|
||||
M: Claudiu Beznea <claudiu.beznea@tuxon.dev>
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
L: linux-sound@vger.kernel.org
|
||||
S: Supported
|
||||
F: Documentation/devicetree/bindings/sound/atmel*
|
||||
F: Documentation/devicetree/bindings/sound/axentia,tse850-pcm5142.txt
|
||||
@ -15957,7 +15961,7 @@ F: include/linux/mtd/*nand*.h
|
||||
|
||||
NATIVE INSTRUMENTS USB SOUND INTERFACE DRIVER
|
||||
M: Daniel Mack <zonque@gmail.com>
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
L: linux-sound@vger.kernel.org
|
||||
S: Maintained
|
||||
W: http://www.native-instruments.com
|
||||
F: sound/usb/caiaq/
|
||||
@ -16198,8 +16202,19 @@ F: lib/random32.c
|
||||
F: net/
|
||||
F: tools/net/
|
||||
F: tools/testing/selftests/net/
|
||||
X: Documentation/networking/mac80211-injection.rst
|
||||
X: Documentation/networking/mac80211_hwsim/
|
||||
X: Documentation/networking/regulatory.rst
|
||||
X: include/net/cfg80211.h
|
||||
X: include/net/ieee80211_radiotap.h
|
||||
X: include/net/iw_handler.h
|
||||
X: include/net/mac80211.h
|
||||
X: include/net/wext.h
|
||||
X: net/9p/
|
||||
X: net/bluetooth/
|
||||
X: net/mac80211/
|
||||
X: net/rfkill/
|
||||
X: net/wireless/
|
||||
|
||||
NETWORKING [IPSEC]
|
||||
M: Steffen Klassert <steffen.klassert@secunet.com>
|
||||
@ -16729,7 +16744,7 @@ F: drivers/extcon/extcon-ptn5150.c
|
||||
|
||||
NXP SGTL5000 DRIVER
|
||||
M: Fabio Estevam <festevam@gmail.com>
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
L: linux-sound@vger.kernel.org
|
||||
S: Maintained
|
||||
F: Documentation/devicetree/bindings/sound/fsl,sgtl5000.yaml
|
||||
F: sound/soc/codecs/sgtl5000*
|
||||
@ -16753,7 +16768,7 @@ K: "nxp,tda998x"
|
||||
|
||||
NXP TFA9879 DRIVER
|
||||
M: Peter Rosin <peda@axentia.se>
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
L: linux-sound@vger.kernel.org
|
||||
S: Maintained
|
||||
F: Documentation/devicetree/bindings/sound/nxp,tfa9879.yaml
|
||||
F: sound/soc/codecs/tfa9879*
|
||||
@ -16765,7 +16780,7 @@ F: drivers/nfc/nxp-nci
|
||||
|
||||
NXP/Goodix TFA989X (TFA1) DRIVER
|
||||
M: Stephan Gerhold <stephan@gerhold.net>
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
L: linux-sound@vger.kernel.org
|
||||
S: Maintained
|
||||
F: Documentation/devicetree/bindings/sound/nxp,tfa989x.yaml
|
||||
F: sound/soc/codecs/tfa989x.c
|
||||
@ -16851,7 +16866,7 @@ F: include/uapi/misc/ocxl.h
|
||||
OMAP AUDIO SUPPORT
|
||||
M: Peter Ujfalusi <peter.ujfalusi@gmail.com>
|
||||
M: Jarkko Nikula <jarkko.nikula@bitmer.com>
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
L: linux-sound@vger.kernel.org
|
||||
L: linux-omap@vger.kernel.org
|
||||
S: Maintained
|
||||
F: sound/soc/ti/n810.c
|
||||
@ -17408,7 +17423,7 @@ F: include/linux/pm_opp.h
|
||||
|
||||
OPL4 DRIVER
|
||||
M: Clemens Ladisch <clemens@ladisch.de>
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
L: linux-sound@vger.kernel.org
|
||||
S: Maintained
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git
|
||||
F: sound/drivers/opl4/
|
||||
@ -18799,7 +18814,7 @@ F: drivers/crypto/intel/qat/
|
||||
|
||||
QCOM AUDIO (ASoC) DRIVERS
|
||||
M: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
L: linux-sound@vger.kernel.org
|
||||
L: linux-arm-msm@vger.kernel.org
|
||||
S: Supported
|
||||
F: Documentation/devicetree/bindings/soc/qcom/qcom,apr*
|
||||
@ -19661,7 +19676,7 @@ F: drivers/net/ethernet/renesas/rtsn.*
|
||||
|
||||
RENESAS IDT821034 ASoC CODEC
|
||||
M: Herve Codina <herve.codina@bootlin.com>
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
L: linux-sound@vger.kernel.org
|
||||
S: Maintained
|
||||
F: Documentation/devicetree/bindings/sound/renesas,idt821034.yaml
|
||||
F: sound/soc/codecs/idt821034.c
|
||||
@ -20412,7 +20427,7 @@ F: security/safesetid/
|
||||
|
||||
SAMSUNG AUDIO (ASoC) DRIVERS
|
||||
M: Sylwester Nawrocki <s.nawrocki@samsung.com>
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
L: linux-sound@vger.kernel.org
|
||||
S: Maintained
|
||||
B: mailto:linux-samsung-soc@vger.kernel.org
|
||||
F: Documentation/devicetree/bindings/sound/samsung*
|
||||
@ -20948,7 +20963,7 @@ F: drivers/media/rc/serial_ir.c
|
||||
|
||||
SERIAL LOW-POWER INTER-CHIP MEDIA BUS (SLIMbus)
|
||||
M: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
L: linux-sound@vger.kernel.org
|
||||
S: Maintained
|
||||
F: Documentation/devicetree/bindings/slimbus/
|
||||
F: drivers/slimbus/
|
||||
@ -21382,7 +21397,7 @@ F: Documentation/devicetree/bindings/i2c/socionext,synquacer-i2c.yaml
|
||||
F: drivers/i2c/busses/i2c-synquacer.c
|
||||
|
||||
SOCIONEXT UNIPHIER SOUND DRIVER
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
L: linux-sound@vger.kernel.org
|
||||
S: Orphan
|
||||
F: sound/soc/uniphier/
|
||||
|
||||
@ -21641,7 +21656,7 @@ F: tools/testing/selftests/alsa
|
||||
|
||||
SOUND - COMPRESSED AUDIO
|
||||
M: Vinod Koul <vkoul@kernel.org>
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
L: linux-sound@vger.kernel.org
|
||||
S: Supported
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git
|
||||
F: Documentation/sound/designs/compress-offload.rst
|
||||
@ -21704,7 +21719,7 @@ M: Vinod Koul <vkoul@kernel.org>
|
||||
M: Bard Liao <yung-chuan.liao@linux.intel.com>
|
||||
R: Pierre-Louis Bossart <pierre-louis.bossart@linux.dev>
|
||||
R: Sanyog Kale <sanyog.r.kale@intel.com>
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
L: linux-sound@vger.kernel.org
|
||||
S: Supported
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/soundwire.git
|
||||
F: Documentation/driver-api/soundwire/
|
||||
@ -22177,7 +22192,7 @@ F: kernel/static_call.c
|
||||
|
||||
STI AUDIO (ASoC) DRIVERS
|
||||
M: Arnaud Pouliquen <arnaud.pouliquen@foss.st.com>
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
L: linux-sound@vger.kernel.org
|
||||
S: Maintained
|
||||
F: Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt
|
||||
F: sound/soc/sti/
|
||||
@ -22198,7 +22213,7 @@ F: drivers/media/usb/stk1160/
|
||||
STM32 AUDIO (ASoC) DRIVERS
|
||||
M: Olivier Moysan <olivier.moysan@foss.st.com>
|
||||
M: Arnaud Pouliquen <arnaud.pouliquen@foss.st.com>
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
L: linux-sound@vger.kernel.org
|
||||
S: Maintained
|
||||
F: Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml
|
||||
F: Documentation/devicetree/bindings/sound/st,stm32-*.yaml
|
||||
@ -22901,7 +22916,7 @@ F: drivers/irqchip/irq-xtensa-*
|
||||
|
||||
TEXAS INSTRUMENTS ASoC DRIVERS
|
||||
M: Peter Ujfalusi <peter.ujfalusi@gmail.com>
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
L: linux-sound@vger.kernel.org
|
||||
S: Maintained
|
||||
F: Documentation/devicetree/bindings/sound/davinci-mcasp-audio.yaml
|
||||
F: sound/soc/ti/
|
||||
@ -22910,7 +22925,7 @@ TEXAS INSTRUMENTS AUDIO (ASoC/HDA) DRIVERS
|
||||
M: Shenghao Ding <shenghao-ding@ti.com>
|
||||
M: Kevin Lu <kevin-lu@ti.com>
|
||||
M: Baojun Xu <baojun.xu@ti.com>
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
L: linux-sound@vger.kernel.org
|
||||
S: Maintained
|
||||
F: Documentation/devicetree/bindings/sound/tas2552.txt
|
||||
F: Documentation/devicetree/bindings/sound/ti,tas2562.yaml
|
||||
@ -23278,7 +23293,7 @@ F: drivers/soc/ti/*
|
||||
TI LM49xxx FAMILY ASoC CODEC DRIVERS
|
||||
M: M R Swami Reddy <mr.swami.reddy@ti.com>
|
||||
M: Vishwas A Deshpande <vishwas.a.deshpande@ti.com>
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
L: linux-sound@vger.kernel.org
|
||||
S: Maintained
|
||||
F: sound/soc/codecs/isabelle*
|
||||
F: sound/soc/codecs/lm49453*
|
||||
@ -23293,14 +23308,14 @@ F: drivers/iio/adc/ti-lmp92064.c
|
||||
|
||||
TI PCM3060 ASoC CODEC DRIVER
|
||||
M: Kirill Marinushkin <kmarinushkin@birdec.com>
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
L: linux-sound@vger.kernel.org
|
||||
S: Maintained
|
||||
F: Documentation/devicetree/bindings/sound/pcm3060.txt
|
||||
F: sound/soc/codecs/pcm3060*
|
||||
|
||||
TI TAS571X FAMILY ASoC CODEC DRIVER
|
||||
M: Kevin Cernekee <cernekee@chromium.org>
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
L: linux-sound@vger.kernel.org
|
||||
S: Odd Fixes
|
||||
F: sound/soc/codecs/tas571x*
|
||||
|
||||
@ -23328,7 +23343,7 @@ F: drivers/iio/adc/ti-tsc2046.c
|
||||
|
||||
TI TWL4030 SERIES SOC CODEC DRIVER
|
||||
M: Peter Ujfalusi <peter.ujfalusi@gmail.com>
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
L: linux-sound@vger.kernel.org
|
||||
S: Maintained
|
||||
F: sound/soc/codecs/twl4030*
|
||||
|
||||
@ -24004,7 +24019,7 @@ F: drivers/usb/storage/
|
||||
|
||||
USB MIDI DRIVER
|
||||
M: Clemens Ladisch <clemens@ladisch.de>
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
L: linux-sound@vger.kernel.org
|
||||
S: Maintained
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git
|
||||
F: sound/usb/midi.*
|
||||
@ -24182,8 +24197,12 @@ F: drivers/usb/host/xhci*
|
||||
|
||||
USER DATAGRAM PROTOCOL (UDP)
|
||||
M: Willem de Bruijn <willemdebruijn.kernel@gmail.com>
|
||||
L: netdev@vger.kernel.org
|
||||
S: Maintained
|
||||
F: include/linux/udp.h
|
||||
F: include/net/udp.h
|
||||
F: include/trace/events/udp.h
|
||||
F: include/uapi/linux/udp.h
|
||||
F: net/ipv4/udp.c
|
||||
F: net/ipv6/udp.c
|
||||
|
||||
@ -24664,7 +24683,7 @@ VIRTIO SOUND DRIVER
|
||||
M: Anton Yakovlev <anton.yakovlev@opensynergy.com>
|
||||
M: "Michael S. Tsirkin" <mst@redhat.com>
|
||||
L: virtualization@lists.linux.dev
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
L: linux-sound@vger.kernel.org
|
||||
S: Maintained
|
||||
F: include/uapi/linux/virtio_snd.h
|
||||
F: sound/virtio/*
|
||||
@ -25393,7 +25412,7 @@ F: include/xen/interface/io/usbif.h
|
||||
XEN SOUND FRONTEND DRIVER
|
||||
M: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
|
||||
L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
L: linux-sound@vger.kernel.org
|
||||
S: Supported
|
||||
F: sound/xen/*
|
||||
|
||||
@ -25409,7 +25428,7 @@ F: include/xen/arm/swiotlb-xen.h
|
||||
F: include/xen/swiotlb-xen.h
|
||||
|
||||
XFS FILESYSTEM
|
||||
M: Chandan Babu R <chandan.babu@oracle.com>
|
||||
M: Carlos Maiolino <cem@kernel.org>
|
||||
R: Darrick J. Wong <djwong@kernel.org>
|
||||
L: linux-xfs@vger.kernel.org
|
||||
S: Supported
|
||||
|
Makefile (4 changes)

@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 12
SUBLEVEL = 0
EXTRAVERSION = -rc1
EXTRAVERSION = -rc2
NAME = Baby Opossum Posse

# *DOCUMENTATION*

@@ -1645,7 +1645,7 @@ help:
echo '* dtbs - Build device tree blobs for enabled boards'; \
echo ' dtbs_install - Install dtbs to $(INSTALL_DTBS_PATH)'; \
echo ' dt_binding_check - Validate device tree binding documents and examples'; \
echo ' dt_binding_schema - Build processed device tree binding schemas'; \
echo ' dt_binding_schemas - Build processed device tree binding schemas'; \
echo ' dtbs_check - Validate device tree source files';\
echo '')
arch/Kconfig (18 changes)

@@ -838,7 +838,7 @@ config CFI_CLANG
config CFI_ICALL_NORMALIZE_INTEGERS
	bool "Normalize CFI tags for integers"
	depends on CFI_CLANG
	depends on $(cc-option,-fsanitize=kcfi -fsanitize-cfi-icall-experimental-normalize-integers)
	depends on HAVE_CFI_ICALL_NORMALIZE_INTEGERS
	help
	  This option normalizes the CFI tags for integer types so that all
	  integer types of the same size and signedness receive the same CFI

@@ -851,6 +851,22 @@ config CFI_ICALL_NORMALIZE_INTEGERS

	  This option is necessary for using CFI with Rust. If unsure, say N.

config HAVE_CFI_ICALL_NORMALIZE_INTEGERS
	def_bool !GCOV_KERNEL && !KASAN
	depends on CFI_CLANG
	depends on $(cc-option,-fsanitize=kcfi -fsanitize-cfi-icall-experimental-normalize-integers)
	help
	  Is CFI_ICALL_NORMALIZE_INTEGERS supported with the set of compilers
	  currently in use?

	  This option defaults to false if GCOV or KASAN is enabled, as there is
	  an LLVM bug that makes normalized integers tags incompatible with
	  KASAN and GCOV. Kconfig currently does not have the infrastructure to
	  detect whether your rustc compiler contains the fix for this bug, so
	  it is assumed that it doesn't. If your compiler has the fix, you can
	  explicitly enable this option in your config file. The Kconfig logic
	  needed to detect this will be added in a future kernel release.

config CFI_PERMISSIVE
	bool "Use CFI in permissive mode"
	depends on CFI_CLANG
@@ -200,7 +200,8 @@ config ARM64
	select HAVE_DMA_CONTIGUOUS
	select HAVE_DYNAMIC_FTRACE
	select HAVE_DYNAMIC_FTRACE_WITH_ARGS \
		if $(cc-option,-fpatchable-function-entry=2)
		if (GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS || \
		    CLANG_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS)
	select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS \
		if DYNAMIC_FTRACE_WITH_ARGS && DYNAMIC_FTRACE_WITH_CALL_OPS
	select HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS \

@@ -286,12 +287,10 @@ config CLANG_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS
	def_bool CC_IS_CLANG
	# https://github.com/ClangBuiltLinux/linux/issues/1507
	depends on AS_IS_GNU || (AS_IS_LLVM && (LD_IS_LLD || LD_VERSION >= 23600))
	select HAVE_DYNAMIC_FTRACE_WITH_ARGS

config GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS
	def_bool CC_IS_GCC
	depends on $(cc-option,-fpatchable-function-entry=2)
	select HAVE_DYNAMIC_FTRACE_WITH_ARGS

config 64BIT
	def_bool y

@@ -1097,6 +1096,7 @@ config ARM64_ERRATUM_3194386
	  * ARM Cortex-A78C erratum 3324346
	  * ARM Cortex-A78C erratum 3324347
	  * ARM Cortex-A710 erratum 3324338
	  * ARM Cortex-A715 erratum 3456084
	  * ARM Cortex-A720 erratum 3456091
	  * ARM Cortex-A725 erratum 3456106
	  * ARM Cortex-X1 erratum 3324344

@@ -1107,6 +1107,7 @@ config ARM64_ERRATUM_3194386
	  * ARM Cortex-X925 erratum 3324334
	  * ARM Neoverse-N1 erratum 3324349
	  * ARM Neoverse N2 erratum 3324339
	  * ARM Neoverse-N3 erratum 3456111
	  * ARM Neoverse-V1 erratum 3324341
	  * ARM Neoverse V2 erratum 3324336
	  * ARM Neoverse-V3 erratum 3312417
@@ -10,7 +10,7 @@
#
# Copyright (C) 1995-2001 by Russell King

LDFLAGS_vmlinux :=--no-undefined -X
LDFLAGS_vmlinux :=--no-undefined -X --pic-veneer

ifeq ($(CONFIG_RELOCATABLE), y)
# Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour
@@ -94,6 +94,7 @@
#define ARM_CPU_PART_NEOVERSE_V3 0xD84
#define ARM_CPU_PART_CORTEX_X925 0xD85
#define ARM_CPU_PART_CORTEX_A725 0xD87
#define ARM_CPU_PART_NEOVERSE_N3 0xD8E

#define APM_CPU_PART_XGENE 0x000
#define APM_CPU_VAR_POTENZA 0x00

@@ -176,6 +177,7 @@
#define MIDR_NEOVERSE_V3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3)
#define MIDR_CORTEX_X925 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X925)
#define MIDR_CORTEX_A725 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A725)
#define MIDR_NEOVERSE_N3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N3)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
@@ -1441,11 +1441,6 @@ void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val);
		sign_extend64(__val, id##_##fld##_WIDTH - 1);		\
	})

#define expand_field_sign(id, fld, val)					\
	(id##_##fld##_SIGNED ?						\
	 __expand_field_sign_signed(id, fld, val) :			\
	 __expand_field_sign_unsigned(id, fld, val))

#define get_idreg_field_unsigned(kvm, id, fld)				\
	({								\
		u64 __val = kvm_read_vm_id_reg((kvm), SYS_##id);	\

@@ -1461,20 +1456,26 @@ void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val);
#define get_idreg_field_enum(kvm, id, fld)				\
	get_idreg_field_unsigned(kvm, id, fld)

#define get_idreg_field(kvm, id, fld)					\
#define kvm_cmp_feat_signed(kvm, id, fld, op, limit)			\
	(get_idreg_field_signed((kvm), id, fld) op __expand_field_sign_signed(id, fld, limit))

#define kvm_cmp_feat_unsigned(kvm, id, fld, op, limit)			\
	(get_idreg_field_unsigned((kvm), id, fld) op __expand_field_sign_unsigned(id, fld, limit))

#define kvm_cmp_feat(kvm, id, fld, op, limit)				\
	(id##_##fld##_SIGNED ?						\
	 get_idreg_field_signed(kvm, id, fld) :				\
	 get_idreg_field_unsigned(kvm, id, fld))
	 kvm_cmp_feat_signed(kvm, id, fld, op, limit) :			\
	 kvm_cmp_feat_unsigned(kvm, id, fld, op, limit))

#define kvm_has_feat(kvm, id, fld, limit)				\
	(get_idreg_field((kvm), id, fld) >= expand_field_sign(id, fld, limit))
	kvm_cmp_feat(kvm, id, fld, >=, limit)

#define kvm_has_feat_enum(kvm, id, fld, val)				\
	(get_idreg_field_unsigned((kvm), id, fld) == __expand_field_sign_unsigned(id, fld, val))
	kvm_cmp_feat_unsigned(kvm, id, fld, ==, val)

#define kvm_has_feat_range(kvm, id, fld, min, max)			\
	(get_idreg_field((kvm), id, fld) >= expand_field_sign(id, fld, min) &&	\
	 get_idreg_field((kvm), id, fld) <= expand_field_sign(id, fld, max))
	(kvm_cmp_feat(kvm, id, fld, >=, min) &&				\
	 kvm_cmp_feat(kvm, id, fld, <=, max))

/* Check for a given level of PAuth support */
#define kvm_has_pauth(k, l)						\
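For readers following the refactor above, a hedged sketch of what the rewritten helper expands to (illustrative comment only; REG, FLD and LIMIT are generic stand-ins, not real field names):

/*
 * For an unsigned ID register field, kvm_has_feat(kvm, REG, FLD, LIMIT)
 * now goes through kvm_cmp_feat(kvm, REG, FLD, >=, LIMIT), i.e. roughly:
 *
 *   get_idreg_field_unsigned(kvm, REG, FLD) >=
 *           __expand_field_sign_unsigned(REG, FLD, LIMIT)
 *
 * which is the same comparison as before the change; the point of the
 * rework is that kvm_has_feat_enum() and kvm_has_feat_range() can now
 * reuse the same kvm_cmp_feat_*() building blocks with ==, >= and <=.
 */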
@@ -439,6 +439,7 @@ static const struct midr_range erratum_spec_ssbs_list[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A715),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A720),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A725),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),

@@ -447,8 +448,10 @@ static const struct midr_range erratum_spec_ssbs_list[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X3),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X4),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X925),
	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N3),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3),
@@ -338,7 +338,7 @@ static inline void __hyp_sve_save_host(void)
	struct cpu_sve_state *sve_state = *host_data_ptr(sve_state);

	sve_state->zcr_el1 = read_sysreg_el1(SYS_ZCR);
	write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
	write_sysreg_s(sve_vq_from_vl(kvm_host_sve_max_vl) - 1, SYS_ZCR_EL2);
	__sve_save_state(sve_state->sve_regs + sve_ffr_offset(kvm_host_sve_max_vl),
			 &sve_state->fpsr,
			 true);
@@ -33,7 +33,7 @@ static void __hyp_sve_save_guest(struct kvm_vcpu *vcpu)
	 */
	sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
	__sve_save_state(vcpu_sve_pffr(vcpu), &vcpu->arch.ctxt.fp_regs.fpsr, true);
	write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
	write_sysreg_s(sve_vq_from_vl(kvm_host_sve_max_vl) - 1, SYS_ZCR_EL2);
}

static void __hyp_sve_restore_host(void)

@@ -45,10 +45,11 @@ static void __hyp_sve_restore_host(void)
	 * the host. The layout of the data when saving the sve state depends
	 * on the VL, so use a consistent (i.e., the maximum) host VL.
	 *
	 * Setting ZCR_EL2 to ZCR_ELx_LEN_MASK sets the effective length
	 * supported by the system (or limited at EL3).
	 * Note that this constrains the PE to the maximum shared VL
	 * that was discovered, if we wish to use larger VLs this will
	 * need to be revisited.
	 */
	write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
	write_sysreg_s(sve_vq_from_vl(kvm_host_sve_max_vl) - 1, SYS_ZCR_EL2);
	__sve_restore_state(sve_state->sve_regs + sve_ffr_offset(kvm_host_sve_max_vl),
			    &sve_state->fpsr,
			    true);

@@ -488,7 +489,8 @@ void handle_trap(struct kvm_cpu_context *host_ctxt)
	case ESR_ELx_EC_SVE:
		cpacr_clear_set(0, CPACR_ELx_ZEN);
		isb();
		sve_cond_update_zcr_vq(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
		sve_cond_update_zcr_vq(sve_vq_from_vl(kvm_host_sve_max_vl) - 1,
				       SYS_ZCR_EL2);
		break;
	case ESR_ELx_EC_IABT_LOW:
	case ESR_ELx_EC_DABT_LOW:

@@ -574,12 +574,14 @@ int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
unlock:
	hyp_spin_unlock(&vm_table_lock);

	if (ret)
	if (ret) {
		unmap_donated_memory(hyp_vcpu, sizeof(*hyp_vcpu));
		return ret;
	}

	hyp_vcpu->vcpu.arch.cptr_el2 = kvm_get_reset_cptr_el2(&hyp_vcpu->vcpu);

	return ret;
	return 0;
}

static void
@@ -22,7 +22,7 @@ endif

ifneq ($(c-getrandom-y),)
CFLAGS_vgetrandom-32.o += -include $(c-getrandom-y)
CFLAGS_vgetrandom-64.o += -include $(c-getrandom-y) $(call cc-option, -ffixed-r30)
CFLAGS_vgetrandom-64.o += -include $(c-getrandom-y)
endif

# Build rules
@@ -777,8 +777,7 @@ config IRQ_STACKS
config THREAD_SIZE_ORDER
	int "Kernel stack size (in power-of-two numbers of page size)" if VMAP_STACK && EXPERT
	range 0 4
	default 1 if 32BIT && !KASAN
	default 3 if 64BIT && KASAN
	default 1 if 32BIT
	default 2
	help
	  Specify the Pages of thread stack size (from 4KB to 64KB), which also
@@ -13,7 +13,12 @@
#include <linux/sizes.h>

/* thread information allocation */
#define THREAD_SIZE_ORDER CONFIG_THREAD_SIZE_ORDER
#ifdef CONFIG_KASAN
#define KASAN_STACK_ORDER 1
#else
#define KASAN_STACK_ORDER 0
#endif
#define THREAD_SIZE_ORDER (CONFIG_THREAD_SIZE_ORDER + KASAN_STACK_ORDER)
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)

/*
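A quick worked example of the arithmetic introduced above (values taken from the Kconfig and header fragments shown; the 4 KiB page size is an assumption):

/*
 * With 4 KiB pages, CONFIG_THREAD_SIZE_ORDER = 2 (the 64BIT default above)
 * and KASAN enabled (KASAN_STACK_ORDER = 1):
 *
 *   THREAD_SIZE_ORDER = 2 + 1 = 3
 *   THREAD_SIZE       = PAGE_SIZE << 3 = 4 KiB * 8 = 32 KiB
 *
 * which matches the old "default 3 if 64BIT && KASAN" that the Kconfig
 * hunk above removes in favour of adding KASAN_STACK_ORDER in the header.
 */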
@@ -16,8 +16,10 @@
#include <asm/pci_io.h>

#define xlate_dev_mem_ptr xlate_dev_mem_ptr
#define kc_xlate_dev_mem_ptr xlate_dev_mem_ptr
void *xlate_dev_mem_ptr(phys_addr_t phys);
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
#define kc_unxlate_dev_mem_ptr unxlate_dev_mem_ptr
void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);

#define IO_SPACE_LIMIT 0
@@ -2,6 +2,8 @@
#ifndef _ASM_X86_FTRACE_H
#define _ASM_X86_FTRACE_H

#include <asm/ptrace.h>

#ifdef CONFIG_FUNCTION_TRACER
#ifndef CC_USING_FENTRY
# error Compiler does not support fentry?
@@ -26,7 +26,7 @@ void __noreturn machine_real_restart(unsigned int type);
#define MRR_APM 1

typedef void (cpu_emergency_virt_cb)(void);
#if IS_ENABLED(CONFIG_KVM_INTEL) || IS_ENABLED(CONFIG_KVM_AMD)
#if IS_ENABLED(CONFIG_KVM_X86)
void cpu_emergency_register_virt_callback(cpu_emergency_virt_cb *callback);
void cpu_emergency_unregister_virt_callback(cpu_emergency_virt_cb *callback);
void cpu_emergency_disable_virtualization(void);

@@ -34,7 +34,7 @@ void cpu_emergency_disable_virtualization(void);
static inline void cpu_emergency_register_virt_callback(cpu_emergency_virt_cb *callback) {}
static inline void cpu_emergency_unregister_virt_callback(cpu_emergency_virt_cb *callback) {}
static inline void cpu_emergency_disable_virtualization(void) {}
#endif /* CONFIG_KVM_INTEL || CONFIG_KVM_AMD */
#endif /* CONFIG_KVM_X86 */

typedef void (*nmi_shootdown_cb)(int, struct pt_regs*);
void nmi_shootdown_cpus(nmi_shootdown_cb callback);
@@ -530,7 +530,7 @@ static inline void kb_wait(void)

static inline void nmi_shootdown_cpus_on_restart(void);

#if IS_ENABLED(CONFIG_KVM_INTEL) || IS_ENABLED(CONFIG_KVM_AMD)
#if IS_ENABLED(CONFIG_KVM_X86)
/* RCU-protected callback to disable virtualization prior to reboot. */
static cpu_emergency_virt_cb __rcu *cpu_emergency_virt_callback;

@@ -600,7 +600,7 @@ static void emergency_reboot_disable_virtualization(void)
}
#else
static void emergency_reboot_disable_virtualization(void) { }
#endif /* CONFIG_KVM_INTEL || CONFIG_KVM_AMD */
#endif /* CONFIG_KVM_X86 */

void __attribute__((weak)) mach_reboot_fixups(void)
{
@@ -17,8 +17,8 @@ menuconfig VIRTUALIZATION

if VIRTUALIZATION

config KVM
	tristate "Kernel-based Virtual Machine (KVM) support"
config KVM_X86
	def_tristate KVM if KVM_INTEL || KVM_AMD
	depends on X86_LOCAL_APIC
	select KVM_COMMON
	select KVM_GENERIC_MMU_NOTIFIER

@@ -44,7 +44,11 @@ config KVM
	select HAVE_KVM_PM_NOTIFIER if PM
	select KVM_GENERIC_HARDWARE_ENABLING
	select KVM_GENERIC_PRE_FAULT_MEMORY
	select KVM_GENERIC_PRIVATE_MEM if KVM_SW_PROTECTED_VM
	select KVM_WERROR if WERROR

config KVM
	tristate "Kernel-based Virtual Machine (KVM) support"
	help
	  Support hosting fully virtualized guest machines using hardware
	  virtualization extensions. You will need a fairly recent

@@ -77,7 +81,6 @@ config KVM_SW_PROTECTED_VM
	bool "Enable support for KVM software-protected VMs"
	depends on EXPERT
	depends on KVM && X86_64
	select KVM_GENERIC_PRIVATE_MEM
	help
	  Enable support for KVM software-protected VMs. Currently, software-
	  protected VMs are purely a development and testing vehicle for
@@ -32,7 +32,7 @@ kvm-intel-y += vmx/vmx_onhyperv.o vmx/hyperv_evmcs.o
kvm-amd-y += svm/svm_onhyperv.o
endif

obj-$(CONFIG_KVM) += kvm.o
obj-$(CONFIG_KVM_X86) += kvm.o
obj-$(CONFIG_KVM_INTEL) += kvm-intel.o
obj-$(CONFIG_KVM_AMD) += kvm-amd.o
@@ -1884,10 +1884,14 @@ static bool sp_has_gptes(struct kvm_mmu_page *sp)
	if (is_obsolete_sp((_kvm), (_sp))) {			\
	} else

#define for_each_gfn_valid_sp_with_gptes(_kvm, _sp, _gfn)		\
#define for_each_gfn_valid_sp(_kvm, _sp, _gfn)				\
	for_each_valid_sp(_kvm, _sp,					\
	  &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)])	\
		if ((_sp)->gfn != (_gfn) || !sp_has_gptes(_sp)) {} else
		if ((_sp)->gfn != (_gfn)) {} else

#define for_each_gfn_valid_sp_with_gptes(_kvm, _sp, _gfn)		\
	for_each_gfn_valid_sp(_kvm, _sp, _gfn)				\
		if (!sp_has_gptes(_sp)) {} else

static bool kvm_sync_page_check(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{

@@ -7047,14 +7051,42 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm)
	kvm_mmu_zap_all(kvm);
}

/*
 * Zapping leaf SPTEs with memslot range when a memslot is moved/deleted.
 *
 * Zapping non-leaf SPTEs, a.k.a. not-last SPTEs, isn't required, worst
 * case scenario we'll have unused shadow pages lying around until they
 * are recycled due to age or when the VM is destroyed.
 */
static void kvm_mmu_zap_memslot_leafs(struct kvm *kvm, struct kvm_memory_slot *slot)
static void kvm_mmu_zap_memslot_pages_and_flush(struct kvm *kvm,
						struct kvm_memory_slot *slot,
						bool flush)
{
	LIST_HEAD(invalid_list);
	unsigned long i;

	if (list_empty(&kvm->arch.active_mmu_pages))
		goto out_flush;

	/*
	 * Since accounting information is stored in struct kvm_arch_memory_slot,
	 * shadow pages deletion (e.g. unaccount_shadowed()) requires that all
	 * gfns with a shadow page have a corresponding memslot. Do so before
	 * the memslot goes away.
	 */
	for (i = 0; i < slot->npages; i++) {
		struct kvm_mmu_page *sp;
		gfn_t gfn = slot->base_gfn + i;

		for_each_gfn_valid_sp(kvm, sp, gfn)
			kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);

		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
			kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
			flush = false;
			cond_resched_rwlock_write(&kvm->mmu_lock);
		}
	}

out_flush:
	kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
}

static void kvm_mmu_zap_memslot(struct kvm *kvm,
				struct kvm_memory_slot *slot)
{
	struct kvm_gfn_range range = {
		.slot = slot,

@@ -7062,11 +7094,11 @@ static void kvm_mmu_zap_memslot_leafs(struct kvm *kvm, struct kvm_memory_slot *s
		.end = slot->base_gfn + slot->npages,
		.may_block = true,
	};
	bool flush;

	write_lock(&kvm->mmu_lock);
	if (kvm_unmap_gfn_range(kvm, &range))
		kvm_flush_remote_tlbs_memslot(kvm, slot);

	flush = kvm_unmap_gfn_range(kvm, &range);
	kvm_mmu_zap_memslot_pages_and_flush(kvm, slot, flush);
	write_unlock(&kvm->mmu_lock);
}

@@ -7082,7 +7114,7 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
	if (kvm_memslot_flush_zap_all(kvm))
		kvm_mmu_zap_all_fast(kvm);
	else
		kvm_mmu_zap_memslot_leafs(kvm, slot);
		kvm_mmu_zap_memslot(kvm, slot);
}

void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
@ -56,8 +56,7 @@ new_segment:
|
||||
|
||||
/**
|
||||
* blk_rq_map_integrity_sg - Map integrity metadata into a scatterlist
|
||||
* @q: request queue
|
||||
* @bio: bio with integrity metadata attached
|
||||
* @rq: request to map
|
||||
* @sglist: target scatterlist
|
||||
*
|
||||
* Description: Map the integrity vectors in request into a
|
||||
|
@ -3166,7 +3166,7 @@ static u64 ioc_qos_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
|
||||
if (!dname)
|
||||
return 0;
|
||||
|
||||
spin_lock_irq(&ioc->lock);
|
||||
spin_lock(&ioc->lock);
|
||||
seq_printf(sf, "%s enable=%d ctrl=%s rpct=%u.%02u rlat=%u wpct=%u.%02u wlat=%u min=%u.%02u max=%u.%02u\n",
|
||||
dname, ioc->enabled, ioc->user_qos_params ? "user" : "auto",
|
||||
ioc->params.qos[QOS_RPPM] / 10000,
|
||||
@ -3179,7 +3179,7 @@ static u64 ioc_qos_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
|
||||
ioc->params.qos[QOS_MIN] % 10000 / 100,
|
||||
ioc->params.qos[QOS_MAX] / 10000,
|
||||
ioc->params.qos[QOS_MAX] % 10000 / 100);
|
||||
spin_unlock_irq(&ioc->lock);
|
||||
spin_unlock(&ioc->lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -3366,14 +3366,14 @@ static u64 ioc_cost_model_prfill(struct seq_file *sf,
|
||||
if (!dname)
|
||||
return 0;
|
||||
|
||||
spin_lock_irq(&ioc->lock);
|
||||
spin_lock(&ioc->lock);
|
||||
seq_printf(sf, "%s ctrl=%s model=linear "
|
||||
"rbps=%llu rseqiops=%llu rrandiops=%llu "
|
||||
"wbps=%llu wseqiops=%llu wrandiops=%llu\n",
|
||||
dname, ioc->user_cost_model ? "user" : "auto",
|
||||
u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
|
||||
u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS]);
|
||||
spin_unlock_irq(&ioc->lock);
|
||||
spin_unlock(&ioc->lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -63,7 +63,7 @@ static int cxl_dport_get_sbdf(struct pci_dev *dport_dev, u64 *sbdf)
|
||||
seg = bridge->domain_nr;
|
||||
|
||||
bus = pbus->number;
|
||||
*sbdf = (seg << 24) | (bus << 16) | dport_dev->devfn;
|
||||
*sbdf = (seg << 24) | (bus << 16) | (dport_dev->devfn << 8);
|
||||
|
||||
return 0;
|
||||
}
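As a quick sanity check of the devfn shift fixed above, this tiny standalone program packs a made-up segment/bus/devfn triple both ways. Only the layout (segment at bits 31:24, bus at 23:16, devfn at 15:8) comes from the new line; the values are invented.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t seg = 0x1, bus = 0x2a;
	uint64_t devfn = (3 << 3) | 1;	/* device 3, function 1 */

	uint64_t old = (seg << 24) | (bus << 16) | devfn;	 /* devfn lands in bits 7:0 */
	uint64_t new = (seg << 24) | (bus << 16) | (devfn << 8); /* devfn in bits 15:8 */

	printf("old: %#llx\n", (unsigned long long)old);	/* 0x12a0019 */
	printf("new: %#llx\n", (unsigned long long)new);	/* 0x12a1900 */
	return 0;
}
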
@ -703,28 +703,35 @@ static LIST_HEAD(acpi_battery_list);
|
||||
static LIST_HEAD(battery_hook_list);
|
||||
static DEFINE_MUTEX(hook_mutex);
|
||||
|
||||
static void __battery_hook_unregister(struct acpi_battery_hook *hook, int lock)
|
||||
static void battery_hook_unregister_unlocked(struct acpi_battery_hook *hook)
|
||||
{
|
||||
struct acpi_battery *battery;
|
||||
|
||||
/*
|
||||
* In order to remove a hook, we first need to
|
||||
* de-register all the batteries that are registered.
|
||||
*/
|
||||
if (lock)
|
||||
mutex_lock(&hook_mutex);
|
||||
list_for_each_entry(battery, &acpi_battery_list, list) {
|
||||
if (!hook->remove_battery(battery->bat, hook))
|
||||
power_supply_changed(battery->bat);
|
||||
}
|
||||
list_del(&hook->list);
|
||||
if (lock)
|
||||
mutex_unlock(&hook_mutex);
|
||||
list_del_init(&hook->list);
|
||||
|
||||
pr_info("extension unregistered: %s\n", hook->name);
|
||||
}
|
||||
|
||||
void battery_hook_unregister(struct acpi_battery_hook *hook)
|
||||
{
|
||||
__battery_hook_unregister(hook, 1);
|
||||
mutex_lock(&hook_mutex);
|
||||
/*
|
||||
* Ignore already unregistered battery hooks. This might happen
|
||||
* if a battery hook was previously unloaded due to an error when
|
||||
* adding a new battery.
|
||||
*/
|
||||
if (!list_empty(&hook->list))
|
||||
battery_hook_unregister_unlocked(hook);
|
||||
|
||||
mutex_unlock(&hook_mutex);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(battery_hook_unregister);
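The hunk above replaces the bool "lock" parameter with an explicitly _unlocked helper plus a locking wrapper. A minimal userspace sketch of that shape, with invented names and a pthread mutex standing in for hook_mutex:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t demo_mutex = PTHREAD_MUTEX_INITIALIZER;
static int nr_hooks;

static void demo_unregister_unlocked(const char *name)
{
	/* caller must hold demo_mutex */
	nr_hooks--;
	printf("unregistered: %s\n", name);
}

void demo_unregister(const char *name)
{
	pthread_mutex_lock(&demo_mutex);
	demo_unregister_unlocked(name);
	pthread_mutex_unlock(&demo_mutex);
}

void demo_register(const char *name)
{
	pthread_mutex_lock(&demo_mutex);
	nr_hooks++;
	if (nr_hooks > 1)	/* pretend registration failed */
		demo_unregister_unlocked(name);	/* already locked: no flag needed */
	pthread_mutex_unlock(&demo_mutex);
}

int main(void)
{
	demo_register("a");
	demo_register("b");	/* triggers the error path */
	demo_unregister("a");
	return nr_hooks;
}
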
@ -733,7 +740,6 @@ void battery_hook_register(struct acpi_battery_hook *hook)
|
||||
struct acpi_battery *battery;
|
||||
|
||||
mutex_lock(&hook_mutex);
|
||||
INIT_LIST_HEAD(&hook->list);
|
||||
list_add(&hook->list, &battery_hook_list);
|
||||
/*
|
||||
* Now that the driver is registered, we need
|
||||
@ -750,7 +756,7 @@ void battery_hook_register(struct acpi_battery_hook *hook)
|
||||
* hooks.
|
||||
*/
|
||||
pr_err("extension failed to load: %s", hook->name);
|
||||
__battery_hook_unregister(hook, 0);
|
||||
battery_hook_unregister_unlocked(hook);
|
||||
goto end;
|
||||
}
|
||||
|
||||
@ -804,7 +810,7 @@ static void battery_hook_add_battery(struct acpi_battery *battery)
|
||||
*/
|
||||
pr_err("error in extension, unloading: %s",
|
||||
hook_node->name);
|
||||
__battery_hook_unregister(hook_node, 0);
|
||||
battery_hook_unregister_unlocked(hook_node);
|
||||
}
|
||||
}
|
||||
mutex_unlock(&hook_mutex);
|
||||
@ -837,7 +843,7 @@ static void __exit battery_hook_exit(void)
|
||||
* need to remove the hooks.
|
||||
*/
|
||||
list_for_each_entry_safe(hook, ptr, &battery_hook_list, list) {
|
||||
__battery_hook_unregister(hook, 1);
|
||||
battery_hook_unregister(hook);
|
||||
}
|
||||
mutex_destroy(&hook_mutex);
|
||||
}
|
||||
|
@ -440,6 +440,13 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
|
||||
DMI_MATCH(DMI_BOARD_NAME, "S5602ZA"),
|
||||
},
|
||||
},
|
||||
{
|
||||
/* Asus Vivobook X1704VAP */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
|
||||
DMI_MATCH(DMI_BOARD_NAME, "X1704VAP"),
|
||||
},
|
||||
},
|
||||
{
|
||||
/* Asus ExpertBook B1402CBA */
|
||||
.matches = {
|
||||
@ -504,26 +511,26 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
|
||||
},
|
||||
},
|
||||
{
|
||||
/* Asus Vivobook Go E1404GAB */
|
||||
/* Asus ExpertBook B2502CVA */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
|
||||
DMI_MATCH(DMI_BOARD_NAME, "E1404GAB"),
|
||||
DMI_MATCH(DMI_BOARD_NAME, "B2502CVA"),
|
||||
},
|
||||
},
|
||||
{
|
||||
/* Asus Vivobook E1504GA */
|
||||
/* Asus Vivobook Go E1404GA* */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
|
||||
DMI_MATCH(DMI_BOARD_NAME, "E1404GA"),
|
||||
},
|
||||
},
|
||||
{
|
||||
/* Asus Vivobook E1504GA* */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
|
||||
DMI_MATCH(DMI_BOARD_NAME, "E1504GA"),
|
||||
},
|
||||
},
|
||||
{
|
||||
/* Asus Vivobook E1504GAB */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
|
||||
DMI_MATCH(DMI_BOARD_NAME, "E1504GAB"),
|
||||
},
|
||||
},
|
||||
{
|
||||
/* Asus Vivobook Pro N6506MV */
|
||||
.matches = {
|
||||
|
@ -844,6 +844,15 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
|
||||
* controller board in their ACPI tables (and may even have one), but
|
||||
* which need native backlight control nevertheless.
|
||||
*/
|
||||
{
|
||||
/* https://github.com/zabbly/linux/issues/26 */
|
||||
.callback = video_detect_force_native,
|
||||
/* Dell OptiPlex 5480 AIO */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 5480 AIO"),
|
||||
},
|
||||
},
|
||||
{
|
||||
/* https://bugzilla.redhat.com/show_bug.cgi?id=2303936 */
|
||||
.callback = video_detect_force_native,
|
||||
|
@ -361,6 +361,7 @@ ata_rw_frameinit(struct frame *f)
|
||||
}
|
||||
|
||||
ah->cmdstat = ATA_CMD_PIO_READ | writebit | extbit;
|
||||
dev_hold(t->ifp->nd);
|
||||
skb->dev = t->ifp->nd;
|
||||
}
|
||||
|
||||
@ -401,6 +402,8 @@ aoecmd_ata_rw(struct aoedev *d)
|
||||
__skb_queue_head_init(&queue);
|
||||
__skb_queue_tail(&queue, skb);
|
||||
aoenet_xmit(&queue);
|
||||
} else {
|
||||
dev_put(f->t->ifp->nd);
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
@ -483,10 +486,13 @@ resend(struct aoedev *d, struct frame *f)
|
||||
memcpy(h->dst, t->addr, sizeof h->dst);
|
||||
memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);
|
||||
|
||||
dev_hold(t->ifp->nd);
|
||||
skb->dev = t->ifp->nd;
|
||||
skb = skb_clone(skb, GFP_ATOMIC);
|
||||
if (skb == NULL)
|
||||
if (skb == NULL) {
|
||||
dev_put(t->ifp->nd);
|
||||
return;
|
||||
}
|
||||
f->sent = ktime_get();
|
||||
__skb_queue_head_init(&queue);
|
||||
__skb_queue_tail(&queue, skb);
|
||||
@ -617,6 +623,8 @@ probe(struct aoetgt *t)
|
||||
__skb_queue_head_init(&queue);
|
||||
__skb_queue_tail(&queue, skb);
|
||||
aoenet_xmit(&queue);
|
||||
} else {
|
||||
dev_put(f->t->ifp->nd);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1395,6 +1403,7 @@ aoecmd_ata_id(struct aoedev *d)
|
||||
ah->cmdstat = ATA_CMD_ID_ATA;
|
||||
ah->lba3 = 0xa0;
|
||||
|
||||
dev_hold(t->ifp->nd);
|
||||
skb->dev = t->ifp->nd;
|
||||
|
||||
d->rttavg = RTTAVG_INIT;
|
||||
@ -1404,6 +1413,8 @@ aoecmd_ata_id(struct aoedev *d)
|
||||
skb = skb_clone(skb, GFP_ATOMIC);
|
||||
if (skb)
|
||||
f->sent = ktime_get();
|
||||
else
|
||||
dev_put(t->ifp->nd);
|
||||
|
||||
return skb;
|
||||
}
|
||||
|
@ -4038,16 +4038,29 @@ static void btusb_disconnect(struct usb_interface *intf)
|
||||
static int btusb_suspend(struct usb_interface *intf, pm_message_t message)
|
||||
{
|
||||
struct btusb_data *data = usb_get_intfdata(intf);
|
||||
int err;
|
||||
|
||||
BT_DBG("intf %p", intf);
|
||||
|
||||
/* Don't suspend if there are connections */
|
||||
if (hci_conn_count(data->hdev))
|
||||
/* Don't auto-suspend if there are connections; external suspend calls
|
||||
* shall never fail.
|
||||
*/
|
||||
if (PMSG_IS_AUTO(message) && hci_conn_count(data->hdev))
|
||||
return -EBUSY;
|
||||
|
||||
if (data->suspend_count++)
|
||||
return 0;
|
||||
|
||||
/* Notify Host stack to suspend; this has to be done before stopping
|
||||
* the traffic since the hci_suspend_dev itself may generate some
|
||||
* traffic.
|
||||
*/
|
||||
err = hci_suspend_dev(data->hdev);
|
||||
if (err) {
|
||||
data->suspend_count--;
|
||||
return err;
|
||||
}
|
||||
|
||||
spin_lock_irq(&data->txlock);
|
||||
if (!(PMSG_IS_AUTO(message) && data->tx_in_flight)) {
|
||||
set_bit(BTUSB_SUSPENDING, &data->flags);
|
||||
@ -4055,6 +4068,7 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message)
|
||||
} else {
|
||||
spin_unlock_irq(&data->txlock);
|
||||
data->suspend_count--;
|
||||
hci_resume_dev(data->hdev);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
@ -4175,6 +4189,8 @@ static int btusb_resume(struct usb_interface *intf)
|
||||
spin_unlock_irq(&data->txlock);
|
||||
schedule_work(&data->work);
|
||||
|
||||
hci_resume_dev(data->hdev);
|
||||
|
||||
return 0;
|
||||
|
||||
failed:
|
||||
|
@ -2006,25 +2006,27 @@ static int virtcons_probe(struct virtio_device *vdev)
|
||||
multiport = true;
|
||||
}
|
||||
|
||||
err = init_vqs(portdev);
|
||||
if (err < 0) {
|
||||
dev_err(&vdev->dev, "Error %d initializing vqs\n", err);
|
||||
goto free_chrdev;
|
||||
}
|
||||
|
||||
spin_lock_init(&portdev->ports_lock);
|
||||
INIT_LIST_HEAD(&portdev->ports);
|
||||
INIT_LIST_HEAD(&portdev->list);
|
||||
|
||||
virtio_device_ready(portdev->vdev);
|
||||
|
||||
INIT_WORK(&portdev->config_work, &config_work_handler);
|
||||
INIT_WORK(&portdev->control_work, &control_work_handler);
|
||||
|
||||
if (multiport) {
|
||||
spin_lock_init(&portdev->c_ivq_lock);
|
||||
spin_lock_init(&portdev->c_ovq_lock);
|
||||
}
|
||||
|
||||
err = init_vqs(portdev);
|
||||
if (err < 0) {
|
||||
dev_err(&vdev->dev, "Error %d initializing vqs\n", err);
|
||||
goto free_chrdev;
|
||||
}
|
||||
|
||||
virtio_device_ready(portdev->vdev);
|
||||
|
||||
if (multiport) {
|
||||
err = fill_queue(portdev->c_ivq, &portdev->c_ivq_lock);
|
||||
if (err < 0) {
|
||||
dev_err(&vdev->dev,
|
||||
|
@ -1845,7 +1845,7 @@ static void intel_pstate_notify_work(struct work_struct *work)
|
||||
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
|
||||
}
|
||||
|
||||
static DEFINE_SPINLOCK(hwp_notify_lock);
|
||||
static DEFINE_RAW_SPINLOCK(hwp_notify_lock);
|
||||
static cpumask_t hwp_intr_enable_mask;
|
||||
|
||||
#define HWP_GUARANTEED_PERF_CHANGE_STATUS BIT(0)
|
||||
@ -1868,7 +1868,7 @@ void notify_hwp_interrupt(void)
|
||||
if (!(value & status_mask))
|
||||
return;
|
||||
|
||||
spin_lock_irqsave(&hwp_notify_lock, flags);
|
||||
raw_spin_lock_irqsave(&hwp_notify_lock, flags);
|
||||
|
||||
if (!cpumask_test_cpu(this_cpu, &hwp_intr_enable_mask))
|
||||
goto ack_intr;
|
||||
@ -1876,13 +1876,13 @@ void notify_hwp_interrupt(void)
|
||||
schedule_delayed_work(&all_cpu_data[this_cpu]->hwp_notify_work,
|
||||
msecs_to_jiffies(10));
|
||||
|
||||
spin_unlock_irqrestore(&hwp_notify_lock, flags);
|
||||
raw_spin_unlock_irqrestore(&hwp_notify_lock, flags);
|
||||
|
||||
return;
|
||||
|
||||
ack_intr:
|
||||
wrmsrl_safe(MSR_HWP_STATUS, 0);
|
||||
spin_unlock_irqrestore(&hwp_notify_lock, flags);
|
||||
raw_spin_unlock_irqrestore(&hwp_notify_lock, flags);
|
||||
}
|
||||
|
||||
static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
|
||||
@ -1895,9 +1895,9 @@ static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
|
||||
/* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
|
||||
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
|
||||
|
||||
spin_lock_irq(&hwp_notify_lock);
|
||||
raw_spin_lock_irq(&hwp_notify_lock);
|
||||
cancel_work = cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask);
|
||||
spin_unlock_irq(&hwp_notify_lock);
|
||||
raw_spin_unlock_irq(&hwp_notify_lock);
|
||||
|
||||
if (cancel_work)
|
||||
cancel_delayed_work_sync(&cpudata->hwp_notify_work);
|
||||
@ -1912,10 +1912,10 @@ static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
|
||||
if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) {
|
||||
u64 interrupt_mask = HWP_GUARANTEED_PERF_CHANGE_REQ;
|
||||
|
||||
spin_lock_irq(&hwp_notify_lock);
|
||||
raw_spin_lock_irq(&hwp_notify_lock);
|
||||
INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work);
|
||||
cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask);
|
||||
spin_unlock_irq(&hwp_notify_lock);
|
||||
raw_spin_unlock_irq(&hwp_notify_lock);
|
||||
|
||||
if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE))
|
||||
interrupt_mask |= HWP_HIGHEST_PERF_CHANGE_REQ;
|
||||
|
@ -86,7 +86,7 @@ static void dax_set_mapping(struct vm_fault *vmf, pfn_t pfn,
|
||||
nr_pages = 1;
|
||||
|
||||
pgoff = linear_page_index(vmf->vma,
|
||||
ALIGN(vmf->address, fault_size));
|
||||
ALIGN_DOWN(vmf->address, fault_size));
|
||||
|
||||
for (i = 0; i < nr_pages; i++) {
|
||||
struct page *page = pfn_to_page(pfn_t_to_pfn(pfn) + i);
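To see why the one-character change above matters, here is a small standalone program with macros equivalent to the kernel's ALIGN()/ALIGN_DOWN(); the example address and the 2 MiB fault size are made up.

#include <stdio.h>

#define ALIGN(x, a)	 (((x) + (a) - 1) & ~((a) - 1))
#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

int main(void)
{
	unsigned long addr = 0x200123456UL;	/* faulting address */
	unsigned long fault_size = 0x200000UL;	/* 2 MiB PMD fault */

	printf("ALIGN      -> %#lx (start of the *next* huge page)\n",
	       ALIGN(addr, fault_size));	/* 0x200200000 */
	printf("ALIGN_DOWN -> %#lx (huge page containing the fault)\n",
	       ALIGN_DOWN(addr, fault_size));	/* 0x200000000 */
	return 0;
}
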
@ -67,9 +67,11 @@ static bool sysfb_unregister(void)
|
||||
void sysfb_disable(struct device *dev)
|
||||
{
|
||||
struct screen_info *si = &screen_info;
|
||||
struct device *parent;
|
||||
|
||||
mutex_lock(&disable_lock);
|
||||
if (!dev || dev == sysfb_parent_dev(si)) {
|
||||
parent = sysfb_parent_dev(si);
|
||||
if (!dev || !parent || dev == parent) {
|
||||
sysfb_unregister();
|
||||
disabled = true;
|
||||
}
|
||||
|
@ -253,7 +253,7 @@ static int davinci_gpio_probe(struct platform_device *pdev)
|
||||
* serve as EDMA event triggers.
|
||||
*/
|
||||
|
||||
static void gpio_irq_disable(struct irq_data *d)
|
||||
static void gpio_irq_mask(struct irq_data *d)
|
||||
{
|
||||
struct davinci_gpio_regs __iomem *g = irq2regs(d);
|
||||
uintptr_t mask = (uintptr_t)irq_data_get_irq_handler_data(d);
|
||||
@ -262,7 +262,7 @@ static void gpio_irq_disable(struct irq_data *d)
|
||||
writel_relaxed(mask, &g->clr_rising);
|
||||
}
|
||||
|
||||
static void gpio_irq_enable(struct irq_data *d)
|
||||
static void gpio_irq_unmask(struct irq_data *d)
|
||||
{
|
||||
struct davinci_gpio_regs __iomem *g = irq2regs(d);
|
||||
uintptr_t mask = (uintptr_t)irq_data_get_irq_handler_data(d);
|
||||
@ -288,8 +288,8 @@ static int gpio_irq_type(struct irq_data *d, unsigned trigger)
|
||||
|
||||
static struct irq_chip gpio_irqchip = {
|
||||
.name = "GPIO",
|
||||
.irq_enable = gpio_irq_enable,
|
||||
.irq_disable = gpio_irq_disable,
|
||||
.irq_unmask = gpio_irq_unmask,
|
||||
.irq_mask = gpio_irq_mask,
|
||||
.irq_set_type = gpio_irq_type,
|
||||
.flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_SKIP_SET_WAKE,
|
||||
};
|
||||
@ -472,7 +472,7 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
|
||||
return PTR_ERR(clk);
|
||||
}
|
||||
|
||||
if (chips->gpio_unbanked) {
|
||||
if (!chips->gpio_unbanked) {
|
||||
irq = devm_irq_alloc_descs(dev, -1, 0, ngpio, 0);
|
||||
if (irq < 0) {
|
||||
dev_err(dev, "Couldn't allocate IRQ numbers\n");
|
||||
|
@ -114,12 +114,12 @@ const char *gpiod_get_label(struct gpio_desc *desc)
|
||||
srcu_read_lock_held(&desc->gdev->desc_srcu));
|
||||
|
||||
if (test_bit(FLAG_USED_AS_IRQ, &flags))
|
||||
return label->str ?: "interrupt";
|
||||
return label ? label->str : "interrupt";
|
||||
|
||||
if (!test_bit(FLAG_REQUESTED, &flags))
|
||||
return NULL;
|
||||
|
||||
return label->str;
|
||||
return label ? label->str : NULL;
|
||||
}
|
||||
|
||||
static void desc_free_label(struct rcu_head *rh)
|
||||
|
@ -770,6 +770,12 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
|
||||
return;
|
||||
}
|
||||
|
||||
/* Skip DMUB HPD IRQ in suspend/resume. We will probe them later. */
|
||||
if (notify->type == DMUB_NOTIFICATION_HPD && adev->in_suspend) {
|
||||
DRM_INFO("Skip DMUB HPD IRQ callback in suspend/resume\n");
|
||||
return;
|
||||
}
|
||||
|
||||
link_index = notify->link_index;
|
||||
link = adev->dm.dc->links[link_index];
|
||||
dev = adev->dm.ddev;
|
||||
@ -2026,7 +2032,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
|
||||
DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
|
||||
}
|
||||
|
||||
if (adev->dm.dc->caps.ips_support && adev->dm.dc->config.disable_ips == DMUB_IPS_ENABLE)
|
||||
if (adev->dm.dc->caps.ips_support &&
|
||||
adev->dm.dc->config.disable_ips != DMUB_IPS_DISABLE_ALL)
|
||||
adev->dm.idle_workqueue = idle_create_workqueue(adev);
|
||||
|
||||
if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
|
||||
@ -6735,12 +6742,21 @@ create_stream_for_sink(struct drm_connector *connector,
|
||||
if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
|
||||
stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
|
||||
stream->signal == SIGNAL_TYPE_EDP) {
|
||||
const struct dc_edid_caps *edid_caps;
|
||||
unsigned int disable_colorimetry = 0;
|
||||
|
||||
if (aconnector->dc_sink) {
|
||||
edid_caps = &aconnector->dc_sink->edid_caps;
|
||||
disable_colorimetry = edid_caps->panel_patch.disable_colorimetry;
|
||||
}
|
||||
|
||||
//
|
||||
// should decide stream support vsc sdp colorimetry capability
|
||||
// before building vsc info packet
|
||||
//
|
||||
stream->use_vsc_sdp_for_colorimetry = stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
|
||||
stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED;
|
||||
stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED &&
|
||||
!disable_colorimetry;
|
||||
|
||||
if (stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22)
|
||||
tf = TRANSFER_FUNC_GAMMA_22;
|
||||
|
@ -73,6 +73,10 @@ static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps)
|
||||
DRM_DEBUG_DRIVER("Clearing DPCD 0x317 on monitor with panel id %X\n", panel_id);
|
||||
edid_caps->panel_patch.remove_sink_ext_caps = true;
|
||||
break;
|
||||
case drm_edid_encode_panel_id('S', 'D', 'C', 0x4154):
|
||||
DRM_DEBUG_DRIVER("Disabling VSC on monitor with panel id %X\n", panel_id);
|
||||
edid_caps->panel_patch.disable_colorimetry = true;
|
||||
break;
|
||||
default:
|
||||
return;
|
||||
}
|
||||
|
@ -1027,6 +1027,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
|
||||
int remaining_to_try = 0;
|
||||
int ret;
|
||||
uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
|
||||
int var_pbn;
|
||||
|
||||
for (i = 0; i < count; i++) {
|
||||
if (vars[i + k].dsc_enabled
|
||||
@ -1057,13 +1058,18 @@ static int try_disable_dsc(struct drm_atomic_state *state,
|
||||
break;
|
||||
|
||||
DRM_DEBUG_DRIVER("MST_DSC index #%d, try no compression\n", next_index);
|
||||
var_pbn = vars[next_index].pbn;
|
||||
vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
|
||||
ret = drm_dp_atomic_find_time_slots(state,
|
||||
params[next_index].port->mgr,
|
||||
params[next_index].port,
|
||||
vars[next_index].pbn);
|
||||
if (ret < 0)
|
||||
if (ret < 0) {
|
||||
DRM_DEBUG_DRIVER("%s:%d MST_DSC index #%d, failed to set pbn to the state, %d\n",
|
||||
__func__, __LINE__, next_index, ret);
|
||||
vars[next_index].pbn = var_pbn;
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = drm_dp_mst_atomic_check(state);
|
||||
if (ret == 0) {
|
||||
@ -1071,14 +1077,17 @@ static int try_disable_dsc(struct drm_atomic_state *state,
|
||||
vars[next_index].dsc_enabled = false;
|
||||
vars[next_index].bpp_x16 = 0;
|
||||
} else {
|
||||
DRM_DEBUG_DRIVER("MST_DSC index #%d, restore minimum compression\n", next_index);
|
||||
vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps, fec_overhead_multiplier_x1000);
|
||||
DRM_DEBUG_DRIVER("MST_DSC index #%d, restore optimized pbn value\n", next_index);
|
||||
vars[next_index].pbn = var_pbn;
|
||||
ret = drm_dp_atomic_find_time_slots(state,
|
||||
params[next_index].port->mgr,
|
||||
params[next_index].port,
|
||||
vars[next_index].pbn);
|
||||
if (ret < 0)
|
||||
if (ret < 0) {
|
||||
DRM_DEBUG_DRIVER("%s:%d MST_DSC index #%d, failed to set pbn to the state, %d\n",
|
||||
__func__, __LINE__, next_index, ret);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
tried[next_index] = true;
|
||||
|
@ -178,6 +178,7 @@ struct dc_panel_patch {
|
||||
unsigned int skip_avmute;
|
||||
unsigned int mst_start_top_delay;
|
||||
unsigned int remove_sink_ext_caps;
|
||||
unsigned int disable_colorimetry;
|
||||
};
|
||||
|
||||
struct dc_edid_caps {
|
||||
|
@ -303,7 +303,6 @@ void build_unoptimized_policy_settings(enum dml_project_id project, struct dml_m
|
||||
if (project == dml_project_dcn35 ||
|
||||
project == dml_project_dcn351) {
|
||||
policy->DCCProgrammingAssumesScanDirectionUnknownFinal = false;
|
||||
policy->EnhancedPrefetchScheduleAccelerationFinal = 0;
|
||||
policy->AllowForPStateChangeOrStutterInVBlankFinal = dml_prefetch_support_uclk_fclk_and_stutter_if_possible; /*new*/
|
||||
policy->UseOnlyMaxPrefetchModes = 1;
|
||||
}
|
||||
|
@ -766,6 +766,7 @@ static const struct dc_debug_options debug_defaults_drv = {
|
||||
.disable_dmub_reallow_idle = false,
|
||||
.static_screen_wait_frames = 2,
|
||||
.notify_dpia_hr_bw = true,
|
||||
.min_disp_clk_khz = 50000,
|
||||
};
|
||||
|
||||
static const struct dc_panel_config panel_config_defaults = {
|
||||
|
@ -6083,6 +6083,7 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
|
||||
struct drm_dp_aux *immediate_upstream_aux;
|
||||
struct drm_dp_mst_port *fec_port;
|
||||
struct drm_dp_desc desc = {};
|
||||
u8 upstream_dsc;
|
||||
u8 endpoint_fec;
|
||||
u8 endpoint_dsc;
|
||||
|
||||
@ -6109,8 +6110,6 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
|
||||
|
||||
/* DP-to-DP peer device */
|
||||
if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) {
|
||||
u8 upstream_dsc;
|
||||
|
||||
if (drm_dp_dpcd_read(&port->aux,
|
||||
DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
|
||||
return NULL;
|
||||
@ -6156,6 +6155,13 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
|
||||
if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD)) {
|
||||
u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];
|
||||
|
||||
if (drm_dp_dpcd_read(immediate_upstream_aux,
|
||||
DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
|
||||
return NULL;
|
||||
|
||||
if (!(upstream_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED))
|
||||
return NULL;
|
||||
|
||||
if (drm_dp_read_dpcd_caps(immediate_upstream_aux, dpcd_ext) < 0)
|
||||
return NULL;
|
||||
|
||||
|
@ -521,8 +521,6 @@ int drm_atomic_helper_connector_hdmi_check(struct drm_connector *connector,
|
||||
}
|
||||
EXPORT_SYMBOL(drm_atomic_helper_connector_hdmi_check);
|
||||
|
||||
#define HDMI_MAX_INFOFRAME_SIZE 29
|
||||
|
||||
static int clear_device_infoframe(struct drm_connector *connector,
|
||||
enum hdmi_infoframe_type type)
|
||||
{
|
||||
@ -563,7 +561,7 @@ static int write_device_infoframe(struct drm_connector *connector,
|
||||
{
|
||||
const struct drm_connector_hdmi_funcs *funcs = connector->hdmi.funcs;
|
||||
struct drm_device *dev = connector->dev;
|
||||
u8 buffer[HDMI_MAX_INFOFRAME_SIZE];
|
||||
u8 buffer[HDMI_INFOFRAME_SIZE(MAX)];
|
||||
int ret;
|
||||
int len;
|
||||
|
||||
|
@ -543,7 +543,7 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
|
||||
&state->fb_damage_clips,
|
||||
val,
|
||||
-1,
|
||||
sizeof(struct drm_rect),
|
||||
sizeof(struct drm_mode_rect),
|
||||
&replaced);
|
||||
return ret;
|
||||
} else if (property == plane->scaling_filter_property) {
|
||||
|
@ -520,8 +520,6 @@ static const struct file_operations drm_connector_fops = {
|
||||
.write = connector_write
|
||||
};
|
||||
|
||||
#define HDMI_MAX_INFOFRAME_SIZE 29
|
||||
|
||||
static ssize_t
|
||||
audio_infoframe_read(struct file *filp, char __user *ubuf, size_t count, loff_t *ppos)
|
||||
{
|
||||
@ -579,7 +577,7 @@ static ssize_t _f##_read_infoframe(struct file *filp, \
|
||||
struct drm_connector *connector; \
|
||||
union hdmi_infoframe *frame; \
|
||||
struct drm_device *dev; \
|
||||
u8 buf[HDMI_MAX_INFOFRAME_SIZE]; \
|
||||
u8 buf[HDMI_INFOFRAME_SIZE(MAX)]; \
|
||||
ssize_t len = 0; \
|
||||
\
|
||||
connector = filp->private_data; \
|
||||
|
@ -1131,7 +1131,7 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
|
||||
GEM_WARN_ON(!i915_ttm_cpu_maps_iomem(bo->resource));
|
||||
}
|
||||
|
||||
if (wakeref & CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
|
||||
if (wakeref && CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND != 0)
|
||||
intel_wakeref_auto(&to_i915(obj->base.dev)->runtime_pm.userfault_wakeref,
|
||||
msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
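A minimal demo of the operator bug fixed above, using 250 as a stand-in for CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND: bitwise '&' only tests for overlapping bits, so a perfectly valid non-zero wakeref cookie can evaluate as false.

#include <stdio.h>

#define AUTOSUSPEND_MS 250	/* stand-in for the Kconfig value */

int main(void)
{
	unsigned long wakeref = 0x1000;	/* non-zero cookie, no low bits set */

	printf("wakeref &  AUTOSUSPEND_MS = %lu\n", wakeref & AUTOSUSPEND_MS);	/* 0 */
	printf("wakeref && AUTOSUSPEND_MS = %d\n", wakeref && AUTOSUSPEND_MS);	/* 1 */
	return 0;
}
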
@ -1383,6 +1383,7 @@ static const struct file_operations panthor_drm_driver_fops = {
|
||||
.read = drm_read,
|
||||
.llseek = noop_llseek,
|
||||
.mmap = panthor_mmap,
|
||||
.fop_flags = FOP_UNSIGNED_OFFSET,
|
||||
};
|
||||
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
|
@ -1251,9 +1251,17 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
|
||||
goto err_cleanup;
|
||||
}
|
||||
|
||||
/* drm_gpuvm_bo_obtain_prealloc() will call drm_gpuvm_bo_put() on our
|
||||
* pre-allocated BO if the <BO,VM> association exists. Given we
|
||||
* only have one ref on preallocated_vm_bo, drm_gpuvm_bo_destroy() will
|
||||
* be called immediately, and we have to hold the VM resv lock when
|
||||
* calling this function.
|
||||
*/
|
||||
dma_resv_lock(panthor_vm_resv(vm), NULL);
|
||||
mutex_lock(&bo->gpuva_list_lock);
|
||||
op_ctx->map.vm_bo = drm_gpuvm_bo_obtain_prealloc(preallocated_vm_bo);
|
||||
mutex_unlock(&bo->gpuva_list_lock);
|
||||
dma_resv_unlock(panthor_vm_resv(vm));
|
||||
|
||||
/* If a vm_bo for this <VM,BO> combination exists, it already
|
||||
* retains a pin ref, and we can release the one we took earlier.
|
||||
|
@ -1103,7 +1103,13 @@ cs_slot_sync_queue_state_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs
|
||||
list_move_tail(&group->wait_node,
|
||||
&group->ptdev->scheduler->groups.waiting);
|
||||
}
|
||||
group->blocked_queues |= BIT(cs_id);
|
||||
|
||||
/* The queue is only blocked if there's no deferred operation
|
||||
* pending, which can be checked through the scoreboard status.
|
||||
*/
|
||||
if (!cs_iface->output->status_scoreboards)
|
||||
group->blocked_queues |= BIT(cs_id);
|
||||
|
||||
queue->syncwait.gpu_va = cs_iface->output->status_wait_sync_ptr;
|
||||
queue->syncwait.ref = cs_iface->output->status_wait_sync_value;
|
||||
status_wait_cond = cs_iface->output->status_wait & CS_STATUS_WAIT_SYNC_COND_MASK;
|
||||
@ -2046,6 +2052,7 @@ static void
|
||||
tick_ctx_cleanup(struct panthor_scheduler *sched,
|
||||
struct panthor_sched_tick_ctx *ctx)
|
||||
{
|
||||
struct panthor_device *ptdev = sched->ptdev;
|
||||
struct panthor_group *group, *tmp;
|
||||
u32 i;
|
||||
|
||||
@ -2054,7 +2061,7 @@ tick_ctx_cleanup(struct panthor_scheduler *sched,
|
||||
/* If everything went fine, we should only have groups
|
||||
* to be terminated in the old_groups lists.
|
||||
*/
|
||||
drm_WARN_ON(&group->ptdev->base, !ctx->csg_upd_failed_mask &&
|
||||
drm_WARN_ON(&ptdev->base, !ctx->csg_upd_failed_mask &&
|
||||
group_can_run(group));
|
||||
|
||||
if (!group_can_run(group)) {
|
||||
@ -2077,7 +2084,7 @@ tick_ctx_cleanup(struct panthor_scheduler *sched,
|
||||
/* If everything went fine, the groups to schedule lists should
|
||||
* be empty.
|
||||
*/
|
||||
drm_WARN_ON(&group->ptdev->base,
|
||||
drm_WARN_ON(&ptdev->base,
|
||||
!ctx->csg_upd_failed_mask && !list_empty(&ctx->groups[i]));
|
||||
|
||||
list_for_each_entry_safe(group, tmp, &ctx->groups[i], run_node) {
|
||||
@ -3242,6 +3249,18 @@ int panthor_group_destroy(struct panthor_file *pfile, u32 group_handle)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct panthor_group *group_from_handle(struct panthor_group_pool *pool,
|
||||
u32 group_handle)
|
||||
{
|
||||
struct panthor_group *group;
|
||||
|
||||
xa_lock(&pool->xa);
|
||||
group = group_get(xa_load(&pool->xa, group_handle));
|
||||
xa_unlock(&pool->xa);
|
||||
|
||||
return group;
|
||||
}
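The new group_from_handle() above takes its reference while the xarray lock is held, so the group cannot be freed between lookup and use. A simplified userspace sketch of the same shape, with a fixed array instead of an xarray and a plain counter instead of a kref (all names invented):

#include <pthread.h>
#include <stdlib.h>

struct obj {
	int refcount;	/* protected by pool_lock in this sketch */
};

#define POOL_SIZE 16

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *pool[POOL_SIZE];	/* handle -> object */

static struct obj *obj_get(struct obj *o)
{
	if (o)
		o->refcount++;
	return o;
}

struct obj *obj_from_handle(unsigned int handle)
{
	struct obj *o = NULL;

	pthread_mutex_lock(&pool_lock);
	if (handle < POOL_SIZE)
		o = obj_get(pool[handle]);	/* reference taken under the lock */
	pthread_mutex_unlock(&pool_lock);

	return o;	/* caller owns one reference */
}

int main(void)
{
	struct obj *o;

	pool[3] = calloc(1, sizeof(*pool[3]));
	o = obj_from_handle(3);
	return o ? 0 : 1;
}
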
|
||||
|
||||
int panthor_group_get_state(struct panthor_file *pfile,
|
||||
struct drm_panthor_group_get_state *get_state)
|
||||
{
|
||||
@ -3253,7 +3272,7 @@ int panthor_group_get_state(struct panthor_file *pfile,
|
||||
if (get_state->pad)
|
||||
return -EINVAL;
|
||||
|
||||
group = group_get(xa_load(&gpool->xa, get_state->group_handle));
|
||||
group = group_from_handle(gpool, get_state->group_handle);
|
||||
if (!group)
|
||||
return -EINVAL;
|
||||
|
||||
@ -3384,7 +3403,7 @@ panthor_job_create(struct panthor_file *pfile,
|
||||
job->call_info.latest_flush = qsubmit->latest_flush;
|
||||
INIT_LIST_HEAD(&job->node);
|
||||
|
||||
job->group = group_get(xa_load(&gpool->xa, group_handle));
|
||||
job->group = group_from_handle(gpool, group_handle);
|
||||
if (!job->group) {
|
||||
ret = -EINVAL;
|
||||
goto err_put_job;
|
||||
@ -3424,13 +3443,8 @@ void panthor_job_update_resvs(struct drm_exec *exec, struct drm_sched_job *sched
|
||||
{
|
||||
struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
|
||||
|
||||
/* Still not sure why we want USAGE_WRITE for external objects, since I
|
||||
* was assuming this would be handled through explicit syncs being imported
|
||||
* to external BOs with DMA_BUF_IOCTL_IMPORT_SYNC_FILE, but other drivers
|
||||
* seem to pass DMA_RESV_USAGE_WRITE, so there must be a good reason.
|
||||
*/
|
||||
panthor_vm_update_resvs(job->group->vm, exec, &sched_job->s_fence->finished,
|
||||
DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_WRITE);
|
||||
DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
|
||||
}
|
||||
|
||||
void panthor_sched_unplug(struct panthor_device *ptdev)
|
||||
|
@ -133,8 +133,10 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
|
||||
{
|
||||
WARN_ON(!num_sched_list || !sched_list);
|
||||
|
||||
spin_lock(&entity->rq_lock);
|
||||
entity->sched_list = sched_list;
|
||||
entity->num_sched_list = num_sched_list;
|
||||
spin_unlock(&entity->rq_lock);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_sched_entity_modify_sched);
|
||||
|
||||
@ -380,7 +382,7 @@ static void drm_sched_entity_wakeup(struct dma_fence *f,
|
||||
container_of(cb, struct drm_sched_entity, cb);
|
||||
|
||||
drm_sched_entity_clear_dep(f, cb);
|
||||
drm_sched_wakeup(entity->rq->sched, entity);
|
||||
drm_sched_wakeup(entity->rq->sched);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -597,6 +599,9 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
|
||||
|
||||
/* first job wakes up scheduler */
|
||||
if (first) {
|
||||
struct drm_gpu_scheduler *sched;
|
||||
struct drm_sched_rq *rq;
|
||||
|
||||
/* Add the entity to the run queue */
|
||||
spin_lock(&entity->rq_lock);
|
||||
if (entity->stopped) {
|
||||
@ -606,13 +611,16 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
|
||||
return;
|
||||
}
|
||||
|
||||
drm_sched_rq_add_entity(entity->rq, entity);
|
||||
rq = entity->rq;
|
||||
sched = rq->sched;
|
||||
|
||||
drm_sched_rq_add_entity(rq, entity);
|
||||
spin_unlock(&entity->rq_lock);
|
||||
|
||||
if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
|
||||
drm_sched_rq_update_fifo(entity, submit_ts);
|
||||
|
||||
drm_sched_wakeup(entity->rq->sched, entity);
|
||||
drm_sched_wakeup(sched);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(drm_sched_entity_push_job);
|
||||
|
@ -1013,15 +1013,12 @@ EXPORT_SYMBOL(drm_sched_job_cleanup);
|
||||
/**
|
||||
* drm_sched_wakeup - Wake up the scheduler if it is ready to queue
|
||||
* @sched: scheduler instance
|
||||
* @entity: the scheduler entity
|
||||
*
|
||||
* Wake up the scheduler if we can queue jobs.
|
||||
*/
|
||||
void drm_sched_wakeup(struct drm_gpu_scheduler *sched,
|
||||
struct drm_sched_entity *entity)
|
||||
void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
|
||||
{
|
||||
if (drm_sched_can_queue(sched, entity))
|
||||
drm_sched_run_job_queue(sched);
|
||||
drm_sched_run_job_queue(sched);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -139,7 +139,15 @@ int hgsmi_update_pointer_shape(struct gen_pool *ctx, u32 flags,
|
||||
flags |= VBOX_MOUSE_POINTER_VISIBLE;
|
||||
}
|
||||
|
||||
p = hgsmi_buffer_alloc(ctx, sizeof(*p) + pixel_len, HGSMI_CH_VBVA,
|
||||
/*
|
||||
* The 4 extra bytes come from switching struct vbva_mouse_pointer_shape
|
||||
 * from having a 4-byte fixed array at the end to using a proper VLA
|
||||
* at the end. These 4 extra bytes were not subtracted from sizeof(*p)
|
||||
* before the switch to the VLA, so this way the behavior is unchanged.
|
||||
* Chances are these 4 extra bytes are not necessary but they are kept
|
||||
* to avoid regressions.
|
||||
*/
|
||||
p = hgsmi_buffer_alloc(ctx, sizeof(*p) + pixel_len + 4, HGSMI_CH_VBVA,
|
||||
VBVA_MOUSE_POINTER_SHAPE);
|
||||
if (!p)
|
||||
return -ENOMEM;
|
||||
|
@ -351,10 +351,8 @@ struct vbva_mouse_pointer_shape {
|
||||
* Bytes in the gap between the AND and the XOR mask are undefined.
|
||||
* XOR mask scanlines have no gap between them and size of XOR mask is:
|
||||
* xor_len = width * 4 * height.
|
||||
*
|
||||
* Preallocate 4 bytes for accessing actual data as p->data.
|
||||
*/
|
||||
u8 data[4];
|
||||
u8 data[];
|
||||
} __packed;
|
||||
|
||||
/* pointer is visible */
|
||||
|
@ -169,6 +169,8 @@
|
||||
#define XEHP_SLICE_COMMON_ECO_CHICKEN1 XE_REG_MCR(0x731c, XE_REG_OPTION_MASKED)
|
||||
#define MSC_MSAA_REODER_BUF_BYPASS_DISABLE REG_BIT(14)
|
||||
|
||||
#define XE2LPM_CCCHKNREG1 XE_REG(0x82a8)
|
||||
|
||||
#define VF_PREEMPTION XE_REG(0x83a4, XE_REG_OPTION_MASKED)
|
||||
#define PREEMPTION_VERTEX_COUNT REG_GENMASK(15, 0)
|
||||
|
||||
@ -378,6 +380,9 @@
|
||||
#define L3SQCREG3 XE_REG_MCR(0xb108)
|
||||
#define COMPPWOVERFETCHEN REG_BIT(28)
|
||||
|
||||
#define SCRATCH3_LBCF XE_REG_MCR(0xb154)
|
||||
#define RWFLUSHALLEN REG_BIT(17)
|
||||
|
||||
#define XEHP_L3SQCREG5 XE_REG_MCR(0xb158)
|
||||
#define L3_PWM_TIMER_INIT_VAL_MASK REG_GENMASK(9, 0)
|
||||
|
||||
@ -391,6 +396,12 @@
|
||||
#define SCRATCH1LPFC XE_REG(0xb474)
|
||||
#define EN_L3_RW_CCS_CACHE_FLUSH REG_BIT(0)
|
||||
|
||||
#define XE2LPM_L3SQCREG2 XE_REG_MCR(0xb604)
|
||||
|
||||
#define XE2LPM_L3SQCREG3 XE_REG_MCR(0xb608)
|
||||
|
||||
#define XE2LPM_SCRATCH3_LBCF XE_REG_MCR(0xb654)
|
||||
|
||||
#define XE2LPM_L3SQCREG5 XE_REG_MCR(0xb658)
|
||||
|
||||
#define XE2_TDF_CTRL XE_REG(0xb418)
|
||||
|
@ -680,8 +680,8 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
|
||||
tt_has_data = ttm && (ttm_tt_is_populated(ttm) ||
|
||||
(ttm->page_flags & TTM_TT_FLAG_SWAPPED));
|
||||
|
||||
move_lacks_source = handle_system_ccs ? (!bo->ccs_cleared) :
|
||||
(!mem_type_is_vram(old_mem_type) && !tt_has_data);
|
||||
move_lacks_source = !old_mem || (handle_system_ccs ? (!bo->ccs_cleared) :
|
||||
(!mem_type_is_vram(old_mem_type) && !tt_has_data));
|
||||
|
||||
needs_clear = (ttm && ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC) ||
|
||||
(!ttm && ttm_bo->type == ttm_bo_type_device);
|
||||
|
@ -171,10 +171,8 @@ static void xe_file_close(struct drm_device *dev, struct drm_file *file)
|
||||
xe_exec_queue_kill(q);
|
||||
xe_exec_queue_put(q);
|
||||
}
|
||||
mutex_lock(&xef->vm.lock);
|
||||
xa_for_each(&xef->vm.xa, idx, vm)
|
||||
xe_vm_close_and_put(vm);
|
||||
mutex_unlock(&xef->vm.lock);
|
||||
|
||||
xe_file_put(xef);
|
||||
|
||||
@ -298,6 +296,9 @@ static void xe_device_destroy(struct drm_device *dev, void *dummy)
|
||||
if (xe->unordered_wq)
|
||||
destroy_workqueue(xe->unordered_wq);
|
||||
|
||||
if (xe->destroy_wq)
|
||||
destroy_workqueue(xe->destroy_wq);
|
||||
|
||||
ttm_device_fini(&xe->ttm);
|
||||
}
|
||||
|
||||
@ -336,9 +337,7 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
|
||||
|
||||
init_waitqueue_head(&xe->ufence_wq);
|
||||
|
||||
err = drmm_mutex_init(&xe->drm, &xe->usm.lock);
|
||||
if (err)
|
||||
goto err;
|
||||
init_rwsem(&xe->usm.lock);
|
||||
|
||||
xa_init_flags(&xe->usm.asid_to_vm, XA_FLAGS_ALLOC);
|
||||
|
||||
@ -363,8 +362,9 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
|
||||
xe->preempt_fence_wq = alloc_ordered_workqueue("xe-preempt-fence-wq", 0);
|
||||
xe->ordered_wq = alloc_ordered_workqueue("xe-ordered-wq", 0);
|
||||
xe->unordered_wq = alloc_workqueue("xe-unordered-wq", 0, 0);
|
||||
xe->destroy_wq = alloc_workqueue("xe-destroy-wq", 0, 0);
|
||||
if (!xe->ordered_wq || !xe->unordered_wq ||
|
||||
!xe->preempt_fence_wq) {
|
||||
!xe->preempt_fence_wq || !xe->destroy_wq) {
|
||||
/*
|
||||
* Cleanup done in xe_device_destroy via
|
||||
* drmm_add_action_or_reset register above
|
||||
|
@ -369,7 +369,7 @@ struct xe_device {
|
||||
/** @usm.next_asid: next ASID, used to cyclical alloc asids */
|
||||
u32 next_asid;
|
||||
/** @usm.lock: protects UM state */
|
||||
struct mutex lock;
|
||||
struct rw_semaphore lock;
|
||||
} usm;
|
||||
|
||||
/** @pinned: pinned BO state */
|
||||
@ -396,6 +396,9 @@ struct xe_device {
|
||||
/** @unordered_wq: used to serialize unordered work, mostly display */
|
||||
struct workqueue_struct *unordered_wq;
|
||||
|
||||
/** @destroy_wq: used to serialize user destroy work, like queue */
|
||||
struct workqueue_struct *destroy_wq;
|
||||
|
||||
/** @tiles: device tiles */
|
||||
struct xe_tile tiles[XE_MAX_TILES_PER_DEVICE];
|
||||
|
||||
@ -567,15 +570,23 @@ struct xe_file {
|
||||
struct {
|
||||
/** @vm.xe: xarray to store VMs */
|
||||
struct xarray xa;
|
||||
/** @vm.lock: protects file VM state */
|
||||
/**
|
||||
 * @vm.lock: Protects VM lookup + reference and removal from the
 * file xarray. Not intended to be an outer lock which does
 * things while being held.
|
||||
*/
|
||||
struct mutex lock;
|
||||
} vm;
|
||||
|
||||
/** @exec_queue: Submission exec queue state for file */
|
||||
struct {
|
||||
/** @exec_queue.xe: xarray to store engines */
|
||||
/** @exec_queue.xa: xarray to store exec queues */
|
||||
struct xarray xa;
|
||||
/** @exec_queue.lock: protects file engine state */
|
||||
/**
|
||||
 * @exec_queue.lock: Protects exec queue lookup + reference and
 * removal from the file xarray. Not intended to be an outer
 * lock which does things while being held.
|
||||
*/
|
||||
struct mutex lock;
|
||||
} exec_queue;
|
||||
|
||||
|
@ -283,8 +283,15 @@ static void show_run_ticks(struct drm_printer *p, struct drm_file *file)
|
||||
|
||||
/* Accumulate all the exec queues from this client */
|
||||
mutex_lock(&xef->exec_queue.lock);
|
||||
xa_for_each(&xef->exec_queue.xa, i, q)
|
||||
xa_for_each(&xef->exec_queue.xa, i, q) {
|
||||
xe_exec_queue_get(q);
|
||||
mutex_unlock(&xef->exec_queue.lock);
|
||||
|
||||
xe_exec_queue_update_run_ticks(q);
|
||||
|
||||
mutex_lock(&xef->exec_queue.lock);
|
||||
xe_exec_queue_put(q);
|
||||
}
|
||||
mutex_unlock(&xef->exec_queue.lock);
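The reworked loop above takes a reference to the current queue, drops the mutex around xe_exec_queue_update_run_ticks() (which may sleep), then retakes it before putting the reference. A compact userspace sketch of that pattern; names and the sleeping stand-in are invented.

#include <pthread.h>
#include <unistd.h>

#define NQUEUES 4

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static int refs[NQUEUES];

static void update_run_ticks(int i)
{
	(void)i;
	usleep(1000);	/* stands in for work that must not run under q_lock */
}

void show_run_ticks(void)
{
	pthread_mutex_lock(&q_lock);
	for (int i = 0; i < NQUEUES; i++) {
		refs[i]++;			/* "get" while the lock is held */
		pthread_mutex_unlock(&q_lock);

		update_run_ticks(i);		/* blocking call, lock dropped */

		pthread_mutex_lock(&q_lock);
		refs[i]--;			/* "put" once the lock is back */
	}
	pthread_mutex_unlock(&q_lock);
}

int main(void)
{
	show_run_ticks();
	return 0;
}
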
|
||||
|
||||
/* Get the total GPU cycles */
|
||||
|
@ -635,14 +635,14 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
|
||||
}
|
||||
}
|
||||
|
||||
mutex_lock(&xef->exec_queue.lock);
|
||||
q->xef = xe_file_get(xef);
|
||||
|
||||
/* user id alloc must always be last in ioctl to prevent UAF */
|
||||
err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
|
||||
mutex_unlock(&xef->exec_queue.lock);
|
||||
if (err)
|
||||
goto kill_exec_queue;
|
||||
|
||||
args->exec_queue_id = id;
|
||||
q->xef = xe_file_get(xef);
|
||||
|
||||
return 0;
|
||||
|
||||
|
@ -90,6 +90,11 @@ void xe_sched_submission_stop(struct xe_gpu_scheduler *sched)
|
||||
cancel_work_sync(&sched->work_process_msg);
|
||||
}
|
||||
|
||||
void xe_sched_submission_resume_tdr(struct xe_gpu_scheduler *sched)
|
||||
{
|
||||
drm_sched_resume_timeout(&sched->base, sched->base.timeout);
|
||||
}
|
||||
|
||||
void xe_sched_add_msg(struct xe_gpu_scheduler *sched,
|
||||
struct xe_sched_msg *msg)
|
||||
{
|
||||
|
@ -22,6 +22,8 @@ void xe_sched_fini(struct xe_gpu_scheduler *sched);
|
||||
void xe_sched_submission_start(struct xe_gpu_scheduler *sched);
|
||||
void xe_sched_submission_stop(struct xe_gpu_scheduler *sched);
|
||||
|
||||
void xe_sched_submission_resume_tdr(struct xe_gpu_scheduler *sched);
|
||||
|
||||
void xe_sched_add_msg(struct xe_gpu_scheduler *sched,
|
||||
struct xe_sched_msg *msg);
|
||||
void xe_sched_add_msg_locked(struct xe_gpu_scheduler *sched,
|
||||
|
@ -237,11 +237,11 @@ int xe_gt_freq_init(struct xe_gt *gt)
|
||||
if (!gt->freq)
|
||||
return -ENOMEM;
|
||||
|
||||
err = devm_add_action(xe->drm.dev, freq_fini, gt->freq);
|
||||
err = sysfs_create_files(gt->freq, freq_attrs);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = sysfs_create_files(gt->freq, freq_attrs);
|
||||
err = devm_add_action_or_reset(xe->drm.dev, freq_fini, gt->freq);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
|
@ -439,7 +439,7 @@ void xe_gt_mcr_init(struct xe_gt *gt)
|
||||
if (gt->info.type == XE_GT_TYPE_MEDIA) {
|
||||
drm_WARN_ON(&xe->drm, MEDIA_VER(xe) < 13);
|
||||
|
||||
if (MEDIA_VER(xe) >= 20) {
|
||||
if (MEDIA_VERx100(xe) >= 1301) {
|
||||
gt->steering[OADDRM].ranges = xe2lpm_gpmxmt_steering_table;
|
||||
gt->steering[INSTANCE0].ranges = xe2lpm_instance0_steering_table;
|
||||
} else {
|
||||
|
@ -185,6 +185,21 @@ unlock_dma_resv:
|
||||
return err;
|
||||
}
|
||||
|
||||
static struct xe_vm *asid_to_vm(struct xe_device *xe, u32 asid)
|
||||
{
|
||||
struct xe_vm *vm;
|
||||
|
||||
down_read(&xe->usm.lock);
|
||||
vm = xa_load(&xe->usm.asid_to_vm, asid);
|
||||
if (vm && xe_vm_in_fault_mode(vm))
|
||||
xe_vm_get(vm);
|
||||
else
|
||||
vm = ERR_PTR(-EINVAL);
|
||||
up_read(&xe->usm.lock);
|
||||
|
||||
return vm;
|
||||
}
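asid_to_vm() above only reads the table, which is why the surrounding hunks turn xe->usm.lock into a rw_semaphore: many page faults can hold it for reading at once while VM create/destroy takes it for writing. A rough userspace analogue using pthread rwlocks; the table size and names are invented.

#include <pthread.h>
#include <stddef.h>

#define MAX_ASID 64

static pthread_rwlock_t usm_lock = PTHREAD_RWLOCK_INITIALIZER;
static void *asid_table[MAX_ASID];

void *lookup_vm(unsigned int asid)		/* hot path: readers run in parallel */
{
	void *vm = NULL;

	pthread_rwlock_rdlock(&usm_lock);
	if (asid < MAX_ASID)
		vm = asid_table[asid];
	pthread_rwlock_unlock(&usm_lock);

	return vm;
}

void install_vm(unsigned int asid, void *vm)	/* cold path: exclusive writer */
{
	pthread_rwlock_wrlock(&usm_lock);
	if (asid < MAX_ASID)
		asid_table[asid] = vm;
	pthread_rwlock_unlock(&usm_lock);
}

int main(void)
{
	int dummy;

	install_vm(1, &dummy);
	return lookup_vm(1) == &dummy ? 0 : 1;
}
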
|
||||
|
||||
static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
|
||||
{
|
||||
struct xe_device *xe = gt_to_xe(gt);
|
||||
@ -197,16 +212,9 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
|
||||
if (pf->trva_fault)
|
||||
return -EFAULT;
|
||||
|
||||
/* ASID to VM */
|
||||
mutex_lock(&xe->usm.lock);
|
||||
vm = xa_load(&xe->usm.asid_to_vm, pf->asid);
|
||||
if (vm && xe_vm_in_fault_mode(vm))
|
||||
xe_vm_get(vm);
|
||||
else
|
||||
vm = NULL;
|
||||
mutex_unlock(&xe->usm.lock);
|
||||
if (!vm)
|
||||
return -EINVAL;
|
||||
vm = asid_to_vm(xe, pf->asid);
|
||||
if (IS_ERR(vm))
|
||||
return PTR_ERR(vm);
|
||||
|
||||
/*
|
||||
* TODO: Change to read lock? Using write lock for simplicity.
|
||||
@ -548,14 +556,9 @@ static int handle_acc(struct xe_gt *gt, struct acc *acc)
|
||||
if (acc->access_type != ACC_TRIGGER)
|
||||
return -EINVAL;
|
||||
|
||||
/* ASID to VM */
|
||||
mutex_lock(&xe->usm.lock);
|
||||
vm = xa_load(&xe->usm.asid_to_vm, acc->asid);
|
||||
if (vm)
|
||||
xe_vm_get(vm);
|
||||
mutex_unlock(&xe->usm.lock);
|
||||
if (!vm || !xe_vm_in_fault_mode(vm))
|
||||
return -EINVAL;
|
||||
vm = asid_to_vm(xe, acc->asid);
|
||||
if (IS_ERR(vm))
|
||||
return PTR_ERR(vm);
|
||||
|
||||
down_read(&vm->lock);
|
||||
|
||||
|
@ -51,5 +51,5 @@ int xe_gt_sysfs_init(struct xe_gt *gt)
|
||||
|
||||
gt->sysfs = &kg->base;
|
||||
|
||||
return devm_add_action(xe->drm.dev, gt_sysfs_fini, gt);
|
||||
return devm_add_action_or_reset(xe->drm.dev, gt_sysfs_fini, gt);
|
||||
}
|
||||
|
@ -276,10 +276,26 @@ static struct workqueue_struct *get_submit_wq(struct xe_guc *guc)
|
||||
}
|
||||
#endif
|
||||
|
||||
static void xe_guc_submit_fini(struct xe_guc *guc)
|
||||
{
|
||||
struct xe_device *xe = guc_to_xe(guc);
|
||||
struct xe_gt *gt = guc_to_gt(guc);
|
||||
int ret;
|
||||
|
||||
ret = wait_event_timeout(guc->submission_state.fini_wq,
|
||||
xa_empty(&guc->submission_state.exec_queue_lookup),
|
||||
HZ * 5);
|
||||
|
||||
drain_workqueue(xe->destroy_wq);
|
||||
|
||||
xe_gt_assert(gt, ret);
|
||||
}
|
||||
|
||||
static void guc_submit_fini(struct drm_device *drm, void *arg)
|
||||
{
|
||||
struct xe_guc *guc = arg;
|
||||
|
||||
xe_guc_submit_fini(guc);
|
||||
xa_destroy(&guc->submission_state.exec_queue_lookup);
|
||||
free_submit_wq(guc);
|
||||
}
|
||||
@ -290,9 +306,15 @@ static void guc_submit_wedged_fini(void *arg)
|
||||
struct xe_exec_queue *q;
|
||||
unsigned long index;
|
||||
|
||||
xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
|
||||
if (exec_queue_wedged(q))
|
||||
mutex_lock(&guc->submission_state.lock);
|
||||
xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
|
||||
if (exec_queue_wedged(q)) {
|
||||
mutex_unlock(&guc->submission_state.lock);
|
||||
xe_exec_queue_put(q);
|
||||
mutex_lock(&guc->submission_state.lock);
|
||||
}
|
||||
}
|
||||
mutex_unlock(&guc->submission_state.lock);
|
||||
}
|
||||
|
||||
static const struct xe_exec_queue_ops guc_exec_queue_ops;
|
||||
@ -345,6 +367,8 @@ int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids)
|
||||
|
||||
xa_init(&guc->submission_state.exec_queue_lookup);
|
||||
|
||||
init_waitqueue_head(&guc->submission_state.fini_wq);
|
||||
|
||||
primelockdep(guc);
|
||||
|
||||
return drmm_add_action_or_reset(&xe->drm, guc_submit_fini, guc);
|
||||
@ -361,6 +385,9 @@ static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa
|
||||
|
||||
xe_guc_id_mgr_release_locked(&guc->submission_state.idm,
|
||||
q->guc->id, q->width);
|
||||
|
||||
if (xa_empty(&guc->submission_state.exec_queue_lookup))
|
||||
wake_up(&guc->submission_state.fini_wq);
|
||||
}
|
||||
|
||||
static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
|
||||
@ -1268,13 +1295,16 @@ static void __guc_exec_queue_fini_async(struct work_struct *w)
|
||||
|
||||
static void guc_exec_queue_fini_async(struct xe_exec_queue *q)
|
||||
{
|
||||
struct xe_guc *guc = exec_queue_to_guc(q);
|
||||
struct xe_device *xe = guc_to_xe(guc);
|
||||
|
||||
INIT_WORK(&q->guc->fini_async, __guc_exec_queue_fini_async);
|
||||
|
||||
/* We must block on kernel engines so slabs are empty on driver unload */
|
||||
if (q->flags & EXEC_QUEUE_FLAG_PERMANENT || exec_queue_wedged(q))
|
||||
__guc_exec_queue_fini_async(&q->guc->fini_async);
|
||||
else
|
||||
queue_work(system_wq, &q->guc->fini_async);
|
||||
queue_work(xe->destroy_wq, &q->guc->fini_async);
|
||||
}
|
||||
|
||||
static void __guc_exec_queue_fini(struct xe_guc *guc, struct xe_exec_queue *q)
|
||||
@ -1796,6 +1826,7 @@ static void guc_exec_queue_start(struct xe_exec_queue *q)
|
||||
}
|
||||
|
||||
xe_sched_submission_start(sched);
|
||||
xe_sched_submission_resume_tdr(sched);
|
||||
}
|
||||
|
||||
int xe_guc_submit_start(struct xe_guc *guc)
|
||||
|
@ -81,6 +81,8 @@ struct xe_guc {
|
||||
#endif
|
||||
/** @submission_state.enabled: submission is enabled */
|
||||
bool enabled;
|
||||
/** @submission_state.fini_wq: submit fini wait queue */
|
||||
wait_queue_head_t fini_wq;
|
||||
} submission_state;
|
||||
/** @hwconfig: Hardware config state */
|
||||
struct {
|
||||
|
@ -709,8 +709,7 @@ static int xe_oa_configure_oar_context(struct xe_oa_stream *stream, bool enable)
|
||||
{
|
||||
RING_CONTEXT_CONTROL(stream->hwe->mmio_base),
|
||||
regs_offset + CTX_CONTEXT_CONTROL,
|
||||
_MASKED_FIELD(CTX_CTRL_OAC_CONTEXT_ENABLE,
|
||||
enable ? CTX_CTRL_OAC_CONTEXT_ENABLE : 0)
|
||||
_MASKED_BIT_ENABLE(CTX_CTRL_OAC_CONTEXT_ENABLE),
|
||||
},
|
||||
};
|
||||
struct xe_oa_reg reg_lri = { OAR_OACONTROL, oacontrol };
|
||||
@ -742,10 +741,8 @@ static int xe_oa_configure_oac_context(struct xe_oa_stream *stream, bool enable)
|
||||
{
|
||||
RING_CONTEXT_CONTROL(stream->hwe->mmio_base),
|
||||
regs_offset + CTX_CONTEXT_CONTROL,
|
||||
_MASKED_FIELD(CTX_CTRL_OAC_CONTEXT_ENABLE,
|
||||
enable ? CTX_CTRL_OAC_CONTEXT_ENABLE : 0) |
|
||||
_MASKED_FIELD(CTX_CTRL_RUN_ALONE,
|
||||
enable ? CTX_CTRL_RUN_ALONE : 0),
|
||||
_MASKED_BIT_ENABLE(CTX_CTRL_OAC_CONTEXT_ENABLE) |
|
||||
_MASKED_FIELD(CTX_CTRL_RUN_ALONE, enable ? CTX_CTRL_RUN_ALONE : 0),
|
||||
},
|
||||
};
|
||||
struct xe_oa_reg reg_lri = { OAC_OACONTROL, oacontrol };
|
||||
|
@ -924,6 +924,8 @@ static int xe_pci_resume(struct device *dev)
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
pci_restore_state(pdev);
|
||||
|
||||
err = pci_enable_device(pdev);
|
||||
if (err)
|
||||
return err;
|
||||
|
@ -2188,5 +2188,5 @@ void xe_pt_update_ops_abort(struct xe_tile *tile, struct xe_vma_ops *vops)
|
||||
pt_op->num_entries);
|
||||
}
|
||||
|
||||
xe_bo_put_commit(&vops->pt_update_ops[tile->id].deferred);
|
||||
xe_pt_update_ops_fini(tile, vops);
|
||||
}
|
||||
|
@ -42,20 +42,48 @@ static const struct xe_rtp_entry_sr gt_tunings[] = {
|
||||
XE_RTP_ACTIONS(CLR(CCCHKNREG1, ENCOMPPERFFIX),
|
||||
SET(CCCHKNREG1, L3CMPCTRL))
|
||||
},
|
||||
{ XE_RTP_NAME("Tuning: Compression Overfetch - media"),
|
||||
XE_RTP_RULES(MEDIA_VERSION(2000)),
|
||||
XE_RTP_ACTIONS(CLR(XE2LPM_CCCHKNREG1, ENCOMPPERFFIX),
|
||||
SET(XE2LPM_CCCHKNREG1, L3CMPCTRL))
|
||||
},
|
||||
{ XE_RTP_NAME("Tuning: Enable compressible partial write overfetch in L3"),
|
||||
XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED)),
|
||||
XE_RTP_ACTIONS(SET(L3SQCREG3, COMPPWOVERFETCHEN))
|
||||
},
|
||||
{ XE_RTP_NAME("Tuning: Enable compressible partial write overfetch in L3 - media"),
|
||||
XE_RTP_RULES(MEDIA_VERSION(2000)),
|
||||
XE_RTP_ACTIONS(SET(XE2LPM_L3SQCREG3, COMPPWOVERFETCHEN))
|
||||
},
|
||||
{ XE_RTP_NAME("Tuning: L2 Overfetch Compressible Only"),
|
||||
XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED)),
|
||||
XE_RTP_ACTIONS(SET(L3SQCREG2,
|
||||
COMPMEMRD256BOVRFETCHEN))
|
||||
},
|
||||
{ XE_RTP_NAME("Tuning: L2 Overfetch Compressible Only - media"),
|
||||
XE_RTP_RULES(MEDIA_VERSION(2000)),
|
||||
XE_RTP_ACTIONS(SET(XE2LPM_L3SQCREG2,
|
||||
COMPMEMRD256BOVRFETCHEN))
|
||||
},
|
||||
{ XE_RTP_NAME("Tuning: Stateless compression control"),
|
||||
XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED)),
|
||||
XE_RTP_ACTIONS(FIELD_SET(STATELESS_COMPRESSION_CTRL, UNIFIED_COMPRESSION_FORMAT,
|
||||
REG_FIELD_PREP(UNIFIED_COMPRESSION_FORMAT, 0)))
|
||||
},
|
||||
{ XE_RTP_NAME("Tuning: Stateless compression control - media"),
|
||||
XE_RTP_RULES(MEDIA_VERSION_RANGE(1301, 2000)),
|
||||
XE_RTP_ACTIONS(FIELD_SET(STATELESS_COMPRESSION_CTRL, UNIFIED_COMPRESSION_FORMAT,
|
||||
REG_FIELD_PREP(UNIFIED_COMPRESSION_FORMAT, 0)))
|
||||
},
|
||||
{ XE_RTP_NAME("Tuning: L3 RW flush all Cache"),
|
||||
XE_RTP_RULES(GRAPHICS_VERSION(2004)),
|
||||
XE_RTP_ACTIONS(SET(SCRATCH3_LBCF, RWFLUSHALLEN))
|
||||
},
|
||||
{ XE_RTP_NAME("Tuning: L3 RW flush all cache - media"),
|
||||
XE_RTP_RULES(MEDIA_VERSION(2000)),
|
||||
XE_RTP_ACTIONS(SET(XE2LPM_SCRATCH3_LBCF, RWFLUSHALLEN))
|
||||
},
|
||||
|
||||
{}
|
||||
};
|
||||
|
||||
|
@ -1613,7 +1613,7 @@ void xe_vm_close_and_put(struct xe_vm *vm)
|
||||
|
||||
up_write(&vm->lock);
|
||||
|
||||
mutex_lock(&xe->usm.lock);
|
||||
down_write(&xe->usm.lock);
|
||||
if (vm->usm.asid) {
|
||||
void *lookup;
|
||||
|
||||
@ -1623,7 +1623,7 @@ void xe_vm_close_and_put(struct xe_vm *vm)
|
||||
lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
|
||||
xe_assert(xe, lookup == vm);
|
||||
}
|
||||
mutex_unlock(&xe->usm.lock);
|
||||
up_write(&xe->usm.lock);
|
||||
|
||||
for_each_tile(tile, xe, id)
|
||||
xe_range_fence_tree_fini(&vm->rftree[id]);
|
||||
@ -1765,25 +1765,18 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
|
||||
if (IS_ERR(vm))
|
||||
return PTR_ERR(vm);
|
||||
|
||||
mutex_lock(&xef->vm.lock);
|
||||
err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
|
||||
mutex_unlock(&xef->vm.lock);
|
||||
if (err)
|
||||
goto err_close_and_put;
|
||||
|
||||
if (xe->info.has_asid) {
|
||||
mutex_lock(&xe->usm.lock);
|
||||
down_write(&xe->usm.lock);
|
||||
err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
|
||||
XA_LIMIT(1, XE_MAX_ASID - 1),
|
||||
&xe->usm.next_asid, GFP_KERNEL);
|
||||
mutex_unlock(&xe->usm.lock);
|
||||
up_write(&xe->usm.lock);
|
||||
if (err < 0)
|
||||
goto err_free_id;
|
||||
goto err_close_and_put;
|
||||
|
||||
vm->usm.asid = asid;
|
||||
}
|
||||
|
||||
args->vm_id = id;
|
||||
vm->xef = xe_file_get(xef);
|
||||
|
||||
/* Record BO memory for VM pagetable created against client */
|
||||
@ -1796,12 +1789,15 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
|
||||
args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE);
|
||||
#endif
|
||||
|
||||
/* user id alloc must always be last in ioctl to prevent UAF */
|
||||
err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
|
||||
if (err)
|
||||
goto err_close_and_put;
|
||||
|
||||
args->vm_id = id;
|
||||
|
||||
return 0;
|
||||
|
||||
err_free_id:
|
||||
mutex_lock(&xef->vm.lock);
|
||||
xa_erase(&xef->vm.xa, id);
|
||||
mutex_unlock(&xef->vm.lock);
|
||||
err_close_and_put:
|
||||
xe_vm_close_and_put(vm);
|
||||
|
||||
|
@ -2395,7 +2395,7 @@ static int __maybe_unused stm32f7_i2c_runtime_suspend(struct device *dev)
|
||||
struct stm32f7_i2c_dev *i2c_dev = dev_get_drvdata(dev);
|
||||
|
||||
if (!stm32f7_i2c_is_slave_registered(i2c_dev))
|
||||
clk_disable_unprepare(i2c_dev->clk);
|
||||
clk_disable(i2c_dev->clk);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -2406,9 +2406,9 @@ static int __maybe_unused stm32f7_i2c_runtime_resume(struct device *dev)
|
||||
int ret;
|
||||
|
||||
if (!stm32f7_i2c_is_slave_registered(i2c_dev)) {
|
||||
ret = clk_prepare_enable(i2c_dev->clk);
|
||||
ret = clk_enable(i2c_dev->clk);
|
||||
if (ret) {
|
||||
dev_err(dev, "failed to prepare_enable clock\n");
|
||||
dev_err(dev, "failed to enable clock\n");
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
@ -258,7 +258,6 @@ static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr)
|
||||
int lcpu;
|
||||
|
||||
BUG_ON(dsr_bytes > GRU_NUM_KERNEL_DSR_BYTES);
|
||||
preempt_disable();
|
||||
bs = gru_lock_kernel_context(-1);
|
||||
lcpu = uv_blade_processor_id();
|
||||
*cb = bs->kernel_cb + lcpu * GRU_HANDLE_STRIDE;
|
||||
@ -272,7 +271,6 @@ static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr)
|
||||
static void gru_free_cpu_resources(void *cb, void *dsr)
|
||||
{
|
||||
gru_unlock_kernel_context(uv_numa_blade_id());
|
||||
preempt_enable();
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -937,10 +937,8 @@ vm_fault_t gru_fault(struct vm_fault *vmf)
|
||||
|
||||
again:
|
||||
mutex_lock(>s->ts_ctxlock);
|
||||
preempt_disable();
|
||||
|
||||
if (gru_check_context_placement(gts)) {
|
||||
preempt_enable();
|
||||
mutex_unlock(>s->ts_ctxlock);
|
||||
gru_unload_context(gts, 1);
|
||||
return VM_FAULT_NOPAGE;
|
||||
@ -949,7 +947,6 @@ again:
|
||||
if (!gts->ts_gru) {
|
||||
STAT(load_user_context);
|
||||
if (!gru_assign_gru_context(gts)) {
|
||||
preempt_enable();
|
||||
mutex_unlock(>s->ts_ctxlock);
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
schedule_timeout(GRU_ASSIGN_DELAY); /* true hack ZZZ */
|
||||
@ -965,7 +962,6 @@ again:
|
||||
vma->vm_page_prot);
|
||||
}
|
||||
|
||||
preempt_enable();
|
||||
mutex_unlock(>s->ts_ctxlock);
|
||||
|
||||
return VM_FAULT_NOPAGE;
|
||||
|
@ -65,7 +65,6 @@ static struct gru_tlb_global_handle *get_lock_tgh_handle(struct gru_state
|
||||
struct gru_tlb_global_handle *tgh;
|
||||
int n;
|
||||
|
||||
preempt_disable();
|
||||
if (uv_numa_blade_id() == gru->gs_blade_id)
|
||||
n = get_on_blade_tgh(gru);
|
||||
else
|
||||
@ -79,7 +78,6 @@ static struct gru_tlb_global_handle *get_lock_tgh_handle(struct gru_state
|
||||
static void get_unlock_tgh_handle(struct gru_tlb_global_handle *tgh)
|
||||
{
|
||||
unlock_tgh_handle(tgh);
|
||||
preempt_enable();
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -27,6 +27,7 @@
|
||||
#include <linux/phylink.h>
|
||||
#include <linux/etherdevice.h>
|
||||
#include <linux/if_bridge.h>
|
||||
#include <linux/if_vlan.h>
|
||||
#include <net/dsa.h>
|
||||
|
||||
#include "b53_regs.h"
|
||||
@ -224,6 +225,9 @@ static const struct b53_mib_desc b53_mibs_58xx[] = {
|
||||
|
||||
#define B53_MIBS_58XX_SIZE ARRAY_SIZE(b53_mibs_58xx)
|
||||
|
||||
#define B53_MAX_MTU_25 (1536 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN)
|
||||
#define B53_MAX_MTU (9720 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN)
|
||||
|
||||
static int b53_do_vlan_op(struct b53_device *dev, u8 op)
|
||||
{
|
||||
unsigned int i;
|
||||
@ -2254,20 +2258,25 @@ static int b53_change_mtu(struct dsa_switch *ds, int port, int mtu)
|
||||
bool allow_10_100;
|
||||
|
||||
if (is5325(dev) || is5365(dev))
|
||||
return -EOPNOTSUPP;
|
||||
return 0;
|
||||
|
||||
if (!dsa_is_cpu_port(ds, port))
|
||||
return 0;
|
||||
|
||||
enable_jumbo = (mtu >= JMS_MIN_SIZE);
|
||||
allow_10_100 = (dev->chip_id == BCM583XX_DEVICE_ID);
|
||||
enable_jumbo = (mtu > ETH_DATA_LEN);
|
||||
allow_10_100 = !is63xx(dev);
|
||||
|
||||
return b53_set_jumbo(dev, enable_jumbo, allow_10_100);
|
||||
}
|
||||
|
||||
static int b53_get_max_mtu(struct dsa_switch *ds, int port)
|
||||
{
|
||||
return JMS_MAX_SIZE;
|
||||
struct b53_device *dev = ds->priv;
|
||||
|
||||
if (is5325(dev) || is5365(dev))
|
||||
return B53_MAX_MTU_25;
|
||||
|
||||
return B53_MAX_MTU;
|
||||
}
|
||||
|
|
||||
|
@@ -6,6 +6,7 @@
 #include <linux/module.h>
 #include <linux/gpio/consumer.h>
 #include <linux/regmap.h>
+#include <linux/iopoll.h>
 #include <linux/mutex.h>
 #include <linux/mii.h>
 #include <linux/of.h>
@@ -839,6 +840,8 @@ static void lan9303_handle_reset(struct lan9303 *chip)
         if (!chip->reset_gpio)
                 return;
 
+        gpiod_set_value_cansleep(chip->reset_gpio, 1);
+
         if (chip->reset_duration != 0)
                 msleep(chip->reset_duration);
 
@@ -864,8 +867,34 @@ static int lan9303_disable_processing(struct lan9303 *chip)
 static int lan9303_check_device(struct lan9303 *chip)
 {
         int ret;
+        int err;
         u32 reg;
 
+        /* In I2C-managed configurations this polling loop will clash with
+         * switch's reading of EEPROM right after reset and this behaviour is
+         * not configurable. While lan9303_read() already has quite long retry
+         * timeout, seems not all cases are being detected as arbitration error.
+         *
+         * According to datasheet, EEPROM loader has 30ms timeout (in case of
+         * missing EEPROM).
+         *
+         * Loading of the largest supported EEPROM is expected to take at least
+         * 5.9s.
+         */
+        err = read_poll_timeout(lan9303_read, ret,
+                                !ret && reg & LAN9303_HW_CFG_READY,
+                                20000, 6000000, false,
+                                chip->regmap, LAN9303_HW_CFG, &reg);
+        if (ret) {
+                dev_err(chip->dev, "failed to read HW_CFG reg: %pe\n",
+                        ERR_PTR(ret));
+                return ret;
+        }
+        if (err) {
+                dev_err(chip->dev, "HW_CFG not ready: 0x%08x\n", reg);
+                return err;
+        }
+
         ret = lan9303_read(chip->regmap, LAN9303_CHIP_REV, &reg);
         if (ret) {
                 dev_err(chip->dev, "failed to read chip revision register: %d\n",
|
||||
* TPID is ETH_P_SJA1105, and the VLAN ID is the port pvid.
|
||||
*/
|
||||
ds->vlan_filtering_is_global = true;
|
||||
ds->untag_bridge_pvid = true;
|
||||
ds->fdb_isolation = true;
|
||||
ds->max_num_bridges = DSA_TAG_8021Q_MAX_NUM_BRIDGES;
|
||||
|
||||
|
@@ -318,11 +318,11 @@ static int adin1110_read_fifo(struct adin1110_port_priv *port_priv)
          * from the ADIN1110 frame header.
          */
         if (frame_size < ADIN1110_FRAME_HEADER_LEN + ADIN1110_FEC_LEN)
-                return ret;
+                return -EINVAL;
 
         round_len = adin1110_round_len(frame_size);
         if (round_len < 0)
-                return ret;
+                return -EINVAL;
 
         frame_size_no_fcs = frame_size - ADIN1110_FRAME_HEADER_LEN - ADIN1110_FEC_LEN;
         memset(priv->data, 0, ADIN1110_RD_HEADER_LEN);
@@ -105,10 +105,6 @@ static struct net_device * __init mvme147lance_probe(void)
         macaddr[3] = address&0xff;
         eth_hw_addr_set(dev, macaddr);
 
-        printk("%s: MVME147 at 0x%08lx, irq %d, Hardware Address %pM\n",
-               dev->name, dev->base_addr, MVME147_LANCE_IRQ,
-               dev->dev_addr);
-
         lp = netdev_priv(dev);
         lp->ram = __get_dma_pages(GFP_ATOMIC, 3); /* 32K */
         if (!lp->ram) {
@@ -138,6 +134,9 @@ static struct net_device * __init mvme147lance_probe(void)
                 return ERR_PTR(err);
         }
 
+        netdev_info(dev, "MVME147 at 0x%08lx, irq %d, Hardware Address %pM\n",
+                    dev->base_addr, MVME147_LANCE_IRQ, dev->dev_addr);
+
         return dev;
 }
 
@@ -1906,7 +1906,12 @@ static int ftgmac100_probe(struct platform_device *pdev)
                         goto err_phy_connect;
                 }
 
-                phydev = fixed_phy_register(PHY_POLL, &ncsi_phy_status, NULL);
+                phydev = fixed_phy_register(PHY_POLL, &ncsi_phy_status, np);
+                if (IS_ERR(phydev)) {
+                        dev_err(&pdev->dev, "failed to register fixed PHY device\n");
+                        err = PTR_ERR(phydev);
+                        goto err_phy_connect;
+                }
                 err = phy_connect_direct(netdev, phydev, ftgmac100_adjust_link,
                                          PHY_INTERFACE_MODE_MII);
                 if (err) {
@@ -1077,7 +1077,8 @@ fec_restart(struct net_device *ndev)
         u32 rcntl = OPT_FRAME_SIZE | 0x04;
         u32 ecntl = FEC_ECR_ETHEREN;
 
-        fec_ptp_save_state(fep);
+        if (fep->bufdesc_ex)
+                fec_ptp_save_state(fep);
 
         /* Whack a reset. We should wait for this.
          * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
@@ -1340,7 +1341,8 @@ fec_stop(struct net_device *ndev)
                 netdev_err(ndev, "Graceful transmit stop did not complete!\n");
         }
 
-        fec_ptp_save_state(fep);
+        if (fep->bufdesc_ex)
+                fec_ptp_save_state(fep);
 
         /* Whack a reset. We should wait for this.
          * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
@@ -578,7 +578,7 @@ static int mal_probe(struct platform_device *ofdev)
                 printk(KERN_ERR "%pOF: Support for 405EZ not enabled!\n",
                        ofdev->dev.of_node);
                 err = -ENODEV;
-                goto fail;
+                goto fail_unmap;
 #endif
         }
 
@@ -742,6 +742,8 @@ static void mal_remove(struct platform_device *ofdev)
 
         free_netdev(mal->dummy_dev);
 
+        dcr_unmap(mal->dcr_host, 0x100);
+
         dma_free_coherent(&ofdev->dev,
                           sizeof(struct mal_descriptor) *
                           (NUM_TX_BUFF * mal->num_tx_chans +
@@ -2473,9 +2473,11 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
         /* if we are going to send_subcrq_direct this then we need to
          * update the checksum before copying the data into ltb. Essentially
          * these packets force disable CSO so that we can guarantee that
-         * FW does not need header info and we can send direct.
+         * FW does not need header info and we can send direct. Also, vnic
+         * server must be able to xmit standard packets without header data
          */
-        if (!skb_is_gso(skb) && !ind_bufp->index && !netdev_xmit_more()) {
+        if (*hdrs == 0 && !skb_is_gso(skb) &&
+            !ind_bufp->index && !netdev_xmit_more()) {
                 use_scrq_send_direct = true;
                 if (skb->ip_summed == CHECKSUM_PARTIAL &&
                     skb_checksum_help(skb))
@@ -108,8 +108,8 @@ struct e1000_hw;
 #define E1000_DEV_ID_PCH_RPL_I219_V22 0x0DC8
 #define E1000_DEV_ID_PCH_MTP_I219_LM18 0x550A
 #define E1000_DEV_ID_PCH_MTP_I219_V18 0x550B
-#define E1000_DEV_ID_PCH_MTP_I219_LM19 0x550C
-#define E1000_DEV_ID_PCH_MTP_I219_V19 0x550D
+#define E1000_DEV_ID_PCH_ADP_I219_LM19 0x550C
+#define E1000_DEV_ID_PCH_ADP_I219_V19 0x550D
 #define E1000_DEV_ID_PCH_LNP_I219_LM20 0x550E
 #define E1000_DEV_ID_PCH_LNP_I219_V20 0x550F
 #define E1000_DEV_ID_PCH_LNP_I219_LM21 0x5510
@@ -7904,10 +7904,10 @@ static const struct pci_device_id e1000_pci_tbl[] = {
         { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_adp },
         { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_adp },
         { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_adp },
+        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM19), board_pch_adp },
+        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V19), board_pch_adp },
         { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_mtp },
         { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_mtp },
-        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_mtp },
-        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_mtp },
         { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_mtp },
         { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_mtp },
         { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_mtp },
@@ -1734,6 +1734,7 @@ struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
         struct hlist_node *h;
         int bkt;
 
+        lockdep_assert_held(&vsi->mac_filter_hash_lock);
         if (vsi->info.pvid)
                 return i40e_add_filter(vsi, macaddr,
                                        le16_to_cpu(vsi->info.pvid));
Some files were not shown because too many files have changed in this diff.