Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR (net-6.14-rc6).

Conflicts:

net/ethtool/cabletest.c
  2bcf4772e45a ("net: ethtool: try to protect all callback with netdev instance lock")
  637399bf7e77 ("net: ethtool: netlink: Allow NULL nlattrs when getting a phy_device")

No adjacent changes.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 2525e16a2b
Author: Jakub Kicinski <kuba@kernel.org>
Date:   2025-03-06 13:01:27 -08:00

206 files changed, 2554 insertions(+), 1192 deletions(-)


@@ -88,7 +88,6 @@ Antonio Quartulli <antonio@mandelbit.com> <antonio@open-mesh.com>
Antonio Quartulli <antonio@mandelbit.com> <antonio.quartulli@open-mesh.com>
Antonio Quartulli <antonio@mandelbit.com> <ordex@autistici.org>
Antonio Quartulli <antonio@mandelbit.com> <ordex@ritirata.org>
Antonio Quartulli <antonio@mandelbit.com> <antonio@openvpn.net>
Antonio Quartulli <antonio@mandelbit.com> <a@unstable.cc>
Anup Patel <anup@brainfault.org> <anup.patel@wdc.com>
Archit Taneja <archit@ti.com>
@@ -522,6 +521,7 @@ Nadav Amit <nadav.amit@gmail.com> <namit@cs.technion.ac.il>
Nadia Yvette Chambers <nyc@holomorphy.com> William Lee Irwin III <wli@holomorphy.com>
Naoya Horiguchi <nao.horiguchi@gmail.com> <n-horiguchi@ah.jp.nec.com>
Naoya Horiguchi <nao.horiguchi@gmail.com> <naoya.horiguchi@nec.com>
Natalie Vock <natalie.vock@gmx.de> <friedrich.vock@gmx.de>
Nathan Chancellor <nathan@kernel.org> <natechancellor@gmail.com>
Naveen N Rao <naveen@kernel.org> <naveen.n.rao@linux.ibm.com>
Naveen N Rao <naveen@kernel.org> <naveen.n.rao@linux.vnet.ibm.com>
@@ -613,6 +613,8 @@ Richard Leitner <richard.leitner@linux.dev> <me@g0hl1n.net>
Richard Leitner <richard.leitner@linux.dev> <richard.leitner@skidata.com>
Robert Foss <rfoss@kernel.org> <robert.foss@linaro.org>
Rocky Liao <quic_rjliao@quicinc.com> <rjliao@codeaurora.org>
Rodrigo Siqueira <siqueira@igalia.com> <rodrigosiqueiramelo@gmail.com>
Rodrigo Siqueira <siqueira@igalia.com> <Rodrigo.Siqueira@amd.com>
Roman Gushchin <roman.gushchin@linux.dev> <guro@fb.com>
Roman Gushchin <roman.gushchin@linux.dev> <guroan@gmail.com>
Roman Gushchin <roman.gushchin@linux.dev> <klamm@yandex-team.ru>


@@ -63,8 +63,8 @@ what id ``k11000`` corresponds to in the second or third idmapping. The
straightforward algorithm to use is to apply the inverse of the first idmapping,
mapping ``k11000`` up to ``u1000``. Afterwards, we can map ``u1000`` down using
either the second idmapping mapping or third idmapping mapping. The second
idmapping would map ``u1000`` down to ``21000``. The third idmapping would map
``u1000`` down to ``u31000``.
idmapping would map ``u1000`` down to ``k21000``. The third idmapping would map
``u1000`` down to ``k31000``.
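
To make the cross-mapping concrete, here is a minimal userspace C sketch of the
algorithm described above. The range values are assumptions chosen to reproduce
the ``k11000``/``u1000`` example; it is illustrative only, not kernel code:

/* Illustrative sketch: an idmapping modelled as (first userspace id,
 * first kernel id, range length). Assumed ranges of length 1:
 * u1000:k11000, u1000:k21000 and u1000:k31000.
 */
#include <stdio.h>

struct idmap { unsigned int u_first, k_first, len; };

/* Inverse direction: map a kernel id up to a userspace id. */
static long map_up(struct idmap m, unsigned int kid)
{
	if (kid < m.k_first || kid >= m.k_first + m.len)
		return -1;
	return m.u_first + (kid - m.k_first);
}

/* Forward direction: map a userspace id down to a kernel id. */
static long map_down(struct idmap m, unsigned int uid)
{
	if (uid < m.u_first || uid >= m.u_first + m.len)
		return -1;
	return m.k_first + (uid - m.u_first);
}

int main(void)
{
	struct idmap first = { 1000, 11000, 1 };
	struct idmap second = { 1000, 21000, 1 };
	struct idmap third = { 1000, 31000, 1 };
	long u = map_up(first, 11000);	/* k11000 -> u1000 */

	/* prints: u1000 -> k21000 / k31000 */
	printf("u%ld -> k%ld / k%ld\n", u,
	       map_down(second, (unsigned int)u),
	       map_down(third, (unsigned int)u));
	return 0;
}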
If we were given the same task for the following three idmappings::


@@ -1046,14 +1046,14 @@ F: drivers/crypto/ccp/hsti.*
AMD DISPLAY CORE
M: Harry Wentland <harry.wentland@amd.com>
M: Leo Li <sunpeng.li@amd.com>
M: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
R: Rodrigo Siqueira <siqueira@igalia.com>
L: amd-gfx@lists.freedesktop.org
S: Supported
T: git https://gitlab.freedesktop.org/agd5f/linux.git
F: drivers/gpu/drm/amd/display/
AMD DISPLAY CORE - DML
M: Chaitanya Dhere <chaitanya.dhere@amd.com>
M: Austin Zheng <austin.zheng@amd.com>
M: Jun Lei <jun.lei@amd.com>
S: Supported
F: drivers/gpu/drm/amd/display/dc/dml/
@@ -5775,6 +5775,7 @@ X: drivers/clk/clkdev.c
COMMON INTERNET FILE SYSTEM CLIENT (CIFS and SMB3)
M: Steve French <sfrench@samba.org>
M: Steve French <smfrench@gmail.com>
R: Paulo Alcantara <pc@manguebit.com> (DFS, global name space)
R: Ronnie Sahlberg <ronniesahlberg@gmail.com> (directory leases, sparse files)
R: Shyam Prasad N <sprasad@microsoft.com> (multichannel)
@@ -5926,6 +5927,17 @@ F: tools/testing/selftests/cgroup/test_cpuset.c
F: tools/testing/selftests/cgroup/test_cpuset_prs.sh
F: tools/testing/selftests/cgroup/test_cpuset_v1_base.sh
CONTROL GROUP - DEVICE MEMORY CONTROLLER (DMEM)
M: Maarten Lankhorst <dev@lankhorst.se>
M: Maxime Ripard <mripard@kernel.org>
M: Natalie Vock <natalie.vock@gmx.de>
L: cgroups@vger.kernel.org
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: include/linux/cgroup_dmem.h
F: kernel/cgroup/dmem.c
CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG)
M: Johannes Weiner <hannes@cmpxchg.org>
M: Michal Hocko <mhocko@kernel.org>
@@ -12645,7 +12657,9 @@ F: tools/testing/selftests/
KERNEL SMB3 SERVER (KSMBD)
M: Namjae Jeon <linkinjeon@kernel.org>
M: Namjae Jeon <linkinjeon@samba.org>
M: Steve French <sfrench@samba.org>
M: Steve French <smfrench@gmail.com>
R: Sergey Senozhatsky <senozhatsky@chromium.org>
R: Tom Talpey <tom@talpey.com>
L: linux-cifs@vger.kernel.org
@@ -19657,7 +19671,6 @@ F: drivers/net/wireless/quantenna
RADEON and AMDGPU DRM DRIVERS
M: Alex Deucher <alexander.deucher@amd.com>
M: Christian König <christian.koenig@amd.com>
M: Xinhui Pan <Xinhui.Pan@amd.com>
L: amd-gfx@lists.freedesktop.org
S: Supported
B: https://gitlab.freedesktop.org/drm/amd/-/issues


@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 14
SUBLEVEL = 0
EXTRAVERSION = -rc4
EXTRAVERSION = -rc5
NAME = Baby Opossum Posse
# *DOCUMENTATION*


@@ -42,8 +42,8 @@ extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t pte, int dirty);
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep);
extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned long sz);
#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
extern void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep);
@@ -76,12 +76,22 @@ static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
{
unsigned long stride = huge_page_size(hstate_vma(vma));
if (stride == PMD_SIZE)
__flush_tlb_range(vma, start, end, stride, false, 2);
else if (stride == PUD_SIZE)
__flush_tlb_range(vma, start, end, stride, false, 1);
else
__flush_tlb_range(vma, start, end, PAGE_SIZE, false, 0);
switch (stride) {
#ifndef __PAGETABLE_PMD_FOLDED
case PUD_SIZE:
__flush_tlb_range(vma, start, end, PUD_SIZE, false, 1);
break;
#endif
case CONT_PMD_SIZE:
case PMD_SIZE:
__flush_tlb_range(vma, start, end, PMD_SIZE, false, 2);
break;
case CONT_PTE_SIZE:
__flush_tlb_range(vma, start, end, PAGE_SIZE, false, 3);
break;
default:
__flush_tlb_range(vma, start, end, PAGE_SIZE, false, TLBI_TTL_UNKNOWN);
}
}
#endif /* __ASM_HUGETLB_H */


@@ -119,7 +119,7 @@
#define TCR_EL2_IRGN0_MASK TCR_IRGN0_MASK
#define TCR_EL2_T0SZ_MASK 0x3f
#define TCR_EL2_MASK (TCR_EL2_TG0_MASK | TCR_EL2_SH0_MASK | \
TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK | TCR_EL2_T0SZ_MASK)
TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK)
/* VTCR_EL2 Registers bits */
#define VTCR_EL2_DS TCR_EL2_DS


@@ -1259,7 +1259,7 @@ int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
extern unsigned int __ro_after_init kvm_arm_vmid_bits;
int __init kvm_arm_vmid_alloc_init(void);
void __init kvm_arm_vmid_alloc_free(void);
bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
void kvm_arm_vmid_clear_active(void);
static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)


@@ -559,6 +559,16 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
mmu = vcpu->arch.hw_mmu;
last_ran = this_cpu_ptr(mmu->last_vcpu_ran);
/*
* Ensure a VMID is allocated for the MMU before programming VTTBR_EL2,
* which happens eagerly in VHE.
*
* Also, the VMID allocator only preserves VMIDs that are active at the
* time of rollover, so KVM might need to grab a new VMID for the MMU if
* this is called from kvm_sched_in().
*/
kvm_arm_vmid_update(&mmu->vmid);
/*
* We guarantee that both TLBs and I-cache are private to each
* vcpu. If detecting that a vcpu from the same VM has
@@ -1138,18 +1148,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
*/
preempt_disable();
/*
* The VMID allocator only tracks active VMIDs per
* physical CPU, and therefore the VMID allocated may not be
* preserved on VMID roll-over if the task was preempted,
* making a thread's VMID inactive. So we need to call
* kvm_arm_vmid_update() in non-premptible context.
*/
if (kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid) &&
has_vhe())
__load_stage2(vcpu->arch.hw_mmu,
vcpu->arch.hw_mmu->arch);
kvm_pmu_flush_hwstate(vcpu);
local_irq_disable();
@@ -1980,7 +1978,7 @@ static int kvm_init_vector_slots(void)
static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
{
struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
unsigned long tcr, ips;
unsigned long tcr;
/*
* Calculate the raw per-cpu offset without a translation from the
@@ -1994,19 +1992,18 @@ static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
params->mair_el2 = read_sysreg(mair_el1);
tcr = read_sysreg(tcr_el1);
ips = FIELD_GET(TCR_IPS_MASK, tcr);
if (cpus_have_final_cap(ARM64_KVM_HVHE)) {
tcr &= ~(TCR_HD | TCR_HA | TCR_A1 | TCR_T0SZ_MASK);
tcr |= TCR_EPD1_MASK;
} else {
unsigned long ips = FIELD_GET(TCR_IPS_MASK, tcr);
tcr &= TCR_EL2_MASK;
tcr |= TCR_EL2_RES1;
tcr |= TCR_EL2_RES1 | FIELD_PREP(TCR_EL2_PS_MASK, ips);
if (lpa2_is_enabled())
tcr |= TCR_EL2_DS;
}
tcr &= ~TCR_T0SZ_MASK;
tcr |= TCR_T0SZ(hyp_va_bits);
tcr &= ~TCR_EL2_PS_MASK;
tcr |= FIELD_PREP(TCR_EL2_PS_MASK, ips);
if (lpa2_is_enabled())
tcr |= TCR_EL2_DS;
params->tcr_el2 = tcr;
params->pgd_pa = kvm_mmu_get_httbr();


@@ -135,11 +135,10 @@ void kvm_arm_vmid_clear_active(void)
atomic64_set(this_cpu_ptr(&active_vmids), VMID_ACTIVE_INVALID);
}
bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
{
unsigned long flags;
u64 vmid, old_active_vmid;
bool updated = false;
vmid = atomic64_read(&kvm_vmid->id);
@@ -157,21 +156,17 @@ bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
if (old_active_vmid != 0 && vmid_gen_match(vmid) &&
0 != atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_vmids),
old_active_vmid, vmid))
return false;
return;
raw_spin_lock_irqsave(&cpu_vmid_lock, flags);
/* Check that our VMID belongs to the current generation. */
vmid = atomic64_read(&kvm_vmid->id);
if (!vmid_gen_match(vmid)) {
if (!vmid_gen_match(vmid))
vmid = new_vmid(kvm_vmid);
updated = true;
}
atomic64_set(this_cpu_ptr(&active_vmids), vmid);
raw_spin_unlock_irqrestore(&cpu_vmid_lock, flags);
return updated;
}
/*


@@ -100,20 +100,11 @@ static int find_num_contig(struct mm_struct *mm, unsigned long addr,
static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
{
int contig_ptes = 0;
int contig_ptes = 1;
*pgsize = size;
switch (size) {
#ifndef __PAGETABLE_PMD_FOLDED
case PUD_SIZE:
if (pud_sect_supported())
contig_ptes = 1;
break;
#endif
case PMD_SIZE:
contig_ptes = 1;
break;
case CONT_PMD_SIZE:
*pgsize = PMD_SIZE;
contig_ptes = CONT_PMDS;
@@ -122,6 +113,8 @@ static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
*pgsize = PAGE_SIZE;
contig_ptes = CONT_PTES;
break;
default:
WARN_ON(!__hugetlb_valid_size(size));
}
return contig_ptes;
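
For reference, a sketch of what the rewritten helper returns on a 4K-granule
arm64 configuration, where CONT_PTES and CONT_PMDS are both 16 (the exact
values are granule-dependent; treat these numbers as illustrative assumptions):

/* size                 -> *pgsize,        return value
 * CONT_PTE_SIZE (64K)  -> PAGE_SIZE (4K),  16
 * PMD_SIZE      (2M)   -> 2M,              1
 * CONT_PMD_SIZE (32M)  -> PMD_SIZE (2M),   16
 * PUD_SIZE      (1G)   -> 1G,              1
 * anything else        -> size,            1 (plus a WARN_ON if the size
 *                                             is not a valid hugepage size)
 */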
@@ -163,24 +156,23 @@ static pte_t get_clear_contig(struct mm_struct *mm,
unsigned long pgsize,
unsigned long ncontig)
{
pte_t orig_pte = __ptep_get(ptep);
unsigned long i;
pte_t pte, tmp_pte;
bool present;
for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) {
pte_t pte = __ptep_get_and_clear(mm, addr, ptep);
/*
* If HW_AFDBM is enabled, then the HW could turn on
* the dirty or accessed bit for any page in the set,
* so check them all.
*/
if (pte_dirty(pte))
orig_pte = pte_mkdirty(orig_pte);
if (pte_young(pte))
orig_pte = pte_mkyoung(orig_pte);
pte = __ptep_get_and_clear(mm, addr, ptep);
present = pte_present(pte);
while (--ncontig) {
ptep++;
addr += pgsize;
tmp_pte = __ptep_get_and_clear(mm, addr, ptep);
if (present) {
if (pte_dirty(tmp_pte))
pte = pte_mkdirty(pte);
if (pte_young(tmp_pte))
pte = pte_mkyoung(pte);
}
}
return orig_pte;
return pte;
}
static pte_t get_clear_contig_flush(struct mm_struct *mm,
@@ -396,18 +388,13 @@ void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
__pte_clear(mm, addr, ptep);
}
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned long sz)
{
int ncontig;
size_t pgsize;
pte_t orig_pte = __ptep_get(ptep);
if (!pte_cont(orig_pte))
return __ptep_get_and_clear(mm, addr, ptep);
ncontig = find_num_contig(mm, addr, ptep, &pgsize);
ncontig = num_contig_ptes(sz, &pgsize);
return get_clear_contig(mm, addr, ptep, pgsize, ncontig);
}
@@ -549,6 +536,8 @@ bool __init arch_hugetlb_valid_size(unsigned long size)
pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
unsigned long psize = huge_page_size(hstate_vma(vma));
if (alternative_has_cap_unlikely(ARM64_WORKAROUND_2645198)) {
/*
* Break-before-make (BBM) is required for all user space mappings
@@ -558,7 +547,7 @@ pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr
if (pte_user_exec(__ptep_get(ptep)))
return huge_ptep_clear_flush(vma, addr, ptep);
}
return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, psize);
}
void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,


@@ -279,12 +279,7 @@ void __init arm64_memblock_init(void)
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
extern u16 memstart_offset_seed;
/*
* Use the sanitised version of id_aa64mmfr0_el1 so that linear
* map randomization can be enabled by shrinking the IPA space.
*/
u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
int parange = cpuid_feature_extract_unsigned_field(
mmfr0, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
s64 range = linear_region_size -


@@ -36,7 +36,8 @@ static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
unsigned long addr, pte_t *ptep,
unsigned long sz)
{
pte_t clear;
pte_t pte = ptep_get(ptep);
@@ -51,8 +52,9 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
pte_t pte;
unsigned long sz = huge_page_size(hstate_vma(vma));
pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, sz);
flush_tlb_page(vma, addr);
return pte;
}


@@ -468,6 +468,8 @@ static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
Elf_Sym *sym, const char *symname))
{
int i;
struct section *extab_sec = sec_lookup("__ex_table");
int extab_index = extab_sec ? extab_sec - secs : -1;
/* Walk through the relocations */
for (i = 0; i < ehdr.e_shnum; i++) {
@@ -480,6 +482,9 @@ static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
if (sec->shdr.sh_type != SHT_REL_TYPE)
continue;
if (sec->shdr.sh_info == extab_index)
continue;
sec_symtab = sec->link;
sec_applies = &secs[sec->shdr.sh_info];
if (!(sec_applies->shdr.sh_flags & SHF_ALLOC))


@@ -27,7 +27,8 @@ static inline int prepare_hugepage_range(struct file *file,
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
unsigned long addr, pte_t *ptep,
unsigned long sz)
{
pte_t clear;
pte_t pte = *ptep;
@@ -42,13 +43,14 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
pte_t pte;
unsigned long sz = huge_page_size(hstate_vma(vma));
/*
* clear the huge pte entry firstly, so that the other smp threads will
* not get old pte entry after finishing flush_tlb_page and before
* setting new huge pte entry
*/
pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, sz);
flush_tlb_page(vma, addr);
return pte;
}


@@ -10,7 +10,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep);
pte_t *ptep, unsigned long sz);
#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,


@@ -126,7 +126,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
pte_t *ptep, unsigned long sz)
{
pte_t entry;


@@ -45,7 +45,8 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
unsigned long addr, pte_t *ptep,
unsigned long sz)
{
return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
}
@@ -55,8 +56,9 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
pte_t pte;
unsigned long sz = huge_page_size(hstate_vma(vma));
pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, sz);
flush_hugetlb_page(vma, addr);
return pte;
}


@@ -28,7 +28,8 @@ void set_huge_pte_at(struct mm_struct *mm,
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep);
unsigned long addr, pte_t *ptep,
unsigned long sz);
#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,


@@ -974,7 +974,6 @@ int kvm_riscv_vcpu_aia_imsic_inject(struct kvm_vcpu *vcpu,
if (imsic->vsfile_cpu >= 0) {
writel(iid, imsic->vsfile_va + IMSIC_MMIO_SETIPNUM_LE);
kvm_vcpu_kick(vcpu);
} else {
eix = &imsic->swfile->eix[iid / BITS_PER_TYPE(u64)];
set_bit(iid & (BITS_PER_TYPE(u64) - 1), eix->eip);


@@ -9,6 +9,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/wordpart.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_sbi.h>
@@ -79,12 +80,12 @@ static int kvm_sbi_hsm_vcpu_get_status(struct kvm_vcpu *vcpu)
target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, target_vcpuid);
if (!target_vcpu)
return SBI_ERR_INVALID_PARAM;
if (!kvm_riscv_vcpu_stopped(target_vcpu))
return SBI_HSM_STATE_STARTED;
else if (vcpu->stat.generic.blocking)
if (kvm_riscv_vcpu_stopped(target_vcpu))
return SBI_HSM_STATE_STOPPED;
else if (target_vcpu->stat.generic.blocking)
return SBI_HSM_STATE_SUSPENDED;
else
return SBI_HSM_STATE_STOPPED;
return SBI_HSM_STATE_STARTED;
}
static int kvm_sbi_ext_hsm_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
@@ -109,7 +110,7 @@ static int kvm_sbi_ext_hsm_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
}
return 0;
case SBI_EXT_HSM_HART_SUSPEND:
switch (cp->a0) {
switch (lower_32_bits(cp->a0)) {
case SBI_HSM_SUSPEND_RET_DEFAULT:
kvm_riscv_vcpu_wfi(vcpu);
break;


@@ -21,7 +21,7 @@ static int kvm_sbi_ext_time_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
u64 next_cycle;
if (cp->a6 != SBI_EXT_TIME_SET_TIMER) {
retdata->err_val = SBI_ERR_INVALID_PARAM;
retdata->err_val = SBI_ERR_NOT_SUPPORTED;
return 0;
}
@@ -51,9 +51,10 @@ static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
unsigned long hmask = cp->a0;
unsigned long hbase = cp->a1;
unsigned long hart_bit = 0, sentmask = 0;
if (cp->a6 != SBI_EXT_IPI_SEND_IPI) {
retdata->err_val = SBI_ERR_INVALID_PARAM;
retdata->err_val = SBI_ERR_NOT_SUPPORTED;
return 0;
}
@@ -62,15 +63,23 @@ static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
if (hbase != -1UL) {
if (tmp->vcpu_id < hbase)
continue;
if (!(hmask & (1UL << (tmp->vcpu_id - hbase))))
hart_bit = tmp->vcpu_id - hbase;
if (hart_bit >= __riscv_xlen)
goto done;
if (!(hmask & (1UL << hart_bit)))
continue;
}
ret = kvm_riscv_vcpu_set_interrupt(tmp, IRQ_VS_SOFT);
if (ret < 0)
break;
sentmask |= 1UL << hart_bit;
kvm_riscv_vcpu_pmu_incr_fw(tmp, SBI_PMU_FW_IPI_RCVD);
}
done:
if (hbase != -1UL && (hmask ^ sentmask))
retdata->err_val = SBI_ERR_INVALID_PARAM;
return ret;
}
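
For clarity on the encoding validated here: hbase names the first hart in the
set and bit i of hmask selects hart hbase + i, with hbase == -1UL meaning "all
harts". A hedged userspace sketch of the selection rule, including the bounds
check the fix adds (names are illustrative, not kernel code):

#include <stdio.h>

/* Return 1 if @vcpu_id is selected by the (@hmask, @hbase) hart set.
 * Bits at or beyond XLEN can never match and must not be shifted,
 * since 1UL << 64 is undefined behaviour on a 64-bit build.
 */
static int hart_selected(unsigned long hmask, unsigned long hbase,
			 unsigned long vcpu_id, unsigned int xlen)
{
	unsigned long bit;

	if (hbase == -1UL)	/* all harts */
		return 1;
	if (vcpu_id < hbase)
		return 0;
	bit = vcpu_id - hbase;
	if (bit >= xlen)	/* out of range: reject instead of shifting */
		return 0;
	return !!(hmask & (1UL << bit));
}

int main(void)
{
	/* hmask = 0b101, hbase = 4 selects harts 4 and 6 only */
	for (unsigned long id = 0; id < 8; id++)
		printf("hart %lu: %d\n", id, hart_selected(0x5, 4, id, 64));
	return 0;
}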


@@ -4,6 +4,7 @@
*/
#include <linux/kvm_host.h>
#include <linux/wordpart.h>
#include <asm/kvm_vcpu_sbi.h>
#include <asm/sbi.h>
@@ -19,7 +20,7 @@ static int kvm_sbi_ext_susp_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
switch (funcid) {
case SBI_EXT_SUSP_SYSTEM_SUSPEND:
if (cp->a0 != SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM) {
if (lower_32_bits(cp->a0) != SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM) {
retdata->err_val = SBI_ERR_INVALID_PARAM;
return 0;
}


@@ -293,7 +293,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep)
pte_t *ptep, unsigned long sz)
{
pte_t orig_pte = ptep_get(ptep);
int pte_num;


@@ -25,8 +25,16 @@ void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
#define __HAVE_ARCH_HUGE_PTEP_GET
pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
pte_t __huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep);
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep,
unsigned long sz)
{
return __huge_ptep_get_and_clear(mm, addr, ptep);
}
static inline void arch_clear_hugetlb_flags(struct folio *folio)
{
@@ -48,7 +56,7 @@ static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
return huge_ptep_get_and_clear(vma->vm_mm, address, ptep);
return __huge_ptep_get_and_clear(vma->vm_mm, address, ptep);
}
#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
@@ -59,7 +67,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
int changed = !pte_same(huge_ptep_get(vma->vm_mm, addr, ptep), pte);
if (changed) {
huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
__huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
__set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
return changed;
@@ -69,7 +77,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
pte_t pte = huge_ptep_get_and_clear(mm, addr, ptep);
pte_t pte = __huge_ptep_get_and_clear(mm, addr, ptep);
__set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte));
}


@@ -188,8 +188,8 @@ pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
return __rste_to_pte(pte_val(*ptep));
}
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
pte_t __huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
pte_t pte = huge_ptep_get(mm, addr, ptep);
pmd_t *pmdp = (pmd_t *) ptep;


@@ -20,7 +20,7 @@ void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep);
pte_t *ptep, unsigned long sz);
#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,


@@ -260,7 +260,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
}
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
pte_t *ptep, unsigned long sz)
{
unsigned int i, nptes, orig_shift, shift;
unsigned long size;


@@ -1341,6 +1341,7 @@ config X86_REBOOTFIXUPS
config MICROCODE
def_bool y
depends on CPU_SUP_AMD || CPU_SUP_INTEL
select CRYPTO_LIB_SHA256 if CPU_SUP_AMD
config MICROCODE_INITRD32
def_bool y


@@ -190,6 +190,7 @@ static __always_inline bool int80_is_external(void)
/**
* do_int80_emulation - 32-bit legacy syscall C entry from asm
* @regs: syscall arguments in struct pt_args on the stack.
*
* This entry point can be used by 32-bit and 64-bit programs to perform
* 32-bit system calls. Instances of INT $0x80 can be found inline in


@@ -628,7 +628,7 @@ int x86_pmu_hw_config(struct perf_event *event)
if (event->attr.type == event->pmu->type)
event->hw.config |= x86_pmu_get_event_config(event);
if (event->attr.sample_period && x86_pmu.limit_period) {
if (!event->attr.freq && x86_pmu.limit_period) {
s64 left = event->attr.sample_period;
x86_pmu.limit_period(event, &left);
if (left > event->attr.sample_period)


@@ -3952,6 +3952,85 @@ static inline bool intel_pmu_has_cap(struct perf_event *event, int idx)
return test_bit(idx, (unsigned long *)&intel_cap->capabilities);
}
static u64 intel_pmu_freq_start_period(struct perf_event *event)
{
int type = event->attr.type;
u64 config, factor;
s64 start;
/*
* The 127 is the lowest possible recommended SAV (sample after value)
* for a 4000 freq (default freq), according to the event list JSON file.
* Also, assume the workload is idle 50% time.
*/
factor = 64 * 4000;
if (type != PERF_TYPE_HARDWARE && type != PERF_TYPE_HW_CACHE)
goto end;
/*
* The estimation of the start period in the freq mode is
* based on the below assumption.
*
* For a cycles or an instructions event, 1GHZ of the
* underlying platform, 1 IPC. The workload is idle 50% time.
* The start period = 1,000,000,000 * 1 / freq / 2.
* = 500,000,000 / freq
*
* Usually, the branch-related events occur less than the
* instructions event. According to the Intel event list JSON
* file, the SAV (sample after value) of a branch-related event
* is usually 1/4 of an instruction event.
* The start period of branch-related events = 125,000,000 / freq.
*
* The cache-related events occurs even less. The SAV is usually
* 1/20 of an instruction event.
* The start period of cache-related events = 25,000,000 / freq.
*/
config = event->attr.config & PERF_HW_EVENT_MASK;
if (type == PERF_TYPE_HARDWARE) {
switch (config) {
case PERF_COUNT_HW_CPU_CYCLES:
case PERF_COUNT_HW_INSTRUCTIONS:
case PERF_COUNT_HW_BUS_CYCLES:
case PERF_COUNT_HW_STALLED_CYCLES_FRONTEND:
case PERF_COUNT_HW_STALLED_CYCLES_BACKEND:
case PERF_COUNT_HW_REF_CPU_CYCLES:
factor = 500000000;
break;
case PERF_COUNT_HW_BRANCH_INSTRUCTIONS:
case PERF_COUNT_HW_BRANCH_MISSES:
factor = 125000000;
break;
case PERF_COUNT_HW_CACHE_REFERENCES:
case PERF_COUNT_HW_CACHE_MISSES:
factor = 25000000;
break;
default:
goto end;
}
}
if (type == PERF_TYPE_HW_CACHE)
factor = 25000000;
end:
/*
* Usually, a prime or a number with less factors (close to prime)
* is chosen as an SAV, which makes it less likely that the sampling
* period synchronizes with some periodic event in the workload.
* Minus 1 to make it at least avoiding values near power of twos
* for the default freq.
*/
start = DIV_ROUND_UP_ULL(factor, event->attr.sample_freq) - 1;
if (start > x86_pmu.max_period)
start = x86_pmu.max_period;
if (x86_pmu.limit_period)
x86_pmu.limit_period(event, &start);
return start;
}
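
A quick arithmetic check of the start periods derived above (a standalone
sketch, not part of the patch; DIV_ROUND_UP_ULL is open-coded):

#include <stdio.h>

int main(void)
{
	unsigned long long freq = 4000;	/* default freq */
	unsigned long long factors[] = {
		500000000ULL,	/* cycles, instructions */
		125000000ULL,	/* branch-related events */
		25000000ULL,	/* cache-related events */
	};

	for (int i = 0; i < 3; i++) {
		/* DIV_ROUND_UP_ULL(factor, freq) - 1, open-coded */
		unsigned long long start = (factors[i] + freq - 1) / freq - 1;

		printf("factor %llu -> start period %llu\n",
		       factors[i], start);
	}
	return 0;	/* prints 124999, 31249 and 6249 */
}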
static int intel_pmu_hw_config(struct perf_event *event)
{
int ret = x86_pmu_hw_config(event);
@@ -3963,6 +4042,12 @@ static int intel_pmu_hw_config(struct perf_event *event)
if (ret)
return ret;
if (event->attr.freq && event->attr.sample_freq) {
event->hw.sample_period = intel_pmu_freq_start_period(event);
event->hw.last_period = event->hw.sample_period;
local64_set(&event->hw.period_left, event->hw.sample_period);
}
if (event->attr.precise_ip) {
if ((event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_FIXED_VLBR_EVENT)
return -EINVAL;


@@ -879,6 +879,7 @@ static const struct x86_cpu_id rapl_model_match[] __initconst = {
X86_MATCH_VFM(INTEL_METEORLAKE_L, &model_skl),
X86_MATCH_VFM(INTEL_ARROWLAKE_H, &model_skl),
X86_MATCH_VFM(INTEL_ARROWLAKE, &model_skl),
X86_MATCH_VFM(INTEL_ARROWLAKE_U, &model_skl),
X86_MATCH_VFM(INTEL_LUNARLAKE_M, &model_skl),
{},
};


@@ -153,8 +153,8 @@ static void geode_configure(void)
u8 ccr3;
local_irq_save(flags);
/* Suspend on halt power saving and enable #SUSP pin */
setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);
/* Suspend on halt power saving */
setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x08);
ccr3 = getCx86(CX86_CCR3);
setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */


@@ -23,14 +23,18 @@
#include <linux/earlycpio.h>
#include <linux/firmware.h>
#include <linux/bsearch.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/initrd.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <crypto/sha2.h>
#include <asm/microcode.h>
#include <asm/processor.h>
#include <asm/cmdline.h>
#include <asm/setup.h>
#include <asm/cpu.h>
#include <asm/msr.h>
@@ -145,6 +149,107 @@ ucode_path[] __maybe_unused = "kernel/x86/microcode/AuthenticAMD.bin";
*/
static u32 bsp_cpuid_1_eax __ro_after_init;
static bool sha_check = true;
struct patch_digest {
u32 patch_id;
u8 sha256[SHA256_DIGEST_SIZE];
};
#include "amd_shas.c"
static int cmp_id(const void *key, const void *elem)
{
struct patch_digest *pd = (struct patch_digest *)elem;
u32 patch_id = *(u32 *)key;
if (patch_id == pd->patch_id)
return 0;
else if (patch_id < pd->patch_id)
return -1;
else
return 1;
}
static bool need_sha_check(u32 cur_rev)
{
switch (cur_rev >> 8) {
case 0x80012: return cur_rev <= 0x800126f; break;
case 0x83010: return cur_rev <= 0x830107c; break;
case 0x86001: return cur_rev <= 0x860010e; break;
case 0x86081: return cur_rev <= 0x8608108; break;
case 0x87010: return cur_rev <= 0x8701034; break;
case 0x8a000: return cur_rev <= 0x8a0000a; break;
case 0xa0011: return cur_rev <= 0xa0011da; break;
case 0xa0012: return cur_rev <= 0xa001243; break;
case 0xa1011: return cur_rev <= 0xa101153; break;
case 0xa1012: return cur_rev <= 0xa10124e; break;
case 0xa1081: return cur_rev <= 0xa108109; break;
case 0xa2010: return cur_rev <= 0xa20102f; break;
case 0xa2012: return cur_rev <= 0xa201212; break;
case 0xa6012: return cur_rev <= 0xa60120a; break;
case 0xa7041: return cur_rev <= 0xa704109; break;
case 0xa7052: return cur_rev <= 0xa705208; break;
case 0xa7080: return cur_rev <= 0xa708009; break;
case 0xa70c0: return cur_rev <= 0xa70C009; break;
case 0xaa002: return cur_rev <= 0xaa00218; break;
default: break;
}
pr_info("You should not be seeing this. Please send the following couple of lines to x86-<at>-kernel.org\n");
pr_info("CPUID(1).EAX: 0x%x, current revision: 0x%x\n", bsp_cpuid_1_eax, cur_rev);
return true;
}
static bool verify_sha256_digest(u32 patch_id, u32 cur_rev, const u8 *data, unsigned int len)
{
struct patch_digest *pd = NULL;
u8 digest[SHA256_DIGEST_SIZE];
struct sha256_state s;
int i;
if (x86_family(bsp_cpuid_1_eax) < 0x17 ||
x86_family(bsp_cpuid_1_eax) > 0x19)
return true;
if (!need_sha_check(cur_rev))
return true;
if (!sha_check)
return true;
pd = bsearch(&patch_id, phashes, ARRAY_SIZE(phashes), sizeof(struct patch_digest), cmp_id);
if (!pd) {
pr_err("No sha256 digest for patch ID: 0x%x found\n", patch_id);
return false;
}
sha256_init(&s);
sha256_update(&s, data, len);
sha256_final(&s, digest);
if (memcmp(digest, pd->sha256, sizeof(digest))) {
pr_err("Patch 0x%x SHA256 digest mismatch!\n", patch_id);
for (i = 0; i < SHA256_DIGEST_SIZE; i++)
pr_cont("0x%x ", digest[i]);
pr_info("\n");
return false;
}
return true;
}
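
For illustration, an equivalent userspace digest check can be sketched with
OpenSSL's one-shot SHA256() helper (an assumption made for the example; the
kernel code above uses the in-kernel sha256_init/update/final API instead):

/* Userspace sketch only. Build with: gcc verify.c -lcrypto */
#include <string.h>
#include <openssl/sha.h>

static int patch_digest_matches(const unsigned char *data, size_t len,
				const unsigned char expected[SHA256_DIGEST_LENGTH])
{
	unsigned char digest[SHA256_DIGEST_LENGTH];

	SHA256(data, len, digest);	/* one-shot hash of the patch blob */
	return memcmp(digest, expected, sizeof(digest)) == 0;
}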
static u32 get_patch_level(void)
{
u32 rev, dummy __always_unused;
native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
return rev;
}
static union cpuid_1_eax ucode_rev_to_cpuid(unsigned int val)
{
union zen_patch_rev p;
@@ -246,8 +351,7 @@ static bool verify_equivalence_table(const u8 *buf, size_t buf_size)
* On success, @sh_psize returns the patch size according to the section header,
* to the caller.
*/
static bool
__verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize)
static bool __verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize)
{
u32 p_type, p_size;
const u32 *hdr;
@@ -484,10 +588,13 @@ static void scan_containers(u8 *ucode, size_t size, struct cont_desc *desc)
}
}
static bool __apply_microcode_amd(struct microcode_amd *mc, unsigned int psize)
static bool __apply_microcode_amd(struct microcode_amd *mc, u32 *cur_rev,
unsigned int psize)
{
unsigned long p_addr = (unsigned long)&mc->hdr.data_code;
u32 rev, dummy;
if (!verify_sha256_digest(mc->hdr.patch_id, *cur_rev, (const u8 *)p_addr, psize))
return -1;
native_wrmsrl(MSR_AMD64_PATCH_LOADER, p_addr);
@@ -505,47 +612,13 @@ static bool __apply_microcode_amd(struct microcode_amd *mc, unsigned int psize)
}
/* verify patch application was successful */
native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
if (rev != mc->hdr.patch_id)
*cur_rev = get_patch_level();
if (*cur_rev != mc->hdr.patch_id)
return false;
return true;
}
/*
* Early load occurs before we can vmalloc(). So we look for the microcode
* patch container file in initrd, traverse equivalent cpu table, look for a
* matching microcode patch, and update, all in initrd memory in place.
* When vmalloc() is available for use later -- on 64-bit during first AP load,
* and on 32-bit during save_microcode_in_initrd_amd() -- we can call
* load_microcode_amd() to save equivalent cpu table and microcode patches in
* kernel heap memory.
*
* Returns true if container found (sets @desc), false otherwise.
*/
static bool early_apply_microcode(u32 old_rev, void *ucode, size_t size)
{
struct cont_desc desc = { 0 };
struct microcode_amd *mc;
scan_containers(ucode, size, &desc);
mc = desc.mc;
if (!mc)
return false;
/*
* Allow application of the same revision to pick up SMT-specific
* changes even if the revision of the other SMT thread is already
* up-to-date.
*/
if (old_rev > mc->hdr.patch_id)
return false;
return __apply_microcode_amd(mc, desc.psize);
}
static bool get_builtin_microcode(struct cpio_data *cp)
{
char fw_name[36] = "amd-ucode/microcode_amd.bin";
@@ -583,14 +656,35 @@ static bool __init find_blobs_in_containers(struct cpio_data *ret)
return found;
}
/*
* Early load occurs before we can vmalloc(). So we look for the microcode
* patch container file in initrd, traverse equivalent cpu table, look for a
* matching microcode patch, and update, all in initrd memory in place.
* When vmalloc() is available for use later -- on 64-bit during first AP load,
* and on 32-bit during save_microcode_in_initrd() -- we can call
* load_microcode_amd() to save equivalent cpu table and microcode patches in
* kernel heap memory.
*/
void __init load_ucode_amd_bsp(struct early_load_data *ed, unsigned int cpuid_1_eax)
{
struct cont_desc desc = { };
struct microcode_amd *mc;
struct cpio_data cp = { };
u32 dummy;
char buf[4];
u32 rev;
if (cmdline_find_option(boot_command_line, "microcode.amd_sha_check", buf, 4)) {
if (!strncmp(buf, "off", 3)) {
sha_check = false;
pr_warn_once("It is a very very bad idea to disable the blobs SHA check!\n");
add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
}
}
bsp_cpuid_1_eax = cpuid_1_eax;
native_rdmsr(MSR_AMD64_PATCH_LEVEL, ed->old_rev, dummy);
rev = get_patch_level();
ed->old_rev = rev;
/* Needed in load_microcode_amd() */
ucode_cpu_info[0].cpu_sig.sig = cpuid_1_eax;
@@ -598,37 +692,23 @@ void __init load_ucode_amd_bsp(struct early_load_data *ed, unsigned int cpuid_1_
if (!find_blobs_in_containers(&cp))
return;
if (early_apply_microcode(ed->old_rev, cp.data, cp.size))
native_rdmsr(MSR_AMD64_PATCH_LEVEL, ed->new_rev, dummy);
}
static enum ucode_state _load_microcode_amd(u8 family, const u8 *data, size_t size);
static int __init save_microcode_in_initrd(void)
{
unsigned int cpuid_1_eax = native_cpuid_eax(1);
struct cpuinfo_x86 *c = &boot_cpu_data;
struct cont_desc desc = { 0 };
enum ucode_state ret;
struct cpio_data cp;
if (dis_ucode_ldr || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10)
return 0;
if (!find_blobs_in_containers(&cp))
return -EINVAL;
scan_containers(cp.data, cp.size, &desc);
if (!desc.mc)
return -EINVAL;
ret = _load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size);
if (ret > UCODE_UPDATED)
return -EINVAL;
mc = desc.mc;
if (!mc)
return;
return 0;
/*
* Allow application of the same revision to pick up SMT-specific
* changes even if the revision of the other SMT thread is already
* up-to-date.
*/
if (ed->old_rev > mc->hdr.patch_id)
return;
if (__apply_microcode_amd(mc, &rev, desc.psize))
ed->new_rev = rev;
}
early_initcall(save_microcode_in_initrd);
static inline bool patch_cpus_equivalent(struct ucode_patch *p,
struct ucode_patch *n,
@@ -729,14 +809,9 @@ static void free_cache(void)
static struct ucode_patch *find_patch(unsigned int cpu)
{
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
u32 rev, dummy __always_unused;
u16 equiv_id = 0;
/* fetch rev if not populated yet: */
if (!uci->cpu_sig.rev) {
rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
uci->cpu_sig.rev = rev;
}
uci->cpu_sig.rev = get_patch_level();
if (x86_family(bsp_cpuid_1_eax) < 0x17) {
equiv_id = find_equiv_id(&equiv_table, uci->cpu_sig.sig);
@@ -759,22 +834,20 @@ void reload_ucode_amd(unsigned int cpu)
mc = p->data;
rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
rev = get_patch_level();
if (rev < mc->hdr.patch_id) {
if (__apply_microcode_amd(mc, p->size))
pr_info_once("reload revision: 0x%08x\n", mc->hdr.patch_id);
if (__apply_microcode_amd(mc, &rev, p->size))
pr_info_once("reload revision: 0x%08x\n", rev);
}
}
static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
{
struct cpuinfo_x86 *c = &cpu_data(cpu);
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
struct ucode_patch *p;
csig->sig = cpuid_eax(0x00000001);
csig->rev = c->microcode;
csig->rev = get_patch_level();
/*
* a patch could have been loaded early, set uci->mc so that
@@ -815,7 +888,7 @@ static enum ucode_state apply_microcode_amd(int cpu)
goto out;
}
if (!__apply_microcode_amd(mc_amd, p->size)) {
if (!__apply_microcode_amd(mc_amd, &rev, p->size)) {
pr_err("CPU%d: update failed for patch_level=0x%08x\n",
cpu, mc_amd->hdr.patch_id);
return UCODE_ERROR;
@@ -937,8 +1010,7 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover,
}
/* Scan the blob in @data and add microcode patches to the cache. */
static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
size_t size)
static enum ucode_state __load_microcode_amd(u8 family, const u8 *data, size_t size)
{
u8 *fw = (u8 *)data;
size_t offset;
@@ -1013,6 +1085,32 @@ static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t siz
return ret;
}
static int __init save_microcode_in_initrd(void)
{
unsigned int cpuid_1_eax = native_cpuid_eax(1);
struct cpuinfo_x86 *c = &boot_cpu_data;
struct cont_desc desc = { 0 };
enum ucode_state ret;
struct cpio_data cp;
if (dis_ucode_ldr || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10)
return 0;
if (!find_blobs_in_containers(&cp))
return -EINVAL;
scan_containers(cp.data, cp.size, &desc);
if (!desc.mc)
return -EINVAL;
ret = _load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size);
if (ret > UCODE_UPDATED)
return -EINVAL;
return 0;
}
early_initcall(save_microcode_in_initrd);
/*
* AMD microcode firmware naming convention, up to family 15h they are in
* the legacy file:


@@ -0,0 +1,444 @@
/* Keep 'em sorted. */
static const struct patch_digest phashes[] = {
{ 0x8001227, {
0x99,0xc0,0x9b,0x2b,0xcc,0x9f,0x52,0x1b,
0x1a,0x5f,0x1d,0x83,0xa1,0x6c,0xc4,0x46,
0xe2,0x6c,0xda,0x73,0xfb,0x2d,0x23,0xa8,
0x77,0xdc,0x15,0x31,0x33,0x4a,0x46,0x18,
}
},
{ 0x8001250, {
0xc0,0x0b,0x6b,0x19,0xfd,0x5c,0x39,0x60,
0xd5,0xc3,0x57,0x46,0x54,0xe4,0xd1,0xaa,
0xa8,0xf7,0x1f,0xa8,0x6a,0x60,0x3e,0xe3,
0x27,0x39,0x8e,0x53,0x30,0xf8,0x49,0x19,
}
},
{ 0x800126e, {
0xf3,0x8b,0x2b,0xb6,0x34,0xe3,0xc8,0x2c,
0xef,0xec,0x63,0x6d,0xc8,0x76,0x77,0xb3,
0x25,0x5a,0xb7,0x52,0x8c,0x83,0x26,0xe6,
0x4c,0xbe,0xbf,0xe9,0x7d,0x22,0x6a,0x43,
}
},
{ 0x800126f, {
0x2b,0x5a,0xf2,0x9c,0xdd,0xd2,0x7f,0xec,
0xec,0x96,0x09,0x57,0xb0,0x96,0x29,0x8b,
0x2e,0x26,0x91,0xf0,0x49,0x33,0x42,0x18,
0xdd,0x4b,0x65,0x5a,0xd4,0x15,0x3d,0x33,
}
},
{ 0x800820d, {
0x68,0x98,0x83,0xcd,0x22,0x0d,0xdd,0x59,
0x73,0x2c,0x5b,0x37,0x1f,0x84,0x0e,0x67,
0x96,0x43,0x83,0x0c,0x46,0x44,0xab,0x7c,
0x7b,0x65,0x9e,0x57,0xb5,0x90,0x4b,0x0e,
}
},
{ 0x8301025, {
0xe4,0x7d,0xdb,0x1e,0x14,0xb4,0x5e,0x36,
0x8f,0x3e,0x48,0x88,0x3c,0x6d,0x76,0xa1,
0x59,0xc6,0xc0,0x72,0x42,0xdf,0x6c,0x30,
0x6f,0x0b,0x28,0x16,0x61,0xfc,0x79,0x77,
}
},
{ 0x8301055, {
0x81,0x7b,0x99,0x1b,0xae,0x2d,0x4f,0x9a,
0xef,0x13,0xce,0xb5,0x10,0xaf,0x6a,0xea,
0xe5,0xb0,0x64,0x98,0x10,0x68,0x34,0x3b,
0x9d,0x7a,0xd6,0x22,0x77,0x5f,0xb3,0x5b,
}
},
{ 0x8301072, {
0xcf,0x76,0xa7,0x1a,0x49,0xdf,0x2a,0x5e,
0x9e,0x40,0x70,0xe5,0xdd,0x8a,0xa8,0x28,
0x20,0xdc,0x91,0xd8,0x2c,0xa6,0xa0,0xb1,
0x2d,0x22,0x26,0x94,0x4b,0x40,0x85,0x30,
}
},
{ 0x830107a, {
0x2a,0x65,0x8c,0x1a,0x5e,0x07,0x21,0x72,
0xdf,0x90,0xa6,0x51,0x37,0xd3,0x4b,0x34,
0xc4,0xda,0x03,0xe1,0x8a,0x6c,0xfb,0x20,
0x04,0xb2,0x81,0x05,0xd4,0x87,0xf4,0x0a,
}
},
{ 0x830107b, {
0xb3,0x43,0x13,0x63,0x56,0xc1,0x39,0xad,
0x10,0xa6,0x2b,0xcc,0x02,0xe6,0x76,0x2a,
0x1e,0x39,0x58,0x3e,0x23,0x6e,0xa4,0x04,
0x95,0xea,0xf9,0x6d,0xc2,0x8a,0x13,0x19,
}
},
{ 0x830107c, {
0x21,0x64,0xde,0xfb,0x9f,0x68,0x96,0x47,
0x70,0x5c,0xe2,0x8f,0x18,0x52,0x6a,0xac,
0xa4,0xd2,0x2e,0xe0,0xde,0x68,0x66,0xc3,
0xeb,0x1e,0xd3,0x3f,0xbc,0x51,0x1d,0x38,
}
},
{ 0x860010d, {
0x86,0xb6,0x15,0x83,0xbc,0x3b,0x9c,0xe0,
0xb3,0xef,0x1d,0x99,0x84,0x35,0x15,0xf7,
0x7c,0x2a,0xc6,0x42,0xdb,0x73,0x07,0x5c,
0x7d,0xc3,0x02,0xb5,0x43,0x06,0x5e,0xf8,
}
},
{ 0x8608108, {
0x14,0xfe,0x57,0x86,0x49,0xc8,0x68,0xe2,
0x11,0xa3,0xcb,0x6e,0xff,0x6e,0xd5,0x38,
0xfe,0x89,0x1a,0xe0,0x67,0xbf,0xc4,0xcc,
0x1b,0x9f,0x84,0x77,0x2b,0x9f,0xaa,0xbd,
}
},
{ 0x8701034, {
0xc3,0x14,0x09,0xa8,0x9c,0x3f,0x8d,0x83,
0x9b,0x4c,0xa5,0xb7,0x64,0x8b,0x91,0x5d,
0x85,0x6a,0x39,0x26,0x1e,0x14,0x41,0xa8,
0x75,0xea,0xa6,0xf9,0xc9,0xd1,0xea,0x2b,
}
},
{ 0x8a00008, {
0xd7,0x2a,0x93,0xdc,0x05,0x2f,0xa5,0x6e,
0x0c,0x61,0x2c,0x07,0x9f,0x38,0xe9,0x8e,
0xef,0x7d,0x2a,0x05,0x4d,0x56,0xaf,0x72,
0xe7,0x56,0x47,0x6e,0x60,0x27,0xd5,0x8c,
}
},
{ 0x8a0000a, {
0x73,0x31,0x26,0x22,0xd4,0xf9,0xee,0x3c,
0x07,0x06,0xe7,0xb9,0xad,0xd8,0x72,0x44,
0x33,0x31,0xaa,0x7d,0xc3,0x67,0x0e,0xdb,
0x47,0xb5,0xaa,0xbc,0xf5,0xbb,0xd9,0x20,
}
},
{ 0xa00104c, {
0x3c,0x8a,0xfe,0x04,0x62,0xd8,0x6d,0xbe,
0xa7,0x14,0x28,0x64,0x75,0xc0,0xa3,0x76,
0xb7,0x92,0x0b,0x97,0x0a,0x8e,0x9c,0x5b,
0x1b,0xc8,0x9d,0x3a,0x1e,0x81,0x3d,0x3b,
}
},
{ 0xa00104e, {
0xc4,0x35,0x82,0x67,0xd2,0x86,0xe5,0xb2,
0xfd,0x69,0x12,0x38,0xc8,0x77,0xba,0xe0,
0x70,0xf9,0x77,0x89,0x10,0xa6,0x74,0x4e,
0x56,0x58,0x13,0xf5,0x84,0x70,0x28,0x0b,
}
},
{ 0xa001053, {
0x92,0x0e,0xf4,0x69,0x10,0x3b,0xf9,0x9d,
0x31,0x1b,0xa6,0x99,0x08,0x7d,0xd7,0x25,
0x7e,0x1e,0x89,0xba,0x35,0x8d,0xac,0xcb,
0x3a,0xb4,0xdf,0x58,0x12,0xcf,0xc0,0xc3,
}
},
{ 0xa001058, {
0x33,0x7d,0xa9,0xb5,0x4e,0x62,0x13,0x36,
0xef,0x66,0xc9,0xbd,0x0a,0xa6,0x3b,0x19,
0xcb,0xf5,0xc2,0xc3,0x55,0x47,0x20,0xec,
0x1f,0x7b,0xa1,0x44,0x0e,0x8e,0xa4,0xb2,
}
},
{ 0xa001075, {
0x39,0x02,0x82,0xd0,0x7c,0x26,0x43,0xe9,
0x26,0xa3,0xd9,0x96,0xf7,0x30,0x13,0x0a,
0x8a,0x0e,0xac,0xe7,0x1d,0xdc,0xe2,0x0f,
0xcb,0x9e,0x8d,0xbc,0xd2,0xa2,0x44,0xe0,
}
},
{ 0xa001078, {
0x2d,0x67,0xc7,0x35,0xca,0xef,0x2f,0x25,
0x4c,0x45,0x93,0x3f,0x36,0x01,0x8c,0xce,
0xa8,0x5b,0x07,0xd3,0xc1,0x35,0x3c,0x04,
0x20,0xa2,0xfc,0xdc,0xe6,0xce,0x26,0x3e,
}
},
{ 0xa001079, {
0x43,0xe2,0x05,0x9c,0xfd,0xb7,0x5b,0xeb,
0x5b,0xe9,0xeb,0x3b,0x96,0xf4,0xe4,0x93,
0x73,0x45,0x3e,0xac,0x8d,0x3b,0xe4,0xdb,
0x10,0x31,0xc1,0xe4,0xa2,0xd0,0x5a,0x8a,
}
},
{ 0xa00107a, {
0x5f,0x92,0xca,0xff,0xc3,0x59,0x22,0x5f,
0x02,0xa0,0x91,0x3b,0x4a,0x45,0x10,0xfd,
0x19,0xe1,0x8a,0x6d,0x9a,0x92,0xc1,0x3f,
0x75,0x78,0xac,0x78,0x03,0x1d,0xdb,0x18,
}
},
{ 0xa001143, {
0x56,0xca,0xf7,0x43,0x8a,0x4c,0x46,0x80,
0xec,0xde,0xe5,0x9c,0x50,0x84,0x9a,0x42,
0x27,0xe5,0x51,0x84,0x8f,0x19,0xc0,0x8d,
0x0c,0x25,0xb4,0xb0,0x8f,0x10,0xf3,0xf8,
}
},
{ 0xa001144, {
0x42,0xd5,0x9b,0xa7,0xd6,0x15,0x29,0x41,
0x61,0xc4,0x72,0x3f,0xf3,0x06,0x78,0x4b,
0x65,0xf3,0x0e,0xfa,0x9c,0x87,0xde,0x25,
0xbd,0xb3,0x9a,0xf4,0x75,0x13,0x53,0xdc,
}
},
{ 0xa00115d, {
0xd4,0xc4,0x49,0x36,0x89,0x0b,0x47,0xdd,
0xfb,0x2f,0x88,0x3b,0x5f,0xf2,0x8e,0x75,
0xc6,0x6c,0x37,0x5a,0x90,0x25,0x94,0x3e,
0x36,0x9c,0xae,0x02,0x38,0x6c,0xf5,0x05,
}
},
{ 0xa001173, {
0x28,0xbb,0x9b,0xd1,0xa0,0xa0,0x7e,0x3a,
0x59,0x20,0xc0,0xa9,0xb2,0x5c,0xc3,0x35,
0x53,0x89,0xe1,0x4c,0x93,0x2f,0x1d,0xc3,
0xe5,0xf7,0xf3,0xc8,0x9b,0x61,0xaa,0x9e,
}
},
{ 0xa0011a8, {
0x97,0xc6,0x16,0x65,0x99,0xa4,0x85,0x3b,
0xf6,0xce,0xaa,0x49,0x4a,0x3a,0xc5,0xb6,
0x78,0x25,0xbc,0x53,0xaf,0x5d,0xcf,0xf4,
0x23,0x12,0xbb,0xb1,0xbc,0x8a,0x02,0x2e,
}
},
{ 0xa0011ce, {
0xcf,0x1c,0x90,0xa3,0x85,0x0a,0xbf,0x71,
0x94,0x0e,0x80,0x86,0x85,0x4f,0xd7,0x86,
0xae,0x38,0x23,0x28,0x2b,0x35,0x9b,0x4e,
0xfe,0xb8,0xcd,0x3d,0x3d,0x39,0xc9,0x6a,
}
},
{ 0xa0011d1, {
0xdf,0x0e,0xca,0xde,0xf6,0xce,0x5c,0x1e,
0x4c,0xec,0xd7,0x71,0x83,0xcc,0xa8,0x09,
0xc7,0xc5,0xfe,0xb2,0xf7,0x05,0xd2,0xc5,
0x12,0xdd,0xe4,0xf3,0x92,0x1c,0x3d,0xb8,
}
},
{ 0xa0011d3, {
0x91,0xe6,0x10,0xd7,0x57,0xb0,0x95,0x0b,
0x9a,0x24,0xee,0xf7,0xcf,0x56,0xc1,0xa6,
0x4a,0x52,0x7d,0x5f,0x9f,0xdf,0xf6,0x00,
0x65,0xf7,0xea,0xe8,0x2a,0x88,0xe2,0x26,
}
},
{ 0xa0011d5, {
0xed,0x69,0x89,0xf4,0xeb,0x64,0xc2,0x13,
0xe0,0x51,0x1f,0x03,0x26,0x52,0x7d,0xb7,
0x93,0x5d,0x65,0xca,0xb8,0x12,0x1d,0x62,
0x0d,0x5b,0x65,0x34,0x69,0xb2,0x62,0x21,
}
},
{ 0xa001223, {
0xfb,0x32,0x5f,0xc6,0x83,0x4f,0x8c,0xb8,
0xa4,0x05,0xf9,0x71,0x53,0x01,0x16,0xc4,
0x83,0x75,0x94,0xdd,0xeb,0x7e,0xb7,0x15,
0x8e,0x3b,0x50,0x29,0x8a,0x9c,0xcc,0x45,
}
},
{ 0xa001224, {
0x0e,0x0c,0xdf,0xb4,0x89,0xee,0x35,0x25,
0xdd,0x9e,0xdb,0xc0,0x69,0x83,0x0a,0xad,
0x26,0xa9,0xaa,0x9d,0xfc,0x3c,0xea,0xf9,
0x6c,0xdc,0xd5,0x6d,0x8b,0x6e,0x85,0x4a,
}
},
{ 0xa001227, {
0xab,0xc6,0x00,0x69,0x4b,0x50,0x87,0xad,
0x5f,0x0e,0x8b,0xea,0x57,0x38,0xce,0x1d,
0x0f,0x75,0x26,0x02,0xf6,0xd6,0x96,0xe9,
0x87,0xb9,0xd6,0x20,0x27,0x7c,0xd2,0xe0,
}
},
{ 0xa001229, {
0x7f,0x49,0x49,0x48,0x46,0xa5,0x50,0xa6,
0x28,0x89,0x98,0xe2,0x9e,0xb4,0x7f,0x75,
0x33,0xa7,0x04,0x02,0xe4,0x82,0xbf,0xb4,
0xa5,0x3a,0xba,0x24,0x8d,0x31,0x10,0x1d,
}
},
{ 0xa00122e, {
0x56,0x94,0xa9,0x5d,0x06,0x68,0xfe,0xaf,
0xdf,0x7a,0xff,0x2d,0xdf,0x74,0x0f,0x15,
0x66,0xfb,0x00,0xb5,0x51,0x97,0x9b,0xfa,
0xcb,0x79,0x85,0x46,0x25,0xb4,0xd2,0x10,
}
},
{ 0xa001231, {
0x0b,0x46,0xa5,0xfc,0x18,0x15,0xa0,0x9e,
0xa6,0xdc,0xb7,0xff,0x17,0xf7,0x30,0x64,
0xd4,0xda,0x9e,0x1b,0xc3,0xfc,0x02,0x3b,
0xe2,0xc6,0x0e,0x41,0x54,0xb5,0x18,0xdd,
}
},
{ 0xa001234, {
0x88,0x8d,0xed,0xab,0xb5,0xbd,0x4e,0xf7,
0x7f,0xd4,0x0e,0x95,0x34,0x91,0xff,0xcc,
0xfb,0x2a,0xcd,0xf7,0xd5,0xdb,0x4c,0x9b,
0xd6,0x2e,0x73,0x50,0x8f,0x83,0x79,0x1a,
}
},
{ 0xa001236, {
0x3d,0x30,0x00,0xb9,0x71,0xba,0x87,0x78,
0xa8,0x43,0x55,0xc4,0x26,0x59,0xcf,0x9d,
0x93,0xce,0x64,0x0e,0x8b,0x72,0x11,0x8b,
0xa3,0x8f,0x51,0xe9,0xca,0x98,0xaa,0x25,
}
},
{ 0xa001238, {
0x72,0xf7,0x4b,0x0c,0x7d,0x58,0x65,0xcc,
0x00,0xcc,0x57,0x16,0x68,0x16,0xf8,0x2a,
0x1b,0xb3,0x8b,0xe1,0xb6,0x83,0x8c,0x7e,
0xc0,0xcd,0x33,0xf2,0x8d,0xf9,0xef,0x59,
}
},
{ 0xa00820c, {
0xa8,0x0c,0x81,0xc0,0xa6,0x00,0xe7,0xf3,
0x5f,0x65,0xd3,0xb9,0x6f,0xea,0x93,0x63,
0xf1,0x8c,0x88,0x45,0xd7,0x82,0x80,0xd1,
0xe1,0x3b,0x8d,0xb2,0xf8,0x22,0x03,0xe2,
}
},
{ 0xa10113e, {
0x05,0x3c,0x66,0xd7,0xa9,0x5a,0x33,0x10,
0x1b,0xf8,0x9c,0x8f,0xed,0xfc,0xa7,0xa0,
0x15,0xe3,0x3f,0x4b,0x1d,0x0d,0x0a,0xd5,
0xfa,0x90,0xc4,0xed,0x9d,0x90,0xaf,0x53,
}
},
{ 0xa101144, {
0xb3,0x0b,0x26,0x9a,0xf8,0x7c,0x02,0x26,
0x35,0x84,0x53,0xa4,0xd3,0x2c,0x7c,0x09,
0x68,0x7b,0x96,0xb6,0x93,0xef,0xde,0xbc,
0xfd,0x4b,0x15,0xd2,0x81,0xd3,0x51,0x47,
}
},
{ 0xa101148, {
0x20,0xd5,0x6f,0x40,0x4a,0xf6,0x48,0x90,
0xc2,0x93,0x9a,0xc2,0xfd,0xac,0xef,0x4f,
0xfa,0xc0,0x3d,0x92,0x3c,0x6d,0x01,0x08,
0xf1,0x5e,0xb0,0xde,0xb4,0x98,0xae,0xc4,
}
},
{ 0xa10123e, {
0x03,0xb9,0x2c,0x76,0x48,0x93,0xc9,0x18,
0xfb,0x56,0xfd,0xf7,0xe2,0x1d,0xca,0x4d,
0x1d,0x13,0x53,0x63,0xfe,0x42,0x6f,0xfc,
0x19,0x0f,0xf1,0xfc,0xa7,0xdd,0x89,0x1b,
}
},
{ 0xa101244, {
0x71,0x56,0xb5,0x9f,0x21,0xbf,0xb3,0x3c,
0x8c,0xd7,0x36,0xd0,0x34,0x52,0x1b,0xb1,
0x46,0x2f,0x04,0xf0,0x37,0xd8,0x1e,0x72,
0x24,0xa2,0x80,0x84,0x83,0x65,0x84,0xc0,
}
},
{ 0xa101248, {
0xed,0x3b,0x95,0xa6,0x68,0xa7,0x77,0x3e,
0xfc,0x17,0x26,0xe2,0x7b,0xd5,0x56,0x22,
0x2c,0x1d,0xef,0xeb,0x56,0xdd,0xba,0x6e,
0x1b,0x7d,0x64,0x9d,0x4b,0x53,0x13,0x75,
}
},
{ 0xa108108, {
0xed,0xc2,0xec,0xa1,0x15,0xc6,0x65,0xe9,
0xd0,0xef,0x39,0xaa,0x7f,0x55,0x06,0xc6,
0xf5,0xd4,0x3f,0x7b,0x14,0xd5,0x60,0x2c,
0x28,0x1e,0x9c,0x59,0x69,0x99,0x4d,0x16,
}
},
{ 0xa20102d, {
0xf9,0x6e,0xf2,0x32,0xd3,0x0f,0x5f,0x11,
0x59,0xa1,0xfe,0xcc,0xcd,0x9b,0x42,0x89,
0x8b,0x89,0x2f,0xb5,0xbb,0x82,0xef,0x23,
0x8c,0xe9,0x19,0x3e,0xcc,0x3f,0x7b,0xb4,
}
},
{ 0xa201210, {
0xe8,0x6d,0x51,0x6a,0x8e,0x72,0xf3,0xfe,
0x6e,0x16,0xbc,0x62,0x59,0x40,0x17,0xe9,
0x6d,0x3d,0x0e,0x6b,0xa7,0xac,0xe3,0x68,
0xf7,0x55,0xf0,0x13,0xbb,0x22,0xf6,0x41,
}
},
{ 0xa404107, {
0xbb,0x04,0x4e,0x47,0xdd,0x5e,0x26,0x45,
0x1a,0xc9,0x56,0x24,0xa4,0x4c,0x82,0xb0,
0x8b,0x0d,0x9f,0xf9,0x3a,0xdf,0xc6,0x81,
0x13,0xbc,0xc5,0x25,0xe4,0xc5,0xc3,0x99,
}
},
{ 0xa500011, {
0x23,0x3d,0x70,0x7d,0x03,0xc3,0xc4,0xf4,
0x2b,0x82,0xc6,0x05,0xda,0x80,0x0a,0xf1,
0xd7,0x5b,0x65,0x3a,0x7d,0xab,0xdf,0xa2,
0x11,0x5e,0x96,0x7e,0x71,0xe9,0xfc,0x74,
}
},
{ 0xa601209, {
0x66,0x48,0xd4,0x09,0x05,0xcb,0x29,0x32,
0x66,0xb7,0x9a,0x76,0xcd,0x11,0xf3,0x30,
0x15,0x86,0xcc,0x5d,0x97,0x0f,0xc0,0x46,
0xe8,0x73,0xe2,0xd6,0xdb,0xd2,0x77,0x1d,
}
},
{ 0xa704107, {
0xf3,0xc6,0x58,0x26,0xee,0xac,0x3f,0xd6,
0xce,0xa1,0x72,0x47,0x3b,0xba,0x2b,0x93,
0x2a,0xad,0x8e,0x6b,0xea,0x9b,0xb7,0xc2,
0x64,0x39,0x71,0x8c,0xce,0xe7,0x41,0x39,
}
},
{ 0xa705206, {
0x8d,0xc0,0x76,0xbd,0x58,0x9f,0x8f,0xa4,
0x12,0x9d,0x21,0xfb,0x48,0x21,0xbc,0xe7,
0x67,0x6f,0x04,0x18,0xae,0x20,0x87,0x4b,
0x03,0x35,0xe9,0xbe,0xfb,0x06,0xdf,0xfc,
}
},
{ 0xa708007, {
0x6b,0x76,0xcc,0x78,0xc5,0x8a,0xa3,0xe3,
0x32,0x2d,0x79,0xe4,0xc3,0x80,0xdb,0xb2,
0x07,0xaa,0x3a,0xe0,0x57,0x13,0x72,0x80,
0xdf,0x92,0x73,0x84,0x87,0x3c,0x73,0x93,
}
},
{ 0xa70c005, {
0x88,0x5d,0xfb,0x79,0x64,0xd8,0x46,0x3b,
0x4a,0x83,0x8e,0x77,0x7e,0xcf,0xb3,0x0f,
0x1f,0x1f,0xf1,0x97,0xeb,0xfe,0x56,0x55,
0xee,0x49,0xac,0xe1,0x8b,0x13,0xc5,0x13,
}
},
{ 0xaa00116, {
0xe8,0x4c,0x2c,0x88,0xa1,0xac,0x24,0x63,
0x65,0xe5,0xaa,0x2d,0x16,0xa9,0xc3,0xf5,
0xfe,0x1d,0x5e,0x65,0xc7,0xaa,0x92,0x4d,
0x91,0xee,0x76,0xbb,0x4c,0x66,0x78,0xc9,
}
},
{ 0xaa00212, {
0xbd,0x57,0x5d,0x0a,0x0a,0x30,0xc1,0x75,
0x95,0x58,0x5e,0x93,0x02,0x28,0x43,0x71,
0xed,0x42,0x29,0xc8,0xec,0x34,0x2b,0xb2,
0x1a,0x65,0x4b,0xfe,0x07,0x0f,0x34,0xa1,
}
},
{ 0xaa00213, {
0xed,0x58,0xb7,0x76,0x81,0x7f,0xd9,0x3a,
0x1a,0xff,0x8b,0x34,0xb8,0x4a,0x99,0x0f,
0x28,0x49,0x6c,0x56,0x2b,0xdc,0xb7,0xed,
0x96,0xd5,0x9d,0xc1,0x7a,0xd4,0x51,0x9b,
}
},
{ 0xaa00215, {
0x55,0xd3,0x28,0xcb,0x87,0xa9,0x32,0xe9,
0x4e,0x85,0x4b,0x7c,0x6b,0xd5,0x7c,0xd4,
0x1b,0x51,0x71,0x3a,0x0e,0x0b,0xdc,0x9b,
0x68,0x2f,0x46,0xee,0xfe,0xc6,0x6d,0xef,
}
},
};


@@ -100,14 +100,12 @@ extern bool force_minrev;
#ifdef CONFIG_CPU_SUP_AMD
void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family);
void load_ucode_amd_ap(unsigned int family);
int save_microcode_in_initrd_amd(unsigned int family);
void reload_ucode_amd(unsigned int cpu);
struct microcode_ops *init_amd_microcode(void);
void exit_amd_microcode(void);
#else /* CONFIG_CPU_SUP_AMD */
static inline void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family) { }
static inline void load_ucode_amd_ap(unsigned int family) { }
static inline int save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; }
static inline void reload_ucode_amd(unsigned int cpu) { }
static inline struct microcode_ops *init_amd_microcode(void) { return NULL; }
static inline void exit_amd_microcode(void) { }


@@ -2,6 +2,7 @@
/*
* Architecture specific OF callbacks.
*/
#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/interrupt.h>
@@ -313,6 +314,6 @@ void __init x86_flattree_get_config(void)
if (initial_dtb)
early_memunmap(dt, map_len);
#endif
if (of_have_populated_dt())
if (acpi_disabled && of_have_populated_dt())
x86_init.mpparse.parse_smp_cfg = x86_dtb_parse_smp_config;
}


@@ -25,8 +25,10 @@
#include <asm/posted_intr.h>
#include <asm/irq_remapping.h>
#if defined(CONFIG_X86_LOCAL_APIC) || defined(CONFIG_X86_THERMAL_VECTOR)
#define CREATE_TRACE_POINTS
#include <asm/trace/irq_vectors.h>
#endif
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);


@@ -7460,7 +7460,7 @@ static bool kvm_nx_huge_page_recovery_worker(void *data)
return true;
}
static void kvm_mmu_start_lpage_recovery(struct once *once)
static int kvm_mmu_start_lpage_recovery(struct once *once)
{
struct kvm_arch *ka = container_of(once, struct kvm_arch, nx_once);
struct kvm *kvm = container_of(ka, struct kvm, arch);
@@ -7471,13 +7471,14 @@ static void kvm_mmu_start_lpage_recovery(struct once *once)
kvm_nx_huge_page_recovery_worker_kill,
kvm, "kvm-nx-lpage-recovery");
if (!nx_thread)
return;
if (IS_ERR(nx_thread))
return PTR_ERR(nx_thread);
vhost_task_start(nx_thread);
/* Make the task visible only once it is fully started. */
WRITE_ONCE(kvm->arch.nx_huge_page_recovery_thread, nx_thread);
return 0;
}
int kvm_mmu_post_init_vm(struct kvm *kvm)
@@ -7485,10 +7486,7 @@ int kvm_mmu_post_init_vm(struct kvm *kvm)
if (nx_hugepage_mitigation_hard_disabled)
return 0;
call_once(&kvm->arch.nx_once, kvm_mmu_start_lpage_recovery);
if (!kvm->arch.nx_huge_page_recovery_thread)
return -ENOMEM;
return 0;
return call_once(&kvm->arch.nx_once, kvm_mmu_start_lpage_recovery);
}
void kvm_mmu_pre_destroy_vm(struct kvm *kvm)


@@ -5084,6 +5084,17 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
load_vmcs12_host_state(vcpu, vmcs12);
/*
* Process events if an injectable IRQ or NMI is pending, even
* if the event is blocked (RFLAGS.IF is cleared on VM-Exit).
* If an event became pending while L2 was active, KVM needs to
* either inject the event or request an IRQ/NMI window. SMIs
* don't need to be processed as SMM is mutually exclusive with
* non-root mode. INIT/SIPI don't need to be checked as INIT
* is blocked post-VMXON, and SIPIs are ignored.
*/
if (kvm_cpu_has_injectable_intr(vcpu) || vcpu->arch.nmi_pending)
kvm_make_request(KVM_REQ_EVENT, vcpu);
return;
}


@@ -12877,11 +12877,11 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
mutex_unlock(&kvm->slots_lock);
}
kvm_unload_vcpu_mmus(kvm);
kvm_destroy_vcpus(kvm);
kvm_x86_call(vm_destroy)(kvm);
kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1));
kvm_pic_destroy(kvm);
kvm_ioapic_destroy(kvm);
kvm_destroy_vcpus(kvm);
kvfree(rcu_dereference_check(kvm->arch.apic_map, 1));
kfree(srcu_dereference_check(kvm->arch.pmu_event_filter, &kvm->srcu, 1));
kvm_mmu_uninit_vm(kvm);


@@ -77,7 +77,7 @@ struct bio_slab {
struct kmem_cache *slab;
unsigned int slab_ref;
unsigned int slab_size;
char name[8];
char name[12];
};
static DEFINE_MUTEX(bio_slab_lock);
static DEFINE_XARRAY(bio_slabs);


@@ -329,7 +329,7 @@ int bio_split_rw_at(struct bio *bio, const struct queue_limits *lim,
if (nsegs < lim->max_segments &&
bytes + bv.bv_len <= max_bytes &&
bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
bv.bv_offset + bv.bv_len <= lim->min_segment_size) {
nsegs++;
bytes += bv.bv_len;
} else {


@@ -246,6 +246,7 @@ int blk_validate_limits(struct queue_limits *lim)
{
unsigned int max_hw_sectors;
unsigned int logical_block_sectors;
unsigned long seg_size;
int err;
/*
@@ -303,7 +304,7 @@ int blk_validate_limits(struct queue_limits *lim)
max_hw_sectors = min_not_zero(lim->max_hw_sectors,
lim->max_dev_sectors);
if (lim->max_user_sectors) {
if (lim->max_user_sectors < PAGE_SIZE / SECTOR_SIZE)
if (lim->max_user_sectors < BLK_MIN_SEGMENT_SIZE / SECTOR_SIZE)
return -EINVAL;
lim->max_sectors = min(max_hw_sectors, lim->max_user_sectors);
} else if (lim->io_opt > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
@@ -341,7 +342,7 @@ int blk_validate_limits(struct queue_limits *lim)
*/
if (!lim->seg_boundary_mask)
lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
if (WARN_ON_ONCE(lim->seg_boundary_mask < PAGE_SIZE - 1))
if (WARN_ON_ONCE(lim->seg_boundary_mask < BLK_MIN_SEGMENT_SIZE - 1))
return -EINVAL;
/*
@@ -362,10 +363,17 @@ int blk_validate_limits(struct queue_limits *lim)
*/
if (!lim->max_segment_size)
lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
if (WARN_ON_ONCE(lim->max_segment_size < PAGE_SIZE))
if (WARN_ON_ONCE(lim->max_segment_size < BLK_MIN_SEGMENT_SIZE))
return -EINVAL;
}
/* setup min segment size for building new segment in fast path */
if (lim->seg_boundary_mask > lim->max_segment_size - 1)
seg_size = lim->max_segment_size;
else
seg_size = lim->seg_boundary_mask + 1;
lim->min_segment_size = min_t(unsigned int, seg_size, PAGE_SIZE);
/*
* We require drivers to at least do logical block aligned I/O, but
* historically could not check for that due to the separate calls
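
A small standalone sketch of the min_segment_size rule introduced above
(userspace C; PAGE_SIZE and the sample limit values are assumptions made for
illustration):

#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumed for the example */

static unsigned long min_segment_size(unsigned long max_segment_size,
				      unsigned long seg_boundary_mask)
{
	unsigned long seg_size;

	if (seg_boundary_mask > max_segment_size - 1)
		seg_size = max_segment_size;
	else
		seg_size = seg_boundary_mask + 1;
	return seg_size < PAGE_SIZE ? seg_size : PAGE_SIZE;
}

int main(void)
{
	/* typical limits: 64K segments, 4G-1 boundary mask -> PAGE_SIZE */
	printf("%lu\n", min_segment_size(65536, 0xffffffffUL));	/* 4096 */
	/* a small boundary mask wins: 2K fast-path segments */
	printf("%lu\n", min_segment_size(65536, 2047));		/* 2048 */
	return 0;
}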


@@ -410,13 +410,14 @@ static bool disk_insert_zone_wplug(struct gendisk *disk,
}
}
hlist_add_head_rcu(&zwplug->node, &disk->zone_wplugs_hash[idx]);
atomic_inc(&disk->nr_zone_wplugs);
spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
return true;
}
static struct blk_zone_wplug *disk_get_zone_wplug(struct gendisk *disk,
sector_t sector)
static struct blk_zone_wplug *disk_get_hashed_zone_wplug(struct gendisk *disk,
sector_t sector)
{
unsigned int zno = disk_zone_no(disk, sector);
unsigned int idx = hash_32(zno, disk->zone_wplugs_hash_bits);
@ -437,6 +438,15 @@ static struct blk_zone_wplug *disk_get_zone_wplug(struct gendisk *disk,
return NULL;
}
static inline struct blk_zone_wplug *disk_get_zone_wplug(struct gendisk *disk,
sector_t sector)
{
if (!atomic_read(&disk->nr_zone_wplugs))
return NULL;
return disk_get_hashed_zone_wplug(disk, sector);
}
static void disk_free_zone_wplug_rcu(struct rcu_head *rcu_head)
{
struct blk_zone_wplug *zwplug =
@ -503,6 +513,7 @@ static void disk_remove_zone_wplug(struct gendisk *disk,
zwplug->flags |= BLK_ZONE_WPLUG_UNHASHED;
spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
hlist_del_init_rcu(&zwplug->node);
atomic_dec(&disk->nr_zone_wplugs);
spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
disk_put_zone_wplug(zwplug);
}
@ -593,6 +604,11 @@ static void disk_zone_wplug_abort(struct blk_zone_wplug *zwplug)
{
struct bio *bio;
if (bio_list_empty(&zwplug->bio_list))
return;
pr_warn_ratelimited("%s: zone %u: Aborting plugged BIOs\n",
zwplug->disk->disk_name, zwplug->zone_no);
while ((bio = bio_list_pop(&zwplug->bio_list)))
blk_zone_wplug_bio_io_error(zwplug, bio);
}
@ -1040,6 +1056,47 @@ plug:
return true;
}
static void blk_zone_wplug_handle_native_zone_append(struct bio *bio)
{
struct gendisk *disk = bio->bi_bdev->bd_disk;
struct blk_zone_wplug *zwplug;
unsigned long flags;
/*
* We have native support for zone append operations, so we are not
* going to handle @bio through plugging. However, we may already have a
* zone write plug for the target zone if that zone was previously
partially written using regular writes. In such a case, we risk leaving
* the plug in the disk hash table if the zone is fully written using
* zone append operations. Avoid this by removing the zone write plug.
*/
zwplug = disk_get_zone_wplug(disk, bio->bi_iter.bi_sector);
if (likely(!zwplug))
return;
spin_lock_irqsave(&zwplug->lock, flags);
/*
* We are about to remove the zone write plug. But if the user
* (mistakenly) has issued regular writes together with native zone
append, we must abort the writes as otherwise the plugged BIOs would
* not be executed by the plug BIO work as disk_get_zone_wplug() will
* return NULL after the plug is removed. Aborting the plugged write
* BIOs is consistent with the fact that these writes will most likely
fail anyway as there are no ordering guarantees between zone append
* operations and regular write operations.
*/
if (!bio_list_empty(&zwplug->bio_list)) {
pr_warn_ratelimited("%s: zone %u: Invalid mix of zone append and regular writes\n",
disk->disk_name, zwplug->zone_no);
disk_zone_wplug_abort(zwplug);
}
disk_remove_zone_wplug(disk, zwplug);
spin_unlock_irqrestore(&zwplug->lock, flags);
disk_put_zone_wplug(zwplug);
}
/**
* blk_zone_plug_bio - Handle a zone write BIO with zone write plugging
* @bio: The BIO being submitted
@ -1096,8 +1153,10 @@ bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
*/
switch (bio_op(bio)) {
case REQ_OP_ZONE_APPEND:
if (!bdev_emulates_zone_append(bdev))
if (!bdev_emulates_zone_append(bdev)) {
blk_zone_wplug_handle_native_zone_append(bio);
return false;
}
fallthrough;
case REQ_OP_WRITE:
case REQ_OP_WRITE_ZEROES:
@ -1284,6 +1343,7 @@ static int disk_alloc_zone_resources(struct gendisk *disk,
{
unsigned int i;
atomic_set(&disk->nr_zone_wplugs, 0);
disk->zone_wplugs_hash_bits =
min(ilog2(pool_size) + 1, BLK_ZONE_WPLUG_MAX_HASH_BITS);
@ -1338,6 +1398,7 @@ static void disk_destroy_zone_wplugs_hash_table(struct gendisk *disk)
}
}
WARN_ON_ONCE(atomic_read(&disk->nr_zone_wplugs));
kfree(disk->zone_wplugs_hash);
disk->zone_wplugs_hash = NULL;
disk->zone_wplugs_hash_bits = 0;
@ -1550,11 +1611,12 @@ static int blk_revalidate_seq_zone(struct blk_zone *zone, unsigned int idx,
}
/*
* We need to track the write pointer of all zones that are not
* empty nor full. So make sure we have a zone write plug for
* such zone if the device has a zone write plug hash table.
* If the device needs zone append emulation, we need to track the
* write pointer of all zones that are not empty nor full. So make sure
* we have a zone write plug for such zone if the device has a zone
* write plug hash table.
*/
if (!disk->zone_wplugs_hash)
if (!queue_emulates_zone_append(disk->queue) || !disk->zone_wplugs_hash)
return 0;
disk_zone_wplug_sync_wp_offset(disk, zone);

View File

@ -14,6 +14,7 @@
struct elevator_type;
#define BLK_DEV_MAX_SECTORS (LLONG_MAX >> 9)
#define BLK_MIN_SEGMENT_SIZE 4096
/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT (5 * HZ)
@ -358,8 +359,12 @@ struct bio *bio_split_zone_append(struct bio *bio,
static inline bool bio_may_need_split(struct bio *bio,
const struct queue_limits *lim)
{
return lim->chunk_sectors || bio->bi_vcnt != 1 ||
bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
if (lim->chunk_sectors)
return true;
if (bio->bi_vcnt != 1)
return true;
return bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset >
lim->min_segment_size;
}
/**

View File

@ -386,8 +386,12 @@ struct ahci_host_priv {
static inline bool ahci_ignore_port(struct ahci_host_priv *hpriv,
unsigned int portid)
{
return portid >= hpriv->nports ||
!(hpriv->mask_port_map & (1 << portid));
if (portid >= hpriv->nports)
return true;
/* mask_port_map not set means that all ports are available */
if (!hpriv->mask_port_map)
return false;
return !(hpriv->mask_port_map & (1 << portid));
}
extern int ahci_ignore_sss;
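The rewritten helper is a three-way check: out-of-range ports are ignored, an unset mask means every port is available, and otherwise the port's mask bit decides. A user-space sketch of the same logic (values invented):

#include <stdbool.h>
#include <stdio.h>

static bool ignore_port(unsigned int nports, unsigned long mask_port_map,
			unsigned int portid)
{
	if (portid >= nports)
		return true;
	if (!mask_port_map)		/* unset mask: all ports available */
		return false;
	return !(mask_port_map & (1UL << portid));
}

int main(void)
{
	printf("%d %d %d\n",
	       ignore_port(4, 0x0, 2),	/* 0: no mask, port in range */
	       ignore_port(4, 0x3, 2),	/* 1: bit 2 not in mask */
	       ignore_port(4, 0x3, 5));	/* 1: out of range */
	return 0;
}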

View File

@ -541,6 +541,7 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
hpriv->saved_port_map = port_map;
}
/* mask_port_map not set means that all ports are available */
if (hpriv->mask_port_map) {
dev_warn(dev, "masking port_map 0x%lx -> 0x%lx\n",
port_map,

View File

@ -4143,10 +4143,6 @@ static const struct ata_dev_quirks_entry __ata_dev_quirks[] = {
{ "Samsung SSD 860*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
ATA_QUIRK_ZERO_AFTER_TRIM |
ATA_QUIRK_NO_NCQ_ON_ATI },
{ "Samsung SSD 870 QVO*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
ATA_QUIRK_ZERO_AFTER_TRIM |
ATA_QUIRK_NO_NCQ_ON_ATI |
ATA_QUIRK_NOLPM },
{ "Samsung SSD 870*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
ATA_QUIRK_ZERO_AFTER_TRIM |
ATA_QUIRK_NO_NCQ_ON_ATI },

View File

@ -3644,6 +3644,7 @@ static ssize_t force_poll_sync_write(struct file *file,
}
static const struct file_operations force_poll_sync_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = force_poll_sync_read,
.write = force_poll_sync_write,

View File

@ -59,9 +59,6 @@ struct bam_desc_hw {
#define DESC_FLAG_NWD BIT(12)
#define DESC_FLAG_CMD BIT(11)
#define BAM_NDP_REVISION_START 0x20
#define BAM_NDP_REVISION_END 0x27
struct bam_async_desc {
struct virt_dma_desc vd;
@ -401,7 +398,6 @@ struct bam_device {
/* dma start transaction tasklet */
struct tasklet_struct task;
u32 bam_revision;
};
/**
@ -445,10 +441,8 @@ static void bam_reset(struct bam_device *bdev)
writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
/* set descriptor threshold, start with 4 bytes */
if (in_range(bdev->bam_revision, BAM_NDP_REVISION_START,
BAM_NDP_REVISION_END))
writel_relaxed(DEFAULT_CNT_THRSHLD,
bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
writel_relaxed(DEFAULT_CNT_THRSHLD,
bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
/* Enable default set of h/w workarounds, ie all except BAM_FULL_PIPE */
writel_relaxed(BAM_CNFG_BITS_DEFAULT, bam_addr(bdev, 0, BAM_CNFG_BITS));
@ -1006,10 +1000,9 @@ static void bam_apply_new_config(struct bam_chan *bchan,
maxburst = bchan->slave.src_maxburst;
else
maxburst = bchan->slave.dst_maxburst;
if (in_range(bdev->bam_revision, BAM_NDP_REVISION_START,
BAM_NDP_REVISION_END))
writel_relaxed(maxburst,
bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
writel_relaxed(maxburst,
bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
}
bchan->reconfigure = 0;
@ -1199,11 +1192,10 @@ static int bam_init(struct bam_device *bdev)
u32 val;
/* read revision and configuration information */
val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION));
if (!bdev->num_ees)
if (!bdev->num_ees) {
val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION));
bdev->num_ees = (val >> NUM_EES_SHIFT) & NUM_EES_MASK;
bdev->bam_revision = val & REVISION_MASK;
}
/* check that configured EE is within range */
if (bdev->ee >= bdev->num_ees)

View File

@ -83,7 +83,9 @@ struct tegra_adma;
* @nr_channels: Number of DMA channels available.
* @ch_fifo_size_mask: Mask for FIFO size field.
* @sreq_index_offset: Slave channel index offset.
* @max_page: Maximum ADMA Channel Page.
* @has_outstanding_reqs: If DMA channel can have outstanding requests.
* @set_global_pg_config: Global page programming.
*/
struct tegra_adma_chip_data {
unsigned int (*adma_get_burst_config)(unsigned int burst_size);
@ -99,6 +101,7 @@ struct tegra_adma_chip_data {
unsigned int nr_channels;
unsigned int ch_fifo_size_mask;
unsigned int sreq_index_offset;
unsigned int max_page;
bool has_outstanding_reqs;
void (*set_global_pg_config)(struct tegra_adma *tdma);
};
@ -854,6 +857,7 @@ static const struct tegra_adma_chip_data tegra210_chip_data = {
.nr_channels = 22,
.ch_fifo_size_mask = 0xf,
.sreq_index_offset = 2,
.max_page = 0,
.has_outstanding_reqs = false,
.set_global_pg_config = NULL,
};
@ -871,6 +875,7 @@ static const struct tegra_adma_chip_data tegra186_chip_data = {
.nr_channels = 32,
.ch_fifo_size_mask = 0x1f,
.sreq_index_offset = 4,
.max_page = 4,
.has_outstanding_reqs = true,
.set_global_pg_config = tegra186_adma_global_page_config,
};

View File

@ -311,7 +311,7 @@ void cper_print_proc_arm(const char *pfx,
ctx_info = (struct cper_arm_ctx_info *)err_info;
max_ctx_type = ARRAY_SIZE(arm_reg_ctx_strs) - 1;
for (i = 0; i < proc->context_info_num; i++) {
int size = sizeof(*ctx_info) + ctx_info->size;
int size = ALIGN(sizeof(*ctx_info) + ctx_info->size, 16);
printk("%sContext info structure %d:\n", pfx, i);
if (len < size) {

View File

@ -325,7 +325,7 @@ void cper_print_proc_ia(const char *pfx, const struct cper_sec_proc_ia *proc)
ctx_info = (struct cper_ia_proc_ctx *)err_info;
for (i = 0; i < VALID_PROC_CXT_INFO_NUM(proc->validation_bits); i++) {
int size = sizeof(*ctx_info) + ctx_info->reg_arr_size;
int size = ALIGN(sizeof(*ctx_info) + ctx_info->reg_arr_size, 16);
int groupsize = 4;
printk("%sContext Information Structure %d:\n", pfx, i);

View File

@ -99,14 +99,13 @@ static struct kobject *mokvar_kobj;
*/
void __init efi_mokvar_table_init(void)
{
struct efi_mokvar_table_entry __aligned(1) *mokvar_entry, *next_entry;
efi_memory_desc_t md;
void *va = NULL;
unsigned long cur_offset = 0;
unsigned long offset_limit;
unsigned long map_size = 0;
unsigned long map_size_needed = 0;
unsigned long size;
struct efi_mokvar_table_entry *mokvar_entry;
int err;
if (!efi_enabled(EFI_MEMMAP))
@ -134,48 +133,46 @@ void __init efi_mokvar_table_init(void)
*/
err = -EINVAL;
while (cur_offset + sizeof(*mokvar_entry) <= offset_limit) {
mokvar_entry = va + cur_offset;
map_size_needed = cur_offset + sizeof(*mokvar_entry);
if (map_size_needed > map_size) {
if (va)
early_memunmap(va, map_size);
/*
* Map a little more than the fixed size entry
* header, anticipating some data. It's safe to
* do so as long as we stay within current memory
* descriptor.
*/
map_size = min(map_size_needed + 2*EFI_PAGE_SIZE,
offset_limit);
va = early_memremap(efi.mokvar_table, map_size);
if (!va) {
pr_err("Failed to map EFI MOKvar config table pa=0x%lx, size=%lu.\n",
efi.mokvar_table, map_size);
return;
}
mokvar_entry = va + cur_offset;
if (va)
early_memunmap(va, sizeof(*mokvar_entry));
va = early_memremap(efi.mokvar_table + cur_offset, sizeof(*mokvar_entry));
if (!va) {
pr_err("Failed to map EFI MOKvar config table pa=0x%lx, size=%zu.\n",
efi.mokvar_table + cur_offset, sizeof(*mokvar_entry));
return;
}
mokvar_entry = va;
next:
/* Check for last sentinel entry */
if (mokvar_entry->name[0] == '\0') {
if (mokvar_entry->data_size != 0)
break;
err = 0;
map_size_needed = cur_offset + sizeof(*mokvar_entry);
break;
}
/* Sanity check that the name is null terminated */
size = strnlen(mokvar_entry->name,
sizeof(mokvar_entry->name));
if (size >= sizeof(mokvar_entry->name))
break;
/* Enforce that the name is NUL terminated */
mokvar_entry->name[sizeof(mokvar_entry->name) - 1] = '\0';
/* Advance to the next entry */
cur_offset = map_size_needed + mokvar_entry->data_size;
size = sizeof(*mokvar_entry) + mokvar_entry->data_size;
cur_offset += size;
/*
* Don't bother remapping if the current entry header and the
* next one end on the same page.
*/
next_entry = (void *)((unsigned long)mokvar_entry + size);
if (((((unsigned long)(mokvar_entry + 1) - 1) ^
((unsigned long)(next_entry + 1) - 1)) & PAGE_MASK) == 0) {
mokvar_entry = next_entry;
goto next;
}
}
if (va)
early_memunmap(va, map_size);
early_memunmap(va, sizeof(*mokvar_entry));
if (err) {
pr_err("EFI MOKvar config table is not valid\n");
return;
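The remap-avoidance test above works by XORing the last-byte addresses and masking with PAGE_MASK: if no bits differ above the page offset, both ends sit on the same page and the existing mapping can be reused. A minimal sketch of that predicate:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* true when two objects' last bytes fall on the same page */
static int same_page_end(uintptr_t end_a, uintptr_t end_b)
{
	return ((end_a ^ end_b) & PAGE_MASK) == 0;
}

int main(void)
{
	printf("%d\n", same_page_end(0x1fff, 0x1a00));	/* 1: same page */
	printf("%d\n", same_page_end(0x1fff, 0x2010));	/* 0: next page */
	return 0;
}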

View File

@ -2712,7 +2712,7 @@ EXPORT_SYMBOL_GPL(gpiod_direction_input);
int gpiod_direction_input_nonotify(struct gpio_desc *desc)
{
int ret = 0;
int ret = 0, dir;
CLASS(gpio_chip_guard, guard)(desc);
if (!guard.gc)
@ -2740,12 +2740,12 @@ int gpiod_direction_input_nonotify(struct gpio_desc *desc)
ret = guard.gc->direction_input(guard.gc,
gpio_chip_hwgpio(desc));
} else if (guard.gc->get_direction) {
ret = guard.gc->get_direction(guard.gc,
dir = guard.gc->get_direction(guard.gc,
gpio_chip_hwgpio(desc));
if (ret < 0)
return ret;
if (dir < 0)
return dir;
if (ret != GPIO_LINE_DIRECTION_IN) {
if (dir != GPIO_LINE_DIRECTION_IN) {
gpiod_warn(desc,
"%s: missing direction_input() operation and line is output\n",
__func__);
@ -2764,7 +2764,7 @@ int gpiod_direction_input_nonotify(struct gpio_desc *desc)
static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value)
{
int val = !!value, ret = 0;
int val = !!value, ret = 0, dir;
CLASS(gpio_chip_guard, guard)(desc);
if (!guard.gc)
@ -2788,12 +2788,12 @@ static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value)
} else {
/* Check that we are in output mode if we can */
if (guard.gc->get_direction) {
ret = guard.gc->get_direction(guard.gc,
dir = guard.gc->get_direction(guard.gc,
gpio_chip_hwgpio(desc));
if (ret < 0)
return ret;
if (dir < 0)
return dir;
if (ret != GPIO_LINE_DIRECTION_OUT) {
if (dir != GPIO_LINE_DIRECTION_OUT) {
gpiod_warn(desc,
"%s: missing direction_output() operation\n",
__func__);

View File

@ -1638,6 +1638,13 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev))
return 0;
/* resizing on Dell G5 SE platforms causes problems with runtime pm */
if ((amdgpu_runtime_pm != 0) &&
adev->pdev->vendor == PCI_VENDOR_ID_ATI &&
adev->pdev->device == 0x731f &&
adev->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
return 0;
/* PCI_EXT_CAP_ID_VNDR extended capability is located at 0x100 */
if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR))
DRM_WARN("System can't access extended configuration space, please check!!\n");

View File

@ -1638,22 +1638,19 @@ static ssize_t amdgpu_gfx_set_enforce_isolation(struct device *dev,
}
mutex_lock(&adev->enforce_isolation_mutex);
for (i = 0; i < num_partitions; i++) {
if (adev->enforce_isolation[i] && !partition_values[i]) {
if (adev->enforce_isolation[i] && !partition_values[i])
/* Going from enabled to disabled */
amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(i));
amdgpu_mes_set_enforce_isolation(adev, i, false);
} else if (!adev->enforce_isolation[i] && partition_values[i]) {
else if (!adev->enforce_isolation[i] && partition_values[i])
/* Going from disabled to enabled */
amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(i));
amdgpu_mes_set_enforce_isolation(adev, i, true);
}
adev->enforce_isolation[i] = partition_values[i];
}
mutex_unlock(&adev->enforce_isolation_mutex);
amdgpu_mes_update_enforce_isolation(adev);
return count;
}

View File

@ -1681,7 +1681,8 @@ bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev)
}
/* Fix me -- node_id is used to identify the correct MES instances in the future */
int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev, uint32_t node_id, bool enable)
static int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev,
uint32_t node_id, bool enable)
{
struct mes_misc_op_input op_input = {0};
int r;
@ -1703,6 +1704,23 @@ error:
return r;
}
int amdgpu_mes_update_enforce_isolation(struct amdgpu_device *adev)
{
int i, r = 0;
if (adev->enable_mes && adev->gfx.enable_cleaner_shader) {
mutex_lock(&adev->enforce_isolation_mutex);
for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
if (adev->enforce_isolation[i])
r |= amdgpu_mes_set_enforce_isolation(adev, i, true);
else
r |= amdgpu_mes_set_enforce_isolation(adev, i, false);
}
mutex_unlock(&adev->enforce_isolation_mutex);
}
return r;
}
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)

View File

@ -534,6 +534,6 @@ static inline void amdgpu_mes_unlock(struct amdgpu_mes *mes)
bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev);
int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev, uint32_t node_id, bool enable);
int amdgpu_mes_update_enforce_isolation(struct amdgpu_device *adev);
#endif /* __AMDGPU_MES_H__ */

View File

@ -2281,7 +2281,7 @@ int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
struct amdgpu_res_cursor cursor;
u64 addr;
int r;
int r = 0;
if (!adev->mman.buffer_funcs_enabled)
return -EINVAL;

View File

@ -1633,6 +1633,10 @@ static int mes_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
goto failure;
}
r = amdgpu_mes_update_enforce_isolation(adev);
if (r)
goto failure;
out:
/*
* Disable KIQ ring usage from the driver once MES is enabled.

View File

@ -1743,6 +1743,10 @@ static int mes_v12_0_hw_init(struct amdgpu_ip_block *ip_block)
goto failure;
}
r = amdgpu_mes_update_enforce_isolation(adev);
if (r)
goto failure;
out:
/*
* Disable KIQ ring usage from the driver once MES is enabled.

View File

@ -107,6 +107,8 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;
m->cp_mqd_base_addr_lo = lower_32_bits(addr);
@ -167,10 +169,10 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
m = get_mqd(mqd);
m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK;
m->cp_hqd_pq_control |=
ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);

View File

@ -154,6 +154,8 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
0x55 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;
m->cp_mqd_base_addr_lo = lower_32_bits(addr);
@ -221,10 +223,9 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
m = get_mqd(mqd);
m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK;
m->cp_hqd_pq_control |=
ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);

View File

@ -121,6 +121,8 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
0x55 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;
m->cp_mqd_base_addr_lo = lower_32_bits(addr);
@ -184,10 +186,9 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
m = get_mqd(mqd);
m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK;
m->cp_hqd_pq_control |=
ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);

View File

@ -183,6 +183,9 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;
m->cp_mqd_base_addr_lo = lower_32_bits(addr);
@ -245,7 +248,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
m = get_mqd(mqd);
m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK;
m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1;
pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);

View File

@ -1618,75 +1618,130 @@ static bool dm_should_disable_stutter(struct pci_dev *pdev)
return false;
}
static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
struct amdgpu_dm_quirks {
bool aux_hpd_discon;
bool support_edp0_on_dp1;
};
static struct amdgpu_dm_quirks quirk_entries = {
.aux_hpd_discon = false,
.support_edp0_on_dp1 = false
};
static int edp0_on_dp1_callback(const struct dmi_system_id *id)
{
quirk_entries.support_edp0_on_dp1 = true;
return 0;
}
static int aux_hpd_discon_callback(const struct dmi_system_id *id)
{
quirk_entries.aux_hpd_discon = true;
return 0;
}
static const struct dmi_system_id dmi_quirk_table[] = {
{
.callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
},
},
{
.callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
},
},
{
.callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
},
},
{
.callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
},
},
{
.callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
},
},
{
.callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
},
},
{
.callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
},
},
{
.callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
},
},
{
.callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
},
},
{
.callback = edp0_on_dp1_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite mt645 G8 Mobile Thin Client"),
},
},
{
.callback = edp0_on_dp1_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 665 16 inch G11 Notebook PC"),
},
},
{}
/* TODO: refactor this from a fixed table to a dynamic option */
};
static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
static void retrieve_dmi_info(struct amdgpu_display_manager *dm, struct dc_init_data *init_data)
{
const struct dmi_system_id *dmi_id;
int dmi_id;
struct drm_device *dev = dm->ddev;
dm->aux_hpd_discon_quirk = false;
init_data->flags.support_edp0_on_dp1 = false;
dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
if (dmi_id) {
dmi_id = dmi_check_system(dmi_quirk_table);
if (!dmi_id)
return;
if (quirk_entries.aux_hpd_discon) {
dm->aux_hpd_discon_quirk = true;
DRM_INFO("aux_hpd_discon_quirk attached\n");
drm_info(dev, "aux_hpd_discon_quirk attached\n");
}
if (quirk_entries.support_edp0_on_dp1) {
init_data->flags.support_edp0_on_dp1 = true;
drm_info(dev, "aux_hpd_discon_quirk attached\n");
}
}
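The switch from dmi_first_match() to dmi_check_system() means every matching table entry fires its callback, so one pass can latch several independent quirk flags. A user-space sketch of that dispatch (matching reduced to a product-string compare for brevity):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct quirk_entry {
	int (*callback)(void);
	const char *product;	/* stands in for the DMI match fields */
};

static struct {
	bool aux_hpd_discon;
	bool support_edp0_on_dp1;
} quirks;

static int aux_cb(void)  { quirks.aux_hpd_discon = true; return 0; }
static int edp0_cb(void) { quirks.support_edp0_on_dp1 = true; return 0; }

static const struct quirk_entry table[] = {
	{ aux_cb,  "Precision 3660" },
	{ edp0_cb, "HP Elite mt645 G8 Mobile Thin Client" },
	{ NULL, NULL }
};

/* stands in for dmi_check_system(): run every matching callback */
static int check_system(const char *product)
{
	int hits = 0;
	const struct quirk_entry *e;

	for (e = table; e->callback; e++)
		if (strcmp(e->product, product) == 0 && e->callback() == 0)
			hits++;
	return hits;
}

int main(void)
{
	check_system("Precision 3660");
	printf("aux_hpd_discon=%d edp0_on_dp1=%d\n",
	       quirks.aux_hpd_discon, quirks.support_edp0_on_dp1);
	return 0;
}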
@ -1994,7 +2049,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0))
init_data.num_virtual_links = 1;
retrieve_dmi_info(&adev->dm);
retrieve_dmi_info(&adev->dm, &init_data);
if (adev->dm.bb_from_dmub)
init_data.bb_from_dmub = adev->dm.bb_from_dmub;
@ -7240,8 +7295,14 @@ static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
struct dc_link *dc_link = aconnector->dc_link;
struct dc_sink *dc_em_sink = aconnector->dc_em_sink;
const struct drm_edid *drm_edid;
struct i2c_adapter *ddc;
drm_edid = drm_edid_read(connector);
if (dc_link && dc_link->aux_mode)
ddc = &aconnector->dm_dp_aux.aux.ddc;
else
ddc = &aconnector->i2c->base;
drm_edid = drm_edid_read_ddc(connector, ddc);
drm_edid_connector_update(connector, drm_edid);
if (!drm_edid) {
DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
@ -7286,14 +7347,21 @@ static int get_modes(struct drm_connector *connector)
static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
struct drm_connector *connector = &aconnector->base;
struct dc_link *dc_link = aconnector->dc_link;
struct dc_sink_init_data init_params = {
.link = aconnector->dc_link,
.sink_signal = SIGNAL_TYPE_VIRTUAL
};
const struct drm_edid *drm_edid;
const struct edid *edid;
struct i2c_adapter *ddc;
drm_edid = drm_edid_read(connector);
if (dc_link && dc_link->aux_mode)
ddc = &aconnector->dm_dp_aux.aux.ddc;
else
ddc = &aconnector->i2c->base;
drm_edid = drm_edid_read_ddc(connector, ddc);
drm_edid_connector_update(connector, drm_edid);
if (!drm_edid) {
DRM_ERROR("No EDID found on connector: %s.\n", connector->name);

View File

@ -894,6 +894,7 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
int i;
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
@ -920,6 +921,12 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
}
}
drm_connector_list_iter_end(&iter);
/* Update reference counts for HPDs */
for (i = DC_IRQ_SOURCE_HPD1; i <= adev->mode_info.num_hpd; i++) {
if (amdgpu_irq_get(adev, &adev->hpd_irq, i - DC_IRQ_SOURCE_HPD1))
drm_err(dev, "DM_IRQ: Failed get HPD for source=%d)!\n", i);
}
}
/**
@ -935,6 +942,7 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
int i;
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
@ -960,4 +968,10 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
}
}
drm_connector_list_iter_end(&iter);
/* Update reference counts for HPDs */
for (i = DC_IRQ_SOURCE_HPD1; i <= adev->mode_info.num_hpd; i++) {
if (amdgpu_irq_put(adev, &adev->hpd_irq, i - DC_IRQ_SOURCE_HPD1))
drm_err(dev, "DM_IRQ: Failed put HPD for source=%d!\n", i);
}
}

View File

@ -54,7 +54,8 @@ static bool link_supports_psrsu(struct dc_link *link)
if (amdgpu_dc_debug_mask & DC_DISABLE_PSR_SU)
return false;
return dc_dmub_check_min_version(dc->ctx->dmub_srv->dmub);
/* Temporarily disable PSR-SU to avoid glitches */
return false;
}
/*

View File

@ -3042,6 +3042,7 @@ static int kv_dpm_hw_init(struct amdgpu_ip_block *ip_block)
if (!amdgpu_dpm)
return 0;
mutex_lock(&adev->pm.mutex);
kv_dpm_setup_asic(adev);
ret = kv_dpm_enable(adev);
if (ret)
@ -3049,6 +3050,8 @@ static int kv_dpm_hw_init(struct amdgpu_ip_block *ip_block)
else
adev->pm.dpm_enabled = true;
amdgpu_legacy_dpm_compute_clocks(adev);
mutex_unlock(&adev->pm.mutex);
return ret;
}
@ -3066,32 +3069,42 @@ static int kv_dpm_suspend(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
cancel_work_sync(&adev->pm.dpm.thermal.work);
if (adev->pm.dpm_enabled) {
mutex_lock(&adev->pm.mutex);
adev->pm.dpm_enabled = false;
/* disable dpm */
kv_dpm_disable(adev);
/* reset the power state */
adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
mutex_unlock(&adev->pm.mutex);
}
return 0;
}
static int kv_dpm_resume(struct amdgpu_ip_block *ip_block)
{
int ret;
int ret = 0;
struct amdgpu_device *adev = ip_block->adev;
if (adev->pm.dpm_enabled) {
if (!amdgpu_dpm)
return 0;
if (!adev->pm.dpm_enabled) {
mutex_lock(&adev->pm.mutex);
/* asic init will reset to the boot state */
kv_dpm_setup_asic(adev);
ret = kv_dpm_enable(adev);
if (ret)
if (ret) {
adev->pm.dpm_enabled = false;
else
} else {
adev->pm.dpm_enabled = true;
if (adev->pm.dpm_enabled)
amdgpu_legacy_dpm_compute_clocks(adev);
}
mutex_unlock(&adev->pm.mutex);
}
return 0;
return ret;
}
static bool kv_dpm_is_idle(void *handle)

View File

@ -1009,9 +1009,12 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
int temp, size = sizeof(temp);
if (!adev->pm.dpm_enabled)
return;
mutex_lock(&adev->pm.mutex);
if (!adev->pm.dpm_enabled) {
mutex_unlock(&adev->pm.mutex);
return;
}
if (!pp_funcs->read_sensor(adev->powerplay.pp_handle,
AMDGPU_PP_SENSOR_GPU_TEMP,
(void *)&temp,
@ -1033,4 +1036,5 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
adev->pm.dpm.state = dpm_state;
amdgpu_legacy_dpm_compute_clocks(adev->powerplay.pp_handle);
mutex_unlock(&adev->pm.mutex);
}

View File

@ -7786,6 +7786,7 @@ static int si_dpm_hw_init(struct amdgpu_ip_block *ip_block)
if (!amdgpu_dpm)
return 0;
mutex_lock(&adev->pm.mutex);
si_dpm_setup_asic(adev);
ret = si_dpm_enable(adev);
if (ret)
@ -7793,6 +7794,7 @@ static int si_dpm_hw_init(struct amdgpu_ip_block *ip_block)
else
adev->pm.dpm_enabled = true;
amdgpu_legacy_dpm_compute_clocks(adev);
mutex_unlock(&adev->pm.mutex);
return ret;
}
@ -7810,32 +7812,44 @@ static int si_dpm_suspend(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
cancel_work_sync(&adev->pm.dpm.thermal.work);
if (adev->pm.dpm_enabled) {
mutex_lock(&adev->pm.mutex);
adev->pm.dpm_enabled = false;
/* disable dpm */
si_dpm_disable(adev);
/* reset the power state */
adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
mutex_unlock(&adev->pm.mutex);
}
return 0;
}
static int si_dpm_resume(struct amdgpu_ip_block *ip_block)
{
int ret;
int ret = 0;
struct amdgpu_device *adev = ip_block->adev;
if (adev->pm.dpm_enabled) {
if (!amdgpu_dpm)
return 0;
if (!adev->pm.dpm_enabled) {
/* asic init will reset to the boot state */
mutex_lock(&adev->pm.mutex);
si_dpm_setup_asic(adev);
ret = si_dpm_enable(adev);
if (ret)
if (ret) {
adev->pm.dpm_enabled = false;
else
} else {
adev->pm.dpm_enabled = true;
if (adev->pm.dpm_enabled)
amdgpu_legacy_dpm_compute_clocks(adev);
}
mutex_unlock(&adev->pm.mutex);
}
return 0;
return ret;
}
static bool si_dpm_is_idle(void *handle)

View File

@ -1,6 +1,7 @@
// SPDX-License-Identifier: MIT
#include <linux/fb.h>
#include <linux/vmalloc.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
@ -70,37 +71,102 @@ static const struct fb_ops drm_fbdev_dma_fb_ops = {
.fb_destroy = drm_fbdev_dma_fb_destroy,
};
FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma,
FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma_shadowed,
drm_fb_helper_damage_range,
drm_fb_helper_damage_area);
static int drm_fbdev_dma_deferred_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
static void drm_fbdev_dma_shadowed_fb_destroy(struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_framebuffer *fb = fb_helper->fb;
struct drm_gem_dma_object *dma = drm_fb_dma_get_gem_obj(fb, 0);
void *shadow = info->screen_buffer;
if (!dma->map_noncoherent)
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
if (!fb_helper->dev)
return;
return fb_deferred_io_mmap(info, vma);
if (info->fbdefio)
fb_deferred_io_cleanup(info);
drm_fb_helper_fini(fb_helper);
vfree(shadow);
drm_client_buffer_vunmap(fb_helper->buffer);
drm_client_framebuffer_delete(fb_helper->buffer);
drm_client_release(&fb_helper->client);
drm_fb_helper_unprepare(fb_helper);
kfree(fb_helper);
}
static const struct fb_ops drm_fbdev_dma_deferred_fb_ops = {
static const struct fb_ops drm_fbdev_dma_shadowed_fb_ops = {
.owner = THIS_MODULE,
.fb_open = drm_fbdev_dma_fb_open,
.fb_release = drm_fbdev_dma_fb_release,
__FB_DEFAULT_DEFERRED_OPS_RDWR(drm_fbdev_dma),
FB_DEFAULT_DEFERRED_OPS(drm_fbdev_dma_shadowed),
DRM_FB_HELPER_DEFAULT_OPS,
__FB_DEFAULT_DEFERRED_OPS_DRAW(drm_fbdev_dma),
.fb_mmap = drm_fbdev_dma_deferred_fb_mmap,
.fb_destroy = drm_fbdev_dma_fb_destroy,
.fb_destroy = drm_fbdev_dma_shadowed_fb_destroy,
};
/*
* struct drm_fb_helper
*/
static void drm_fbdev_dma_damage_blit_real(struct drm_fb_helper *fb_helper,
struct drm_clip_rect *clip,
struct iosys_map *dst)
{
struct drm_framebuffer *fb = fb_helper->fb;
size_t offset = clip->y1 * fb->pitches[0];
size_t len = clip->x2 - clip->x1;
unsigned int y;
void *src;
switch (drm_format_info_bpp(fb->format, 0)) {
case 1:
offset += clip->x1 / 8;
len = DIV_ROUND_UP(len + clip->x1 % 8, 8);
break;
case 2:
offset += clip->x1 / 4;
len = DIV_ROUND_UP(len + clip->x1 % 4, 4);
break;
case 4:
offset += clip->x1 / 2;
len = DIV_ROUND_UP(len + clip->x1 % 2, 2);
break;
default:
offset += clip->x1 * fb->format->cpp[0];
len *= fb->format->cpp[0];
break;
}
src = fb_helper->info->screen_buffer + offset;
iosys_map_incr(dst, offset); /* go to first pixel within clip rect */
for (y = clip->y1; y < clip->y2; y++) {
iosys_map_memcpy_to(dst, 0, src, len);
iosys_map_incr(dst, fb->pitches[0]);
src += fb->pitches[0];
}
}
static int drm_fbdev_dma_damage_blit(struct drm_fb_helper *fb_helper,
struct drm_clip_rect *clip)
{
struct drm_client_buffer *buffer = fb_helper->buffer;
struct iosys_map dst;
/*
* For fbdev emulation, we only have to protect against fbdev modeset
* operations. Nothing else will involve the client buffer's BO. So it
* is sufficient to acquire struct drm_fb_helper.lock here.
*/
mutex_lock(&fb_helper->lock);
dst = buffer->map;
drm_fbdev_dma_damage_blit_real(fb_helper, clip, &dst);
mutex_unlock(&fb_helper->lock);
return 0;
}
static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
struct drm_clip_rect *clip)
{
@ -112,6 +178,10 @@ static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
return 0;
if (helper->fb->funcs->dirty) {
ret = drm_fbdev_dma_damage_blit(helper, clip);
if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret))
return ret;
ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
return ret;
@ -128,14 +198,80 @@ static const struct drm_fb_helper_funcs drm_fbdev_dma_helper_funcs = {
* struct drm_fb_helper
*/
static int drm_fbdev_dma_driver_fbdev_probe_tail(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_device *dev = fb_helper->dev;
struct drm_client_buffer *buffer = fb_helper->buffer;
struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(buffer->gem);
struct drm_framebuffer *fb = fb_helper->fb;
struct fb_info *info = fb_helper->info;
struct iosys_map map = buffer->map;
info->fbops = &drm_fbdev_dma_fb_ops;
/* screen */
info->flags |= FBINFO_VIRTFB; /* system memory */
if (dma_obj->map_noncoherent)
info->flags |= FBINFO_READS_FAST; /* signal caching */
info->screen_size = sizes->surface_height * fb->pitches[0];
info->screen_buffer = map.vaddr;
if (!(info->flags & FBINFO_HIDE_SMEM_START)) {
if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer)))
info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
}
info->fix.smem_len = info->screen_size;
return 0;
}
static int drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_client_buffer *buffer = fb_helper->buffer;
struct fb_info *info = fb_helper->info;
size_t screen_size = buffer->gem->size;
void *screen_buffer;
int ret;
/*
* Deferred I/O requires struct page for framebuffer memory,
* which is not guaranteed for all DMA ranges. We thus create
* a shadow buffer in system memory.
*/
screen_buffer = vzalloc(screen_size);
if (!screen_buffer)
return -ENOMEM;
info->fbops = &drm_fbdev_dma_shadowed_fb_ops;
/* screen */
info->flags |= FBINFO_VIRTFB; /* system memory */
info->flags |= FBINFO_READS_FAST; /* signal caching */
info->screen_buffer = screen_buffer;
info->fix.smem_len = screen_size;
fb_helper->fbdefio.delay = HZ / 20;
fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
info->fbdefio = &fb_helper->fbdefio;
ret = fb_deferred_io_init(info);
if (ret)
goto err_vfree;
return 0;
err_vfree:
vfree(screen_buffer);
return ret;
}
int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_client_dev *client = &fb_helper->client;
struct drm_device *dev = fb_helper->dev;
bool use_deferred_io = false;
struct drm_client_buffer *buffer;
struct drm_gem_dma_object *dma_obj;
struct drm_framebuffer *fb;
struct fb_info *info;
u32 format;
@ -152,19 +288,9 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
sizes->surface_height, format);
if (IS_ERR(buffer))
return PTR_ERR(buffer);
dma_obj = to_drm_gem_dma_obj(buffer->gem);
fb = buffer->fb;
/*
* Deferred I/O requires struct page for framebuffer memory,
* which is not guaranteed for all DMA ranges. We thus only
* install deferred I/O if we have a framebuffer that requires
* it.
*/
if (fb->funcs->dirty)
use_deferred_io = true;
ret = drm_client_buffer_vmap(buffer, &map);
if (ret) {
goto err_drm_client_buffer_delete;
@ -185,45 +311,12 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
drm_fb_helper_fill_info(info, fb_helper, sizes);
if (use_deferred_io)
info->fbops = &drm_fbdev_dma_deferred_fb_ops;
if (fb->funcs->dirty)
ret = drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(fb_helper, sizes);
else
info->fbops = &drm_fbdev_dma_fb_ops;
/* screen */
info->flags |= FBINFO_VIRTFB; /* system memory */
if (dma_obj->map_noncoherent)
info->flags |= FBINFO_READS_FAST; /* signal caching */
info->screen_size = sizes->surface_height * fb->pitches[0];
info->screen_buffer = map.vaddr;
if (!(info->flags & FBINFO_HIDE_SMEM_START)) {
if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer)))
info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
}
info->fix.smem_len = info->screen_size;
/*
* Only set up deferred I/O if the screen buffer supports
* it. If this disagrees with the previous test for ->dirty,
* mmap on the /dev/fb file might not work correctly.
*/
if (!is_vmalloc_addr(info->screen_buffer) && info->fix.smem_start) {
unsigned long pfn = info->fix.smem_start >> PAGE_SHIFT;
if (drm_WARN_ON(dev, !pfn_to_page(pfn)))
use_deferred_io = false;
}
/* deferred I/O */
if (use_deferred_io) {
fb_helper->fbdefio.delay = HZ / 20;
fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
info->fbdefio = &fb_helper->fbdefio;
ret = fb_deferred_io_init(info);
if (ret)
goto err_drm_fb_helper_release_info;
}
ret = drm_fbdev_dma_driver_fbdev_probe_tail(fb_helper, sizes);
if (ret)
goto err_drm_fb_helper_release_info;
return 0;

View File

@ -866,7 +866,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
encoder->base.base.id, encoder->base.name);
if (!mst_pipe_mask && dp128b132b_pipe_mask) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
/*
* If we don't have 8b/10b MST, but have more than one
@ -878,7 +878,8 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
* we don't expect MST to have been enabled at that point, and
* can assume it's SST.
*/
if (hweight8(dp128b132b_pipe_mask) > 1 || intel_dp->is_mst)
if (hweight8(dp128b132b_pipe_mask) > 1 ||
intel_dp_mst_encoder_active_links(dig_port))
mst_pipe_mask = dp128b132b_pipe_mask;
}
@ -4151,13 +4152,13 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
} else if (ddi_mode == TRANS_DDI_MODE_SELECT_DP_MST) {
intel_ddi_read_func_ctl_dp_mst(encoder, pipe_config, ddi_func_ctl);
} else if (ddi_mode == TRANS_DDI_MODE_SELECT_FDI_OR_128B132B && HAS_DP20(display)) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
/*
* If this is true, we know we're being called from mst stream
* encoder's ->get_config().
*/
if (intel_dp->is_mst)
if (intel_dp_mst_encoder_active_links(dig_port))
intel_ddi_read_func_ctl_dp_mst(encoder, pipe_config, ddi_func_ctl);
else
intel_ddi_read_func_ctl_dp_sst(encoder, pipe_config, ddi_func_ctl);

View File

@ -1,8 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only OR MIT
# Copyright (c) 2023 Imagination Technologies Ltd.
subdir-ccflags-y := -I$(src)
powervr-y := \
pvr_ccb.o \
pvr_cccb.o \

View File

@ -775,7 +775,6 @@ nouveau_connector_force(struct drm_connector *connector)
if (!nv_encoder) {
NV_ERROR(drm, "can't find encoder to force %s on!\n",
connector->name);
connector->status = connector_status_disconnected;
return;
}

View File

@ -67,7 +67,7 @@ static u16 lerp_u16(u16 a, u16 b, s64 t)
s64 delta = drm_fixp_mul(b_fp - a_fp, t);
return drm_fixp2int(a_fp + delta);
return drm_fixp2int_round(a_fp + delta);
}
static s64 get_lut_index(const struct vkms_color_lut *lut, u16 channel_value)
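drm_fixp2int_round() adds half of DRM_FIXED_ONE before truncating, i.e. round-half-up instead of floor. A simplified 31.32 fixed-point sketch (assumes a compiler with __int128):

#include <stdint.h>
#include <stdio.h>

/* simplified 31.32 fixed-point helpers in the style of drm_fixed.h */
static int64_t int2fixp(int64_t a)		{ return a << 32; }
static int64_t fixp_mul(int64_t a, int64_t b)	{ return (int64_t)(((__int128)a * b) >> 32); }
static int64_t fixp2int(int64_t a)		{ return a >> 32; }
static int64_t fixp2int_round(int64_t a)	{ return fixp2int(a + (1LL << 31)); }

int main(void)
{
	int64_t a = int2fixp(0), b = int2fixp(3);
	int64_t t = int2fixp(1) / 2;		/* t = 0.5 */
	int64_t v = a + fixp_mul(b - a, t);	/* 1.5 */

	printf("trunc=%lld round=%lld\n",
	       (long long)fixp2int(v), (long long)fixp2int_round(v));	/* 1 vs 2 */
	return 0;
}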

View File

@ -53,7 +53,6 @@
#define RING_CTL(base) XE_REG((base) + 0x3c)
#define RING_CTL_SIZE(size) ((size) - PAGE_SIZE) /* in bytes -> pages */
#define RING_CTL_SIZE(size) ((size) - PAGE_SIZE) /* in bytes -> pages */
#define RING_START_UDW(base) XE_REG((base) + 0x48)

View File

@ -1248,6 +1248,8 @@ static void __guc_exec_queue_fini_async(struct work_struct *w)
if (xe_exec_queue_is_lr(q))
cancel_work_sync(&ge->lr_tdr);
/* Confirm no work left behind accessing device structures */
cancel_delayed_work_sync(&ge->sched.base.work_tdr);
release_guc_id(guc, q);
xe_sched_entity_fini(&ge->entity);
xe_sched_fini(&ge->sched);

View File

@ -1689,7 +1689,7 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
stream->oa_buffer.format = &stream->oa->oa_formats[param->oa_format];
stream->sample = param->sample;
stream->periodic = param->period_exponent > 0;
stream->periodic = param->period_exponent >= 0;
stream->period_exponent = param->period_exponent;
stream->no_preempt = param->no_preempt;
stream->wait_num_reports = param->wait_num_reports;
@ -1970,6 +1970,7 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
}
param.xef = xef;
param.period_exponent = -1;
ret = xe_oa_user_extensions(oa, XE_OA_USER_EXTN_FROM_OPEN, data, 0, &param);
if (ret)
return ret;
@ -2024,7 +2025,7 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
goto err_exec_q;
}
if (param.period_exponent > 0) {
if (param.period_exponent >= 0) {
u64 oa_period, oa_freq_hz;
/* Requesting samples from OAG buffer is a privileged operation */
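The fix initializes period_exponent to -1 as a "not provided" sentinel, so a legitimate exponent of 0 is no longer rejected by the old `> 0` tests. The sentinel pattern in isolation:

#include <stdio.h>

struct params {
	int period_exponent;
};

int main(void)
{
	/* -1 marks "not provided", so an explicit exponent of 0 stays valid */
	struct params p = { .period_exponent = -1 };

	/* ... user extensions may overwrite p.period_exponent here ... */

	if (p.period_exponent >= 0)
		printf("periodic, exponent %d\n", p.period_exponent);
	else
		printf("not periodic\n");
	return 0;
}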

View File

@ -666,20 +666,33 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
/* Collect invalidated userptrs */
spin_lock(&vm->userptr.invalidated_lock);
xe_assert(vm->xe, list_empty(&vm->userptr.repin_list));
list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated,
userptr.invalidate_link) {
list_del_init(&uvma->userptr.invalidate_link);
list_move_tail(&uvma->userptr.repin_link,
&vm->userptr.repin_list);
list_add_tail(&uvma->userptr.repin_link,
&vm->userptr.repin_list);
}
spin_unlock(&vm->userptr.invalidated_lock);
/* Pin and move to temporary list */
/* Pin and move to bind list */
list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
userptr.repin_link) {
err = xe_vma_userptr_pin_pages(uvma);
if (err == -EFAULT) {
list_del_init(&uvma->userptr.repin_link);
/*
* We might have already done the pin once already, but
then had to retry before the re-bind happened, due to
* some other condition in the caller, but in the
* meantime the userptr got dinged by the notifier such
* that we need to revalidate here, but this time we hit
* the EFAULT. In such a case make sure we remove
* ourselves from the rebind list to avoid going down in
* flames.
*/
if (!list_empty(&uvma->vma.combined_links.rebind))
list_del_init(&uvma->vma.combined_links.rebind);
/* Wait for pending binds */
xe_vm_lock(vm, false);
@ -690,10 +703,10 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
err = xe_vm_invalidate_vma(&uvma->vma);
xe_vm_unlock(vm);
if (err)
return err;
break;
} else {
if (err < 0)
return err;
if (err)
break;
list_del_init(&uvma->userptr.repin_link);
list_move_tail(&uvma->vma.combined_links.rebind,
@ -701,7 +714,19 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
}
}
return 0;
if (err) {
down_write(&vm->userptr.notifier_lock);
spin_lock(&vm->userptr.invalidated_lock);
list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
userptr.repin_link) {
list_del_init(&uvma->userptr.repin_link);
list_move_tail(&uvma->userptr.invalidate_link,
&vm->userptr.invalidated);
}
spin_unlock(&vm->userptr.invalidated_lock);
up_write(&vm->userptr.notifier_lock);
}
return err;
}
/**
@ -1066,6 +1091,7 @@ static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED);
spin_lock(&vm->userptr.invalidated_lock);
xe_assert(vm->xe, list_empty(&to_userptr_vma(vma)->userptr.repin_link));
list_del(&to_userptr_vma(vma)->userptr.invalidate_link);
spin_unlock(&vm->userptr.invalidated_lock);
} else if (!xe_vma_is_null(vma)) {

View File

@ -378,6 +378,12 @@ static bool apple_is_non_apple_keyboard(struct hid_device *hdev)
return false;
}
static bool apple_is_omoton_kb066(struct hid_device *hdev)
{
return hdev->product == USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI &&
strcmp(hdev->name, "Bluetooth Keyboard") == 0;
}
static inline void apple_setup_key_translation(struct input_dev *input,
const struct apple_key_translation *table)
{
@ -546,9 +552,6 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
}
}
if (usage->hid == 0xc0301) /* Omoton KB066 quirk */
code = KEY_F6;
if (usage->code != code) {
input_event_with_scancode(input, usage->type, code, usage->hid, value);
@ -728,7 +731,7 @@ static int apple_input_configured(struct hid_device *hdev,
{
struct apple_sc *asc = hid_get_drvdata(hdev);
if ((asc->quirks & APPLE_HAS_FN) && !asc->fn_found) {
if (((asc->quirks & APPLE_HAS_FN) && !asc->fn_found) || apple_is_omoton_kb066(hdev)) {
hid_info(hdev, "Fn key not found (Apple Wireless Keyboard clone?), disabling Fn key handling\n");
asc->quirks &= ~APPLE_HAS_FN;
}

View File

@ -188,7 +188,7 @@ static int appleir_raw_event(struct hid_device *hid, struct hid_report *report,
static const u8 flatbattery[] = { 0x25, 0x87, 0xe0 };
unsigned long flags;
if (len != 5)
if (len != 5 || !(hid->claimed & HID_CLAIMED_INPUT))
goto out;
if (!memcmp(data, keydown, sizeof(keydown))) {

View File

@ -71,11 +71,9 @@
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/power_supply.h>
#include <linux/usb.h>
#include <linux/workqueue.h>
@ -120,6 +118,12 @@ enum {
CORSAIR_VOID_BATTERY_CHARGING = 5,
};
enum {
CORSAIR_VOID_ADD_BATTERY = 0,
CORSAIR_VOID_REMOVE_BATTERY = 1,
CORSAIR_VOID_UPDATE_BATTERY = 2,
};
static enum power_supply_property corsair_void_battery_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
@ -155,12 +159,12 @@ struct corsair_void_drvdata {
struct power_supply *battery;
struct power_supply_desc battery_desc;
struct mutex battery_mutex;
struct delayed_work delayed_status_work;
struct delayed_work delayed_firmware_work;
struct work_struct battery_remove_work;
struct work_struct battery_add_work;
unsigned long battery_work_flags;
struct work_struct battery_work;
};
/*
@ -260,11 +264,9 @@ success:
/* Inform power supply if battery values changed */
if (memcmp(&orig_battery_data, battery_data, sizeof(*battery_data))) {
scoped_guard(mutex, &drvdata->battery_mutex) {
if (drvdata->battery) {
power_supply_changed(drvdata->battery);
}
}
set_bit(CORSAIR_VOID_UPDATE_BATTERY,
&drvdata->battery_work_flags);
schedule_work(&drvdata->battery_work);
}
}
@ -536,29 +538,11 @@ static void corsair_void_firmware_work_handler(struct work_struct *work)
}
static void corsair_void_battery_remove_work_handler(struct work_struct *work)
static void corsair_void_add_battery(struct corsair_void_drvdata *drvdata)
{
struct corsair_void_drvdata *drvdata;
drvdata = container_of(work, struct corsair_void_drvdata,
battery_remove_work);
scoped_guard(mutex, &drvdata->battery_mutex) {
if (drvdata->battery) {
power_supply_unregister(drvdata->battery);
drvdata->battery = NULL;
}
}
}
static void corsair_void_battery_add_work_handler(struct work_struct *work)
{
struct corsair_void_drvdata *drvdata;
struct power_supply_config psy_cfg = {};
struct power_supply *new_supply;
drvdata = container_of(work, struct corsair_void_drvdata,
battery_add_work);
guard(mutex)(&drvdata->battery_mutex);
if (drvdata->battery)
return;
@ -583,16 +567,42 @@ static void corsair_void_battery_add_work_handler(struct work_struct *work)
drvdata->battery = new_supply;
}
static void corsair_void_battery_work_handler(struct work_struct *work)
{
struct corsair_void_drvdata *drvdata = container_of(work,
struct corsair_void_drvdata, battery_work);
bool add_battery = test_and_clear_bit(CORSAIR_VOID_ADD_BATTERY,
&drvdata->battery_work_flags);
bool remove_battery = test_and_clear_bit(CORSAIR_VOID_REMOVE_BATTERY,
&drvdata->battery_work_flags);
bool update_battery = test_and_clear_bit(CORSAIR_VOID_UPDATE_BATTERY,
&drvdata->battery_work_flags);
if (add_battery && !remove_battery) {
corsair_void_add_battery(drvdata);
} else if (remove_battery && !add_battery && drvdata->battery) {
power_supply_unregister(drvdata->battery);
drvdata->battery = NULL;
}
if (update_battery && drvdata->battery)
power_supply_changed(drvdata->battery);
}
static void corsair_void_headset_connected(struct corsair_void_drvdata *drvdata)
{
schedule_work(&drvdata->battery_add_work);
set_bit(CORSAIR_VOID_ADD_BATTERY, &drvdata->battery_work_flags);
schedule_work(&drvdata->battery_work);
schedule_delayed_work(&drvdata->delayed_firmware_work,
msecs_to_jiffies(100));
}
static void corsair_void_headset_disconnected(struct corsair_void_drvdata *drvdata)
{
schedule_work(&drvdata->battery_remove_work);
set_bit(CORSAIR_VOID_REMOVE_BATTERY, &drvdata->battery_work_flags);
schedule_work(&drvdata->battery_work);
corsair_void_set_unknown_wireless_data(drvdata);
corsair_void_set_unknown_batt(drvdata);
@ -678,13 +688,7 @@ static int corsair_void_probe(struct hid_device *hid_dev,
drvdata->battery_desc.get_property = corsair_void_battery_get_property;
drvdata->battery = NULL;
INIT_WORK(&drvdata->battery_remove_work,
corsair_void_battery_remove_work_handler);
INIT_WORK(&drvdata->battery_add_work,
corsair_void_battery_add_work_handler);
ret = devm_mutex_init(drvdata->dev, &drvdata->battery_mutex);
if (ret)
return ret;
INIT_WORK(&drvdata->battery_work, corsair_void_battery_work_handler);
ret = sysfs_create_group(&hid_dev->dev.kobj, &corsair_void_attr_group);
if (ret)
@ -721,8 +725,7 @@ static void corsair_void_remove(struct hid_device *hid_dev)
struct corsair_void_drvdata *drvdata = hid_get_drvdata(hid_dev);
hid_hw_stop(hid_dev);
cancel_work_sync(&drvdata->battery_remove_work);
cancel_work_sync(&drvdata->battery_add_work);
cancel_work_sync(&drvdata->battery_work);
if (drvdata->battery)
power_supply_unregister(drvdata->battery);
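The refactor collapses the separate add/remove work items into one handler driven by atomic flag bits, so racing requests are coalesced and resolved in a single pass. A user-space sketch of the coalescing idea, using C11 atomics in place of set_bit()/test_and_clear_bit():

#include <stdatomic.h>
#include <stdio.h>

enum { ADD = 1 << 0, REMOVE = 1 << 1, UPDATE = 1 << 2 };

static atomic_uint battery_work_flags;

/* event side: latch the request, then (in the driver) schedule_work() */
static void request(unsigned int f)
{
	atomic_fetch_or(&battery_work_flags, f);
}

/* work side: consume everything that accumulated since the last run */
static void battery_work(void)
{
	unsigned int f = atomic_exchange(&battery_work_flags, 0);

	if ((f & ADD) && !(f & REMOVE))
		puts("register battery");
	else if ((f & REMOVE) && !(f & ADD))
		puts("unregister battery");
	if (f & UPDATE)
		puts("notify power-supply change");
}

int main(void)
{
	request(ADD);
	request(UPDATE);
	battery_work();	/* register battery; notify power-supply change */
	return 0;
}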

View File

@ -3450,7 +3450,7 @@ static const char *keys[KEY_MAX + 1] = {
[KEY_MACRO_RECORD_START] = "MacroRecordStart",
[KEY_MACRO_RECORD_STOP] = "MacroRecordStop",
[KEY_MARK_WAYPOINT] = "MarkWayPoint", [KEY_MEDIA_REPEAT] = "MediaRepeat",
[KEY_MEDIA_TOP_MENU] = "MediaTopMenu", [KEY_MESSENGER] = "Messanger",
[KEY_MEDIA_TOP_MENU] = "MediaTopMenu", [KEY_MESSENGER] = "Messenger",
[KEY_NAV_CHART] = "NavChar", [KEY_NAV_INFO] = "NavInfo",
[KEY_NEWS] = "News", [KEY_NEXT_ELEMENT] = "NextElement",
[KEY_NEXT_FAVORITE] = "NextFavorite", [KEY_NOTIFICATION_CENTER] = "NotificationCenter",

View File

@ -268,11 +268,13 @@ static void cbas_ec_remove(struct platform_device *pdev)
mutex_unlock(&cbas_ec_reglock);
}
#ifdef CONFIG_ACPI
static const struct acpi_device_id cbas_ec_acpi_ids[] = {
{ "GOOG000B", 0 },
{ }
};
MODULE_DEVICE_TABLE(acpi, cbas_ec_acpi_ids);
#endif
#ifdef CONFIG_OF
static const struct of_device_id cbas_ec_of_match[] = {

View File

@ -457,13 +457,13 @@ static const struct joycon_ctlr_button_mapping snescon_button_mappings[] = {
};
static const struct joycon_ctlr_button_mapping gencon_button_mappings[] = {
{ BTN_A, JC_BTN_A, },
{ BTN_B, JC_BTN_B, },
{ BTN_C, JC_BTN_R, },
{ BTN_X, JC_BTN_X, }, /* MD/GEN 6B Only */
{ BTN_Y, JC_BTN_Y, }, /* MD/GEN 6B Only */
{ BTN_Z, JC_BTN_L, }, /* MD/GEN 6B Only */
{ BTN_SELECT, JC_BTN_ZR, },
{ BTN_WEST, JC_BTN_A, }, /* A */
{ BTN_SOUTH, JC_BTN_B, }, /* B */
{ BTN_EAST, JC_BTN_R, }, /* C */
{ BTN_TL, JC_BTN_X, }, /* X MD/GEN 6B Only */
{ BTN_NORTH, JC_BTN_Y, }, /* Y MD/GEN 6B Only */
{ BTN_TR, JC_BTN_L, }, /* Z MD/GEN 6B Only */
{ BTN_SELECT, JC_BTN_ZR, }, /* Mode */
{ BTN_START, JC_BTN_PLUS, },
{ BTN_MODE, JC_BTN_HOME, },
{ BTN_Z, JC_BTN_CAP, },

View File

@ -1327,11 +1327,11 @@ static void steam_remove(struct hid_device *hdev)
return;
}
hid_destroy_device(steam->client_hdev);
cancel_delayed_work_sync(&steam->mode_switch);
cancel_work_sync(&steam->work_connect);
cancel_work_sync(&steam->rumble_work);
cancel_work_sync(&steam->unregister_work);
hid_destroy_device(steam->client_hdev);
steam->client_hdev = NULL;
steam->client_opened = 0;
if (steam->quirks & STEAM_QUIRK_WIRELESS) {

View File

@ -290,7 +290,7 @@ static int i2c_hid_get_report(struct i2c_hid *ihid,
ihid->rawbuf, recv_len + sizeof(__le16));
if (error) {
dev_err(&ihid->client->dev,
"failed to set a report to device: %d\n", error);
"failed to get a report from device: %d\n", error);
return error;
}

View File

@ -832,9 +832,9 @@ static void hid_ishtp_cl_remove(struct ishtp_cl_device *cl_device)
hid_ishtp_cl);
dev_dbg(ishtp_device(cl_device), "%s\n", __func__);
hid_ishtp_cl_deinit(hid_ishtp_cl);
ishtp_put_device(cl_device);
ishtp_hid_remove(client_data);
hid_ishtp_cl_deinit(hid_ishtp_cl);
hid_ishtp_cl = NULL;

View File

@ -261,12 +261,14 @@ err_hid_data:
*/
void ishtp_hid_remove(struct ishtp_cl_data *client_data)
{
void *data;
int i;
for (i = 0; i < client_data->num_hid_devices; ++i) {
if (client_data->hid_sensor_hubs[i]) {
kfree(client_data->hid_sensor_hubs[i]->driver_data);
data = client_data->hid_sensor_hubs[i]->driver_data;
hid_destroy_device(client_data->hid_sensor_hubs[i]);
kfree(data);
client_data->hid_sensor_hubs[i] = NULL;
}
}

View File

@ -909,6 +909,8 @@ static int quickspi_restore(struct device *device)
thc_change_ltr_mode(qsdev->thc_hw, THC_LTR_MODE_ACTIVE);
qsdev->state = QUICKSPI_ENABLED;
return 0;
}

View File

@ -107,7 +107,7 @@ static int quickspi_get_device_descriptor(struct quickspi_device *qsdev)
return 0;
}
dev_err_once(qsdev->dev, "Unexpected intput report type: %d\n", input_rep_type);
dev_err_once(qsdev->dev, "Unexpected input report type: %d\n", input_rep_type);
return -EINVAL;
}

View File

@ -293,6 +293,7 @@ static irqreturn_t amd_asf_irq_handler(int irq, void *ptr)
amd_asf_update_ioport_target(piix4_smba, ASF_SLV_INTR, SMBHSTSTS, true);
}
iowrite32(irq, dev->eoi_base);
return IRQ_HANDLED;
}

View File

@ -10,6 +10,7 @@
* Rewritten for mainline by Binbin Zhou <zhoubinbin@loongson.cn>
*/
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/completion.h>
#include <linux/device.h>
@ -26,7 +27,8 @@
#include <linux/units.h>
/* I2C Registers */
#define I2C_LS2X_PRER 0x0 /* Freq Division Register(16 bits) */
#define I2C_LS2X_PRER_LO 0x0 /* Freq Division Low Byte Register */
#define I2C_LS2X_PRER_HI 0x1 /* Freq Division High Byte Register */
#define I2C_LS2X_CTR 0x2 /* Control Register */
#define I2C_LS2X_TXR 0x3 /* Transport Data Register */
#define I2C_LS2X_RXR 0x3 /* Receive Data Register */
@ -93,6 +95,7 @@ static irqreturn_t ls2x_i2c_isr(int this_irq, void *dev_id)
*/
static void ls2x_i2c_adjust_bus_speed(struct ls2x_i2c_priv *priv)
{
u16 val;
struct i2c_timings *t = &priv->i2c_t;
struct device *dev = priv->adapter.dev.parent;
u32 acpi_speed = i2c_acpi_find_bus_speed(dev);
@ -104,9 +107,14 @@ static void ls2x_i2c_adjust_bus_speed(struct ls2x_i2c_priv *priv)
else
t->bus_freq_hz = LS2X_I2C_FREQ_STD;
/* Calculate and set i2c frequency. */
writew(LS2X_I2C_PCLK_FREQ / (5 * t->bus_freq_hz) - 1,
priv->base + I2C_LS2X_PRER);
/*
* According to the chip manual, we can only access the registers as bytes,
* otherwise the high bits will be truncated.
* So set the I2C frequency with a sequential writeb() instead of writew().
*/
val = LS2X_I2C_PCLK_FREQ / (5 * t->bus_freq_hz) - 1;
writeb(FIELD_GET(GENMASK(7, 0), val), priv->base + I2C_LS2X_PRER_LO);
writeb(FIELD_GET(GENMASK(15, 8), val), priv->base + I2C_LS2X_PRER_HI);
}
static void ls2x_i2c_init(struct ls2x_i2c_priv *priv)
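The divider is still computed as a 16-bit value; only the register access is split into two byte writes, low byte first. A standalone sketch with invented clock numbers (the real LS2X_I2C_PCLK_FREQ constant is not shown in this hunk):

#include <stdint.h>
#include <stdio.h>

/* stand-in for the MMIO byte write; the driver uses writeb() */
static void reg_writeb(uint8_t v, unsigned int off)
{
	printf("reg[%u] = 0x%02x\n", off, v);
}

int main(void)
{
	uint32_t pclk = 50000000, bus_hz = 400000;	/* assumed clocks */
	uint16_t val = pclk / (5 * bus_hz) - 1;		/* 24 here */

	reg_writeb(val & 0xff, 0);	/* I2C_LS2X_PRER_LO */
	reg_writeb(val >> 8, 1);	/* I2C_LS2X_PRER_HI */
	return 0;
}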

View File

@ -2554,6 +2554,13 @@ static int npcm_i2c_probe_bus(struct platform_device *pdev)
if (irq < 0)
return irq;
/*
* Disable the interrupt to avoid the interrupt handler being triggered
* incorrectly by the asynchronous interrupt status since the machine
* might do a warm reset during the last smbus/i2c transfer session.
*/
npcm_i2c_int_enable(bus, false);
ret = devm_request_irq(bus->dev, irq, npcm_i2c_bus_irq, 0,
dev_name(bus->dev), bus);
if (ret)

View File

@ -56,6 +56,7 @@
#include <asm/intel-family.h>
#include <asm/mwait.h>
#include <asm/spec-ctrl.h>
#include <asm/tsc.h>
#include <asm/fpu/api.h>
#define INTEL_IDLE_VERSION "0.5.1"
@ -1799,6 +1800,9 @@ static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv)
if (intel_idle_state_needs_timer_stop(state))
state->flags |= CPUIDLE_FLAG_TIMER_STOP;
if (cx->type > ACPI_STATE_C1 && !boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
mark_tsc_unstable("TSC halts in idle");
state->enter = intel_idle;
state->enter_s2idle = intel_idle_s2idle;
}

View File

@ -2043,12 +2043,12 @@ static void set_dte_entry(struct amd_iommu *iommu,
make_clear_dte(dev_data, dte, &new);
if (domain->iop.mode != PAGE_MODE_NONE)
new.data[0] = iommu_virt_to_phys(domain->iop.root);
new.data[0] |= iommu_virt_to_phys(domain->iop.root);
new.data[0] |= (domain->iop.mode & DEV_ENTRY_MODE_MASK)
<< DEV_ENTRY_MODE_SHIFT;
new.data[0] |= DTE_FLAG_IR | DTE_FLAG_IW | DTE_FLAG_V;
new.data[0] |= DTE_FLAG_IR | DTE_FLAG_IW;
/*
* When SNP is enabled, we can only support TV=1 with non-zero domain ID.

View File

@ -2043,6 +2043,7 @@ int enable_drhd_fault_handling(unsigned int cpu)
/*
* Enable fault control interrupt.
*/
guard(rwsem_read)(&dmar_global_lock);
for_each_iommu(iommu, drhd) {
u32 fault_status;
int ret;

Some files were not shown because too many files have changed in this diff.