
Merge branch 'kvm-arm64/pv-cpuid' into kvmarm/next

* kvm-arm64/pv-cpuid:
  : Paravirtualized implementation ID, courtesy of Shameer Kolothum
  :
  : Big-little has historically been a pain in the ass to virtualize. The
  : implementation ID (MIDR, REVIDR, AIDR) of a vCPU can change at the whim
  : of vCPU scheduling. This can be particularly annoying when the guest
  : needs to know the underlying implementation to mitigate errata.
  :
  : "Hyperscalers" face a similar scheduling problem, where VMs may freely
  : migrate between hosts in a pool of heterogeneous hardware. And yes, our
  : server-class friends are equally riddled with errata too.
  :
  : In the absence of an architected solution to this wart on the ecosystem,
  : introduce support for paravirtualizing the implementation exposed
  : to a VM, allowing the VMM to describe the pool of implementations that a
  : VM may be exposed to due to scheduling/migration.
  :
  : Userspace is expected to intercept and handle these hypercalls using the
  : SMCCC filter UAPI, should it choose to do so.
  smccc: kvm_guest: Fix kernel builds for 32 bit arm
  KVM: selftests: Add test for KVM_REG_ARM_VENDOR_HYP_BMAP_2
  smccc/kvm_guest: Enable errata based on implementation CPUs
  arm64: Make _midr_in_range_list() an exported function
  KVM: arm64: Introduce KVM_REG_ARM_VENDOR_HYP_BMAP_2
  KVM: arm64: Specify hypercall ABI for retrieving target implementations
  arm64: Modify _midr_range() functions to read MIDR/REVIDR internally

Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
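As a sketch of the userspace side referenced above (not part of this series): a VMM that wants to handle these hypercalls itself could install an SMCCC filter entry covering the two function IDs via the existing KVM_ARM_VM_SMCCC_CTRL device attribute, and service the resulting KVM_EXIT_HYPERCALL exits. The helper name and "vm_fd" are assumptions local to this example.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Sketch only: route the two paravirtualized implementation ID hypercalls
 * (0xC6000040 DISCOVER_IMPL_VER, 0xC6000041 DISCOVER_IMPL_CPUS) to
 * userspace. "vm_fd" is an already-created KVM VM file descriptor.
 */
static int forward_impl_id_calls(int vm_fd)
{
        struct kvm_smccc_filter filter = {
                .base           = 0xC6000040,   /* first function ID in the range */
                .nr_functions   = 2,            /* covers 0xC6000040 and 0xC6000041 */
                .action         = KVM_SMCCC_FILTER_FWD_TO_USER,
        };
        struct kvm_device_attr attr = {
                .group  = KVM_ARM_VM_SMCCC_CTRL,
                .attr   = KVM_ARM_VM_SMCCC_FILTER,
                .addr   = (uintptr_t)&filter,
        };

        /* Matching guest calls now exit to userspace as KVM_EXIT_HYPERCALL */
        return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}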
commit d300b0168e
Author: Oliver Upton <oliver.upton@linux.dev>
Date:   2025-03-19 14:53:09 -07:00

20 changed files with 329 additions and 62 deletions. Changed paths:
  Documentation/virt/kvm/arm
  arch/arm64
  drivers/clocksource
  drivers/firmware/smccc
  drivers/hwtracing/coresight
  include/linux
  tools/arch/arm64/include/uapi/asm
  tools/testing/selftests/kvm/arm64

@@ -116,7 +116,7 @@ The pseudo-firmware bitmap register are as follows:
    ARM DEN0057A.

 * KVM_REG_ARM_VENDOR_HYP_BMAP:
-    Controls the bitmap of the Vendor specific Hypervisor Service Calls.
+    Controls the bitmap of the Vendor specific Hypervisor Service Calls[0-63].

   The following bits are accepted:
@@ -127,6 +127,19 @@ The pseudo-firmware bitmap register are as follows:
     Bit-1: KVM_REG_ARM_VENDOR_HYP_BIT_PTP:
       The bit represents the Precision Time Protocol KVM service.

+* KVM_REG_ARM_VENDOR_HYP_BMAP_2:
+    Controls the bitmap of the Vendor specific Hypervisor Service Calls[64-127].
+
+  The following bits are accepted:
+
+    Bit-0: KVM_REG_ARM_VENDOR_HYP_BIT_DISCOVER_IMPL_VER
+      This represents the ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_VER_FUNC_ID
+      function-id. This is reset to 0.
+
+    Bit-1: KVM_REG_ARM_VENDOR_HYP_BIT_DISCOVER_IMPL_CPUS
+      This represents the ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_CPUS_FUNC_ID
+      function-id. This is reset to 0.
+
 Errors:
 =======

@@ -142,3 +142,62 @@ region is equal to the memory protection granule advertised by
 |                     |          |    +---------------------------------------------+
 |                     |          |    | ``INVALID_PARAMETER (-3)``                  |
 +---------------------+----------+----+---------------------------------------------+
+
+``ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_VER_FUNC_ID``
+-------------------------------------------------------
+
+Request the target CPU implementation version information and the number of target
+implementations for the Guest VM.
+
++---------------------+-------------------------------------------------------------+
+| Presence:           | Optional; KVM/ARM64 Guests only                             |
++---------------------+-------------------------------------------------------------+
+| Calling convention: | HVC64                                                       |
++---------------------+----------+--------------------------------------------------+
+| Function ID:        | (uint32) | 0xC6000040                                       |
++---------------------+----------+--------------------------------------------------+
+| Arguments:          | None                                                        |
++---------------------+----------+----+---------------------------------------------+
+| Return Values:      | (int64)  | R0 | ``SUCCESS (0)``                             |
+|                     |          |    +---------------------------------------------+
+|                     |          |    | ``NOT_SUPPORTED (-1)``                      |
+|                     +----------+----+---------------------------------------------+
+|                     | (uint64) | R1 | Bits [63:32] Reserved/Must be zero          |
+|                     |          |    +---------------------------------------------+
+|                     |          |    | Bits [31:16] Major version                  |
+|                     |          |    +---------------------------------------------+
+|                     |          |    | Bits [15:0] Minor version                   |
+|                     +----------+----+---------------------------------------------+
+|                     | (uint64) | R2 | Number of target implementations            |
+|                     +----------+----+---------------------------------------------+
+|                     | (uint64) | R3 | Reserved / Must be zero                     |
++---------------------+----------+----+---------------------------------------------+
+
+``ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_CPUS_FUNC_ID``
+-------------------------------------------------------
+
+Request the target CPU implementation information for the Guest VM. The Guest kernel
+will use this information to enable the associated errata.
+
++---------------------+-------------------------------------------------------------+
+| Presence:           | Optional; KVM/ARM64 Guests only                             |
++---------------------+-------------------------------------------------------------+
+| Calling convention: | HVC64                                                       |
++---------------------+----------+--------------------------------------------------+
+| Function ID:        | (uint32) | 0xC6000041                                       |
++---------------------+----------+----+---------------------------------------------+
+| Arguments:          | (uint64) | R1 | selected implementation index               |
+|                     +----------+----+---------------------------------------------+
+|                     | (uint64) | R2 | Reserved / Must be zero                     |
+|                     +----------+----+---------------------------------------------+
+|                     | (uint64) | R3 | Reserved / Must be zero                     |
++---------------------+----------+----+---------------------------------------------+
+| Return Values:      | (int64)  | R0 | ``SUCCESS (0)``                             |
+|                     |          |    +---------------------------------------------+
+|                     |          |    | ``INVALID_PARAMETER (-3)``                  |
+|                     +----------+----+---------------------------------------------+
+|                     | (uint64) | R1 | MIDR_EL1 of the selected implementation     |
+|                     +----------+----+---------------------------------------------+
+|                     | (uint64) | R2 | REVIDR_EL1 of the selected implementation   |
+|                     +----------+----+---------------------------------------------+
+|                     | (uint64) | R3 | AIDR_EL1 of the selected implementation     |
++---------------------+----------+----+---------------------------------------------+
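For illustration (an editor's sketch, not from the patch set): decoding the DISCOVER_IMPL_VER return values per the table above. The helper names are local to this example.

#include <stdint.h>

/* R1 of DISCOVER_IMPL_VER: bits [31:16] major version, bits [15:0] minor. */
static inline uint16_t impl_ver_major(uint64_t r1) { return (r1 >> 16) & 0xffff; }
static inline uint16_t impl_ver_minor(uint64_t r1) { return r1 & 0xffff; }

/*
 * Example: R0 = 0 (SUCCESS), R1 = 0x00010000, R2 = 3 means the interface
 * is v1.0 and three target implementations can be queried with
 * DISCOVER_IMPL_CPUS, passing indices 0..2 in R1 of that call.
 */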

@@ -231,6 +231,16 @@
 #define read_cpuid(reg)                 read_sysreg_s(SYS_ ## reg)

+/*
+ * The CPU ID never changes at run time, so we might as well tell the
+ * compiler that it's constant. Use this function to read the CPU ID
+ * rather than directly reading processor_id or read_cpuid() directly.
+ */
+static inline u32 __attribute_const__ read_cpuid_id(void)
+{
+        return read_cpuid(MIDR_EL1);
+}
+
 /*
  * Represent a range of MIDR values for a given CPU model and a
  * range of variant/revision values.
@@ -266,30 +276,14 @@ static inline bool midr_is_cpu_model_range(u32 midr, u32 model, u32 rv_min,
         return _model == model && rv >= rv_min && rv <= rv_max;
 }

-static inline bool is_midr_in_range(u32 midr, struct midr_range const *range)
-{
-        return midr_is_cpu_model_range(midr, range->model,
-                                       range->rv_min, range->rv_max);
-}
-
+struct target_impl_cpu {
+        u64 midr;
+        u64 revidr;
+        u64 aidr;
+};
+
-static inline bool
-is_midr_in_range_list(u32 midr, struct midr_range const *ranges)
-{
-        while (ranges->model)
-                if (is_midr_in_range(midr, ranges++))
-                        return true;
-        return false;
-}
-
-/*
- * The CPU ID never changes at run time, so we might as well tell the
- * compiler that it's constant. Use this function to read the CPU ID
- * rather than directly reading processor_id or read_cpuid() directly.
- */
-static inline u32 __attribute_const__ read_cpuid_id(void)
-{
-        return read_cpuid(MIDR_EL1);
-}
+bool cpu_errata_set_target_impl(u64 num, void *impl_cpus);
+bool is_midr_in_range_list(struct midr_range const *ranges);

 static inline u64 __attribute_const__ read_cpuid_mpidr(void)
 {
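For reference, a hedged sketch of how callers build the lists the new is_midr_in_range_list() prototype consumes; the CPU models here are arbitrary examples, and the empty initializer is the sentinel that the implementation's while (ranges->model) walk stops on.

static const struct midr_range example_unaffected_cpus[] = {
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
        MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 2, 0),       /* r0p0 .. r2p0 */
        {},                                             /* sentinel: model == 0 */
};

/* ... if (is_midr_in_range_list(example_unaffected_cpus)) ... */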

@@ -6,6 +6,7 @@
 void kvm_init_hyp_services(void);
 bool kvm_arm_hyp_service_available(u32 func_id);
+void kvm_arm_target_impl_cpu_init(void);

 #ifdef CONFIG_ARM_PKVM_GUEST
 void pkvm_init_hyp_services(void);

@@ -238,7 +238,8 @@ struct kvm_arch_memory_slot {
 struct kvm_smccc_features {
         unsigned long std_bmap;
         unsigned long std_hyp_bmap;
-        unsigned long vendor_hyp_bmap;
+        unsigned long vendor_hyp_bmap; /* Function numbers 0-63 */
+        unsigned long vendor_hyp_bmap_2; /* Function numbers 64-127 */
 };

 typedef unsigned int pkvm_handle_t;

@@ -101,8 +101,7 @@ static inline bool kaslr_requires_kpti(void)
         if (IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) {
                 extern const struct midr_range cavium_erratum_27456_cpus[];

-                if (is_midr_in_range_list(read_cpuid_id(),
-                                          cavium_erratum_27456_cpus))
+                if (is_midr_in_range_list(cavium_erratum_27456_cpus))
                         return false;
         }

@@ -372,6 +372,7 @@ enum {
 #endif
 };

+/* Vendor hyper call function numbers 0-63 */
 #define KVM_REG_ARM_VENDOR_HYP_BMAP     KVM_REG_ARM_FW_FEAT_BMAP_REG(2)

 enum {
@@ -382,6 +383,17 @@ enum {
 #endif
 };

+/* Vendor hyper call function numbers 64-127 */
+#define KVM_REG_ARM_VENDOR_HYP_BMAP_2   KVM_REG_ARM_FW_FEAT_BMAP_REG(3)
+
+enum {
+        KVM_REG_ARM_VENDOR_HYP_BIT_DISCOVER_IMPL_VER = 0,
+        KVM_REG_ARM_VENDOR_HYP_BIT_DISCOVER_IMPL_CPUS = 1,
+#ifdef __KERNEL__
+        KVM_REG_ARM_VENDOR_HYP_BMAP_2_BIT_COUNT,
+#endif
+};
+
 /* Device Control API on vm fd */
 #define KVM_ARM_VM_SMCCC_CTRL           0
 #define KVM_ARM_VM_SMCCC_FILTER         0

@@ -14,31 +14,85 @@
 #include <asm/kvm_asm.h>
 #include <asm/smp_plat.h>

+static u64 target_impl_cpu_num;
+static struct target_impl_cpu *target_impl_cpus;
+
+bool cpu_errata_set_target_impl(u64 num, void *impl_cpus)
+{
+        if (target_impl_cpu_num || !num || !impl_cpus)
+                return false;
+
+        target_impl_cpu_num = num;
+        target_impl_cpus = impl_cpus;
+        return true;
+}
+
+static inline bool is_midr_in_range(struct midr_range const *range)
+{
+        int i;
+
+        if (!target_impl_cpu_num)
+                return midr_is_cpu_model_range(read_cpuid_id(), range->model,
+                                               range->rv_min, range->rv_max);
+
+        for (i = 0; i < target_impl_cpu_num; i++) {
+                if (midr_is_cpu_model_range(target_impl_cpus[i].midr,
+                                            range->model,
+                                            range->rv_min, range->rv_max))
+                        return true;
+        }
+        return false;
+}
+
+bool is_midr_in_range_list(struct midr_range const *ranges)
+{
+        while (ranges->model)
+                if (is_midr_in_range(ranges++))
+                        return true;
+        return false;
+}
+EXPORT_SYMBOL_GPL(is_midr_in_range_list);
+
 static bool __maybe_unused
-is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
+__is_affected_midr_range(const struct arm64_cpu_capabilities *entry,
+                         u32 midr, u32 revidr)
 {
         const struct arm64_midr_revidr *fix;
-        u32 midr = read_cpuid_id(), revidr;
-
-        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-        if (!is_midr_in_range(midr, &entry->midr_range))
+
+        if (!is_midr_in_range(&entry->midr_range))
                 return false;

         midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
-        revidr = read_cpuid(REVIDR_EL1);
         for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
                 if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
                         return false;

         return true;
 }

+static bool __maybe_unused
+is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
+{
+        int i;
+
+        if (!target_impl_cpu_num) {
+                WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+                return __is_affected_midr_range(entry, read_cpuid_id(),
+                                                read_cpuid(REVIDR_EL1));
+        }
+
+        for (i = 0; i < target_impl_cpu_num; i++) {
+                if (__is_affected_midr_range(entry, target_impl_cpus[i].midr,
+                                             target_impl_cpus[i].revidr))
+                        return true;
+        }
+        return false;
+}
+
 static bool __maybe_unused
 is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
                             int scope)
 {
         WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-        return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
+        return is_midr_in_range_list(entry->midr_range_list);
 }

 static bool __maybe_unused
@@ -186,12 +240,11 @@ static bool __maybe_unused
 has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
                                 int scope)
 {
-        u32 midr = read_cpuid_id();
         bool has_dic = read_cpuid_cachetype() & BIT(CTR_EL0_DIC_SHIFT);
         const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);

         WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-        return is_midr_in_range(midr, &range) && has_dic;
+        return is_midr_in_range(&range) && has_dic;
 }

 #ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
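For readers following the masking in __is_affected_midr_range() and midr_is_cpu_model_range(), the MIDR_EL1 layout being compared is architectural. A reference sketch (the mask value mirrors the kernel's MIDR_CPU_MODEL_MASK; the helper name is hypothetical):

#include <stdint.h>

/*
 * MIDR_EL1: [31:24] Implementer, [23:20] Variant, [19:16] Architecture,
 *           [15:4] PartNum, [3:0] Revision.
 * A "model" comparison ignores Variant and Revision, so one midr_range
 * can span several revisions of the same part.
 */
static inline int midr_matches_model(uint32_t midr, uint32_t model)
{
        const uint32_t model_mask = 0xff0ffff0;  /* Implementer | Arch | PartNum */

        return (midr & model_mask) == (model & model_mask);
}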

@@ -86,6 +86,7 @@
 #include <asm/kvm_host.h>
 #include <asm/mmu_context.h>
 #include <asm/mte.h>
+#include <asm/hypervisor.h>
 #include <asm/processor.h>
 #include <asm/smp.h>
 #include <asm/sysreg.h>
@@ -1793,7 +1794,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
         char const *str = "kpti command line option";
         bool meltdown_safe;

-        meltdown_safe = is_midr_in_range_list(read_cpuid_id(), kpti_safe_list);
+        meltdown_safe = is_midr_in_range_list(kpti_safe_list);

         /* Defer to CPU feature registers */
         if (has_cpuid_feature(entry, scope))
@@ -1863,7 +1864,7 @@ static bool has_nv1(const struct arm64_cpu_capabilities *entry, int scope)
         return (__system_matches_cap(ARM64_HAS_NESTED_VIRT) &&
                 !(has_cpuid_feature(entry, scope) ||
-                  is_midr_in_range_list(read_cpuid_id(), nv1_ni_list)));
+                  is_midr_in_range_list(nv1_ni_list)));
 }

 #if defined(ID_AA64MMFR0_EL1_TGRAN_LPA2) && defined(ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_LPA2)
@@ -2046,7 +2047,7 @@ static bool cpu_has_broken_dbm(void)
                 {},
         };

-        return is_midr_in_range_list(read_cpuid_id(), cpus);
+        return is_midr_in_range_list(cpus);
 }

 static bool cpu_can_use_dbm(const struct arm64_cpu_capabilities *cap)
@@ -3691,6 +3692,7 @@ unsigned long cpu_get_elf_hwcap3(void)
 static void __init setup_boot_cpu_capabilities(void)
 {
+        kvm_arm_target_impl_cpu_init();
         /*
          * The boot CPU's feature register values have been recorded. Detect
          * boot cpucaps and local cpucaps for the boot CPU, then enable and

@@ -49,6 +49,7 @@ PROVIDE(__pi_arm64_sw_feature_override = arm64_sw_feature_override);
 PROVIDE(__pi_arm64_use_ng_mappings     = arm64_use_ng_mappings);
 #ifdef CONFIG_CAVIUM_ERRATUM_27456
 PROVIDE(__pi_cavium_erratum_27456_cpus = cavium_erratum_27456_cpus);
+PROVIDE(__pi_is_midr_in_range_list     = is_midr_in_range_list);
 #endif
 PROVIDE(__pi__ctype                    = _ctype);
 PROVIDE(__pi_memstart_offset_seed      = memstart_offset_seed);

@@ -172,7 +172,7 @@ static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
                 return SPECTRE_UNAFFECTED;

         /* Alternatively, we have a list of unaffected CPUs */
-        if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
+        if (is_midr_in_range_list(spectre_v2_safe_list))
                 return SPECTRE_UNAFFECTED;

         return SPECTRE_VULNERABLE;
@@ -331,7 +331,7 @@ bool has_spectre_v3a(const struct arm64_cpu_capabilities *entry, int scope)
         };

         WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-        return is_midr_in_range_list(read_cpuid_id(), spectre_v3a_unsafe_list);
+        return is_midr_in_range_list(spectre_v3a_unsafe_list);
 }

 void spectre_v3a_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
@@ -475,7 +475,7 @@ static enum mitigation_state spectre_v4_get_cpu_hw_mitigation_state(void)
                 { /* sentinel */ },
         };

-        if (is_midr_in_range_list(read_cpuid_id(), spectre_v4_safe_list))
+        if (is_midr_in_range_list(spectre_v4_safe_list))
                 return SPECTRE_UNAFFECTED;

         /* CPU features are detected first */
@@ -878,13 +878,13 @@ u8 spectre_bhb_loop_affected(int scope)
                 {},
         };

-        if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
+        if (is_midr_in_range_list(spectre_bhb_k32_list))
                 k = 32;
-        else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
+        else if (is_midr_in_range_list(spectre_bhb_k24_list))
                 k = 24;
-        else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list))
+        else if (is_midr_in_range_list(spectre_bhb_k11_list))
                 k = 11;
-        else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
+        else if (is_midr_in_range_list(spectre_bhb_k8_list))
                 k = 8;

         max_bhb_k = max(max_bhb_k, k);
@@ -926,8 +926,7 @@ static bool is_spectre_bhb_fw_affected(int scope)
                 MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
                 {},
         };
-        bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
-                                                 spectre_bhb_firmware_mitigated_list);
+        bool cpu_in_list = is_midr_in_range_list(spectre_bhb_firmware_mitigated_list);

         if (scope != SCOPE_LOCAL_CPU)
                 return system_affected;

@@ -15,6 +15,8 @@
         GENMASK(KVM_REG_ARM_STD_HYP_BMAP_BIT_COUNT - 1, 0)
 #define KVM_ARM_SMCCC_VENDOR_HYP_FEATURES \
         GENMASK(KVM_REG_ARM_VENDOR_HYP_BMAP_BIT_COUNT - 1, 0)
+#define KVM_ARM_SMCCC_VENDOR_HYP_FEATURES_2 \
+        GENMASK(KVM_REG_ARM_VENDOR_HYP_BMAP_2_BIT_COUNT - 1, 0)

 static void kvm_ptp_get_time(struct kvm_vcpu *vcpu, u64 *val)
 {
@@ -360,6 +362,8 @@ int kvm_smccc_call_handler(struct kvm_vcpu *vcpu)
                 break;
         case ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID:
                 val[0] = smccc_feat->vendor_hyp_bmap;
+                /* Function numbers 2-63 are reserved for pKVM for now */
+                val[2] = smccc_feat->vendor_hyp_bmap_2;
                 break;
         case ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID:
                 kvm_ptp_get_time(vcpu, val);
@@ -387,6 +391,7 @@ static const u64 kvm_arm_fw_reg_ids[] = {
         KVM_REG_ARM_STD_BMAP,
         KVM_REG_ARM_STD_HYP_BMAP,
         KVM_REG_ARM_VENDOR_HYP_BMAP,
+        KVM_REG_ARM_VENDOR_HYP_BMAP_2,
 };

 void kvm_arm_init_hypercalls(struct kvm *kvm)
@@ -497,6 +502,9 @@ int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
         case KVM_REG_ARM_VENDOR_HYP_BMAP:
                 val = READ_ONCE(smccc_feat->vendor_hyp_bmap);
                 break;
+        case KVM_REG_ARM_VENDOR_HYP_BMAP_2:
+                val = READ_ONCE(smccc_feat->vendor_hyp_bmap_2);
+                break;
         default:
                 return -ENOENT;
         }
@@ -527,6 +535,10 @@ static int kvm_arm_set_fw_reg_bmap(struct kvm_vcpu *vcpu, u64 reg_id, u64 val)
                 fw_reg_bmap = &smccc_feat->vendor_hyp_bmap;
                 fw_reg_features = KVM_ARM_SMCCC_VENDOR_HYP_FEATURES;
                 break;
+        case KVM_REG_ARM_VENDOR_HYP_BMAP_2:
+                fw_reg_bmap = &smccc_feat->vendor_hyp_bmap_2;
+                fw_reg_features = KVM_ARM_SMCCC_VENDOR_HYP_FEATURES_2;
+                break;
         default:
                 return -ENOENT;
         }
@@ -633,6 +645,7 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
         case KVM_REG_ARM_STD_BMAP:
         case KVM_REG_ARM_STD_HYP_BMAP:
         case KVM_REG_ARM_VENDOR_HYP_BMAP:
+        case KVM_REG_ARM_VENDOR_HYP_BMAP_2:
                 return kvm_arm_set_fw_reg_bmap(vcpu, reg->id, val);
         default:
                 return -ENOENT;
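A sketch of the userspace side of these register handlers (not part of the diff): a VMM could read the new bitmap register and enable both DISCOVER_IMPL bits through the ONE_REG API. "vcpu_fd" and the helper name are assumptions for this example.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int enable_discover_impl(int vcpu_fd)
{
        uint64_t val = 0;
        struct kvm_one_reg reg = {
                .id   = KVM_REG_ARM_VENDOR_HYP_BMAP_2,
                .addr = (uintptr_t)&val,
        };

        if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg))
                return -1;

        /* Bit-0: DISCOVER_IMPL_VER, Bit-1: DISCOVER_IMPL_CPUS */
        val |= (1ULL << KVM_REG_ARM_VENDOR_HYP_BIT_DISCOVER_IMPL_VER) |
               (1ULL << KVM_REG_ARM_VENDOR_HYP_BIT_DISCOVER_IMPL_CPUS);

        return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}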

@@ -632,7 +632,7 @@ static const struct midr_range broken_seis[] = {
 static bool vgic_v3_broken_seis(void)
 {
         return ((kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_EL2_SEIS) &&
-                is_midr_in_range_list(read_cpuid_id(), broken_seis));
+                is_midr_in_range_list(broken_seis));
 }

 /**

@@ -842,7 +842,7 @@ static u64 __arch_timer_check_delta(void)
                 {},
         };

-        if (is_midr_in_range_list(read_cpuid_id(), broken_cval_midrs)) {
+        if (is_midr_in_range_list(broken_cval_midrs)) {
                 pr_warn_once("Broken CNTx_CVAL_EL1, using 31 bit TVAL instead.\n");
                 return CLOCKSOURCE_MASK(31);
         }

@@ -6,8 +6,11 @@
 #include <linux/bitmap.h>
 #include <linux/cache.h>
 #include <linux/kernel.h>
+#include <linux/memblock.h>
 #include <linux/string.h>
+#include <uapi/linux/psci.h>
+#include <asm/hypervisor.h>

 static DECLARE_BITMAP(__kvm_arm_hyp_services, ARM_SMCCC_KVM_NUM_FUNCS) __ro_after_init = { };
@@ -51,3 +54,66 @@ bool kvm_arm_hyp_service_available(u32 func_id)
         return test_bit(func_id, __kvm_arm_hyp_services);
 }
 EXPORT_SYMBOL_GPL(kvm_arm_hyp_service_available);
+
+#ifdef CONFIG_ARM64
+void __init kvm_arm_target_impl_cpu_init(void)
+{
+        int i;
+        u32 ver;
+        u64 max_cpus;
+        struct arm_smccc_res res;
+        struct target_impl_cpu *target;
+
+        if (!kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_VER) ||
+            !kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_CPUS))
+                return;
+
+        arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_VER_FUNC_ID,
+                             0, &res);
+        if (res.a0 != SMCCC_RET_SUCCESS)
+                return;
+
+        /* Version info is in lower 32 bits and is in SMCCC_VERSION format */
+        ver = lower_32_bits(res.a1);
+        if (PSCI_VERSION_MAJOR(ver) != 1) {
+                pr_warn("Unsupported target CPU implementation version v%d.%d\n",
+                        PSCI_VERSION_MAJOR(ver), PSCI_VERSION_MINOR(ver));
+                return;
+        }
+
+        if (!res.a2) {
+                pr_warn("No target implementation CPUs specified\n");
+                return;
+        }
+
+        max_cpus = res.a2;
+        target = memblock_alloc(sizeof(*target) * max_cpus, __alignof__(*target));
+        if (!target) {
+                pr_warn("Not enough memory for struct target_impl_cpu\n");
+                return;
+        }
+
+        for (i = 0; i < max_cpus; i++) {
+                arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_CPUS_FUNC_ID,
+                                     i, &res);
+                if (res.a0 != SMCCC_RET_SUCCESS) {
+                        pr_warn("Discovering target implementation CPUs failed\n");
+                        goto mem_free;
+                }
+                target[i].midr = res.a1;
+                target[i].revidr = res.a2;
+                target[i].aidr = res.a3;
+        }
+
+        if (!cpu_errata_set_target_impl(max_cpus, target)) {
+                pr_warn("Failed to set target implementation CPUs\n");
+                goto mem_free;
+        }
+
+        pr_info("Number of target implementation CPUs is %llu\n", max_cpus);
+        return;
+
+mem_free:
+        memblock_free(target, sizeof(*target) * max_cpus);
+}
+#endif

@@ -1216,7 +1216,7 @@ static void etm4_fixup_wrong_ccitmin(struct etmv4_drvdata *drvdata)
          * recorded value for 'drvdata->ccitmin' to workaround
          * this problem.
          */
-        if (is_midr_in_range_list(read_cpuid_id(), etm_wrong_ccitmin_cpus)) {
+        if (is_midr_in_range_list(etm_wrong_ccitmin_cpus)) {
                 if (drvdata->ccitmin == 256)
                         drvdata->ccitmin = 4;
         }

@@ -179,6 +179,9 @@
 #define ARM_SMCCC_KVM_FUNC_PKVM_RESV_62         62
 #define ARM_SMCCC_KVM_FUNC_PKVM_RESV_63         63
 /* End of pKVM hypercall range */
+#define ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_VER    64
+#define ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_CPUS   65
+
 #define ARM_SMCCC_KVM_FUNC_FEATURES_2           127
 #define ARM_SMCCC_KVM_NUM_FUNCS                 128
@@ -225,6 +228,18 @@
                    ARM_SMCCC_OWNER_VENDOR_HYP, \
                    ARM_SMCCC_KVM_FUNC_MMIO_GUARD)

+#define ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_VER_FUNC_ID \
+        ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+                           ARM_SMCCC_SMC_64, \
+                           ARM_SMCCC_OWNER_VENDOR_HYP, \
+                           ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_VER)
+
+#define ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_CPUS_FUNC_ID \
+        ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+                           ARM_SMCCC_SMC_64, \
+                           ARM_SMCCC_OWNER_VENDOR_HYP, \
+                           ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_CPUS)
+
 /* ptp_kvm counter type ID */
 #define KVM_PTP_VIRT_COUNTER            0
 #define KVM_PTP_PHYS_COUNTER            1
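The function IDs in the documentation tables above fall directly out of the SMCCC encoding: fast-call bit 31, SMC64 bit 30, owner 6 (vendor hypervisor) in bits [29:24], and the function number in the low bits. A compile-time check of that arithmetic (an editor's sketch; the function name is hypothetical):

#include <linux/arm-smccc.h>
#include <linux/build_bug.h>

static inline void check_discover_impl_func_ids(void)
{
        /* 0x80000000 | 0x40000000 | (6 << 24) | 64 == 0xC6000040 */
        BUILD_BUG_ON(ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_VER_FUNC_ID != 0xC6000040);
        /* ...and function number 65 gives 0xC6000041 */
        BUILD_BUG_ON(ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_CPUS_FUNC_ID != 0xC6000041);
}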

@@ -374,6 +374,7 @@ enum {
 #endif
 };

+/* Vendor hyper call function numbers 0-63 */
 #define KVM_REG_ARM_VENDOR_HYP_BMAP     KVM_REG_ARM_FW_FEAT_BMAP_REG(2)

 enum {
@@ -384,6 +385,17 @@ enum {
 #endif
 };

+/* Vendor hyper call function numbers 64-127 */
+#define KVM_REG_ARM_VENDOR_HYP_BMAP_2   KVM_REG_ARM_FW_FEAT_BMAP_REG(3)
+
+enum {
+        KVM_REG_ARM_VENDOR_HYP_BIT_DISCOVER_IMPL_VER = 0,
+        KVM_REG_ARM_VENDOR_HYP_BIT_DISCOVER_IMPL_CPUS = 1,
+#ifdef __KERNEL__
+        KVM_REG_ARM_VENDOR_HYP_BMAP_2_BIT_COUNT,
+#endif
+};
+
 /* Device Control API on vm fd */
 #define KVM_ARM_VM_SMCCC_CTRL           0
 #define KVM_ARM_VM_SMCCC_FILTER         0

@@ -332,6 +332,7 @@ static __u64 base_regs[] = {
         KVM_REG_ARM_FW_FEAT_BMAP_REG(0),        /* KVM_REG_ARM_STD_BMAP */
         KVM_REG_ARM_FW_FEAT_BMAP_REG(1),        /* KVM_REG_ARM_STD_HYP_BMAP */
         KVM_REG_ARM_FW_FEAT_BMAP_REG(2),        /* KVM_REG_ARM_VENDOR_HYP_BMAP */
+        KVM_REG_ARM_FW_FEAT_BMAP_REG(3),        /* KVM_REG_ARM_VENDOR_HYP_BMAP_2 */
         ARM64_SYS_REG(3, 3, 14, 3, 1),  /* CNTV_CTL_EL0 */
         ARM64_SYS_REG(3, 3, 14, 3, 2),  /* CNTV_CVAL_EL0 */
         ARM64_SYS_REG(3, 3, 14, 0, 2),

@@ -21,22 +21,31 @@
 #define KVM_REG_ARM_STD_BMAP_BIT_MAX            0
 #define KVM_REG_ARM_STD_HYP_BMAP_BIT_MAX        0
 #define KVM_REG_ARM_VENDOR_HYP_BMAP_BIT_MAX     1
+#define KVM_REG_ARM_VENDOR_HYP_BMAP_2_BIT_MAX   1
+
+#define KVM_REG_ARM_STD_BMAP_RESET_VAL          FW_REG_ULIMIT_VAL(KVM_REG_ARM_STD_BMAP_BIT_MAX)
+#define KVM_REG_ARM_STD_HYP_BMAP_RESET_VAL      FW_REG_ULIMIT_VAL(KVM_REG_ARM_STD_HYP_BMAP_BIT_MAX)
+#define KVM_REG_ARM_VENDOR_HYP_BMAP_RESET_VAL   FW_REG_ULIMIT_VAL(KVM_REG_ARM_VENDOR_HYP_BMAP_BIT_MAX)
+#define KVM_REG_ARM_VENDOR_HYP_BMAP_2_RESET_VAL 0

 struct kvm_fw_reg_info {
         uint64_t reg;           /* Register definition */
         uint64_t max_feat_bit;  /* Bit that represents the upper limit of the feature-map */
+        uint64_t reset_val;     /* Reset value for the register */
 };

 #define FW_REG_INFO(r)                  \
         {                               \
                 .reg            = r,    \
                 .max_feat_bit   = r##_BIT_MAX,  \
+                .reset_val      = r##_RESET_VAL \
         }

 static const struct kvm_fw_reg_info fw_reg_info[] = {
         FW_REG_INFO(KVM_REG_ARM_STD_BMAP),
         FW_REG_INFO(KVM_REG_ARM_STD_HYP_BMAP),
         FW_REG_INFO(KVM_REG_ARM_VENDOR_HYP_BMAP),
+        FW_REG_INFO(KVM_REG_ARM_VENDOR_HYP_BMAP_2),
 };
enum test_stage {
@@ -171,22 +180,39 @@ static void test_fw_regs_before_vm_start(struct kvm_vcpu *vcpu)
         for (i = 0; i < ARRAY_SIZE(fw_reg_info); i++) {
                 const struct kvm_fw_reg_info *reg_info = &fw_reg_info[i];
+                uint64_t set_val;

-                /* First 'read' should be an upper limit of the features supported */
+                /* First 'read' should be the reset value for the reg */
                 val = vcpu_get_reg(vcpu, reg_info->reg);
-                TEST_ASSERT(val == FW_REG_ULIMIT_VAL(reg_info->max_feat_bit),
-                            "Expected all the features to be set for reg: 0x%lx; expected: 0x%lx; read: 0x%lx",
-                            reg_info->reg, FW_REG_ULIMIT_VAL(reg_info->max_feat_bit), val);
+                TEST_ASSERT(val == reg_info->reset_val,
+                            "Unexpected reset value for reg: 0x%lx; expected: 0x%lx; read: 0x%lx",
+                            reg_info->reg, reg_info->reset_val, val);

-                /* Test a 'write' by disabling all the features of the register map */
-                ret = __vcpu_set_reg(vcpu, reg_info->reg, 0);
+                if (reg_info->reset_val)
+                        set_val = 0;
+                else
+                        set_val = FW_REG_ULIMIT_VAL(reg_info->max_feat_bit);
+
+                ret = __vcpu_set_reg(vcpu, reg_info->reg, set_val);
                 TEST_ASSERT(ret == 0,
+                            "Failed to %s all the features of reg: 0x%lx; ret: %d",
+                            (set_val ? "set" : "clear"), reg_info->reg, errno);

                 val = vcpu_get_reg(vcpu, reg_info->reg);
+                TEST_ASSERT(val == set_val,
+                            "Expected all the features to be %s for reg: 0x%lx",
+                            (set_val ? "set" : "cleared"), reg_info->reg);

+                /*
+                 * If the reg has been set, clear it as test_fw_regs_after_vm_start()
+                 * expects it to be cleared.
+                 */
+                if (set_val) {
+                        ret = __vcpu_set_reg(vcpu, reg_info->reg, 0);
+                        TEST_ASSERT(ret == 0,
+                                    "Failed to clear all the features of reg: 0x%lx; ret: %d",
+                                    reg_info->reg, errno);
+
+                        val = vcpu_get_reg(vcpu, reg_info->reg);
+                        TEST_ASSERT(val == 0,
+                                    "Expected all the features to be cleared for reg: 0x%lx", reg_info->reg);
+                }
/*
* Test enabling a feature that's not supported.