KVM: arm64: Fix typos
Fix typos, most reported by "codespell arch/arm64". Only touches comments,
no code changes.

Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
Cc: James Morse <james.morse@arm.com>
Cc: Suzuki K Poulose <suzuki.poulose@arm.com>
Cc: Zenghui Yu <yuzenghui@huawei.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: linux-arm-kernel@lists.infradead.org
Cc: kvmarm@lists.linux.dev
Reviewed-by: Zenghui Yu <yuzenghui@huawei.com>
Reviewed-by: Randy Dunlap <rdunlap@infradead.org>
Link: https://lore.kernel.org/r/20240103231605.1801364-6-helgaas@kernel.org
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
commit 75841d89f3
parent 284851ee5c
@@ -70,7 +70,7 @@ DECLARE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
 /*
  * Without an __arch_swab32(), we fall back to ___constant_swab32(), but the
  * static inline can allow the compiler to out-of-line this. KVM always wants
- * the macro version as its always inlined.
+ * the macro version as it's always inlined.
  */
 #define __kvm_swab32(x)	___constant_swab32(x)
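Why the distinction the comment draws matters: a static inline is still a function, so the compiler may out-of-line it, whereas a macro is expanded textually and cannot be. Below is a minimal user-space sketch modelled on (but not identical to) the kernel's ___constant_swab32(); the constant_swab32 name is made up for illustration.

#include <stdint.h>
#include <stdio.h>

/* Byte-swap a 32-bit value with pure masking and shifting; the
 * preprocessor expands this in place, so there is no function body
 * the compiler could choose to leave out-of-line. */
#define constant_swab32(x) ((uint32_t)(			\
	(((uint32_t)(x) & 0x000000ffUL) << 24) |	\
	(((uint32_t)(x) & 0x0000ff00UL) <<  8) |	\
	(((uint32_t)(x) & 0x00ff0000UL) >>  8) |	\
	(((uint32_t)(x) & 0xff000000UL) >> 24)))

int main(void)
{
	printf("%08x\n", constant_swab32(0x12345678)); /* prints 78563412 */
	return 0;
}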
@@ -745,7 +745,7 @@ static void kvm_timer_vcpu_load_nested_switch(struct kvm_vcpu *vcpu,
 	WARN_ON_ONCE(ret);
 
 	/*
-	 * The virtual offset behaviour is "interresting", as it
+	 * The virtual offset behaviour is "interesting", as it
 	 * always applies when HCR_EL2.E2H==0, but only when
 	 * accessed from EL1 when HCR_EL2.E2H==1. So make sure we
 	 * track E2H when putting the HV timer in "direct" mode.
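The rule in that comment compresses to a one-line predicate. A hypothetical helper, not taken from the kernel, that encodes it:

#include <stdbool.h>

/* Illustrative only: the virtual offset always applies when
 * HCR_EL2.E2H == 0; with E2H == 1 it applies only to accesses
 * made from EL1. */
static bool virtual_offset_applies(bool e2h, bool accessed_from_el1)
{
	return !e2h || accessed_from_el1;
}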
@@ -117,7 +117,7 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
 }
 
 /*
- * Called just before entering the guest once we are no longer preemptable
+ * Called just before entering the guest once we are no longer preemptible
  * and interrupts are disabled. If we have managed to run anything using
  * FP while we were preemptible (such as off the back of an interrupt),
  * then neither the host nor the guest own the FP hardware (and it was the
@@ -110,7 +110,7 @@ SYM_FUNC_END(__host_enter)
 * u64 elr, u64 par);
 */
SYM_FUNC_START(__hyp_do_panic)
-	/* Prepare and exit to the host's panic funciton. */
+	/* Prepare and exit to the host's panic function. */
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
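For context, the value that mov/msr sequence builds can be written out in C. This is a sketch only; the PSR_* bit values mirror the kernel's AArch64 UAPI definitions, and the point is that all four DAIF mask bits are set and the mode field selects EL1h, so the host panic path runs with every interrupt class masked.

#include <stdint.h>

#define PSR_MODE_EL1h	0x00000005U	/* EL1, using SP_EL1 */
#define PSR_F_BIT	0x00000040U	/* FIQ mask */
#define PSR_I_BIT	0x00000080U	/* IRQ mask */
#define PSR_A_BIT	0x00000100U	/* SError mask */
#define PSR_D_BIT	0x00000200U	/* Debug mask */

/* Illustrative constant: the SPSR_EL2 image written before ERET. */
static const uint32_t panic_spsr =
	PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT | PSR_MODE_EL1h;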
@@ -155,7 +155,7 @@ int hyp_back_vmemmap(phys_addr_t back)
 		start = hyp_memory[i].base;
 		start = ALIGN_DOWN((u64)hyp_phys_to_page(start), PAGE_SIZE);
 		/*
-		 * The begining of the hyp_vmemmap region for the current
+		 * The beginning of the hyp_vmemmap region for the current
 		 * memblock may already be backed by the page backing the end
 		 * the previous region, so avoid mapping it twice.
 		 */
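The situation the comment describes is easiest to see with concrete numbers. An illustrative sketch with made-up addresses, using an ALIGN_DOWN modelled on the kernel macro: when the previous region's vmemmap ends mid-page, the page backing its tail is the same page the current region's vmemmap starts in, so the second mapping must skip it.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE		4096ULL
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))

int main(void)
{
	uint64_t prev_end  = 0x100000a00;	/* end of previous region's vmemmap */
	uint64_t cur_start = 0x100000a00;	/* start of current region's vmemmap */

	/* The page already backing the tail of the previous region... */
	uint64_t backed = ALIGN_DOWN(prev_end, PAGE_SIZE);

	/* ...is the page the current region would start in: map it once. */
	if (ALIGN_DOWN(cur_start, PAGE_SIZE) == backed)
		printf("page %#llx already backed, skip it\n",
		       (unsigned long long)backed);
	return 0;
}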
@@ -408,7 +408,7 @@ static void *admit_host_page(void *arg)
 	return pop_hyp_memcache(host_mc, hyp_phys_to_virt);
 }
 
-/* Refill our local memcache by poping pages from the one provided by the host. */
+/* Refill our local memcache by popping pages from the one provided by the host. */
 int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
 		    struct kvm_hyp_memcache *host_mc)
 {
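The popping the fixed comment refers to is a LIFO transfer between two caches. A hedged sketch using a toy linked-stack cache; the kernel's struct kvm_hyp_memcache differs in detail (it links pages by physical address and translates on pop), so treat this purely as the shape of the refill loop.

struct page_node {
	struct page_node *next;
};

struct memcache {
	struct page_node *head;
	unsigned long nr_pages;
};

/* Pop one page off the cache, or return NULL if it is empty. */
static struct page_node *mc_pop(struct memcache *mc)
{
	struct page_node *p = mc->head;

	if (p) {
		mc->head = p->next;
		mc->nr_pages--;
	}
	return p;
}

/* Push one page onto the cache. */
static void mc_push(struct memcache *mc, struct page_node *p)
{
	p->next = mc->head;
	mc->head = p;
	mc->nr_pages++;
}

/* Move pages from donor to mc until mc holds min_pages; -1 if donor runs dry. */
static int refill(struct memcache *mc, unsigned long min_pages,
		  struct memcache *donor)
{
	while (mc->nr_pages < min_pages) {
		struct page_node *p = mc_pop(donor);

		if (!p)
			return -1;
		mc_push(mc, p);
	}
	return 0;
}

int main(void)
{
	struct page_node pool[4] = { 0 };
	struct memcache donor = { 0 }, local = { 0 };

	for (int i = 0; i < 4; i++)
		mc_push(&donor, &pool[i]);

	return refill(&local, 3, &donor);	/* 0: local now holds 3 pages */
}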
@@ -134,7 +134,7 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, u32 addr)
 	if (vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE) {
 		fsr = DFSR_LPAE | DFSR_FSC_EXTABT_LPAE;
 	} else {
-		/* no need to shuffle FS[4] into DFSR[10] as its 0 */
+		/* no need to shuffle FS[4] into DFSR[10] as it's 0 */
 		fsr = DFSR_FSC_EXTABT_nLPAE;
 	}
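Background for the fixed comment: in the short-descriptor (non-LPAE) DFSR format the 5-bit fault status is split, with FS[3:0] in DFSR[3:0] and FS[4] in DFSR[10]. A hypothetical helper, not from the kernel, makes the bit movement explicit; the non-LPAE external-abort code is 0b01000, whose FS[4] is 0, which is why the code above can skip the shuffle.

#include <stdint.h>

/* Pack a 5-bit fault status into a short-descriptor DFSR:
 * FS[3:0] -> DFSR[3:0], FS[4] -> DFSR[10].
 * fs_to_dfsr(0x08) == 0x008; fs_to_dfsr(0x16) == 0x406. */
static uint32_t fs_to_dfsr(uint32_t fs)
{
	return (fs & 0xf) | ((fs & 0x10) << 6);
}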
@@ -309,7 +309,7 @@ int vgic_init(struct kvm *kvm)
 	vgic_lpi_translation_cache_init(kvm);
 
 	/*
-	 * If we have GICv4.1 enabled, unconditionnaly request enable the
+	 * If we have GICv4.1 enabled, unconditionally request enable the
 	 * v4 support so that we get HW-accelerated vSGIs. Otherwise, only
 	 * enable it if we present a virtual ITS to the guest.
 	 */
@@ -1342,8 +1342,8 @@ static int vgic_its_cmd_handle_inv(struct kvm *kvm, struct vgic_its *its,
 }
 
 /**
- * vgic_its_invall - invalidate all LPIs targetting a given vcpu
- * @vcpu: the vcpu for which the RD is targetted by an invalidation
+ * vgic_its_invall - invalidate all LPIs targeting a given vcpu
+ * @vcpu: the vcpu for which the RD is targeted by an invalidation
  *
  * Contrary to the INVALL command, this targets a RD instead of a
  * collection, and we don't need to hold the its_lock, since no ITS is