 virt/kvm/kvm_main.c | 18 +++++++++++++++---
 1 file changed, 15 insertions(+), 3 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 490c8cb8cc8d..e95e7a9e4d53 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -235,9 +235,13 @@ static void ack_flush(void *_completed)
 {
 }
 
-static inline bool kvm_kick_many_cpus(const struct cpumask *cpus, bool wait)
+static inline bool kvm_kick_many_cpus(cpumask_var_t tmp, bool wait)
 {
-	if (unlikely(!cpus))
+	const struct cpumask *cpus;
+
+	if (likely(cpumask_available(tmp)))
+		cpus = tmp;
+	else
 		cpus = cpu_online_mask;
 
 	if (cpumask_empty(cpus))
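
The hunk above shows only the head of the function. Assuming the unmodified
tail matches the upstream file at this revision (it is not part of the diff),
the full kvm_kick_many_cpus() reads as follows after the patch; only the
selection of 'cpus' is new, while the empty-mask check and the IPI via
smp_call_function_many() are unchanged:

static inline bool kvm_kick_many_cpus(cpumask_var_t tmp, bool wait)
{
	const struct cpumask *cpus;

	/* Fall back to all online CPUs if the caller's mask allocation failed. */
	if (likely(cpumask_available(tmp)))
		cpus = tmp;
	else
		cpus = cpu_online_mask;

	/* Nothing to kick. */
	if (cpumask_empty(cpus))
		return false;

	/* Send the (optionally synchronous) ack_flush IPI to every CPU in 'cpus'. */
	smp_call_function_many(cpus, ack_flush, NULL, wait);
	return true;
}
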
@@ -268,6 +272,14 @@ bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
 			continue;
 
 		/*
+		 * tmp can be "unavailable" if cpumasks are allocated off stack
+		 * as allocation of the mask is deliberately not fatal and is
+		 * handled by falling back to kicking all online CPUs.
+		 */
+		if (!cpumask_available(tmp))
+			continue;
+
+		/*
 		 * Note, the vCPU could get migrated to a different pCPU at any
 		 * point after kvm_request_needs_ipi(), which could result in
 		 * sending an IPI to the previous pCPU. But, that's ok because
@@ -278,7 +290,7 @@ bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
 		 * were reading SPTEs _before_ any changes were finalized. See
 		 * kvm_vcpu_kick() for more details on handling requests.
 		 */
-		if (tmp != NULL && kvm_request_needs_ipi(vcpu, req)) {
+		if (kvm_request_needs_ipi(vcpu, req)) {
 			cpu = READ_ONCE(vcpu->cpu);
 			if (cpu != -1 && cpu != me)
 				__cpumask_set_cpu(cpu, tmp);
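
The cpumask_available() checks only matter with CONFIG_CPUMASK_OFFSTACK=y,
where cpumask_var_t is a pointer and its allocation can fail. The caller is
not part of this diff; the sketch below, modeled on the upstream
kvm_make_all_cpus_request_except() wrapper around this revision (its exact
signature and argument order are an assumption, not shown in the patch),
illustrates the deliberately non-fatal allocation the new comment refers to:

/* Sketch of the caller pattern; not part of the patch itself. */
bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
				      struct kvm_vcpu *except)
{
	cpumask_var_t cpus;
	bool called;

	/*
	 * Ignore allocation failure on purpose: with
	 * CONFIG_CPUMASK_OFFSTACK=y a failed zalloc_cpumask_var() leaves
	 * 'cpus' "unavailable", the per-vCPU loop then skips the mask, and
	 * kvm_kick_many_cpus() falls back to cpu_online_mask.
	 */
	zalloc_cpumask_var(&cpus, GFP_ATOMIC);

	called = kvm_make_vcpus_request_mask(kvm, req, except, NULL, cpus);

	/* No-op when the allocation failed or masks live on the stack. */
	free_cpumask_var(cpus);
	return called;
}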