author	Vitaly Kuznetsov	2021-09-03 09:51:40 +0200
committer	Paolo Bonzini	2021-09-30 04:27:04 -0400
commit	baff59ccdc657d290be51b95b38ebe5de40036b4 (patch)
tree	377c18fe9ef9d8ceaa8aaa1293a48476c8adf353 /virt
parent	381cecc5d7b777ada7cdf12f5b0bf4caf43bf7aa (diff)
KVM: Pre-allocate cpumasks for kvm_make_all_cpus_request_except()
Allocating a cpumask dynamically in zalloc_cpumask_var() is not ideal: the allocation is somewhat slow and can (in theory, when CPUMASK_OFFSTACK is enabled) fail. kvm_make_all_cpus_request_except() already disables preemption, so we can use pre-allocated per-cpu cpumasks instead.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Reviewed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20210903075141.403071-8-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
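In essence the patch trades a GFP_ATOMIC allocation on every request for a per-cpu mask allocated once at init. A condensed sketch of the resulting fast path follows; kick_mask and kick_sketch() are illustrative stand-ins for KVM's cpu_kick_mask and kvm_make_all_cpus_request_except(), not the actual code:

	#include <linux/cpumask.h>
	#include <linux/percpu.h>
	#include <linux/smp.h>

	/* One kick mask per CPU, mirroring KVM's cpu_kick_mask. */
	static DEFINE_PER_CPU(cpumask_var_t, kick_mask);

	static bool kick_sketch(void)
	{
		struct cpumask *cpus;
		bool called;

		get_cpu();			/* disables preemption */

		/*
		 * Safe without locking: with preemption off, no other
		 * task can run on this CPU and reuse the same per-cpu
		 * mask underneath us.
		 */
		cpus = this_cpu_cpumask_var_ptr(kick_mask);
		cpumask_clear(cpus);	/* may hold bits from a prior call */

		/* ... set a bit in 'cpus' for every CPU needing an IPI ... */

		called = !cpumask_empty(cpus);	/* in KVM: kvm_kick_many_cpus() */
		put_cpu();			/* re-enables preemption */
		return called;
	}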
Diffstat (limited to 'virt')
-rw-r--r--	virt/kvm/kvm_main.c	| 29
1 file changed, 23 insertions(+), 6 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index bcadbc0a70f2..9cb0fd1723e6 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -155,6 +155,8 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
static unsigned long long kvm_createvm_count;
static unsigned long long kvm_active_vms;
+static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask);
+
__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
unsigned long start, unsigned long end)
{
@@ -313,14 +315,15 @@ bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
struct kvm_vcpu *except)
{
struct kvm_vcpu *vcpu;
- cpumask_var_t cpus;
+ struct cpumask *cpus;
bool called;
int i, me;
- zalloc_cpumask_var(&cpus, GFP_ATOMIC);
-
me = get_cpu();
+ cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
+ cpumask_clear(cpus);
+
kvm_for_each_vcpu(i, vcpu, kvm) {
if (vcpu == except)
continue;
@@ -330,7 +333,6 @@ bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
put_cpu();
- free_cpumask_var(cpus);
return called;
}
@@ -5560,9 +5562,17 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
goto out_free_3;
}
+ for_each_possible_cpu(cpu) {
+ if (!alloc_cpumask_var_node(&per_cpu(cpu_kick_mask, cpu),
+ GFP_KERNEL, cpu_to_node(cpu))) {
+ r = -ENOMEM;
+ goto out_free_4;
+ }
+ }
+
r = kvm_async_pf_init();
if (r)
- goto out_free;
+ goto out_free_5;
kvm_chardev_ops.owner = module;
kvm_vm_fops.owner = module;
@@ -5588,7 +5598,10 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
out_unreg:
kvm_async_pf_deinit();
-out_free:
+out_free_5:
+ for_each_possible_cpu(cpu)
+ free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
+out_free_4:
kmem_cache_destroy(kvm_vcpu_cache);
out_free_3:
unregister_reboot_notifier(&kvm_reboot_notifier);
@@ -5608,8 +5621,12 @@ EXPORT_SYMBOL_GPL(kvm_init);
void kvm_exit(void)
{
+ int cpu;
+
debugfs_remove_recursive(kvm_debugfs_dir);
misc_deregister(&kvm_dev);
+ for_each_possible_cpu(cpu)
+ free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
kmem_cache_destroy(kvm_vcpu_cache);
kvm_async_pf_deinit();
unregister_syscore_ops(&kvm_syscore_ops);
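The init/exit hunks above boil down to a straightforward per-cpu alloc/free lifecycle. A self-contained sketch of that lifecycle, where kick_masks_init() and kick_masks_exit() are hypothetical helpers standing in for the code the patch adds to kvm_init() and kvm_exit():

	#include <linux/cpumask.h>
	#include <linux/percpu.h>
	#include <linux/topology.h>
	#include <linux/gfp.h>
	#include <linux/errno.h>

	static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask);

	/* Allocate one mask per possible CPU, on that CPU's home NUMA node. */
	static int kick_masks_init(void)
	{
		int cpu;

		for_each_possible_cpu(cpu) {
			if (!alloc_cpumask_var_node(&per_cpu(cpu_kick_mask, cpu),
						    GFP_KERNEL, cpu_to_node(cpu)))
				goto err;
		}
		return 0;
	err:
		/* Freeing every possible CPU is fine after a partial failure:
		 * unallocated slots are NULL with CONFIG_CPUMASK_OFFSTACK=y,
		 * and free_cpumask_var() is a no-op otherwise. */
		for_each_possible_cpu(cpu)
			free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
		return -ENOMEM;
	}

	static void kick_masks_exit(void)
	{
		int cpu;

		for_each_possible_cpu(cpu)
			free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
	}

This is why the patch's out_free_5 label can walk all possible CPUs even when alloc_cpumask_var_node() failed partway through: the free path tolerates masks that were never allocated.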