author    Sean Christopherson  2021-11-04 00:25:02 +0000
committer Paolo Bonzini         2021-11-18 02:15:19 -0500
commit    bda44d844758c70c8dc1478e6fc9c25efa90c5a7 (patch)
tree      07ad02921f801906d9ff13c5d9e53a91d88231ff /virt
parent    574c3c55e969096cea770eda3375ff35ccf91702 (diff)
KVM: Ensure local memslot copies operate on up-to-date arch-specific data
When modifying memslots, snapshot the "old" memslot and copy it to the
"new" memslot's arch data after (re)acquiring slots_arch_lock.  x86 can
change a memslot's arch data while memslot updates are in-progress so
long as it holds slots_arch_lock, thus snapshotting a memslot without
holding the lock can result in the consumption of stale data.

Fixes: b10a038e84d1 ("KVM: mmu: Add slots_arch_lock for memslot arch fields")
Cc: stable@vger.kernel.org
Cc: Ben Gardon <bgardon@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20211104002531.1176691-2-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
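The locking rule the fix enforces can be illustrated outside of KVM.  The
following is a minimal userspace sketch, assuming simplified stand-in types
(struct memslot, struct arch_data, set_memslot, update_arch_data are all
hypothetical, not the real KVM definitions): the slot snapshot and the copy
of its arch data happen only while holding the same lock that arch-data
writers take, so the copy cannot observe stale data.

	/* Sketch only: stand-in types, not KVM's structures or API. */
	#include <pthread.h>
	#include <stdio.h>
	#include <string.h>

	struct arch_data { int rmap_allocated; };     /* stand-in for per-arch slot data */
	struct memslot  { int id; struct arch_data arch; };

	static pthread_mutex_t slots_arch_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct memslot live_slot = { .id = 0 };

	/* Writer side: arch data may change at any time, but only under the lock. */
	static void *update_arch_data(void *unused)
	{
		(void)unused;
		pthread_mutex_lock(&slots_arch_lock);
		live_slot.arch.rmap_allocated = 1;
		pthread_mutex_unlock(&slots_arch_lock);
		return NULL;
	}

	/* Update side: snapshot the old slot and copy its arch data *after*
	 * acquiring the lock, mirroring the ordering the patch imposes. */
	static void set_memslot(struct memslot *new_slot)
	{
		struct memslot old;

		pthread_mutex_lock(&slots_arch_lock);
		old = live_slot;                                  /* snapshot under the lock */
		memcpy(&new_slot->arch, &old.arch, sizeof(old.arch)); /* arch copy under the lock */
		live_slot = *new_slot;
		pthread_mutex_unlock(&slots_arch_lock);

		printf("new slot sees rmap_allocated=%d\n", new_slot->arch.rmap_allocated);
	}

	int main(void)
	{
		pthread_t t;
		struct memslot new_slot = { .id = 0 };

		pthread_create(&t, NULL, update_arch_data, NULL);
		pthread_join(t, NULL);
		set_memslot(&new_slot);
		return 0;
	}

Build with "cc -pthread".  Taking the snapshot before the lock, as the old
code effectively did, would allow the concurrent arch update to be lost.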
Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/kvm_main.c  47
1 file changed, 31 insertions(+), 16 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 9646bb9112c1..2104fc29cdd2 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1531,11 +1531,10 @@ static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old,
static int kvm_set_memslot(struct kvm *kvm,
const struct kvm_userspace_memory_region *mem,
- struct kvm_memory_slot *old,
struct kvm_memory_slot *new, int as_id,
enum kvm_mr_change change)
{
- struct kvm_memory_slot *slot;
+ struct kvm_memory_slot *slot, old;
struct kvm_memslots *slots;
int r;
@@ -1566,7 +1565,7 @@ static int kvm_set_memslot(struct kvm *kvm,
* Note, the INVALID flag needs to be in the appropriate entry
* in the freshly allocated memslots, not in @old or @new.
*/
- slot = id_to_memslot(slots, old->id);
+ slot = id_to_memslot(slots, new->id);
slot->flags |= KVM_MEMSLOT_INVALID;
/*
@@ -1597,6 +1596,26 @@ static int kvm_set_memslot(struct kvm *kvm,
kvm_copy_memslots(slots, __kvm_memslots(kvm, as_id));
}
+ /*
+ * Make a full copy of the old memslot, the pointer will become stale
+ * when the memslots are re-sorted by update_memslots(), and the old
+ * memslot needs to be referenced after calling update_memslots(), e.g.
+ * to free its resources and for arch specific behavior. This needs to
+ * happen *after* (re)acquiring slots_arch_lock.
+ */
+ slot = id_to_memslot(slots, new->id);
+ if (slot) {
+ old = *slot;
+ } else {
+ WARN_ON_ONCE(change != KVM_MR_CREATE);
+ memset(&old, 0, sizeof(old));
+ old.id = new->id;
+ old.as_id = as_id;
+ }
+
+ /* Copy the arch-specific data, again after (re)acquiring slots_arch_lock. */
+ memcpy(&new->arch, &old.arch, sizeof(old.arch));
+
r = kvm_arch_prepare_memory_region(kvm, new, mem, change);
if (r)
goto out_slots;
@@ -1604,14 +1623,18 @@ static int kvm_set_memslot(struct kvm *kvm,
update_memslots(slots, new, change);
slots = install_new_memslots(kvm, as_id, slots);
- kvm_arch_commit_memory_region(kvm, mem, old, new, change);
+ kvm_arch_commit_memory_region(kvm, mem, &old, new, change);
+
+ /* Free the old memslot's metadata. Note, this is the full copy!!! */
+ if (change == KVM_MR_DELETE)
+ kvm_free_memslot(kvm, &old);
kvfree(slots);
return 0;
out_slots:
if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
- slot = id_to_memslot(slots, old->id);
+ slot = id_to_memslot(slots, new->id);
slot->flags &= ~KVM_MEMSLOT_INVALID;
slots = install_new_memslots(kvm, as_id, slots);
} else {
@@ -1626,7 +1649,6 @@ static int kvm_delete_memslot(struct kvm *kvm,
struct kvm_memory_slot *old, int as_id)
{
struct kvm_memory_slot new;
- int r;
if (!old->npages)
return -EINVAL;
@@ -1639,12 +1661,7 @@ static int kvm_delete_memslot(struct kvm *kvm,
*/
new.as_id = as_id;
- r = kvm_set_memslot(kvm, mem, old, &new, as_id, KVM_MR_DELETE);
- if (r)
- return r;
-
- kvm_free_memslot(kvm, old);
- return 0;
+ return kvm_set_memslot(kvm, mem, &new, as_id, KVM_MR_DELETE);
}
/*
@@ -1718,7 +1735,6 @@ int __kvm_set_memory_region(struct kvm *kvm,
if (!old.npages) {
change = KVM_MR_CREATE;
new.dirty_bitmap = NULL;
- memset(&new.arch, 0, sizeof(new.arch));
} else { /* Modify an existing slot. */
if ((new.userspace_addr != old.userspace_addr) ||
(new.npages != old.npages) ||
@@ -1732,9 +1748,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
else /* Nothing to change. */
return 0;
- /* Copy dirty_bitmap and arch from the current memslot. */
+ /* Copy dirty_bitmap from the current memslot. */
new.dirty_bitmap = old.dirty_bitmap;
- memcpy(&new.arch, &old.arch, sizeof(new.arch));
}
if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
@@ -1760,7 +1775,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
bitmap_set(new.dirty_bitmap, 0, new.npages);
}
- r = kvm_set_memslot(kvm, mem, &old, &new, as_id, change);
+ r = kvm_set_memslot(kvm, mem, &new, as_id, change);
if (r)
goto out_bitmap;
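The new comment in the diff notes that the pointer "will become stale when
the memslots are re-sorted by update_memslots()", which is why a full copy
of the old memslot is taken rather than a pointer.  A minimal standalone
sketch of that effect, using hypothetical stand-in types and qsort() in
place of KVM's actual re-sorting:

	/* Sketch only: stand-in types, not KVM's memslot layout or sort. */
	#include <stdio.h>
	#include <stdlib.h>

	struct slot { int id; unsigned long base_gfn; };

	/* Sort slots by base_gfn in descending order. */
	static int cmp_gfn_desc(const void *a, const void *b)
	{
		const struct slot *x = a, *y = b;
		return (x->base_gfn < y->base_gfn) - (x->base_gfn > y->base_gfn);
	}

	int main(void)
	{
		struct slot slots[] = {
			{ .id = 0, .base_gfn = 0x400 },
			{ .id = 1, .base_gfn = 0x100 },
		};
		struct slot *ptr  = &slots[0];  /* pointer into the array          */
		struct slot copy  = slots[0];   /* full copy survives the re-sort  */

		/* Moving slot 0 to a lower gfn changes its position once sorted. */
		slots[0].base_gfn = 0x80;
		qsort(slots, 2, sizeof(slots[0]), cmp_gfn_desc);

		printf("pointer now sees id %d, full copy still has id %d\n",
		       ptr->id, copy.id);
		return 0;
	}

After the re-sort the pointer refers to a different slot (id 1 here), while
the by-value copy still describes the old slot, which is what the patch
relies on when freeing the old slot's metadata and calling
kvm_arch_commit_memory_region().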