author     Paolo Bonzini <pbonzini@redhat.com>  2024-04-05 07:58:12 -0400
committer  Paolo Bonzini <pbonzini@redhat.com>  2024-04-11 13:18:27 -0400
commit     f3b65bbaed7c43d10989380d4b95e2a3e9fe5a6b (patch)
tree       130db7e7016f8406b45898024a04d7ab892e0553 /virt
parent     9bc60f733839ab6fcdde0d0b15cbb486123e6402 (diff)
KVM: delete .change_pte MMU notifier callback
The .change_pte() MMU notifier callback was intended as an optimization. The original point of it was that KSM could tell KVM to flip its secondary PTE to a new location without having to first zap it. At the time there was also an .invalidate_page() callback; both of them were *not* bracketed by calls to mmu_notifier_invalidate_range_{start,end}(), and .invalidate_page() also doubled as a fallback implementation of .change_pte().

Later on, however, both callbacks were changed to occur within an invalidate_range_start/end() block. In the case of .change_pte(), commit 6bdb913f0a70 ("mm: wrap calls to set_pte_at_notify with invalidate_range_start and invalidate_range_end", 2012-10-09) did so to remove the fallback from .invalidate_page() to .change_pte() and allow sleepable .invalidate_page() hooks.

This however made KVM's usage of the .change_pte() callback completely moot, because KVM unmaps the sPTEs during .invalidate_range_start() and therefore .change_pte() has no hope of finding a sPTE to change.

Drop the generic KVM code that dispatches to kvm_set_spte_gfn(), as well as all the architecture specific implementations.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Acked-by: Anup Patel <anup@brainfault.org>
Acked-by: Michael Ellerman <mpe@ellerman.id.au> (powerpc)
Reviewed-by: Bibo Mao <maobibo@loongson.cn>
Message-ID: <20240405115815.3226315-2-pbonzini@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
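To make the ordering concrete, the sequence below is a minimal sketch of what the primary MMU does around a KSM-style page replacement after commit 6bdb913f0a70; ksm_replace_page_sketch and its arguments are illustrative, not the kernel's actual replace_page() code:

#include <linux/mm.h>
#include <linux/mmu_notifier.h>

/*
 * Illustrative sketch only: shows the notifier ordering that makes
 * .change_pte() useless to KVM, not the real KSM implementation.
 */
static void ksm_replace_page_sketch(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep, pte_t newpte)
{
	struct mmu_notifier_range range;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
				addr, addr + PAGE_SIZE);

	/* KVM's .invalidate_range_start() runs here and zaps the sPTE. */
	mmu_notifier_invalidate_range_start(&range);

	/*
	 * set_pte_at_notify() fires .change_pte(), but KVM has already
	 * dropped its sPTE above, so the callback finds nothing to flip.
	 */
	set_pte_at_notify(mm, addr, ptep, newpte);

	/* KVM's .invalidate_range_end() runs here. */
	mmu_notifier_invalidate_range_end(&range);
}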
Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/kvm_main.c  43
1 file changed, 0 insertions(+), 43 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index fb49c2a60200..fb0f922d3109 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -705,48 +705,6 @@ static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn
return __kvm_handle_hva_range(kvm, &range).ret;
}
-static bool kvm_change_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
-{
- /*
- * Skipping invalid memslots is correct if and only if change_pte() is
- * surrounded by invalidate_range_{start,end}(), which is currently
- * guaranteed by the primary MMU. If that ever changes, KVM needs to
- * unmap the memslot instead of skipping the memslot to ensure that KVM
- * doesn't hold references to the old PFN.
- */
- WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count));
-
- if (range->slot->flags & KVM_MEMSLOT_INVALID)
- return false;
-
- return kvm_set_spte_gfn(kvm, range);
-}
-
-static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long address,
- pte_t pte)
-{
- struct kvm *kvm = mmu_notifier_to_kvm(mn);
- const union kvm_mmu_notifier_arg arg = { .pte = pte };
-
- trace_kvm_set_spte_hva(address);
-
- /*
- * .change_pte() must be surrounded by .invalidate_range_{start,end}().
- * If mmu_invalidate_in_progress is zero, then no in-progress
- * invalidations, including this one, found a relevant memslot at
- * start(); rechecking memslots here is unnecessary. Note, a false
- * positive (count elevated by a different invalidation) is sub-optimal
- * but functionally ok.
- */
- WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count));
- if (!READ_ONCE(kvm->mmu_invalidate_in_progress))
- return;
-
- kvm_handle_hva_range(mn, address, address + 1, arg, kvm_change_spte_gfn);
-}
-
void kvm_mmu_invalidate_begin(struct kvm *kvm)
{
lockdep_assert_held_write(&kvm->mmu_lock);
@@ -964,7 +922,6 @@ static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
.clear_flush_young = kvm_mmu_notifier_clear_flush_young,
.clear_young = kvm_mmu_notifier_clear_young,
.test_young = kvm_mmu_notifier_test_young,
- .change_pte = kvm_mmu_notifier_change_pte,
.release = kvm_mmu_notifier_release,
};