author     Sean Christopherson                       2021-04-01 17:56:54 -0700
committer  Paolo Bonzini                             2021-04-17 08:31:08 -0400
commit     b4c5936c47f86295cc76672e8dbeeca8b2379ba6
tree       0d7af76c669115a334e088bb107275976fd3357a  /virt/kvm
parent     b1c5356e873cc4fcbb8f58965e0cd910f3ee37a9
KVM: Kill off the old hva-based MMU notifier callbacks
Yank out the hva-based MMU notifier APIs now that all architectures that
use the notifiers have moved to the gfn-based APIs.
No functional change intended.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210402005658.3024832-7-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
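[Editor's note: the hva-based hooks removed here (kvm_unmap_hva_range(), kvm_age_hva(), kvm_test_age_hva(), kvm_set_spte_hva()) passed raw host virtual address ranges to each architecture, which then had to walk the memslots and translate hvas to guest frame numbers itself, with common code only handling SRCU and the decision to flush TLBs (the deleted #else branches in the diff below). The gfn-based hooks (kvm_unmap_gfn_range(), kvm_age_gfn(), kvm_test_age_gfn(), kvm_set_spte_gfn()) instead receive a struct kvm_gfn_range that __kvm_handle_hva_range() has already clipped to a memslot and converted to gfns. The standalone C sketch below models that hva-to-gfn translation only; the simplified structs, the hard-coded 4 KiB page size and the dump_range() handler are illustrative stand-ins, not kernel code.]

/*
 * Standalone model (not kernel code) of how an MMU-notifier hva range is
 * turned into the per-memslot gfn ranges that the gfn-based arch callbacks
 * consume.  The real logic lives in __kvm_handle_hva_range() in kvm_main.c.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;
typedef uint64_t hva_t;

/* Minimal stand-in for a KVM memory slot: a gfn<->hva mapping. */
struct memslot {
        gfn_t base_gfn;         /* first guest frame in the slot */
        uint64_t npages;        /* slot size in 4 KiB pages */
        hva_t userspace_addr;   /* hva backing the first page */
};

/* Simplified equivalent of struct kvm_gfn_range. */
struct gfn_range {
        const struct memslot *slot;
        gfn_t start, end;       /* end is exclusive */
};

/* A gfn-based handler, analogous to kvm_unmap_gfn_range() and friends. */
typedef bool (*gfn_handler_t)(const struct gfn_range *range);

static bool dump_range(const struct gfn_range *range)
{
        printf("gfn range [%llu, %llu) in slot at hva 0x%llx\n",
               (unsigned long long)range->start,
               (unsigned long long)range->end,
               (unsigned long long)range->slot->userspace_addr);
        return true;            /* pretend a TLB flush is needed */
}

/* Clip [hva_start, hva_end) against each slot and hand gfns to the handler. */
static bool handle_hva_range(const struct memslot *slots, int nr_slots,
                             hva_t hva_start, hva_t hva_end,
                             gfn_handler_t handler)
{
        bool ret = false;

        for (int i = 0; i < nr_slots; i++) {
                const struct memslot *slot = &slots[i];
                hva_t slot_start = slot->userspace_addr;
                hva_t slot_end = slot_start + slot->npages * 4096;
                hva_t start = hva_start > slot_start ? hva_start : slot_start;
                hva_t end = hva_end < slot_end ? hva_end : slot_end;

                if (start >= end)
                        continue;       /* no overlap with this slot */

                struct gfn_range range = {
                        .slot = slot,
                        /* hva -> gfn: byte offset into the slot, in pages. */
                        .start = slot->base_gfn + (start - slot_start) / 4096,
                        .end = slot->base_gfn + (end - slot_start + 4095) / 4096,
                };
                ret |= handler(&range);
        }
        return ret;
}

int main(void)
{
        struct memslot slots[] = {
                { .base_gfn = 0,    .npages = 256, .userspace_addr = 0x7f0000000000ULL },
                { .base_gfn = 1024, .npages = 128, .userspace_addr = 0x7f0000400000ULL },
        };

        /* Invalidate an hva range that overlaps only the second slot. */
        handle_hva_range(slots, 2, 0x7f0000400000ULL, 0x7f0000410000ULL, dump_range);
        return 0;
}

Compiled with a plain cc, the sketch prints a single gfn range [1024, 1040) carved out of the second slot, mirroring how the common handler invokes an arch callback once per overlapping memslot instead of handing the arch a raw hva range.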
Diffstat (limited to 'virt/kvm')
-rw-r--r--   virt/kvm/kvm_main.c | 85
1 file changed, 0 insertions(+), 85 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 20836cf342ba..d4c249719a56 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -451,8 +451,6 @@ static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn,
 	srcu_read_unlock(&kvm->srcu, idx);
 }
 
-#ifdef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS
-
 typedef bool (*hva_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
 
 struct kvm_hva_range {
@@ -564,8 +562,6 @@ static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn
 	return ret;
 }
 
-#endif /* KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS */
-
 static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
 					struct mm_struct *mm,
 					unsigned long address,
@@ -573,9 +569,6 @@ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
 {
 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
 
-#ifndef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS
-	int idx;
-#endif
 	trace_kvm_set_spte_hva(address);
 
 	/*
@@ -585,26 +578,13 @@ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
 	 */
 	WARN_ON_ONCE(!kvm->mmu_notifier_count);
 
-#ifdef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS
 	kvm_handle_hva_range(mn, address, address + 1, pte, kvm_set_spte_gfn);
-#else
-	idx = srcu_read_lock(&kvm->srcu);
-
-	KVM_MMU_LOCK(kvm);
-
-	if (kvm_set_spte_hva(kvm, address, pte))
-		kvm_flush_remote_tlbs(kvm);
-
-	KVM_MMU_UNLOCK(kvm);
-	srcu_read_unlock(&kvm->srcu, idx);
-#endif
 }
 
 static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 					const struct mmu_notifier_range *range)
 {
 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
-#ifdef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS
 	const struct kvm_hva_range hva_range = {
 		.start		= range->start,
 		.end		= range->end,
@@ -613,16 +593,9 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 		.flush_on_ret	= true,
 		.may_block	= mmu_notifier_range_blockable(range),
 	};
-#else
-	int need_tlb_flush = 0, idx;
-#endif
 
 	trace_kvm_unmap_hva_range(range->start, range->end);
 
-#ifndef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS
-	idx = srcu_read_lock(&kvm->srcu);
-#endif
-
 	KVM_MMU_LOCK(kvm);
 	/*
 	 * The count increase must become visible at unlock time as no
@@ -649,20 +622,9 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 			max(kvm->mmu_notifier_range_end, range->end);
 	}
 
-#ifdef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS
 	__kvm_handle_hva_range(kvm, &hva_range);
-#else
-	need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end,
-					     range->flags);
-	/* we've to flush the tlb before the pages can be freed */
-	if (need_tlb_flush || kvm->tlbs_dirty)
-		kvm_flush_remote_tlbs(kvm);
-#endif
 
 	KVM_MMU_UNLOCK(kvm);
-#ifndef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS
-	srcu_read_unlock(&kvm->srcu, idx);
-#endif
 
 	return 0;
 }
@@ -696,27 +658,9 @@ static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
 					      unsigned long start,
 					      unsigned long end)
 {
-#ifndef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS
-	struct kvm *kvm = mmu_notifier_to_kvm(mn);
-	int young, idx;
-#endif
 	trace_kvm_age_hva(start, end);
 
-#ifdef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS
 	return kvm_handle_hva_range(mn, start, end, __pte(0), kvm_age_gfn);
-#else
-	idx = srcu_read_lock(&kvm->srcu);
-	KVM_MMU_LOCK(kvm);
-
-	young = kvm_age_hva(kvm, start, end);
-	if (young)
-		kvm_flush_remote_tlbs(kvm);
-
-	KVM_MMU_UNLOCK(kvm);
-	srcu_read_unlock(&kvm->srcu, idx);
-
-	return young;
-#endif
 }
 
 static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
@@ -724,11 +668,6 @@ static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
 					unsigned long start,
 					unsigned long end)
 {
-#ifndef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS
-	struct kvm *kvm = mmu_notifier_to_kvm(mn);
-	int young, idx;
-#endif
-
 	trace_kvm_age_hva(start, end);
 
 	/*
@@ -744,41 +683,17 @@ static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
 	 * cadence. If we find this inaccurate, we might come up with a
 	 * more sophisticated heuristic later.
 	 */
-#ifdef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS
 	return kvm_handle_hva_range_no_flush(mn, start, end, kvm_age_gfn);
-#else
-	idx = srcu_read_lock(&kvm->srcu);
-	KVM_MMU_LOCK(kvm);
-	young = kvm_age_hva(kvm, start, end);
-	KVM_MMU_UNLOCK(kvm);
-	srcu_read_unlock(&kvm->srcu, idx);
-
-	return young;
-#endif
 }
 
 static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
 				       struct mm_struct *mm,
 				       unsigned long address)
 {
-#ifndef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS
-	struct kvm *kvm = mmu_notifier_to_kvm(mn);
-	int young, idx;
-#endif
 	trace_kvm_test_age_hva(address);
 
-#ifdef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS
 	return kvm_handle_hva_range_no_flush(mn, address, address + 1,
 					     kvm_test_age_gfn);
-#else
-	idx = srcu_read_lock(&kvm->srcu);
-	KVM_MMU_LOCK(kvm);
-	young = kvm_test_age_hva(kvm, address);
-	KVM_MMU_UNLOCK(kvm);
-	srcu_read_unlock(&kvm->srcu, idx);
-
-	return young;
-#endif
 }
 
 static void kvm_mmu_notifier_release(struct mmu_notifier *mn,