author     Sean Christopherson <sean.j.christopherson@intel.com>  2020-01-08 12:24:40 -0800
committer  Paolo Bonzini <pbonzini@redhat.com>                    2020-01-27 20:00:04 +0100
commit     17eff01904f5f2fa12f4a56666637ce69ce5c645 (patch)
tree       582891aa169cb3287f5518aaf9b3395ee6910033 /arch/x86/kvm/mmu
parent     13c72c060f1ba6f4eddd7b1c4f52a8aded43d6d9 (diff)
KVM: x86/mmu: Refactor THP adjust to prep for changing query
Refactor transparent_hugepage_adjust() in preparation for walking the
host page tables to identify hugepage mappings, initially for THP pages,
and eventually for HugeTLB and DAX-backed pages as well. The latter
cases support 1GB pages, i.e. the adjustment logic needs access to the
max allowed level.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
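
In short, the helper now owns the level guard that each caller previously had to duplicate. A minimal standalone sketch of the new control flow follows; the function name (thp_adjust_sketch) and the constant values are illustrative stand-ins, not the kernel's definitions:

#include <stdio.h>

/* Illustrative stand-ins for the kernel's page-table level constants. */
#define PT_PAGE_TABLE_LEVEL 1   /* 4KB mappings */
#define PT_DIRECTORY_LEVEL  2   /* 2MB mappings */

/* Sketch of the refactored helper: bail out early instead of nesting. */
static void thp_adjust_sketch(int max_level, int *levelp)
{
    if (max_level == PT_PAGE_TABLE_LEVEL || *levelp > PT_PAGE_TABLE_LEVEL)
        return;                  /* caller can't, or needn't, go huge */
    /* ... pfn validity and THP checks elided ... */
    *levelp = PT_DIRECTORY_LEVEL;
}

int main(void)
{
    int level = PT_PAGE_TABLE_LEVEL;

    /* Callers no longer wrap this in "if (max_level > PT_PAGE_TABLE_LEVEL)". */
    thp_adjust_sketch(PT_DIRECTORY_LEVEL, &level);
    printf("adjusted level: %d\n", level);
    return 0;
}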
Diffstat (limited to 'arch/x86/kvm/mmu')
-rw-r--r--  arch/x86/kvm/mmu/mmu.c          44
-rw-r--r--  arch/x86/kvm/mmu/paging_tmpl.h   3
2 files changed, 23 insertions, 24 deletions
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index e4458c9aec8c..64c28a39d8ef 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3329,33 +3329,34 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
 	__direct_pte_prefetch(vcpu, sp, sptep);
 }
 
-static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
-					gfn_t gfn, kvm_pfn_t *pfnp,
+static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
+					int max_level, kvm_pfn_t *pfnp,
 					int *levelp)
 {
 	kvm_pfn_t pfn = *pfnp;
 	int level = *levelp;
+	kvm_pfn_t mask;
+
+	if (max_level == PT_PAGE_TABLE_LEVEL || level > PT_PAGE_TABLE_LEVEL)
+		return;
+
+	if (is_error_noslot_pfn(pfn) || kvm_is_reserved_pfn(pfn) ||
+	    kvm_is_zone_device_pfn(pfn))
+		return;
+
+	if (!kvm_is_transparent_hugepage(pfn))
+		return;
+
+	level = PT_DIRECTORY_LEVEL;
 
 	/*
-	 * Check if it's a transparent hugepage. If this would be an
-	 * hugetlbfs page, level wouldn't be set to
-	 * PT_PAGE_TABLE_LEVEL and there would be no adjustment done
-	 * here.
+	 * mmu_notifier_retry() was successful and mmu_lock is held, so
+	 * the pmd can't be split from under us.
 	 */
-	if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
-	    !kvm_is_zone_device_pfn(pfn) && level == PT_PAGE_TABLE_LEVEL &&
-	    kvm_is_transparent_hugepage(pfn)) {
-		unsigned long mask;
-
-		/*
-		 * mmu_notifier_retry() was successful and mmu_lock is held, so
-		 * the pmd can't be split from under us.
-		 */
-		*levelp = level = PT_DIRECTORY_LEVEL;
-		mask = KVM_PAGES_PER_HPAGE(level) - 1;
-		VM_BUG_ON((gfn & mask) != (pfn & mask));
-		*pfnp = pfn & ~mask;
-	}
+	*levelp = level;
+	mask = KVM_PAGES_PER_HPAGE(level) - 1;
+	VM_BUG_ON((gfn & mask) != (pfn & mask));
+	*pfnp = pfn & ~mask;
 }
 
 static void disallowed_hugepage_adjust(struct kvm_shadow_walk_iterator it,
@@ -3395,8 +3396,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write,
 	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
 		return RET_PF_RETRY;
 
-	if (likely(max_level > PT_PAGE_TABLE_LEVEL))
-		transparent_hugepage_adjust(vcpu, gfn, &pfn, &level);
+	transparent_hugepage_adjust(vcpu, gfn, max_level, &pfn, &level);
 
 	trace_kvm_mmu_spte_requested(gpa, level, pfn);
 	for_each_shadow_entry(vcpu, gpa, it) {
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index eaa00c4daeb1..1ad87f0e19d0 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -688,8 +688,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
 	gfn = gw->gfn | ((addr & PT_LVL_OFFSET_MASK(gw->level)) >> PAGE_SHIFT);
 	base_gfn = gfn;
 
-	if (max_level > PT_PAGE_TABLE_LEVEL)
-		transparent_hugepage_adjust(vcpu, gw->gfn, &pfn, &hlevel);
+	transparent_hugepage_adjust(vcpu, gw->gfn, max_level, &pfn, &hlevel);
 
 	trace_kvm_mmu_spte_requested(addr, gw->level, pfn);
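
As a worked example of the alignment arithmetic the helper performs (mask = KVM_PAGES_PER_HPAGE(level) - 1; *pfnp = pfn & ~mask), here is a minimal standalone sketch. PAGES_PER_2M_HPAGE and the sample frame numbers are illustrative assumptions, standing in for KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) on x86:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGES_PER_2M_HPAGE (1ULL << 9)   /* 512 4KB pages per 2MB page */

int main(void)
{
    uint64_t gfn  = 0x12345;             /* sample guest frame number */
    uint64_t pfn  = 0xabd45;             /* sample host frame number, THP-backed */
    uint64_t mask = PAGES_PER_2M_HPAGE - 1;

    /* The VM_BUG_ON: gfn and pfn must share the same offset within the
     * huge page, or mapping it as one 2MB entry would be wrong. */
    assert((gfn & mask) == (pfn & mask));

    /* Align pfn down to the head page, as *pfnp = pfn & ~mask does;
     * prints 0xabc00 for the sample values above. */
    printf("head pfn: 0x%llx\n", (unsigned long long)(pfn & ~mask));
    return 0;
}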