author | Yosry Ahmed | 2022-08-23 00:46:39 +0000
committer | Sean Christopherson | 2022-08-30 07:44:25 -0700
commit | d38ba8ccd9c22b177d50f896a1bdc2f0979b343f (patch)
tree | b1cf2c47575da918b8015f1cdc478c2aeb0500fe
parent | 43a063cab325ee7cc50349967e536b3cd4e57f03 (diff)
KVM: arm64/mmu: count KVM s2 mmu usage in secondary pagetable stats
Count the pages used by KVM on arm64 for the stage-2 MMU in memory stats
under secondary pagetable stats (e.g. "SecPageTables" in /proc/meminfo),
to give better visibility into the memory consumption of the KVM MMU, in
a similar way to how normal user page tables are accounted.
Signed-off-by: Yosry Ahmed <yosryahmed@google.com>
Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
Reviewed-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20220823004639.2387269-5-yosryahmed@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
-rw-r--r-- | arch/arm64/kvm/mmu.c | 36
1 file changed, 32 insertions(+), 4 deletions(-)
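For context: the patch relies on the kvm_account_pgtable_pages() helper added earlier in this series. Its exact definition is not shown in this diff; a minimal sketch of what it is expected to do (charge or uncharge pages against the secondary page table counter) is:

static inline void kvm_account_pgtable_pages(void *virt, int nr)
{
	/* Sketch: charge (nr > 0) or uncharge (nr < 0) pages against the
	 * secondary page table counter backing "SecPageTables". */
	mod_lruvec_page_state(virt_to_page(virt), NR_SECONDARY_PAGETABLE, nr);
}

Allocation paths pass a positive count and free paths a negative one, which is why kvm_s2_free_pages_exact() and kvm_s2_put_page() in the diff below negate their page counts.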
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index c9a13e487187..34c5feed9dc1 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -92,9 +92,13 @@ static bool kvm_is_device_pfn(unsigned long pfn)
 static void *stage2_memcache_zalloc_page(void *arg)
 {
 	struct kvm_mmu_memory_cache *mc = arg;
+	void *virt;
 
 	/* Allocated with __GFP_ZERO, so no need to zero */
-	return kvm_mmu_memory_cache_alloc(mc);
+	virt = kvm_mmu_memory_cache_alloc(mc);
+	if (virt)
+		kvm_account_pgtable_pages(virt, 1);
+	return virt;
 }
 
 static void *kvm_host_zalloc_pages_exact(size_t size)
@@ -102,6 +106,21 @@ static void *kvm_host_zalloc_pages_exact(size_t size)
 	return alloc_pages_exact(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
 }
 
+static void *kvm_s2_zalloc_pages_exact(size_t size)
+{
+	void *virt = kvm_host_zalloc_pages_exact(size);
+
+	if (virt)
+		kvm_account_pgtable_pages(virt, (size >> PAGE_SHIFT));
+	return virt;
+}
+
+static void kvm_s2_free_pages_exact(void *virt, size_t size)
+{
+	kvm_account_pgtable_pages(virt, -(size >> PAGE_SHIFT));
+	free_pages_exact(virt, size);
+}
+
 static void kvm_host_get_page(void *addr)
 {
 	get_page(virt_to_page(addr));
@@ -112,6 +131,15 @@ static void kvm_host_put_page(void *addr)
 	put_page(virt_to_page(addr));
 }
 
+static void kvm_s2_put_page(void *addr)
+{
+	struct page *p = virt_to_page(addr);
+	/* Dropping last refcount, the page will be freed */
+	if (page_count(p) == 1)
+		kvm_account_pgtable_pages(addr, -1);
+	put_page(p);
+}
+
 static int kvm_host_page_count(void *addr)
 {
 	return page_count(virt_to_page(addr));
@@ -625,10 +653,10 @@ static int get_user_mapping_size(struct kvm *kvm, u64 addr)
 
 static struct kvm_pgtable_mm_ops kvm_s2_mm_ops = {
 	.zalloc_page		= stage2_memcache_zalloc_page,
-	.zalloc_pages_exact	= kvm_host_zalloc_pages_exact,
-	.free_pages_exact	= free_pages_exact,
+	.zalloc_pages_exact	= kvm_s2_zalloc_pages_exact,
+	.free_pages_exact	= kvm_s2_free_pages_exact,
 	.get_page		= kvm_host_get_page,
-	.put_page		= kvm_host_put_page,
+	.put_page		= kvm_s2_put_page,
 	.page_count		= kvm_host_page_count,
 	.phys_to_virt		= kvm_host_va,
 	.virt_to_phys		= kvm_host_pa,
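As a quick way to observe the effect (not part of the patch), the new accounting shows up in the "SecPageTables" line of /proc/meminfo while a guest is running. A minimal user-space reader, as a sketch:

#include <stdio.h>
#include <string.h>

/* Print the SecPageTables line from /proc/meminfo, if the kernel exposes it. */
int main(void)
{
	FILE *f = fopen("/proc/meminfo", "r");
	char line[256];

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		if (strncmp(line, "SecPageTables:", 14) == 0)
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}

The reported value should grow as KVM populates stage-2 page tables for a guest and drop back when those tables are freed.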