author	Michal Luczaj <mhal@rbox.co>	2022-10-13 21:12:22 +0000
committer	Paolo Bonzini <pbonzini@redhat.com>	2022-11-30 11:03:58 -0500
commit	aba3caef58626f09b629085440eec5dd1368669a (patch)
tree	d0d8deb0f8e1e017906b2e956a5f20e295629b57 /virt
parent	8acc35186ed63436bfaf60051c8bb53f344dcbfc (diff)
KVM: Shorten gfn_to_pfn_cache function names
Formalize "gpc" as the acronym and use it in function names. No functional change intended. Suggested-by: Sean Christopherson <seanjc@google.com> Signed-off-by: Michal Luczaj <mhal@rbox.co> Signed-off-by: Sean Christopherson <seanjc@google.com> Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'virt')
-rw-r--r--	virt/kvm/pfncache.c	20
1 file changed, 10 insertions(+), 10 deletions(-)
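The rename is purely mechanical: the kvm_gfn_to_pfn_cache_* helpers keep their signatures and simply become kvm_gpc_*. As a rough illustration of how a caller uses the renamed API, here is a minimal sketch of the check/refresh pattern, loosely modeled on existing gfn_to_pfn_cache users in KVM; the helper example_read_guest_word(), its error handling, and the read-lock discipline on gpc->lock are assumptions for illustration only, not part of this patch.

	/*
	 * Hypothetical caller sketch (not from this patch): validate the
	 * cached mapping and re-establish it if it has gone stale. The
	 * kvm_gpc_check()/kvm_gpc_refresh() signatures match the diff
	 * below; the locking shown is an assumption for illustration.
	 */
	static int example_read_guest_word(struct kvm *kvm,
					   struct gfn_to_pfn_cache *gpc,
					   gpa_t gpa, u64 *val)
	{
		unsigned long flags;

		read_lock_irqsave(&gpc->lock, flags);
		while (!kvm_gpc_check(kvm, gpc, gpa, sizeof(*val))) {
			/* Drop the read lock before refreshing the mapping. */
			read_unlock_irqrestore(&gpc->lock, flags);

			if (kvm_gpc_refresh(kvm, gpc, gpa, sizeof(*val)))
				return -EFAULT;

			read_lock_irqsave(&gpc->lock, flags);
		}

		/* The cached kernel mapping is valid while the check holds. */
		*val = *(u64 *)gpc->khva;
		read_unlock_irqrestore(&gpc->lock, flags);
		return 0;
	}

The check/refresh split exists because refreshing may need to re-map the page and take the cache lock for write, so callers drop their read lock before retrying rather than refreshing in place.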
diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
index 5f83321bfd2a..8c4db3dcaf6d 100644
--- a/virt/kvm/pfncache.c
+++ b/virt/kvm/pfncache.c
@@ -76,8 +76,8 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
}
}
-bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
- gpa_t gpa, unsigned long len)
+bool kvm_gpc_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, gpa_t gpa,
+ unsigned long len)
{
struct kvm_memslots *slots = kvm_memslots(kvm);
@@ -96,7 +96,7 @@ bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
return true;
}
-EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_check);
+EXPORT_SYMBOL_GPL(kvm_gpc_check);
static void gpc_unmap_khva(struct kvm *kvm, kvm_pfn_t pfn, void *khva)
{
@@ -238,8 +238,8 @@ out_error:
return -EFAULT;
}
-int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
- gpa_t gpa, unsigned long len)
+int kvm_gpc_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, gpa_t gpa,
+ unsigned long len)
{
struct kvm_memslots *slots = kvm_memslots(kvm);
unsigned long page_offset = gpa & ~PAGE_MASK;
@@ -333,9 +333,9 @@ out_unlock:
return ret;
}
-EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_refresh);
+EXPORT_SYMBOL_GPL(kvm_gpc_refresh);
-void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
+void kvm_gpc_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
{
void *old_khva;
kvm_pfn_t old_pfn;
@@ -360,7 +360,7 @@ void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
gpc_unmap_khva(kvm, old_pfn, old_khva);
}
-EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_unmap);
+EXPORT_SYMBOL_GPL(kvm_gpc_unmap);
void kvm_gpc_init(struct gfn_to_pfn_cache *gpc)
{
@@ -396,7 +396,7 @@ int kvm_gpc_activate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
gpc->active = true;
write_unlock_irq(&gpc->lock);
}
- return kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpa, len);
+ return kvm_gpc_refresh(kvm, gpc, gpa, len);
}
EXPORT_SYMBOL_GPL(kvm_gpc_activate);
@@ -416,7 +416,7 @@ void kvm_gpc_deactivate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
list_del(&gpc->list);
spin_unlock(&kvm->gpc_lock);
- kvm_gfn_to_pfn_cache_unmap(kvm, gpc);
+ kvm_gpc_unmap(kvm, gpc);
}
}
EXPORT_SYMBOL_GPL(kvm_gpc_deactivate);