author      Bharata B Rao    2019-11-25 08:36:26 +0530
committer   Paul Mackerras   2019-11-28 16:30:02 +1100
commit      ca9f4942670c37407bb109090eaf776ce2ccc54c (patch)
tree        48bc8e1e00eb9486288656fe1121e3421fa44f33 /arch/powerpc/include/asm
parent      33cf170715e87efd0608af6f2c056cafe0e7fc47 (diff)
KVM: PPC: Book3S HV: Support for running secure guests
A pseries guest can be run as a secure guest on Ultravisor-enabled
POWER platforms. On such platforms, this driver will be used to manage
the movement of guest pages between the normal memory managed by the
hypervisor (HV) and the secure memory managed by the Ultravisor (UV).
HV is informed about the guest's transition to secure mode via hcalls:
H_SVM_INIT_START: Initiate securing a VM
H_SVM_INIT_DONE: Conclude securing a VM
As part of H_SVM_INIT_START, all existing memslots are registered with
the UV. The H_SVM_INIT_DONE call from the UV informs the HV that the
guest's transition to secure mode is complete.
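As a rough sketch, the H_SVM_INIT_START side amounts to the loop below
(the actual handler lives in the new book3s_hv_uvmem.c, which is outside
this diffstat; error paths are simplified and only declarations from the
headers added here are used):

unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *memslot;
	int ret;

	/* Register each existing memslot with the Ultravisor */
	kvm_for_each_memslot(memslot, slots) {
		if (kvmppc_uvmem_slot_init(kvm, memslot))
			return H_PARAMETER;
		ret = uv_register_mem_slot(kvm->arch.lpid,
					   memslot->base_gfn << PAGE_SHIFT,
					   memslot->npages * PAGE_SIZE,
					   0, memslot->id);
		if (ret < 0)
			return H_PARAMETER;
	}
	kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_START;
	return H_SUCCESS;
}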
These two states (transition to secure mode STARTED and transition
to secure mode COMPLETED) are recorded in kvm->arch.secure_guest.
Setting these states will cause the assembly code that enters the
guest to call the UV_RETURN ucall instead of trying to enter the
guest directly.
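A minimal sketch of the hcall side of this state machine (simplified;
the actual check of kvm->arch.secure_guest in the guest-entry path is
in assembly and not shown here):

unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
{
	/* The transition must have been started first */
	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
		return H_UNSUPPORTED;

	kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE;
	return H_SUCCESS;
}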
Migration of pages between the normal and secure memory of a secure
guest is implemented by the H_SVM_PAGE_IN and H_SVM_PAGE_OUT hcalls:
H_SVM_PAGE_IN: Move the content of a normal page to a secure page
H_SVM_PAGE_OUT: Move the content of a secure page to a normal page
Private ZONE_DEVICE memory, equal in size to the secure memory
available in the platform for running secure guests, is created.
Whenever a page belonging to the guest becomes secure, a page from
this private device memory is used to represent and track that secure
page on the HV side. The movement of pages between normal and secure
memory is done via migrate_vma_pages() using UV_PAGE_IN and
UV_PAGE_OUT ucalls.
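A simplified sketch of the page-in leg under the 5.4 migrate_vma API
(kvmppc_uvmem_get_page() is the device private page allocator in the
new uvmem code, also outside this diffstat; locking and most error
handling are omitted):

static int kvmppc_svm_page_in(struct vm_area_struct *vma, unsigned long start,
			      unsigned long end, unsigned long gpa,
			      struct kvm *kvm, unsigned long page_shift)
{
	unsigned long src_pfn, dst_pfn = 0;
	struct migrate_vma mig = {
		.vma	= vma,
		.start	= start,
		.end	= end,
		.src	= &src_pfn,
		.dst	= &dst_pfn,
	};
	struct page *spage, *dpage;
	unsigned long pfn;
	int ret = 0;

	if (migrate_vma_setup(&mig))
		return -EINVAL;

	/* A device private page stands in for the secure page on the HV side */
	dpage = kvmppc_uvmem_get_page(gpa, kvm);
	if (!dpage) {
		ret = -ENOMEM;
		goto out_finalize;
	}

	/* Ask the Ultravisor to pull the contents into secure memory */
	spage = migrate_pfn_to_page(*mig.src);
	if (spage) {
		pfn = *mig.src >> MIGRATE_PFN_SHIFT;
		ret = uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0,
				 page_shift);
	}

	*mig.dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
	migrate_vma_pages(&mig);
out_finalize:
	migrate_vma_finalize(&mig);
	return ret;
}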
In order to prevent the device private pages (that correspond to pages
of the secure guest) from participating in KSM merging, H_SVM_PAGE_IN
calls ksm_madvise(). However, ksm_madvise() needs to be called under
the write version of mmap_sem, not the read version. Hence we call
kvmppc_svm_page_in() with mmap_sem held for writing, and it then
downgrades to a read lock after calling ksm_madvise().
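The resulting lock dance, roughly (a sketch only: the wrapper name
kvmppc_h_svm_page_in_locked is illustrative, and ksm_madvise() is
hoisted out of kvmppc_svm_page_in() here for readability):

static unsigned long kvmppc_h_svm_page_in_locked(struct kvm *kvm,
						 unsigned long gpa,
						 unsigned long page_shift)
{
	unsigned long start, end, ret = H_PARAMETER;
	struct vm_area_struct *vma;

	/* ksm_madvise(MADV_UNMERGEABLE) may unmerge, so take the write lock */
	down_write(&kvm->mm->mmap_sem);

	start = gfn_to_hva(kvm, gpa >> page_shift);
	if (kvm_is_error_hva(start))
		goto out;
	end = start + (1UL << page_shift);

	vma = find_vma_intersection(kvm->mm, start, end);
	if (!vma || vma->vm_start > start || vma->vm_end < end)
		goto out;

	/* Keep KSM away from the pages backing this guest range */
	if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
			MADV_UNMERGEABLE, &vma->vm_flags))
		goto out;

	/* Unmerging done; the migration itself only needs the read lock */
	downgrade_write(&kvm->mm->mmap_sem);
	if (!kvmppc_svm_page_in(vma, start, end, gpa, kvm, page_shift))
		ret = H_SUCCESS;
	up_read(&kvm->mm->mmap_sem);
	return ret;
out:
	up_write(&kvm->mm->mmap_sem);
	return ret;
}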
[paulus@ozlabs.org - roll in patch "KVM: PPC: Book3S HV: Take write
mmap_sem when calling ksm_madvise"]
Signed-off-by: Bharata B Rao <bharata@linux.ibm.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Diffstat (limited to 'arch/powerpc/include/asm')
-rw-r--r--   arch/powerpc/include/asm/hvcall.h            |  6
-rw-r--r--   arch/powerpc/include/asm/kvm_book3s_uvmem.h  | 62
-rw-r--r--   arch/powerpc/include/asm/kvm_host.h          |  6
-rw-r--r--   arch/powerpc/include/asm/ultravisor-api.h    |  3
-rw-r--r--   arch/powerpc/include/asm/ultravisor.h        | 21
5 files changed, 98 insertions, 0 deletions
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 11112023e327..4150732c81a0 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -342,6 +342,12 @@
 #define H_TLB_INVALIDATE	0xF808
 #define H_COPY_TOFROM_GUEST	0xF80C
 
+/* Platform-specific hcalls used by the Ultravisor */
+#define H_SVM_PAGE_IN		0xEF00
+#define H_SVM_PAGE_OUT		0xEF04
+#define H_SVM_INIT_START	0xEF08
+#define H_SVM_INIT_DONE		0xEF0C
+
 /* Values for 2nd argument to H_SET_MODE */
 #define H_SET_MODE_RESOURCE_SET_CIABR	1
 #define H_SET_MODE_RESOURCE_SET_DAWR	2
diff --git a/arch/powerpc/include/asm/kvm_book3s_uvmem.h b/arch/powerpc/include/asm/kvm_book3s_uvmem.h
new file mode 100644
index 000000000000..95f389c2937b
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_book3s_uvmem.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_KVM_BOOK3S_UVMEM_H__
+#define __ASM_KVM_BOOK3S_UVMEM_H__
+
+#ifdef CONFIG_PPC_UV
+int kvmppc_uvmem_init(void);
+void kvmppc_uvmem_free(void);
+int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot);
+void kvmppc_uvmem_slot_free(struct kvm *kvm,
+			    const struct kvm_memory_slot *slot);
+unsigned long kvmppc_h_svm_page_in(struct kvm *kvm,
+				   unsigned long gra,
+				   unsigned long flags,
+				   unsigned long page_shift);
+unsigned long kvmppc_h_svm_page_out(struct kvm *kvm,
+				    unsigned long gra,
+				    unsigned long flags,
+				    unsigned long page_shift);
+unsigned long kvmppc_h_svm_init_start(struct kvm *kvm);
+unsigned long kvmppc_h_svm_init_done(struct kvm *kvm);
+#else
+static inline int kvmppc_uvmem_init(void)
+{
+	return 0;
+}
+
+static inline void kvmppc_uvmem_free(void) { }
+
+static inline int
+kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot)
+{
+	return 0;
+}
+
+static inline void
+kvmppc_uvmem_slot_free(struct kvm *kvm, const struct kvm_memory_slot *slot) { }
+
+static inline unsigned long
+kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gra,
+		     unsigned long flags, unsigned long page_shift)
+{
+	return H_UNSUPPORTED;
+}
+
+static inline unsigned long
+kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gra,
+		      unsigned long flags, unsigned long page_shift)
+{
+	return H_UNSUPPORTED;
+}
+
+static inline unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
+{
+	return H_UNSUPPORTED;
+}
+
+static inline unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
+{
+	return H_UNSUPPORTED;
+}
+#endif /* CONFIG_PPC_UV */
+#endif /* __ASM_KVM_BOOK3S_UVMEM_H__ */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 4273e799203d..0a398f2321c2 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -275,6 +275,10 @@ struct kvm_hpt_info {
 
 struct kvm_resize_hpt;
 
+/* Flag values for kvm_arch.secure_guest */
+#define KVMPPC_SECURE_INIT_START 0x1 /* H_SVM_INIT_START has been called */
+#define KVMPPC_SECURE_INIT_DONE  0x2 /* H_SVM_INIT_DONE completed */
+
 struct kvm_arch {
 	unsigned int lpid;
 	unsigned int smt_mode;		/* # vcpus per virtual core */
@@ -330,6 +334,8 @@ struct kvm_arch {
 #endif
 	struct kvmppc_ops *kvm_ops;
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+	struct mutex uvmem_lock;
+	struct list_head uvmem_pfns;
 	struct mutex mmu_setup_lock;	/* nests inside vcpu mutexes */
 	u64 l1_ptcr;
 	int max_nested_lpid;
diff --git a/arch/powerpc/include/asm/ultravisor-api.h b/arch/powerpc/include/asm/ultravisor-api.h
index 4fcda1d5793d..2483f15bd71a 100644
--- a/arch/powerpc/include/asm/ultravisor-api.h
+++ b/arch/powerpc/include/asm/ultravisor-api.h
@@ -26,6 +26,9 @@
 #define UV_WRITE_PATE			0xF104
 #define UV_RETURN			0xF11C
 #define UV_ESM				0xF110
+#define UV_REGISTER_MEM_SLOT		0xF120
+#define UV_PAGE_IN			0xF128
+#define UV_PAGE_OUT			0xF12C
 #define UV_SHARE_PAGE			0xF130
 #define UV_UNSHARE_PAGE			0xF134
 #define UV_UNSHARE_ALL_PAGES		0xF140
diff --git a/arch/powerpc/include/asm/ultravisor.h b/arch/powerpc/include/asm/ultravisor.h
index b1bc2e043ed4..79bb005e8ee9 100644
--- a/arch/powerpc/include/asm/ultravisor.h
+++ b/arch/powerpc/include/asm/ultravisor.h
@@ -46,4 +46,25 @@ static inline int uv_unshare_all_pages(void)
 	return ucall_norets(UV_UNSHARE_ALL_PAGES);
 }
 
+static inline int uv_page_in(u64 lpid, u64 src_ra, u64 dst_gpa, u64 flags,
+			     u64 page_shift)
+{
+	return ucall_norets(UV_PAGE_IN, lpid, src_ra, dst_gpa, flags,
+			    page_shift);
+}
+
+static inline int uv_page_out(u64 lpid, u64 dst_ra, u64 src_gpa, u64 flags,
+			      u64 page_shift)
+{
+	return ucall_norets(UV_PAGE_OUT, lpid, dst_ra, src_gpa, flags,
+			    page_shift);
+}
+
+static inline int uv_register_mem_slot(u64 lpid, u64 start_gpa, u64 size,
+				       u64 flags, u64 slotid)
+{
+	return ucall_norets(UV_REGISTER_MEM_SLOT, lpid, start_gpa,
+			    size, flags, slotid);
+}
+
 #endif /* _ASM_POWERPC_ULTRAVISOR_H */