| author | Tina Zhang | 2022-06-06 00:11:52 +0800 |
| --- | --- | --- |
| committer | Joerg Roedel | 2022-07-15 10:09:27 +0200 |
| commit | 7e62edd7a33accf02cb7abdc1bbe381d975b9b5a | (patch) |
| tree | b7d33cec96f4a6b79b35b9c2876713dc6275842a | /drivers/iommu |
| parent | a111daf0c53ae91e71fd2bfe7497862d14132e3e | (diff) |
iommu/virtio: Add map/unmap_pages() callbacks implementation
Map/unmap_pages() allow mapping and unmapping multiple pages of the same
size in one call, which can improve performance by reducing the number of
vmexits. With map/unmap_pages() implemented, the prior map/unmap()
callbacks are deprecated.
Signed-off-by: Tina Zhang <tina.zhang@intel.com>
Reviewed-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Link: https://lore.kernel.org/r/20220605161152.3171-1-tina.zhang@intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
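For illustration, here is a minimal, self-contained user-space sketch of the batching idea behind this patch; it is not kernel code, and all names in it (toy_map, toy_map_pages, send_map_request, vmexit_count) are hypothetical. The point it models: issuing one request for pgcount pages of pgsize costs one simulated vmexit instead of one per page.

```c
/*
 * Toy user-space model of the map() vs. map_pages() trade-off.
 * Every request sent to the (simulated) virtio-iommu device costs
 * one "vmexit"; batching pages into a single request amortizes it.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

static unsigned long vmexit_count;

/* One request to the simulated device: one vmexit. */
static int send_map_request(uint64_t iova, uint64_t paddr, size_t size)
{
	vmexit_count++;
	return 0;
}

/* Old-style callback: one page, hence one request, per call. */
static int toy_map(uint64_t iova, uint64_t paddr, size_t size)
{
	return send_map_request(iova, paddr, size);
}

/* New-style callback: pgcount pages of pgsize in a single request. */
static int toy_map_pages(uint64_t iova, uint64_t paddr, size_t pgsize,
			 size_t pgcount, size_t *mapped)
{
	int ret = send_map_request(iova, paddr, pgsize * pgcount);

	if (!ret && mapped)
		*mapped = pgsize * pgcount;
	return ret;
}

int main(void)
{
	const size_t pgsize = 4096, pgcount = 512;	/* a 2 MiB range */
	size_t mapped = 0;

	vmexit_count = 0;
	for (size_t i = 0; i < pgcount; i++)
		toy_map(i * pgsize, i * pgsize, pgsize);
	printf("per-page map(): %lu simulated vmexits\n", vmexit_count);

	vmexit_count = 0;
	toy_map_pages(0, 0, pgsize, pgcount, &mapped);
	printf("map_pages():    %lu simulated vmexits, %zu bytes mapped\n",
	       vmexit_count, mapped);
	return 0;
}
```

In the real driver, that single request is a virtio_iommu_req_map sent via viommu_send_req_sync(), as the diff below shows.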
Diffstat (limited to 'drivers/iommu')
| -rw-r--r-- | drivers/iommu/virtio-iommu.c | 18 |
1 file changed, 12 insertions(+), 6 deletions(-)
```diff
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 25be4b822aa0..3c943dbd9fd0 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -788,11 +788,13 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	return 0;
 }
 
-static int viommu_map(struct iommu_domain *domain, unsigned long iova,
-		      phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+static int viommu_map_pages(struct iommu_domain *domain, unsigned long iova,
+			    phys_addr_t paddr, size_t pgsize, size_t pgcount,
+			    int prot, gfp_t gfp, size_t *mapped)
 {
 	int ret;
 	u32 flags;
+	size_t size = pgsize * pgcount;
 	u64 end = iova + size - 1;
 	struct virtio_iommu_req_map map;
 	struct viommu_domain *vdomain = to_viommu_domain(domain);
@@ -823,17 +825,21 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
 	ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
 	if (ret)
 		viommu_del_mappings(vdomain, iova, end);
+	else if (mapped)
+		*mapped = size;
 
 	return ret;
 }
 
-static size_t viommu_unmap(struct iommu_domain *domain, unsigned long iova,
-			   size_t size, struct iommu_iotlb_gather *gather)
+static size_t viommu_unmap_pages(struct iommu_domain *domain, unsigned long iova,
+				 size_t pgsize, size_t pgcount,
+				 struct iommu_iotlb_gather *gather)
 {
 	int ret = 0;
 	size_t unmapped;
 	struct virtio_iommu_req_unmap unmap;
 	struct viommu_domain *vdomain = to_viommu_domain(domain);
+	size_t size = pgsize * pgcount;
 
 	unmapped = viommu_del_mappings(vdomain, iova, iova + size - 1);
 	if (unmapped < size)
@@ -1018,8 +1024,8 @@ static struct iommu_ops viommu_ops = {
 	.owner			= THIS_MODULE,
 	.default_domain_ops = &(const struct iommu_domain_ops) {
 		.attach_dev		= viommu_attach_dev,
-		.map			= viommu_map,
-		.unmap			= viommu_unmap,
+		.map_pages		= viommu_map_pages,
+		.unmap_pages		= viommu_unmap_pages,
 		.iova_to_phys		= viommu_iova_to_phys,
 		.iotlb_sync		= viommu_iotlb_sync,
 		.free			= viommu_domain_free,
```
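The `else if (mapped) *mapped = size;` hunk implements the map_pages()-style contract: the callback reports how many bytes it actually mapped. As a rough, hypothetical user-space sketch of a caller consuming that value (fake_map_pages and its chunking behaviour are invented for illustration and do not reflect the real IOMMU core):

```c
/*
 * Hypothetical caller-side model: loop until the whole range is mapped,
 * advancing by the number of bytes the callback reports via *mapped.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Stand-in callback; pretend the device accepts at most 4 pages per request. */
static int fake_map_pages(uint64_t iova, uint64_t paddr, size_t pgsize,
			  size_t pgcount, size_t *mapped)
{
	size_t n = pgcount > 4 ? 4 : pgcount;

	*mapped = n * pgsize;
	return 0;
}

int main(void)
{
	const size_t pgsize = 4096;
	size_t total = 8 * pgsize, done = 0;
	uint64_t iova = 0x100000, paddr = 0x200000;

	while (done < total) {
		size_t mapped = 0;
		int ret = fake_map_pages(iova + done, paddr + done, pgsize,
					 (total - done) / pgsize, &mapped);

		done += mapped;	/* bytes actually mapped so far */
		if (ret) {
			printf("error after %zu bytes; caller would unmap them\n",
			       done);
			return 1;
		}
	}
	printf("mapped %zu bytes\n", done);
	return 0;
}
```

The real callback additionally tears down the partially created mappings via viommu_del_mappings() when the device rejects the request, as seen in the diff above.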