author | Suravee Suthikulpanit | 2018-02-21 14:19:45 +0700
---|---|---
committer | Joerg Roedel | 2018-03-15 13:37:34 +0100
commit | eb5ecd1a40e2098f805fb63cb07817ac48826e40 | (patch)
tree | 82322557f572777f1620ae059bf56167ef0f7902 | /drivers/iommu
parent | df42a04b15f19a842393dc98a84cbc52b1f8ed49 | (diff)
iommu/amd: Add support for fast IOTLB flushing
Since the AMD IOMMU driver currently flushes all TLB entries whenever more
than a single page is unmapped, use the same implementation for both
iommu_ops.flush_iotlb_all() and iommu_ops.iotlb_sync().
Cc: Joerg Roedel <joro@8bytes.org>
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
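
For context, the sketch below shows how a caller of the generic IOMMU API is expected to exercise the callbacks this patch adds: unmap without per-page flushing, optionally record the ranges, then issue one sync at the end. This is an illustrative sketch, not part of the patch; it assumes the generic TLB-flushing interface (iommu_unmap_fast(), iommu_tlb_range_add(), iommu_tlb_sync()) from include/linux/iommu.h of this era, and the function name unmap_batch and its parameters are hypothetical.

```c
#include <linux/iommu.h>

/*
 * Hypothetical caller: unmap several IOVA ranges from @domain without
 * flushing the IOTLB after each one, then issue a single sync.  With this
 * patch, the final iommu_tlb_sync() lands in amd_iommu_flush_iotlb_all(),
 * i.e. a full TLB/PDE flush of the protection domain.
 */
static size_t unmap_batch(struct iommu_domain *domain,
			  const unsigned long *iovas, const size_t *sizes,
			  int count)
{
	size_t unmapped = 0;
	int i;

	for (i = 0; i < count; i++) {
		/* Defers IOTLB invalidation instead of flushing per call. */
		unmapped += iommu_unmap_fast(domain, iovas[i], sizes[i]);
		/* A no-op for amd_iommu: the sync below flushes everything. */
		iommu_tlb_range_add(domain, iovas[i], sizes[i]);
	}

	/* One flush covers the whole batch. */
	iommu_tlb_sync(domain);

	return unmapped;
}
```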
Diffstat (limited to 'drivers/iommu')
-rw-r--r-- | drivers/iommu/amd_iommu.c | 19 |
1 file changed, 16 insertions(+), 3 deletions(-)
```diff
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 14efeb306a9f..997a947ddc3b 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3056,9 +3056,6 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
 	unmap_size = iommu_unmap_page(domain, iova, page_size);
 	mutex_unlock(&domain->api_lock);
 
-	domain_flush_tlb_pde(domain);
-	domain_flush_complete(domain);
-
 	return unmap_size;
 }
 
@@ -3176,6 +3173,19 @@ static bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
 	return dev_data->defer_attach;
 }
 
+static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
+{
+	struct protection_domain *dom = to_pdomain(domain);
+
+	domain_flush_tlb_pde(dom);
+	domain_flush_complete(dom);
+}
+
+static void amd_iommu_iotlb_range_add(struct iommu_domain *domain,
+				      unsigned long iova, size_t size)
+{
+}
+
 const struct iommu_ops amd_iommu_ops = {
 	.capable = amd_iommu_capable,
 	.domain_alloc = amd_iommu_domain_alloc,
@@ -3194,6 +3204,9 @@ const struct iommu_ops amd_iommu_ops = {
 	.apply_resv_region = amd_iommu_apply_resv_region,
 	.is_attach_deferred = amd_iommu_is_attach_deferred,
 	.pgsize_bitmap	= AMD_IOMMU_PGSIZES,
+	.flush_iotlb_all = amd_iommu_flush_iotlb_all,
+	.iotlb_range_add = amd_iommu_iotlb_range_add,
+	.iotlb_sync = amd_iommu_flush_iotlb_all,
 };
 
 /*****************************************************************************
```
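
For reference, the core-side wrappers that dispatch to these callbacks look roughly like the snippet below (a sketch based on include/linux/iommu.h around this kernel version, reproduced from memory rather than copied from the tree). Pointing both .flush_iotlb_all and .iotlb_sync at the same handler, and leaving .iotlb_range_add empty, is the simplest correct choice here because the AMD driver invalidates the whole domain anyway whenever more than a single page is affected.

```c
/* Sketch of the generic dispatch helpers in include/linux/iommu.h. */
static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
{
	if (domain->ops->flush_iotlb_all)
		domain->ops->flush_iotlb_all(domain);
}

static inline void iommu_tlb_range_add(struct iommu_domain *domain,
				       unsigned long iova, size_t size)
{
	if (domain->ops->iotlb_range_add)
		domain->ops->iotlb_range_add(domain, iova, size);
}

static inline void iommu_tlb_sync(struct iommu_domain *domain)
{
	if (domain->ops->iotlb_sync)
		domain->ops->iotlb_sync(domain);
}
```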