author	Liu Yi L <yi.l.liu@intel.com>	2020-07-24 09:49:17 +0800
committer	Joerg Roedel <jroedel@suse.de>	2020-07-24 10:51:21 +0200
commit	288d08e78008828416ffaa85ef274b4e29ef3dae (patch)
tree	d6d0c95bbc0c5a64549268ffb2494ce28e5d14ba /drivers/iommu
parent	e7e69461a83264dbce2b4ff480f858f3f1454db7 (diff)
iommu/vt-d: Handle non-page aligned address
Address information for device TLB invalidation comes from userspace when the device is directly assigned to a guest with vIOMMU support. VT-d requires the address to be page aligned. This patch checks and enforces page alignment; otherwise reserved bits can be set in the invalidation descriptor, and an unrecoverable fault will be reported due to the non-zero value in the reserved bits.

Fixes: 61a06a16e36d8 ("iommu/vt-d: Support flushing more translation cache types")
Signed-off-by: Liu Yi L <yi.l.liu@intel.com>
Signed-off-by: Jacob Pan <jacob.jun.pan@linux.intel.com>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Link: https://lore.kernel.org/r/20200724014925.15523-5-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
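To make the failure mode concrete, here is a minimal standalone sketch (illustration only, not kernel code; VTD_PAGE_SHIFT and GENMASK_ULL are re-declared below to mirror the kernel's definitions) of the alignment check the patch adds. Any low bit left set in the address would land in the descriptor's reserved/size field:

#include <stdint.h>
#include <stdio.h>

/* Re-declared for illustration; these mirror the kernel's definitions. */
#define VTD_PAGE_SHIFT		12
#define GENMASK_ULL(h, l)	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
	uint64_t addr = 0x10840;	/* not aligned to a 4KiB page */
	unsigned int size_order = 1;	/* flush 2^1 = 2 pages */

	/* Same test the patch performs before building the descriptor. */
	uint64_t low = addr & GENMASK_ULL(size_order + VTD_PAGE_SHIFT, 0);

	if (low)
		printf("addr %#llx leaks %#llx into reserved bits\n",
		       (unsigned long long)addr, (unsigned long long)low);
	return 0;
}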
Diffstat (limited to 'drivers/iommu')
-rw-r--r--	drivers/iommu/intel/dmar.c	21
1 file changed, 19 insertions(+), 2 deletions(-)
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index 9be08b9400ee..f6cbe3f95c8d 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -1456,9 +1456,26 @@ void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
 	 * Max Invs Pending (MIP) is set to 0 for now until we have DIT in
 	 * ECAP.
 	 */
-	desc.qw1 |= addr & ~mask;
-	if (size_order)
+	if (addr & GENMASK_ULL(size_order + VTD_PAGE_SHIFT, 0))
+		pr_warn_ratelimited("Invalidate non-aligned address %llx, order %d\n",
+				    addr, size_order);
+
+	/* Take page address */
+	desc.qw1 = QI_DEV_EIOTLB_ADDR(addr);
+
+	if (size_order) {
+		/*
+		 * Existing 0s in address below size_order may be the least
+		 * significant bit, we must set them to 1s to avoid having
+		 * smaller size than desired.
+		 */
+		desc.qw1 |= GENMASK_ULL(size_order + VTD_PAGE_SHIFT - 1,
+					VTD_PAGE_SHIFT);
+		/* Clear size_order bit to indicate size */
+		desc.qw1 &= ~mask;
+		/* Set the S bit to indicate flushing more than 1 page */
 		desc.qw1 |= QI_DEV_EIOTLB_SIZE;
+	}
 
 	qi_submit_sync(iommu, &desc, 1, 0);
 }
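For readers unfamiliar with the encoding: with the S bit set, the position of the least significant 0 in the descriptor's address field tells the device how many pages to flush, which is why the patch first fills the size field with 1s and then clears the top bit of that run. Below is a standalone sketch of the resulting qw1 value (illustration only, not kernel code; QI_DEV_EIOTLB_ADDR, QI_DEV_EIOTLB_SIZE, VTD_PAGE_SHIFT and GENMASK_ULL are re-declared to mirror the kernel's usage, and mask is assumed to be the single bit at VTD_PAGE_SHIFT + size_order - 1, as the surrounding function defines it):

#include <stdint.h>
#include <stdio.h>

/* Re-declared for illustration; these mirror the kernel's usage. */
#define VTD_PAGE_SHIFT		12
#define QI_DEV_EIOTLB_SIZE	(1ULL << 11)	/* S bit */
#define QI_DEV_EIOTLB_ADDR(a)	((uint64_t)(a) & ~0xfffULL)
#define GENMASK_ULL(h, l)	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

static uint64_t encode_qw1(uint64_t addr, unsigned int size_order)
{
	uint64_t mask = 1ULL << (VTD_PAGE_SHIFT + size_order - 1);
	uint64_t qw1 = QI_DEV_EIOTLB_ADDR(addr);	/* take page address */

	if (size_order) {
		/* Fill the size field with 1s... */
		qw1 |= GENMASK_ULL(size_order + VTD_PAGE_SHIFT - 1,
				   VTD_PAGE_SHIFT);
		/* ...then clear the top bit: its 0 marks the range size. */
		qw1 &= ~mask;
		/* S bit: flushing more than one page. */
		qw1 |= QI_DEV_EIOTLB_SIZE;
	}
	return qw1;
}

int main(void)
{
	/* 4 pages (size_order = 2) at 0x10000: expect bit 12 = 1, bit 13 = 0. */
	printf("qw1 = %#llx\n", (unsigned long long)encode_qw1(0x10000, 2));
	return 0;
}

This prints qw1 = 0x11800: bit 11 is the S flag, bit 12 is part of the all-1s run, and the 0 at bit 13 marks a 16KiB (4-page) invalidation range.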