author    Alex Williamson <alex.williamson@redhat.com>  2020-08-17 11:09:13 -0600
committer Alex Williamson <alex.williamson@redhat.com>  2020-08-17 11:09:13 -0600
commit    aae7a75a821a793ed6b8ad502a5890fb8e8f172d (patch)
tree      d589f12398a0f08eefe2a8fedcdd7b3fda5c28f8 /drivers/vfio
parent    bc93b9ae0151ae5ad5b8504cdc598428ea99570b (diff)
vfio/type1: Add proper error unwind for vfio_iommu_replay()
The vfio_iommu_replay() function does not currently unwind on error,
yet it does pin pages, perform IOMMU mapping, and modify the vfio_dma
structure to indicate IOMMU mapping.  The IOMMU mappings are torn down
when the domain is destroyed, but the other actions go on to cause
trouble later.  For example, the iommu->domain_list can be empty if we
only have a non-IOMMU backed mdev attached.  We don't currently check
if the list is empty before getting the first entry in the list, which
leads to a bogus domain pointer.  If a vfio_dma entry is erroneously
marked as iommu_mapped, we'll attempt to use that bogus pointer to
retrieve the existing physical page addresses.

This is the scenario that uncovered this issue, attempting to hot-add
a vfio-pci device to a container with an existing mdev device and DMA
mappings, one of which could not be pinned, causing a failure adding
the new group to the existing container and setting the conditions
for a subsequent attempt to explode.

To resolve this, we can first check if the domain_list is empty so
that we can reject replay of a bogus domain, should we ever encounter
this inconsistent state again in the future.  The real fix though is
to add the necessary unwind support, which means cleaning up the
current pinning if an IOMMU mapping fails, then walking back through
the r-b tree of DMA entries, reading from the IOMMU which ranges are
mapped, and unmapping and unpinning those ranges.  To be able to do
this, we also defer marking the DMA entry as IOMMU mapped until all
entries are processed, in order to allow the unwind to know the
disposition of each entry.

Fixes: a54eb55045ae ("vfio iommu type1: Add support for mediated devices")
Reported-by: Zhiyi Guo <zhguo@redhat.com>
Tested-by: Zhiyi Guo <zhguo@redhat.com>
Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
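[Editor's note: for illustration only, the following stand-alone sketch (not kernel code) shows the map-then-unwind pattern described above: map every range in order, defer marking entries as mapped until the whole walk succeeds, and on failure walk backward over everything processed so far. The entry array and the map_range()/unmap_range() helpers are hypothetical stand-ins for the vfio_dma r-b tree, iommu_map(), and the iommu_unmap()/vfio_unpin_pages_remote() pair; the sketch omits the page pinning and the physically-contiguous coalescing the real code performs.]

/*
 * Illustrative sketch of the unwind pattern (user space, hypothetical
 * helpers; not the kernel implementation).
 */
#include <stdio.h>
#include <stdbool.h>

struct entry {
	unsigned long iova;	/* start of the range */
	unsigned long size;	/* length of the range */
	bool mapped;		/* set only once the whole replay succeeds */
};

/* Stand-in for iommu_map(); fail on one range to exercise the unwind. */
static int map_range(struct entry *e)
{
	return e->iova == 0x3000 ? -1 : 0;
}

/* Stand-in for iommu_unmap()/vfio_unpin_pages_remote(). */
static void unmap_range(struct entry *e)
{
	printf("unwound 0x%lx+0x%lx\n", e->iova, e->size);
}

static int replay(struct entry *entries, int n)
{
	int i, ret = 0;

	for (i = 0; i < n; i++) {
		ret = map_range(&entries[i]);
		if (ret)
			goto unwind;
	}

	/* Defer the "mapped" flag to a second walk, as the patch does. */
	for (i = 0; i < n; i++)
		entries[i].mapped = true;

	return 0;

unwind:
	/* Walk backward from the failing entry, undoing prior work. */
	while (i-- > 0)
		unmap_range(&entries[i]);

	return ret;
}

int main(void)
{
	struct entry entries[] = {
		{ 0x1000, 0x1000, false },
		{ 0x2000, 0x1000, false },
		{ 0x3000, 0x1000, false },	/* map_range() fails here */
	};

	return replay(entries, 3) ? 1 : 0;
}

Deferring the mapped flag is what lets the real unwind distinguish entries that only need iommu_unmap() from those whose pages must also be unpinned.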
Diffstat (limited to 'drivers/vfio')
-rw-r--r--  drivers/vfio/vfio_iommu_type1.c | 71 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-----
1 file changed, 66 insertions(+), 5 deletions(-)
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 6990fc711a80..c992973cc2d5 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -1424,13 +1424,16 @@ static int vfio_bus_type(struct device *dev, void *data)
 static int vfio_iommu_replay(struct vfio_iommu *iommu,
 			     struct vfio_domain *domain)
 {
-	struct vfio_domain *d;
+	struct vfio_domain *d = NULL;
 	struct rb_node *n;
 	unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 	int ret;
 
 	/* Arbitrarily pick the first domain in the list for lookups */
-	d = list_first_entry(&iommu->domain_list, struct vfio_domain, next);
+	if (!list_empty(&iommu->domain_list))
+		d = list_first_entry(&iommu->domain_list,
+				     struct vfio_domain, next);
+
 	n = rb_first(&iommu->dma_list);
 
 	for (; n; n = rb_next(n)) {
@@ -1448,6 +1451,11 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
 				phys_addr_t p;
 				dma_addr_t i;
 
+				if (WARN_ON(!d)) { /* mapped w/o a domain?! */
+					ret = -EINVAL;
+					goto unwind;
+				}
+
 				phys = iommu_iova_to_phys(d->domain, iova);
 
 				if (WARN_ON(!phys)) {
@@ -1477,7 +1485,7 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
 				if (npage <= 0) {
 					WARN_ON(!npage);
 					ret = (int)npage;
-					return ret;
+					goto unwind;
 				}
 
 				phys = pfn << PAGE_SHIFT;
@@ -1486,14 +1494,67 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
 			ret = iommu_map(domain->domain, iova, phys,
 					size, dma->prot | domain->prot);
-			if (ret)
-				return ret;
+			if (ret) {
+				if (!dma->iommu_mapped)
+					vfio_unpin_pages_remote(dma, iova,
+							phys >> PAGE_SHIFT,
+							size >> PAGE_SHIFT,
+							true);
+				goto unwind;
+			}
 
 			iova += size;
 		}
+	}
+
+	/* All dmas are now mapped, defer to second tree walk for unwind */
+	for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
+		struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
+
 		dma->iommu_mapped = true;
 	}
+
 	return 0;
+
+unwind:
+	for (; n; n = rb_prev(n)) {
+		struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
+		dma_addr_t iova;
+
+		if (dma->iommu_mapped) {
+			iommu_unmap(domain->domain, dma->iova, dma->size);
+			continue;
+		}
+
+		iova = dma->iova;
+		while (iova < dma->iova + dma->size) {
+			phys_addr_t phys, p;
+			size_t size;
+			dma_addr_t i;
+
+			phys = iommu_iova_to_phys(domain->domain, iova);
+			if (!phys) {
+				iova += PAGE_SIZE;
+				continue;
+			}
+
+			size = PAGE_SIZE;
+			p = phys + size;
+			i = iova + size;
+			while (i < dma->iova + dma->size &&
+			       p == iommu_iova_to_phys(domain->domain, i)) {
+				size += PAGE_SIZE;
+				p += PAGE_SIZE;
+				i += PAGE_SIZE;
+			}
+
+			iommu_unmap(domain->domain, iova, size);
+			vfio_unpin_pages_remote(dma, iova, phys >> PAGE_SHIFT,
						size >> PAGE_SHIFT, true);
		}
+	}
+
+	return ret;
 }
 
 /*
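
[Editor's note: the unwind path above also coalesces physically contiguous pages before each iommu_unmap() call rather than unmapping page by page. Below is a minimal user-space sketch of that probing loop; translate() is a hypothetical stand-in for iommu_iova_to_phys(), using a made-up identity mapping with one hole purely for demonstration.]

#include <stdio.h>

#define PAGE_SIZE 0x1000UL

/*
 * Hypothetical stand-in for iommu_iova_to_phys(): identity map with a
 * hole at 0x3000 so the loop produces more than one range.
 */
static unsigned long translate(unsigned long iova)
{
	return iova == 0x3000 ? 0 : iova;
}

int main(void)
{
	unsigned long start = 0x1000, end = 0x6000, iova = start;

	while (iova < end) {
		unsigned long phys = translate(iova);
		unsigned long size, p, i;

		if (!phys) {		/* unmapped page, skip it */
			iova += PAGE_SIZE;
			continue;
		}

		/*
		 * Grow the range while the next IOVA page translates to
		 * the next physical page.
		 */
		size = PAGE_SIZE;
		p = phys + size;
		i = iova + size;
		while (i < end && translate(i) == p) {
			size += PAGE_SIZE;
			p += PAGE_SIZE;
			i += PAGE_SIZE;
		}

		/* One unmap call covers the whole contiguous run. */
		printf("unmap 0x%lx size 0x%lx\n", iova, size);
		iova += size;
	}

	return 0;
}

Batching the unmap this way keeps the number of IOMMU operations proportional to the number of contiguous runs rather than the number of pages.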