| author | Dragos Tatulea | 2023-10-18 20:14:51 +0300 |
|---|---|---|
| committer | Michael S. Tsirkin | 2023-11-01 09:19:57 -0400 |
| commit | 625e4b59a923d2bb30759b88ecca34d12735fa7e (patch) | |
| tree | 1247eed390a0eaa52a8261310687bb854797ec2e /drivers/vdpa | |
| parent | 186e25387ed6f1403a1b464e31f5381ba4424681 (diff) | |
vdpa/mlx5: Improve mr update flow
The current flow for updating an mr works directly on mvdev->mr, which
makes it cumbersome to handle multiple new mr structs.

This patch makes the flow more straightforward by having
mlx5_vdpa_create_mr return a new mr which is then used to replace the
old mr (if any). The old mr is deleted and unlinked from mvdev. For the
case when the iotlb is empty (but not NULL), the old mr is simply
cleared.

This change paves the way for adding mrs for different ASIDs.

The initialized bool is no longer needed: mr is now a pointer in the
mlx5_vdpa_dev struct, which is NULL when not initialized.
Acked-by: Eugenio Pérez <eperezma@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
Message-Id: <20231018171456.1624030-14-dtatulea@nvidia.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: Si-Wei Liu <si-wei.liu@oracle.com>
Tested-by: Si-Wei Liu <si-wei.liu@oracle.com>
Tested-by: Lei Yang <leiyang@redhat.com>
Diffstat (limited to 'drivers/vdpa')
-rw-r--r-- | drivers/vdpa/mlx5/core/mlx5_vdpa.h | 14 |
-rw-r--r-- | drivers/vdpa/mlx5/core/mr.c | 87 |
-rw-r--r-- | drivers/vdpa/mlx5/net/mlx5_vnet.c | 53 |
3 files changed, 82 insertions, 72 deletions
```diff
diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
index 9c6ac42c21e1..bbe4335106bd 100644
--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
@@ -31,8 +31,6 @@ struct mlx5_vdpa_mr {
 	struct list_head head;
 	unsigned long num_directs;
 	unsigned long num_klms;
-	/* state of dvq mr */
-	bool initialized;
 
 	bool user_mr;
 };
@@ -91,7 +89,7 @@ struct mlx5_vdpa_dev {
 	u16 max_idx;
 	u32 generation;
 
-	struct mlx5_vdpa_mr mr;
+	struct mlx5_vdpa_mr *mr;
 	/* serialize mr access */
 	struct mutex mr_mtx;
 	struct mlx5_control_vq cvq;
@@ -114,14 +112,14 @@ void mlx5_vdpa_free_resources(struct mlx5_vdpa_dev *mvdev);
 int mlx5_vdpa_create_mkey(struct mlx5_vdpa_dev *mvdev, u32 *mkey, u32 *in,
 			  int inlen);
 int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, u32 mkey);
-int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
-			     bool *change_map, unsigned int asid);
-int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
-			struct mlx5_vdpa_mr *mr,
-			struct vhost_iotlb *iotlb);
+struct mlx5_vdpa_mr *mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
+					 struct vhost_iotlb *iotlb);
 void mlx5_vdpa_destroy_mr_resources(struct mlx5_vdpa_dev *mvdev);
 void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev,
 			  struct mlx5_vdpa_mr *mr);
+void mlx5_vdpa_update_mr(struct mlx5_vdpa_dev *mvdev,
+			 struct mlx5_vdpa_mr *mr,
+			 unsigned int asid);
 int mlx5_vdpa_update_cvq_iotlb(struct mlx5_vdpa_dev *mvdev,
 				struct vhost_iotlb *iotlb,
 				unsigned int asid);
diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
index abd6a6fb122f..00eff5a07152 100644
--- a/drivers/vdpa/mlx5/core/mr.c
+++ b/drivers/vdpa/mlx5/core/mr.c
@@ -495,30 +495,51 @@ static void destroy_user_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr
 
 static void _mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
 {
-	if (!mr->initialized)
-		return;
-
 	if (mr->user_mr)
 		destroy_user_mr(mvdev, mr);
 	else
 		destroy_dma_mr(mvdev, mr);
-
-	mr->initialized = false;
 }
 
 void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev,
 			  struct mlx5_vdpa_mr *mr)
 {
+	if (!mr)
+		return;
+
 	mutex_lock(&mvdev->mr_mtx);
 
 	_mlx5_vdpa_destroy_mr(mvdev, mr);
 
+	if (mvdev->mr == mr)
+		mvdev->mr = NULL;
+
+	mutex_unlock(&mvdev->mr_mtx);
+
+	kfree(mr);
+}
+
+void mlx5_vdpa_update_mr(struct mlx5_vdpa_dev *mvdev,
+			 struct mlx5_vdpa_mr *new_mr,
+			 unsigned int asid)
+{
+	struct mlx5_vdpa_mr *old_mr = mvdev->mr;
+
+	mutex_lock(&mvdev->mr_mtx);
+
+	mvdev->mr = new_mr;
+	if (old_mr) {
+		_mlx5_vdpa_destroy_mr(mvdev, old_mr);
+		kfree(old_mr);
+	}
+
 	mutex_unlock(&mvdev->mr_mtx);
+
 }
 
 void mlx5_vdpa_destroy_mr_resources(struct mlx5_vdpa_dev *mvdev)
 {
-	mlx5_vdpa_destroy_mr(mvdev, &mvdev->mr);
+	mlx5_vdpa_destroy_mr(mvdev, mvdev->mr);
 
 	prune_iotlb(mvdev);
 }
@@ -528,52 +549,36 @@ static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
 {
 	int err;
 
-	if (mr->initialized)
-		return 0;
-
 	if (iotlb)
 		err = create_user_mr(mvdev, mr, iotlb);
 	else
 		err = create_dma_mr(mvdev, mr);
 
-	if (err)
-		return err;
-
-	mr->initialized = true;
-
-	return 0;
+	return err;
 }
 
-int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
-			struct mlx5_vdpa_mr *mr,
-			struct vhost_iotlb *iotlb)
+struct mlx5_vdpa_mr *mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
+					 struct vhost_iotlb *iotlb)
 {
+	struct mlx5_vdpa_mr *mr;
 	int err;
 
+	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+	if (!mr)
+		return ERR_PTR(-ENOMEM);
+
 	mutex_lock(&mvdev->mr_mtx);
 	err = _mlx5_vdpa_create_mr(mvdev, mr, iotlb);
 	mutex_unlock(&mvdev->mr_mtx);
 
-	return err;
-}
-
-int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
-			     bool *change_map, unsigned int asid)
-{
-	struct mlx5_vdpa_mr *mr = &mvdev->mr;
-	int err = 0;
+	if (err)
+		goto out_err;
 
-	*change_map = false;
-	mutex_lock(&mvdev->mr_mtx);
-	if (mr->initialized) {
-		mlx5_vdpa_info(mvdev, "memory map update\n");
-		*change_map = true;
-	}
-	if (!*change_map)
-		err = _mlx5_vdpa_create_mr(mvdev, mr, iotlb);
-	mutex_unlock(&mvdev->mr_mtx);
+	return mr;
 
-	return err;
+out_err:
+	kfree(mr);
+	return ERR_PTR(err);
 }
 
 int mlx5_vdpa_update_cvq_iotlb(struct mlx5_vdpa_dev *mvdev,
@@ -597,11 +602,13 @@ int mlx5_vdpa_update_cvq_iotlb(struct mlx5_vdpa_dev *mvdev,
 
 int mlx5_vdpa_create_dma_mr(struct mlx5_vdpa_dev *mvdev)
 {
-	int err;
+	struct mlx5_vdpa_mr *mr;
 
-	err = mlx5_vdpa_create_mr(mvdev, &mvdev->mr, NULL);
-	if (err)
-		return err;
+	mr = mlx5_vdpa_create_mr(mvdev, NULL);
+	if (IS_ERR(mr))
+		return PTR_ERR(mr);
+
+	mlx5_vdpa_update_mr(mvdev, mr, 0);
 
 	return mlx5_vdpa_update_cvq_iotlb(mvdev, NULL, 0);
 }
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 9b1f0d084d24..35ed0749da28 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -913,7 +913,7 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
 	MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr);
 	MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr);
 	MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr);
-	MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, ndev->mvdev.mr.mkey);
+	MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, ndev->mvdev.mr->mkey);
 	MLX5_SET(virtio_q, vq_ctx, umem_1_id, mvq->umem1.id);
 	MLX5_SET(virtio_q, vq_ctx, umem_1_size, mvq->umem1.size);
 	MLX5_SET(virtio_q, vq_ctx, umem_2_id, mvq->umem2.id);
@@ -2673,7 +2673,7 @@ static void restore_channels_info(struct mlx5_vdpa_net *ndev)
 }
 
 static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
-				struct vhost_iotlb *iotlb, unsigned int asid)
+				struct mlx5_vdpa_mr *new_mr, unsigned int asid)
 {
 	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
 	int err;
@@ -2681,27 +2681,18 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
 	suspend_vqs(ndev);
 	err = save_channels_info(ndev);
 	if (err)
-		goto err_mr;
+		return err;
 
 	teardown_driver(ndev);
-	mlx5_vdpa_destroy_mr(mvdev, &mvdev->mr);
-	err = mlx5_vdpa_create_mr(mvdev, &mvdev->mr, iotlb);
-	if (err)
-		goto err_mr;
+
+	mlx5_vdpa_update_mr(mvdev, new_mr, asid);
 
 	if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK) || mvdev->suspended)
-		goto err_mr;
+		return 0;
 
 	restore_channels_info(ndev);
 	err = setup_driver(mvdev);
-	if (err)
-		goto err_setup;
-
-	return 0;
 
-err_setup:
-	mlx5_vdpa_destroy_mr(mvdev, &mvdev->mr);
-err_mr:
 	return err;
 }
 
@@ -2919,26 +2910,40 @@ static u32 mlx5_vdpa_get_generation(struct vdpa_device *vdev)
 static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
 			unsigned int asid)
 {
-	bool change_map;
+	struct mlx5_vdpa_mr *new_mr;
 	int err;
 
 	if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] != asid)
 		goto end;
 
-	err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map, asid);
-	if (err) {
-		mlx5_vdpa_warn(mvdev, "set map failed(%d)\n", err);
-		return err;
+	if (vhost_iotlb_itree_first(iotlb, 0, U64_MAX)) {
+		new_mr = mlx5_vdpa_create_mr(mvdev, iotlb);
+		if (IS_ERR(new_mr)) {
+			err = PTR_ERR(new_mr);
+			mlx5_vdpa_warn(mvdev, "create map failed(%d)\n", err);
+			return err;
+		}
+	} else {
+		/* Empty iotlbs don't have an mr but will clear the previous mr. */
+		new_mr = NULL;
 	}
 
-	if (change_map) {
-		err = mlx5_vdpa_change_map(mvdev, iotlb, asid);
-		if (err)
-			return err;
+	if (!mvdev->mr) {
+		mlx5_vdpa_update_mr(mvdev, new_mr, asid);
+	} else {
+		err = mlx5_vdpa_change_map(mvdev, new_mr, asid);
+		if (err) {
+			mlx5_vdpa_warn(mvdev, "change map failed(%d)\n", err);
+			goto out_err;
+		}
 	}
 
 end:
	return mlx5_vdpa_update_cvq_iotlb(mvdev, iotlb, asid);
+
+out_err:
+	mlx5_vdpa_destroy_mr(mvdev, new_mr);
+	return err;
 }
 
 static int mlx5_vdpa_set_map(struct vdpa_device *vdev, unsigned int asid,
```
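Not part of the commit, but as a reading aid: the ownership rules the diff establishes (mlx5_vdpa_create_mr builds a detached mr or returns an error pointer, mlx5_vdpa_update_mr publishes it and frees the previous one under mr_mtx, mlx5_vdpa_destroy_mr is NULL-safe and unlinks itself from mvdev) can be sketched as a small stand-alone C model. All names below (struct dev, dev_create_mr, dev_update_mr, dev_destroy_mr) are hypothetical stand-ins, not driver APIs, and the locking and error handling are simplified relative to the kernel code.

```c
/* Illustrative user-space model of the mr ownership flow above; not driver code. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct mr {
	int user_mr;                    /* placeholder for the real mapping state */
};

struct dev {
	struct mr *mr;                  /* current mapping, NULL when none */
	pthread_mutex_t mr_mtx;         /* serializes publish and teardown */
};

/* Models mlx5_vdpa_create_mr(): build a new mr without touching dev->mr. */
static struct mr *dev_create_mr(int user_mr)
{
	struct mr *mr = calloc(1, sizeof(*mr));

	if (!mr)
		return NULL;            /* the driver returns ERR_PTR(-ENOMEM) here */
	mr->user_mr = user_mr;
	return mr;
}

/* Models mlx5_vdpa_update_mr(): publish new_mr, free the old one under the lock. */
static void dev_update_mr(struct dev *d, struct mr *new_mr)
{
	pthread_mutex_lock(&d->mr_mtx);
	free(d->mr);                    /* tear down the previous mapping, if any */
	d->mr = new_mr;                 /* new_mr may be NULL: "empty iotlb" case */
	pthread_mutex_unlock(&d->mr_mtx);
}

/* Models mlx5_vdpa_destroy_mr(): NULL-safe, unlinks from the device if current. */
static void dev_destroy_mr(struct dev *d, struct mr *mr)
{
	if (!mr)
		return;
	pthread_mutex_lock(&d->mr_mtx);
	if (d->mr == mr)
		d->mr = NULL;
	pthread_mutex_unlock(&d->mr_mtx);
	free(mr);
}

int main(void)
{
	static struct dev d = { .mr = NULL, .mr_mtx = PTHREAD_MUTEX_INITIALIZER };

	dev_update_mr(&d, dev_create_mr(1));    /* first map: just publish it        */
	dev_update_mr(&d, dev_create_mr(1));    /* remap: old mr is freed on publish */
	dev_update_mr(&d, NULL);                /* empty map: clears the current mr  */
	dev_destroy_mr(&d, dev_create_mr(0));   /* error path: drop an unpublished mr */
	printf("current mr: %s\n", d.mr ? "set" : "NULL");
	return 0;
}
```

The NULL new_mr case corresponds to the empty-iotlb branch in set_map_data(), where the previous mr is cleared without creating a replacement.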