path: root/drivers/iommu/iommufd/hw_pagetable.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
 */
#include <linux/iommu.h>
#include <uapi/linux/iommufd.h>

#include "../iommu-priv.h"
#include "iommufd_private.h"

void iommufd_hwpt_paging_destroy(struct iommufd_object *obj)
{
	struct iommufd_hwpt_paging *hwpt_paging =
		container_of(obj, struct iommufd_hwpt_paging, common.obj);

	if (!list_empty(&hwpt_paging->hwpt_item)) {
		mutex_lock(&hwpt_paging->ioas->mutex);
		list_del(&hwpt_paging->hwpt_item);
		mutex_unlock(&hwpt_paging->ioas->mutex);

		iopt_table_remove_domain(&hwpt_paging->ioas->iopt,
					 hwpt_paging->common.domain);
	}

	if (hwpt_paging->common.domain)
		iommu_domain_free(hwpt_paging->common.domain);

	refcount_dec(&hwpt_paging->ioas->obj.users);
}

void iommufd_hwpt_paging_abort(struct iommufd_object *obj)
{
	struct iommufd_hwpt_paging *hwpt_paging =
		container_of(obj, struct iommufd_hwpt_paging, common.obj);

	/* The ioas->mutex must be held until finalize is called. */
	lockdep_assert_held(&hwpt_paging->ioas->mutex);

	if (!list_empty(&hwpt_paging->hwpt_item)) {
		list_del_init(&hwpt_paging->hwpt_item);
		iopt_table_remove_domain(&hwpt_paging->ioas->iopt,
					 hwpt_paging->common.domain);
	}
	iommufd_hwpt_paging_destroy(obj);
}

void iommufd_hwpt_nested_destroy(struct iommufd_object *obj)
{
	struct iommufd_hwpt_nested *hwpt_nested =
		container_of(obj, struct iommufd_hwpt_nested, common.obj);

	if (hwpt_nested->common.domain)
		iommu_domain_free(hwpt_nested->common.domain);

	refcount_dec(&hwpt_nested->parent->common.obj.users);
}

void iommufd_hwpt_nested_abort(struct iommufd_object *obj)
{
	iommufd_hwpt_nested_destroy(obj);
}

static int
iommufd_hwpt_paging_enforce_cc(struct iommufd_hwpt_paging *hwpt_paging)
{
	struct iommu_domain *paging_domain = hwpt_paging->common.domain;

	if (hwpt_paging->enforce_cache_coherency)
		return 0;

	if (paging_domain->ops->enforce_cache_coherency)
		hwpt_paging->enforce_cache_coherency =
			paging_domain->ops->enforce_cache_coherency(
				paging_domain);
	if (!hwpt_paging->enforce_cache_coherency)
		return -EINVAL;
	return 0;
}

/**
 * iommufd_hwpt_paging_alloc() - Get a PAGING iommu_domain for a device
 * @ictx: iommufd context
 * @ioas: IOAS to associate the domain with
 * @idev: Device to get an iommu_domain for
 * @flags: Flags from userspace
 * @immediate_attach: True if idev should be attached to the hwpt
 * @user_data: The user provided driver specific data describing the domain to
 *             create
 *
 * Allocate a new iommu_domain and return it as a hw_pagetable. The HWPT
 * will be linked to the given ioas and upon return the underlying iommu_domain
 * is fully populated.
 *
 * The caller must hold the ioas->mutex until after
 * iommufd_object_abort_and_destroy() or iommufd_object_finalize() is called on
 * the returned hwpt.
 */
struct iommufd_hwpt_paging *
iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
			  struct iommufd_device *idev, u32 flags,
			  bool immediate_attach,
			  const struct iommu_user_data *user_data)
{
	const u32 valid_flags = IOMMU_HWPT_ALLOC_NEST_PARENT |
				IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
	const struct iommu_ops *ops = dev_iommu_ops(idev->dev);
	struct iommufd_hwpt_paging *hwpt_paging;
	struct iommufd_hw_pagetable *hwpt;
	int rc;

	lockdep_assert_held(&ioas->mutex);

	if ((flags || user_data) && !ops->domain_alloc_user)
		return ERR_PTR(-EOPNOTSUPP);
	if (flags & ~valid_flags)
		return ERR_PTR(-EOPNOTSUPP);

	hwpt_paging = __iommufd_object_alloc(
		ictx, hwpt_paging, IOMMUFD_OBJ_HWPT_PAGING, common.obj);
	if (IS_ERR(hwpt_paging))
		return ERR_CAST(hwpt_paging);
	hwpt = &hwpt_paging->common;

	INIT_LIST_HEAD(&hwpt_paging->hwpt_item);
	/* Pairs with iommufd_hwpt_paging_destroy() */
	refcount_inc(&ioas->obj.users);
	hwpt_paging->ioas = ioas;
	hwpt_paging->nest_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT;

	if (ops->domain_alloc_user) {
		hwpt->domain = ops->domain_alloc_user(idev->dev, flags, NULL,
						      user_data);
		if (IS_ERR(hwpt->domain)) {
			rc = PTR_ERR(hwpt->domain);
			hwpt->domain = NULL;
			goto out_abort;
		}
		hwpt->domain->owner = ops;
	} else {
		hwpt->domain = iommu_domain_alloc(idev->dev->bus);
		if (!hwpt->domain) {
			rc = -ENOMEM;
			goto out_abort;
		}
	}

	/*
	 * Set the coherency mode before we do iopt_table_add_domain() as some
	 * iommus have a per-PTE bit that controls it and need to decide before
	 * doing any maps. It is an iommu driver bug to report
	 * IOMMU_CAP_ENFORCE_CACHE_COHERENCY but fail enforce_cache_coherency on
	 * a new domain.
	 *
	 * The cache coherency mode must be configured here and cannot change
	 * afterwards. A non-CC HWPT created for a non-CC device can later be
	 * reused by another device, whether it is non-CC or CC. However, a CC
	 * HWPT created for a CC device can only be reused by other CC devices;
	 * for a non-CC device userspace would instead need to allocate a
	 * separate non-CC HWPT.
	 */
	if (idev->enforce_cache_coherency) {
		rc = iommufd_hwpt_paging_enforce_cc(hwpt_paging);
		if (WARN_ON(rc))
			goto out_abort;
	}

	/*
	 * immediate_attach exists only to accommodate iommu drivers that cannot
	 * directly allocate a domain. These drivers do not finish creating the
	 * domain until attach is completed. Thus we must have this call
	 * sequence. Once those drivers are fixed this should be removed.
	 */
	if (immediate_attach) {
		rc = iommufd_hw_pagetable_attach(hwpt, idev);
		if (rc)
			goto out_abort;
	}

	rc = iopt_table_add_domain(&ioas->iopt, hwpt->domain);
	if (rc)
		goto out_detach;
	list_add_tail(&hwpt_paging->hwpt_item, &ioas->hwpt_list);
	return hwpt_paging;

out_detach:
	if (immediate_attach)
		iommufd_hw_pagetable_detach(idev);
out_abort:
	iommufd_object_abort_and_destroy(ictx, &hwpt->obj);
	return ERR_PTR(rc);
}
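
/*
 * Illustrative userspace sketch (not part of this file): IOMMU_HWPT_ALLOC
 * with an IOAS as pt_id reaches iommufd_hwpt_paging_alloc() above. The
 * struct layout follows uapi/linux/iommufd.h; the fd and the dev_id/ioas_id
 * object ids are assumed to exist already:
 *
 *	struct iommu_hwpt_alloc cmd = {
 *		.size = sizeof(cmd),
 *		.flags = IOMMU_HWPT_ALLOC_NEST_PARENT,
 *		.dev_id = dev_id,	// device previously bound to iommufd
 *		.pt_id = ioas_id,	// an IOMMUFD_OBJ_IOAS id
 *		.data_type = IOMMU_HWPT_DATA_NONE,
 *	};
 *
 *	if (!ioctl(iommufd, IOMMU_HWPT_ALLOC, &cmd))
 *		nest_parent_id = cmd.out_hwpt_id;
 */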

/**
 * iommufd_hwpt_nested_alloc() - Get a NESTED iommu_domain for a device
 * @ictx: iommufd context
 * @parent: Parent PAGING-type hwpt to associate the domain with
 * @idev: Device to get an iommu_domain for
 * @flags: Flags from userspace
 * @user_data: The user provided driver specific data describing the domain to
 *             create. Must be valid
 *
 * Allocate a new iommu_domain (must be IOMMU_DOMAIN_NESTED) and return it as
 * a NESTED hw_pagetable. The given parent PAGING-type hwpt must be capable of
 * being a parent.
 */
static struct iommufd_hwpt_nested *
iommufd_hwpt_nested_alloc(struct iommufd_ctx *ictx,
			  struct iommufd_hwpt_paging *parent,
			  struct iommufd_device *idev, u32 flags,
			  const struct iommu_user_data *user_data)
{
	const struct iommu_ops *ops = dev_iommu_ops(idev->dev);
	struct iommufd_hwpt_nested *hwpt_nested;
	struct iommufd_hw_pagetable *hwpt;
	int rc;

	if (flags || !user_data->len || !ops->domain_alloc_user)
		return ERR_PTR(-EOPNOTSUPP);
	if (parent->auto_domain || !parent->nest_parent)
		return ERR_PTR(-EINVAL);

	hwpt_nested = __iommufd_object_alloc(
		ictx, hwpt_nested, IOMMUFD_OBJ_HWPT_NESTED, common.obj);
	if (IS_ERR(hwpt_nested))
		return ERR_CAST(hwpt_nested);
	hwpt = &hwpt_nested->common;

	refcount_inc(&parent->common.obj.users);
	hwpt_nested->parent = parent;

	hwpt->domain = ops->domain_alloc_user(idev->dev, flags,
					      parent->common.domain, user_data);
	if (IS_ERR(hwpt->domain)) {
		rc = PTR_ERR(hwpt->domain);
		hwpt->domain = NULL;
		goto out_abort;
	}
	hwpt->domain->owner = ops;

	if (WARN_ON_ONCE(hwpt->domain->type != IOMMU_DOMAIN_NESTED)) {
		rc = -EINVAL;
		goto out_abort;
	}
	return hwpt_nested;

out_abort:
	iommufd_object_abort_and_destroy(ictx, &hwpt->obj);
	return ERR_PTR(rc);
}
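
/*
 * Illustrative userspace sketch: a NESTED HWPT is requested by passing a
 * nesting-capable PAGING HWPT as pt_id plus driver specific data. The VT-d
 * stage-1 type is only one possible format and its contents are assumed:
 *
 *	struct iommu_hwpt_vtd_s1 s1 = { .pgtbl_addr = s1_pt };	// plus other fields
 *	struct iommu_hwpt_alloc cmd = {
 *		.size = sizeof(cmd),
 *		.dev_id = dev_id,
 *		.pt_id = nest_parent_id,	// IOMMUFD_OBJ_HWPT_PAGING id
 *		.data_type = IOMMU_HWPT_DATA_VTD_S1,
 *		.data_len = sizeof(s1),
 *		.data_uptr = (uintptr_t)&s1,
 *	};
 *	ioctl(iommufd, IOMMU_HWPT_ALLOC, &cmd);	// out_hwpt_id on success
 */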

int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd)
{
	struct iommu_hwpt_alloc *cmd = ucmd->cmd;
	const struct iommu_user_data user_data = {
		.type = cmd->data_type,
		.uptr = u64_to_user_ptr(cmd->data_uptr),
		.len = cmd->data_len,
	};
	struct iommufd_hw_pagetable *hwpt;
	struct iommufd_ioas *ioas = NULL;
	struct iommufd_object *pt_obj;
	struct iommufd_device *idev;
	int rc;

	if (cmd->__reserved)
		return -EOPNOTSUPP;
	if ((cmd->data_type == IOMMU_HWPT_DATA_NONE && cmd->data_len) ||
	    (cmd->data_type != IOMMU_HWPT_DATA_NONE && !cmd->data_len))
		return -EINVAL;

	idev = iommufd_get_device(ucmd, cmd->dev_id);
	if (IS_ERR(idev))
		return PTR_ERR(idev);

	pt_obj = iommufd_get_object(ucmd->ictx, cmd->pt_id, IOMMUFD_OBJ_ANY);
	if (IS_ERR(pt_obj)) {
		rc = -EINVAL;
		goto out_put_idev;
	}

	if (pt_obj->type == IOMMUFD_OBJ_IOAS) {
		struct iommufd_hwpt_paging *hwpt_paging;

		ioas = container_of(pt_obj, struct iommufd_ioas, obj);
		mutex_lock(&ioas->mutex);
		hwpt_paging = iommufd_hwpt_paging_alloc(
			ucmd->ictx, ioas, idev, cmd->flags, false,
			user_data.len ? &user_data : NULL);
		if (IS_ERR(hwpt_paging)) {
			rc = PTR_ERR(hwpt_paging);
			goto out_unlock;
		}
		hwpt = &hwpt_paging->common;
	} else if (pt_obj->type == IOMMUFD_OBJ_HWPT_PAGING) {
		struct iommufd_hwpt_nested *hwpt_nested;

		hwpt_nested = iommufd_hwpt_nested_alloc(
			ucmd->ictx,
			container_of(pt_obj, struct iommufd_hwpt_paging,
				     common.obj),
			idev, cmd->flags, &user_data);
		if (IS_ERR(hwpt_nested)) {
			rc = PTR_ERR(hwpt_nested);
			goto out_unlock;
		}
		hwpt = &hwpt_nested->common;
	} else {
		rc = -EINVAL;
		goto out_put_pt;
	}

	cmd->out_hwpt_id = hwpt->obj.id;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_hwpt;
	iommufd_object_finalize(ucmd->ictx, &hwpt->obj);
	goto out_unlock;

out_hwpt:
	iommufd_object_abort_and_destroy(ucmd->ictx, &hwpt->obj);
out_unlock:
	if (ioas)
		mutex_unlock(&ioas->mutex);
out_put_pt:
	iommufd_put_object(ucmd->ictx, pt_obj);
out_put_idev:
	iommufd_put_object(ucmd->ictx, &idev->obj);
	return rc;
}

int iommufd_hwpt_set_dirty_tracking(struct iommufd_ucmd *ucmd)
{
	struct iommu_hwpt_set_dirty_tracking *cmd = ucmd->cmd;
	struct iommufd_hwpt_paging *hwpt_paging;
	struct iommufd_ioas *ioas;
	int rc = -EOPNOTSUPP;
	bool enable;

	if (cmd->flags & ~IOMMU_HWPT_DIRTY_TRACKING_ENABLE)
		return rc;

	hwpt_paging = iommufd_get_hwpt_paging(ucmd, cmd->hwpt_id);
	if (IS_ERR(hwpt_paging))
		return PTR_ERR(hwpt_paging);

	ioas = hwpt_paging->ioas;
	enable = cmd->flags & IOMMU_HWPT_DIRTY_TRACKING_ENABLE;

	rc = iopt_set_dirty_tracking(&ioas->iopt, hwpt_paging->common.domain,
				     enable);

	iommufd_put_object(ucmd->ictx, &hwpt_paging->common.obj);
	return rc;
}
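
/*
 * Illustrative userspace sketch: toggling dirty tracking on a PAGING HWPT.
 * Layout per uapi/linux/iommufd.h; hwpt_id is assumed to come from a prior
 * IOMMU_HWPT_ALLOC:
 *
 *	struct iommu_hwpt_set_dirty_tracking cmd = {
 *		.size = sizeof(cmd),
 *		.flags = IOMMU_HWPT_DIRTY_TRACKING_ENABLE,	// 0 disables
 *		.hwpt_id = hwpt_id,
 *	};
 *	ioctl(iommufd, IOMMU_HWPT_SET_DIRTY_TRACKING, &cmd);
 */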

int iommufd_hwpt_get_dirty_bitmap(struct iommufd_ucmd *ucmd)
{
	struct iommu_hwpt_get_dirty_bitmap *cmd = ucmd->cmd;
	struct iommufd_hwpt_paging *hwpt_paging;
	struct iommufd_ioas *ioas;
	int rc = -EOPNOTSUPP;

	if ((cmd->flags & ~(IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR)) ||
	    cmd->__reserved)
		return -EOPNOTSUPP;

	hwpt_paging = iommufd_get_hwpt_paging(ucmd, cmd->hwpt_id);
	if (IS_ERR(hwpt_paging))
		return PTR_ERR(hwpt_paging);

	ioas = hwpt_paging->ioas;
	rc = iopt_read_and_clear_dirty_data(
		&ioas->iopt, hwpt_paging->common.domain, cmd->flags, cmd);

	iommufd_put_object(ucmd->ictx, &hwpt_paging->common.obj);
	return rc;
}
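
/*
 * Illustrative userspace sketch: reading (and, unless
 * IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR is set, clearing) dirty bits for an
 * IOVA range, one bit per page_size unit. The range values are assumed:
 *
 *	__u64 bitmap[npages / 64];	// caller sized, assumes npages % 64 == 0
 *	struct iommu_hwpt_get_dirty_bitmap cmd = {
 *		.size = sizeof(cmd),
 *		.hwpt_id = hwpt_id,
 *		.iova = iova,
 *		.length = npages * page_size,
 *		.page_size = page_size,
 *		.data = (uintptr_t)bitmap,
 *	};
 *	ioctl(iommufd, IOMMU_HWPT_GET_DIRTY_BITMAP, &cmd);
 */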

int iommufd_hwpt_invalidate(struct iommufd_ucmd *ucmd)
{
	struct iommu_hwpt_invalidate *cmd = ucmd->cmd;
	struct iommu_user_data_array data_array = {
		.type = cmd->data_type,
		.uptr = u64_to_user_ptr(cmd->data_uptr),
		.entry_len = cmd->entry_len,
		.entry_num = cmd->entry_num,
	};
	struct iommufd_hw_pagetable *hwpt;
	u32 done_num = 0;
	int rc;

	if (cmd->__reserved) {
		rc = -EOPNOTSUPP;
		goto out;
	}

	if (cmd->entry_num && (!cmd->data_uptr || !cmd->entry_len)) {
		rc = -EINVAL;
		goto out;
	}

	hwpt = iommufd_get_hwpt_nested(ucmd, cmd->hwpt_id);
	if (IS_ERR(hwpt)) {
		rc = PTR_ERR(hwpt);
		goto out;
	}

	rc = hwpt->domain->ops->cache_invalidate_user(hwpt->domain,
						      &data_array);
	done_num = data_array.entry_num;

	iommufd_put_object(ucmd->ictx, &hwpt->obj);
out:
	cmd->entry_num = done_num;
	if (iommufd_ucmd_respond(ucmd, sizeof(*cmd)))
		return -EFAULT;
	return rc;
}
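
/*
 * Illustrative userspace sketch: invalidating cached translations of a
 * NESTED HWPT after modifying the stage-1 page table. The entry format is
 * driver specific; the VT-d type and entry contents are assumed:
 *
 *	struct iommu_hwpt_vtd_s1_invalidate ents[1] = {
 *		{ .addr = iova, .npages = 1 },
 *	};
 *	struct iommu_hwpt_invalidate cmd = {
 *		.size = sizeof(cmd),
 *		.hwpt_id = nested_hwpt_id,
 *		.data_uptr = (uintptr_t)ents,
 *		.data_type = IOMMU_HWPT_INVALIDATE_DATA_VTD_S1,
 *		.entry_len = sizeof(ents[0]),
 *		.entry_num = 1,
 *	};
 *	ioctl(iommufd, IOMMU_HWPT_INVALIDATE, &cmd);
 *	// on return cmd.entry_num is the number of entries processed
 */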