/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2021 ARM Ltd.
*/
#ifndef _LINUX_ARM_FFA_H
#define _LINUX_ARM_FFA_H
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/uuid.h>
/* FFA Bus/Device/Driver related */
struct ffa_device {
u32 id;
int vm_id;
bool mode_32bit;
uuid_t uuid;
struct device dev;
const struct ffa_ops *ops;
};
#define to_ffa_dev(d) container_of(d, struct ffa_device, dev)
struct ffa_device_id {
uuid_t uuid;
};
struct ffa_driver {
const char *name;
int (*probe)(struct ffa_device *sdev);
void (*remove)(struct ffa_device *sdev);
const struct ffa_device_id *id_table;
struct device_driver driver;
};
#define to_ffa_driver(d) container_of(d, struct ffa_driver, driver)
static inline void ffa_dev_set_drvdata(struct ffa_device *fdev, void *data)
{
dev_set_drvdata(&fdev->dev, data);
}
static inline void *ffa_dev_get_drvdata(struct ffa_device *fdev)
{
return dev_get_drvdata(&fdev->dev);
}
#if IS_REACHABLE(CONFIG_ARM_FFA_TRANSPORT)
struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
const struct ffa_ops *ops);
void ffa_device_unregister(struct ffa_device *ffa_dev);
int ffa_driver_register(struct ffa_driver *driver, struct module *owner,
const char *mod_name);
void ffa_driver_unregister(struct ffa_driver *driver);
bool ffa_device_is_valid(struct ffa_device *ffa_dev);
#else
static inline
struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
const struct ffa_ops *ops)
{
return NULL;
}
static inline void ffa_device_unregister(struct ffa_device *dev) {}
static inline int
ffa_driver_register(struct ffa_driver *driver, struct module *owner,
const char *mod_name)
{
return -EINVAL;
}
static inline void ffa_driver_unregister(struct ffa_driver *driver) {}
static inline
bool ffa_device_is_valid(struct ffa_device *ffa_dev) { return false; }
#endif /* CONFIG_ARM_FFA_TRANSPORT */
#define ffa_register(driver) \
ffa_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
#define ffa_unregister(driver) \
ffa_driver_unregister(driver)
/**
 * module_ffa_driver() - Helper macro for registering an FFA driver
 * @__ffa_driver: ffa_driver structure
 *
 * Helper macro for FFA drivers to set up proper module init / exit
 * functions. Replaces module_init() and module_exit() and keeps people from
 * printing pointless things to the kernel log when their driver is loaded.
 */
#define module_ffa_driver(__ffa_driver) \
module_driver(__ffa_driver, ffa_register, ffa_unregister)
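/*
 * Example: a minimal driver built on the bus API above. This is an
 * illustrative sketch only; the example_* names and the UUID value are
 * hypothetical, and a real driver would also include <linux/slab.h> for
 * the allocation shown here.
 *
 *	struct example_ctx { int dummy; };
 *
 *	static int example_ffa_probe(struct ffa_device *ffa_dev)
 *	{
 *		struct example_ctx *ctx;
 *
 *		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 *		if (!ctx)
 *			return -ENOMEM;
 *
 *		ffa_dev_set_drvdata(ffa_dev, ctx);
 *		return 0;
 *	}
 *
 *	static void example_ffa_remove(struct ffa_device *ffa_dev)
 *	{
 *		kfree(ffa_dev_get_drvdata(ffa_dev));
 *	}
 *
 *	static const struct ffa_device_id example_ffa_id_table[] = {
 *		{ UUID_INIT(0x12345678, 0x1234, 0x1234,
 *			    0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77) },
 *		{}
 *	};
 *
 *	static struct ffa_driver example_ffa_driver = {
 *		.name		= "example_ffa",
 *		.probe		= example_ffa_probe,
 *		.remove		= example_ffa_remove,
 *		.id_table	= example_ffa_id_table,
 *	};
 *	module_ffa_driver(example_ffa_driver);
 */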
/* FFA transport related */
struct ffa_partition_info {
u16 id;
u16 exec_ctxt;
/* partition supports receipt of direct requests */
#define FFA_PARTITION_DIRECT_RECV BIT(0)
/* partition can send direct requests. */
#define FFA_PARTITION_DIRECT_SEND BIT(1)
/* partition can send and receive indirect messages. */
#define FFA_PARTITION_INDIRECT_MSG BIT(2)
/* partition runs in the AArch64 execution state. */
#define FFA_PARTITION_AARCH64_EXEC BIT(8)
u32 properties;
u32 uuid[4];
};
/* For use with FFA_MSG_SEND_DIRECT_{REQ,RESP} which pass data via registers */
struct ffa_send_direct_data {
unsigned long data0; /* w3/x3 */
unsigned long data1; /* w4/x4 */
unsigned long data2; /* w5/x5 */
unsigned long data3; /* w6/x6 */
unsigned long data4; /* w7/x7 */
};
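/*
 * Example: filling the structure above and issuing a synchronous direct
 * request. A sketch only; the payload values are arbitrary and the ops are
 * reached through the ffa_ops installed on the device (declared later in
 * this header).
 *
 *	static int example_send_ping(struct ffa_device *ffa_dev)
 *	{
 *		struct ffa_send_direct_data data = {
 *			.data0 = 0x1,	// transported in w3/x3
 *			.data1 = 0x2,	// transported in w4/x4
 *		};
 *		int ret;
 *
 *		ret = ffa_dev->ops->msg_ops->sync_send_receive(ffa_dev, &data);
 *		if (ret)
 *			return ret;
 *
 *		// On return, data holds the response registers (w3-w7/x3-x7)
 *		return data.data0 ? -EIO : 0;
 *	}
 */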
struct ffa_mem_region_addr_range {
/* The base IPA of the constituent memory region, aligned to 4 kiB */
u64 address;
/* The number of 4 kiB pages in the constituent memory region. */
u32 pg_cnt;
u32 reserved;
};
struct ffa_composite_mem_region {
/*
* The total number of 4 kiB pages included in this memory region. This
* must be equal to the sum of page counts specified in each
* `struct ffa_mem_region_addr_range`.
*/
u32 total_pg_cnt;
/* The number of constituents included in this memory region range */
u32 addr_range_cnt;
u64 reserved;
/* An array of `addr_range_cnt` memory region constituents. */
struct ffa_mem_region_addr_range constituents[];
};
struct ffa_mem_region_attributes {
/* The ID of the VM to which the memory is being given or shared. */
u16 receiver;
/*
* The permissions with which the memory region should be mapped in the
* receiver's page table.
*/
#define FFA_MEM_EXEC BIT(3)
#define FFA_MEM_NO_EXEC BIT(2)
#define FFA_MEM_RW BIT(1)
#define FFA_MEM_RO BIT(0)
u8 attrs;
/*
* Flags used during FFA_MEM_RETRIEVE_REQ and FFA_MEM_RETRIEVE_RESP
* for memory regions with multiple borrowers.
*/
#define FFA_MEM_RETRIEVE_SELF_BORROWER BIT(0)
u8 flag;
/*
 * Offset in bytes from the start of the outer `ffa_mem_region` to
 * the `struct ffa_composite_mem_region` describing this region.
 */
u32 composite_off;
u64 reserved;
};
struct ffa_mem_region {
/* The ID of the VM/owner which originally sent the memory region */
u16 sender_id;
#define FFA_MEM_NORMAL BIT(5)
#define FFA_MEM_DEVICE BIT(4)
#define FFA_MEM_WRITE_BACK (3 << 2)
#define FFA_MEM_NON_CACHEABLE (1 << 2)
#define FFA_DEV_nGnRnE (0 << 2)
#define FFA_DEV_nGnRE (1 << 2)
#define FFA_DEV_nGRE (2 << 2)
#define FFA_DEV_GRE (3 << 2)
#define FFA_MEM_NON_SHAREABLE (0)
#define FFA_MEM_OUTER_SHAREABLE (2)
#define FFA_MEM_INNER_SHAREABLE (3)
u8 attributes;
u8 reserved_0;
/*
* Clear memory region contents after unmapping it from the sender and
* before mapping it for any receiver.
*/
#define FFA_MEM_CLEAR BIT(0)
/*
* Whether the hypervisor may time slice the memory sharing or retrieval
* operation.
*/
#define FFA_TIME_SLICE_ENABLE BIT(1)
#define FFA_MEM_RETRIEVE_TYPE_IN_RESP (0 << 3)
#define FFA_MEM_RETRIEVE_TYPE_SHARE (1 << 3)
#define FFA_MEM_RETRIEVE_TYPE_LEND (2 << 3)
#define FFA_MEM_RETRIEVE_TYPE_DONATE (3 << 3)
#define FFA_MEM_RETRIEVE_ADDR_ALIGN_HINT BIT(9)
#define FFA_MEM_RETRIEVE_ADDR_ALIGN(x) ((x) << 5)
/* Flags to control behaviour of the transaction. */
u32 flags;
#define HANDLE_LOW_MASK GENMASK_ULL(31, 0)
#define HANDLE_HIGH_MASK GENMASK_ULL(63, 32)
#define HANDLE_LOW(x) ((u32)(FIELD_GET(HANDLE_LOW_MASK, (x))))
#define HANDLE_HIGH(x) ((u32)(FIELD_GET(HANDLE_HIGH_MASK, (x))))
#define PACK_HANDLE(l, h) \
(FIELD_PREP(HANDLE_LOW_MASK, (l)) | FIELD_PREP(HANDLE_HIGH_MASK, (h)))
/*
* A globally-unique ID assigned by the hypervisor for a region
* of memory being sent between VMs.
*/
u64 handle;
/*
* An implementation defined value associated with the receiver and the
* memory region.
*/
u64 tag;
u32 reserved_1;
/*
* The number of `ffa_mem_region_attributes` entries included in this
* transaction.
*/
u32 ep_count;
/*
* An array of endpoint memory access descriptors.
* Each one specifies a memory region offset, an endpoint and the
* attributes with which this memory region should be mapped in that
* endpoint's page table.
*/
struct ffa_mem_region_attributes ep_mem_access[];
};
#define COMPOSITE_OFFSET(x) \
(offsetof(struct ffa_mem_region, ep_mem_access[x]))
#define CONSTITUENTS_OFFSET(x) \
(offsetof(struct ffa_composite_mem_region, constituents[x]))
#define COMPOSITE_CONSTITUENTS_OFFSET(x, y) \
(COMPOSITE_OFFSET(x) + CONSTITUENTS_OFFSET(y))
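/*
 * Example: computing the total size in bytes of a transaction descriptor
 * that carries one receiver endpoint and 'n' constituent address ranges,
 * using the offset helpers above. A sketch only, assuming the composite
 * region immediately follows the single endpoint descriptor.
 *
 *	static size_t example_mem_region_size(u32 n)
 *	{
 *		return COMPOSITE_CONSTITUENTS_OFFSET(1, n);
 *	}
 */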
struct ffa_mem_ops_args {
bool use_txbuf;
u32 nattrs;
u32 flags;
u64 tag;
u64 g_handle;
struct scatterlist *sg;
struct ffa_mem_region_attributes *attrs;
};
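/*
 * Example: lending a buffer described by a scatterlist to a single borrower
 * and saving the global handle for a later reclaim. A sketch only; the
 * borrower partition ID and the scatterlist are assumed to be set up by the
 * caller, and mem_ops comes from the device's ffa_ops (declared below).
 *
 *	static int example_lend_buffer(struct ffa_device *ffa_dev,
 *				       struct scatterlist *sg, u16 borrower_id,
 *				       u64 *out_handle)
 *	{
 *		struct ffa_mem_region_attributes mem_attr = {
 *			.receiver = borrower_id,
 *			.attrs = FFA_MEM_RW,
 *		};
 *		struct ffa_mem_ops_args args = {
 *			.use_txbuf = true,
 *			.attrs = &mem_attr,
 *			.nattrs = 1,
 *			.sg = sg,
 *		};
 *		int ret;
 *
 *		ret = ffa_dev->ops->mem_ops->memory_lend(&args);
 *		if (ret)
 *			return ret;
 *
 *		*out_handle = args.g_handle;	// needed for memory_reclaim()
 *		return 0;
 *	}
 */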
struct ffa_info_ops {
u32 (*api_version_get)(void);
int (*partition_info_get)(const char *uuid_str,
struct ffa_partition_info *buffer);
};
struct ffa_msg_ops {
void (*mode_32bit_set)(struct ffa_device *dev);
int (*sync_send_receive)(struct ffa_device *dev,
struct ffa_send_direct_data *data);
};
struct ffa_mem_ops {
int (*memory_reclaim)(u64 g_handle, u32 flags);
int (*memory_share)(struct ffa_mem_ops_args *args);
int (*memory_lend)(struct ffa_mem_ops_args *args);
};
struct ffa_ops {
const struct ffa_info_ops *info_ops;
const struct ffa_msg_ops *msg_ops;
const struct ffa_mem_ops *mem_ops;
};
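/*
 * Example: using info_ops from a driver to report the FF-A ABI version and
 * check that a partition can receive direct requests. A sketch only; the
 * UUID string is a placeholder and error handling is minimal.
 *
 *	static int example_query(struct ffa_device *ffa_dev)
 *	{
 *		const struct ffa_info_ops *info_ops = ffa_dev->ops->info_ops;
 *		struct ffa_partition_info pinfo = {};
 *		int ret;
 *
 *		dev_info(&ffa_dev->dev, "FF-A ABI version 0x%x\n",
 *			 info_ops->api_version_get());
 *
 *		ret = info_ops->partition_info_get("12345678-1234-1234-0000-000000000000",
 *						   &pinfo);
 *		if (ret < 0)
 *			return ret;
 *
 *		if (!(pinfo.properties & FFA_PARTITION_DIRECT_RECV))
 *			return -EOPNOTSUPP;
 *
 *		return 0;
 *	}
 */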
#endif /* _LINUX_ARM_FFA_H */