/*
* Copyright (C) 2011 Google, Inc.
* Copyright (C) 2012 Intel, Inc.
* Copyright (C) 2013 Intel, Inc.
* Copyright (C) 2014 Linaro Limited
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
/* This source file contains the implementation of a special device driver
* that intends to provide a *very* fast communication channel between the
* guest system and the QEMU emulator.
*
* Usage from the guest is simply the following (error handling simplified):
*
* int fd = open("/dev/qemu_pipe",O_RDWR);
* .... write() or read() through the pipe.
*
* This driver doesn't deal with the exact protocol used during the session.
* It is intended to be as simple as something like:
*
* // do this _just_ after opening the fd to connect to a specific
* // emulator service.
* const char* msg = "<pipename>";
* if (write(fd, msg, strlen(msg)+1) < 0) {
* ... could not connect to <pipename> service
* close(fd);
* }
*
* // after this, simply read() and write() to communicate with the
* // service. Exact protocol details left as an exercise to the reader.
*
* This driver is very fast because it doesn't copy any data through
* intermediate buffers, since the emulator is capable of translating
* guest user addresses into host ones.
*
 * Note, however, that we must ensure that each user page involved in the
 * exchange is properly pinned and mapped for the duration of a transfer.
*/
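/* For illustration only: a minimal user-space connect helper matching the
 * handshake described above, with slightly more complete error handling.
 * The pipe name passed in is a placeholder; valid names depend on the
 * emulator services available on the host.
 *
 *   static int qemu_pipe_connect(const char *pipename)
 *   {
 *       int fd = open("/dev/qemu_pipe", O_RDWR);
 *       size_t len = strlen(pipename) + 1;   // include the trailing NUL
 *
 *       if (fd < 0)
 *           return -1;
 *       if (write(fd, pipename, len) != (ssize_t)len) {
 *           close(fd);                       // could not reach the service
 *           return -1;
 *       }
 *       return fd;   // read()/write() now talk to the chosen service
 *   }
 */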
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/miscdevice.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/goldfish.h>
#include <linux/mm.h>
#include <linux/acpi.h>
/*
* IMPORTANT: The following constants must match the ones used and defined
* in external/qemu/hw/goldfish_pipe.c in the Android source tree.
*/
/* pipe device registers */
#define PIPE_REG_COMMAND 0x00 /* write: value = command */
#define PIPE_REG_STATUS 0x04 /* read */
#define PIPE_REG_CHANNEL 0x08 /* read/write: channel id */
#define PIPE_REG_CHANNEL_HIGH 0x30 /* read/write: channel id */
#define PIPE_REG_SIZE 0x0c /* read/write: buffer size */
#define PIPE_REG_ADDRESS 0x10 /* write: physical address */
#define PIPE_REG_ADDRESS_HIGH 0x34 /* write: physical address */
#define PIPE_REG_WAKES 0x14 /* read: wake flags */
#define PIPE_REG_PARAMS_ADDR_LOW 0x18 /* read/write: batch data address */
#define PIPE_REG_PARAMS_ADDR_HIGH 0x1c /* read/write: batch data address */
#define PIPE_REG_ACCESS_PARAMS 0x20 /* write: batch access */
#define PIPE_REG_VERSION 0x24 /* read: device version */
/* list of commands for PIPE_REG_COMMAND */
#define CMD_OPEN 1 /* open new channel */
#define CMD_CLOSE 2 /* close channel (from guest) */
#define CMD_POLL 3 /* poll read/write status */
/* List of bitflags returned in status of CMD_POLL command */
#define PIPE_POLL_IN (1 << 0)
#define PIPE_POLL_OUT (1 << 1)
#define PIPE_POLL_HUP (1 << 2)
/* The following commands are related to write operations */
#define CMD_WRITE_BUFFER 4 /* send a user buffer to the emulator */
#define CMD_WAKE_ON_WRITE 5 /* tell the emulator to wake us when writing
is possible */
#define CMD_READ_BUFFER 6 /* receive a user buffer from the emulator */
#define CMD_WAKE_ON_READ 7 /* tell the emulator to wake us when reading
* is possible */
/* Possible status values used to signal errors - see goldfish_pipe_error_convert */
#define PIPE_ERROR_INVAL -1
#define PIPE_ERROR_AGAIN -2
#define PIPE_ERROR_NOMEM -3
#define PIPE_ERROR_IO -4
/* Bit-flags used to signal events from the emulator */
#define PIPE_WAKE_CLOSED (1 << 0) /* emulator closed pipe */
#define PIPE_WAKE_READ (1 << 1) /* pipe can now be read from */
#define PIPE_WAKE_WRITE (1 << 2) /* pipe can now be written to */
struct access_params {
unsigned long channel;
u32 size;
unsigned long address;
u32 cmd;
u32 result;
/* reserved for future extension */
u32 flags;
};
/* The global driver data. Holds a reference to the i/o page used to
 * communicate with the emulator; the wake queue for blocked tasks waiting
 * to be awoken lives in each pipe instance (struct goldfish_pipe).
 */
struct goldfish_pipe_dev {
spinlock_t lock;
unsigned char __iomem *base;
struct access_params *aps;
int irq;
u32 version;
};
static struct goldfish_pipe_dev pipe_dev[1];
/* This data type models a given pipe instance */
struct goldfish_pipe {
struct goldfish_pipe_dev *dev;
struct mutex lock;
unsigned long flags;
wait_queue_head_t wake_queue;
};
/* Bit flags for the 'flags' field */
enum {
BIT_CLOSED_ON_HOST = 0, /* pipe closed by host */
BIT_WAKE_ON_WRITE = 1, /* want to be woken on writes */
BIT_WAKE_ON_READ = 2, /* want to be woken on reads */
};
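/* Write a command for the given pipe into the device's command register and
 * return the status it reports back. The channel (the pipe's kernel address)
 * is written first so the device knows which pipe the command targets; the
 * device spinlock serializes all of these register accesses.
 */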
static u32 goldfish_cmd_status(struct goldfish_pipe *pipe, u32 cmd)
{
unsigned long flags;
u32 status;
struct goldfish_pipe_dev *dev = pipe->dev;
spin_lock_irqsave(&dev->lock, flags);
gf_write_ptr(pipe, dev->base + PIPE_REG_CHANNEL,
dev->base + PIPE_REG_CHANNEL_HIGH);
writel(cmd, dev->base + PIPE_REG_COMMAND);
status = readl(dev->base + PIPE_REG_STATUS);
spin_unlock_irqrestore(&dev->lock, flags);
return status;
}
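/* Same as goldfish_cmd_status(), for commands whose status we don't need. */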
static void goldfish_cmd(struct goldfish_pipe *pipe, u32 cmd)
{
unsigned long flags;
struct goldfish_pipe_dev *dev = pipe->dev;
spin_lock_irqsave(&dev->lock, flags);
gf_write_ptr(pipe, dev->base + PIPE_REG_CHANNEL,
dev->base + PIPE_REG_CHANNEL_HIGH);
writel(cmd, dev->base + PIPE_REG_COMMAND);
spin_unlock_irqrestore(&dev->lock, flags);
}
/* This function converts an error code returned by the emulator through
* the PIPE_REG_STATUS i/o register into a valid negative errno value.
*/
static int goldfish_pipe_error_convert(int status)
{
switch (status) {
case PIPE_ERROR_AGAIN:
return -EAGAIN;
case PIPE_ERROR_NOMEM:
return -ENOMEM;
case PIPE_ERROR_IO:
return -EIO;
default:
return -EINVAL;
}
}
/*
 * Note: QEMU returns 0 when an unknown register is read, so reading the
 * batch buffer address back tells us whether the param_access (batched
 * access) mechanism is supported by the device.
*/
static int valid_batchbuffer_addr(struct goldfish_pipe_dev *dev,
struct access_params *aps)
{
u32 aph, apl;
u64 paddr;
aph = readl(dev->base + PIPE_REG_PARAMS_ADDR_HIGH);
apl = readl(dev->base + PIPE_REG_PARAMS_ADDR_LOW);
paddr = ((u64)aph << 32) | apl;
if (paddr != (__pa(aps)))
return 0;
return 1;
}
/* 0 on success */
static int setup_access_params_addr(struct platform_device *pdev,
struct goldfish_pipe_dev *dev)
{
u64 paddr;
struct access_params *aps;
aps = devm_kzalloc(&pdev->dev, sizeof(struct access_params), GFP_KERNEL);
if (!aps)
return -1;
/* FIXME */
paddr = __pa(aps);
writel((u32)(paddr >> 32), dev->base + PIPE_REG_PARAMS_ADDR_HIGH);
writel((u32)paddr, dev->base + PIPE_REG_PARAMS_ADDR_LOW);
if (valid_batchbuffer_addr(dev, aps)) {
dev->aps = aps;
return 0;
} else
return -1;
}
/* A sentinel value that the QEMU emulator will never write back */
#define INITIAL_BATCH_RESULT (0xdeadbeaf)
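/* Batched transfer path: fill the shared access_params block with the
 * channel, buffer address, size and command, then trigger the whole
 * operation with a single write to PIPE_REG_ACCESS_PARAMS instead of
 * several separate register accesses. Returns 0 with the transfer status
 * in *status on success, or -1 if batching is unavailable or the device
 * did not process the request.
 */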
static int access_with_param(struct goldfish_pipe_dev *dev, const int cmd,
unsigned long address, unsigned long avail,
struct goldfish_pipe *pipe, int *status)
{
struct access_params *aps = dev->aps;
if (aps == NULL)
return -1;
aps->result = INITIAL_BATCH_RESULT;
aps->channel = (unsigned long)pipe;
aps->size = avail;
aps->address = address;
aps->cmd = cmd;
writel(cmd, dev->base + PIPE_REG_ACCESS_PARAMS);
/*
* If the aps->result has not changed, that means
* that the batch command failed
*/
if (aps->result == INITIAL_BATCH_RESULT)
return -1;
*status = aps->result;
return 0;
}
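/* Common implementation of read() and write(). The user buffer is processed
 * one page at a time: each page is pinned with get_user_pages() so the
 * emulator can access it safely, the transfer is issued (via the batched
 * access_params path when available, otherwise through individual register
 * writes), and the page is released again. On PIPE_ERROR_AGAIN in blocking
 * mode the caller sleeps until the interrupt handler signals a wake event
 * from the emulator.
 */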
static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
size_t bufflen, int is_write)
{
unsigned long irq_flags;
struct goldfish_pipe *pipe = filp->private_data;
struct goldfish_pipe_dev *dev = pipe->dev;
unsigned long address, address_end;
int count = 0, ret = -EINVAL;
/* If the emulator already closed the pipe, no need to go further */
if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
return -EIO;
	/* Null reads or writes succeed */
if (unlikely(bufflen == 0))
return 0;
/* Check the buffer range for access */
if (!access_ok(is_write ? VERIFY_WRITE : VERIFY_READ,
buffer, bufflen))
return -EFAULT;
/* Serialize access to the pipe */
if (mutex_lock_interruptible(&pipe->lock))
return -ERESTARTSYS;
address = (unsigned long)(void *)buffer;
address_end = address + bufflen;
while (address < address_end) {
unsigned long page_end = (address & PAGE_MASK) + PAGE_SIZE;
unsigned long next = page_end < address_end ? page_end
: address_end;
unsigned long avail = next - address;
int status, wakeBit;
struct page *page;
/* Either vaddr or paddr depending on the device version */
unsigned long xaddr;
/*
		 * We grab the pages on a page-by-page basis in case user
		 * space gives us a potentially huge buffer but the read only
		 * returns a small amount; that way there's no need to pin
		 * that much memory to the process.
*/
		down_read(&current->mm->mmap_sem);
ret = get_user_pages(current, current->mm, address, 1,
!is_write, 0, &page, NULL);
		up_read(&current->mm->mmap_sem);
if (ret < 0)
break;
if (dev->version) {
/* Device version 1 or newer (qemu-android) expects the
* physical address.
*/
xaddr = page_to_phys(page) | (address & ~PAGE_MASK);
} else {
/* Device version 0 (classic emulator) expects the
* virtual address.
*/
xaddr = address;
}
/* Now, try to transfer the bytes in the current page */
spin_lock_irqsave(&dev->lock, irq_flags);
if (access_with_param(dev,
is_write ? CMD_WRITE_BUFFER : CMD_READ_BUFFER,
xaddr, avail, pipe, &status)) {
gf_write_ptr(pipe, dev->base + PIPE_REG_CHANNEL,
dev->base + PIPE_REG_CHANNEL_HIGH);
writel(avail, dev->base + PIPE_REG_SIZE);
gf_write_ptr((void *)xaddr,
dev->base + PIPE_REG_ADDRESS,
dev->base + PIPE_REG_ADDRESS_HIGH);
writel(is_write ? CMD_WRITE_BUFFER : CMD_READ_BUFFER,
dev->base + PIPE_REG_COMMAND);
status = readl(dev->base + PIPE_REG_STATUS);
}
spin_unlock_irqrestore(&dev->lock, irq_flags);
if (status > 0 && !is_write)
set_page_dirty(page);
put_page(page);
if (status > 0) { /* Correct transfer */
count += status;
address += status;
continue;
} else if (status == 0) { /* EOF */
ret = 0;
break;
} else if (status < 0 && count > 0) {
/*
* An error occurred and we already transferred
* something on one of the previous pages.
* Just return what we already copied and log this
* err.
*
* Note: This seems like an incorrect approach but
* cannot change it until we check if any user space
* ABI relies on this behavior.
*/
if (status != PIPE_ERROR_AGAIN)
pr_info_ratelimited("goldfish_pipe: backend returned error %d on %s\n",
status, is_write ? "write" : "read");
ret = 0;
break;
}
/*
		 * If the error is not PIPE_ERROR_AGAIN, or if we are in
		 * non-blocking mode, just return the error code.
*/
if (status != PIPE_ERROR_AGAIN ||
(filp->f_flags & O_NONBLOCK) != 0) {
ret = goldfish_pipe_error_convert(status);
break;
}
/*
* The backend blocked the read/write, wait until the backend
* tells us it's ready to process more data.
*/
wakeBit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ;
set_bit(wakeBit, &pipe->flags);
/* Tell the emulator we're going to wait for a wake event */
goldfish_cmd(pipe,
is_write ? CMD_WAKE_ON_WRITE : CMD_WAKE_ON_READ);
/* Unlock the pipe, then wait for the wake signal */
mutex_unlock(&pipe->lock);
while (test_bit(wakeBit, &pipe->flags)) {
if (wait_event_interruptible(
pipe->wake_queue,
!test_bit(wakeBit, &pipe->flags)))
return -ERESTARTSYS;
if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
return -EIO;
}
/* Try to re-acquire the lock */
if (mutex_lock_interruptible(&pipe->lock))
return -ERESTARTSYS;
}
mutex_unlock(&pipe->lock);
if (ret < 0)
return ret;
else
return count;
}
static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer,
size_t bufflen, loff_t *ppos)
{
return goldfish_pipe_read_write(filp, buffer, bufflen, 0);
}
static ssize_t goldfish_pipe_write(struct file *filp,
const char __user *buffer, size_t bufflen,
loff_t *ppos)
{
return goldfish_pipe_read_write(filp, (char __user *)buffer,
bufflen, 1);
}
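/* poll() support: query the emulator with CMD_POLL and translate the
 * PIPE_POLL_* bits into poll flags. A pipe already closed on the host side
 * is additionally reported as POLLERR.
 */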
static unsigned int goldfish_pipe_poll(struct file *filp, poll_table *wait)
{
struct goldfish_pipe *pipe = filp->private_data;
unsigned int mask = 0;
int status;
mutex_lock(&pipe->lock);
poll_wait(filp, &pipe->wake_queue, wait);
status = goldfish_cmd_status(pipe, CMD_POLL);
mutex_unlock(&pipe->lock);
if (status & PIPE_POLL_IN)
mask |= POLLIN | POLLRDNORM;
if (status & PIPE_POLL_OUT)
mask |= POLLOUT | POLLWRNORM;
if (status & PIPE_POLL_HUP)
mask |= POLLHUP;
if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
mask |= POLLERR;
return mask;
}
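/* Interrupt handler: drain the list of (channel, wake flags) pairs the
 * emulator queued for us, update each signalled pipe's flag bits and wake
 * up any task sleeping on that pipe's wait queue. Returns IRQ_NONE when no
 * pipe was signalled, as required for a shared interrupt line.
 */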
static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
{
struct goldfish_pipe_dev *dev = dev_id;
unsigned long irq_flags;
int count = 0;
/*
* We're going to read from the emulator a list of (channel,flags)
* pairs corresponding to the wake events that occurred on each
* blocked pipe (i.e. channel).
*/
spin_lock_irqsave(&dev->lock, irq_flags);
for (;;) {
/* First read the channel, 0 means the end of the list */
struct goldfish_pipe *pipe;
unsigned long wakes;
unsigned long channel = 0;
#ifdef CONFIG_64BIT
channel = (u64)readl(dev->base + PIPE_REG_CHANNEL_HIGH) << 32;
if (channel == 0)
break;
#endif
channel |= readl(dev->base + PIPE_REG_CHANNEL);
if (channel == 0)
break;
/* Convert channel to struct pipe pointer + read wake flags */
wakes = readl(dev->base + PIPE_REG_WAKES);
pipe = (struct goldfish_pipe *)(ptrdiff_t)channel;
		/* Did the emulator just close a pipe? */
if (wakes & PIPE_WAKE_CLOSED) {
set_bit(BIT_CLOSED_ON_HOST, &pipe->flags);
wakes |= PIPE_WAKE_READ | PIPE_WAKE_WRITE;
}
if (wakes & PIPE_WAKE_READ)
clear_bit(BIT_WAKE_ON_READ, &pipe->flags);
if (wakes & PIPE_WAKE_WRITE)
clear_bit(BIT_WAKE_ON_WRITE, &pipe->flags);
wake_up_interruptible(&pipe->wake_queue);
count++;
}
spin_unlock_irqrestore(&dev->lock, irq_flags);
return (count == 0) ? IRQ_NONE : IRQ_HANDLED;
}
/**
* goldfish_pipe_open - open a channel to the AVD
* @inode: inode of device
* @file: file struct of opener
*
 * Create a new pipe link between the emulator and the user application.
* Each new request produces a new pipe.
*
 * Note: we use the pipe object's address as the channel ID (a mux). All
 * goldfish emulations are 32-bit right now so this is fine. A move to
 * 64-bit will need to revisit this addressing.
*/
static int goldfish_pipe_open(struct inode *inode, struct file *file)
{
struct goldfish_pipe *pipe;
struct goldfish_pipe_dev *dev = pipe_dev;
int32_t status;
/* Allocate new pipe kernel object */
pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);
if (pipe == NULL)
return -ENOMEM;
pipe->dev = dev;
mutex_init(&pipe->lock);
init_waitqueue_head(&pipe->wake_queue);
/*
* Now, tell the emulator we're opening a new pipe. We use the
* pipe object's address as the channel identifier for simplicity.
*/
status = goldfish_cmd_status(pipe, CMD_OPEN);
if (status < 0) {
kfree(pipe);
return status;
}
/* All is done, save the pipe into the file's private data field */
file->private_data = pipe;
return 0;
}
static int goldfish_pipe_release(struct inode *inode, struct file *filp)
{
struct goldfish_pipe *pipe = filp->private_data;
/* The guest is closing the channel, so tell the emulator right now */
goldfish_cmd(pipe, CMD_CLOSE);
kfree(pipe);
filp->private_data = NULL;
return 0;
}
static const struct file_operations goldfish_pipe_fops = {
.owner = THIS_MODULE,
.read = goldfish_pipe_read,
.write = goldfish_pipe_write,
.poll = goldfish_pipe_poll,
.open = goldfish_pipe_open,
.release = goldfish_pipe_release,
};
static struct miscdevice goldfish_pipe_device = {
.minor = MISC_DYNAMIC_MINOR,
.name = "goldfish_pipe",
.fops = &goldfish_pipe_fops,
};
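/* Platform driver probe: map the device's single I/O page, install the
 * (shared) interrupt handler, register the /dev/goldfish_pipe misc device,
 * try to set up the batched access_params block, and finally read the
 * device version (the classic emulator ignores the register and returns 0).
 */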
static int goldfish_pipe_probe(struct platform_device *pdev)
{
int err;
struct resource *r;
struct goldfish_pipe_dev *dev = pipe_dev;
/* not thread safe, but this should not happen */
WARN_ON(dev->base != NULL);
spin_lock_init(&dev->lock);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (r == NULL || resource_size(r) < PAGE_SIZE) {
dev_err(&pdev->dev, "can't allocate i/o page\n");
return -EINVAL;
}
dev->base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
if (dev->base == NULL) {
dev_err(&pdev->dev, "ioremap failed\n");
return -EINVAL;
}
r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (r == NULL) {
err = -EINVAL;
goto error;
}
dev->irq = r->start;
err = devm_request_irq(&pdev->dev, dev->irq, goldfish_pipe_interrupt,
IRQF_SHARED, "goldfish_pipe", dev);
if (err) {
dev_err(&pdev->dev, "unable to allocate IRQ\n");
goto error;
}
err = misc_register(&goldfish_pipe_device);
if (err) {
dev_err(&pdev->dev, "unable to register device\n");
goto error;
}
setup_access_params_addr(pdev, dev);
/* Although the pipe device in the classic Android emulator does not
* recognize the 'version' register, it won't treat this as an error
* either and will simply return 0, which is fine.
*/
dev->version = readl(dev->base + PIPE_REG_VERSION);
return 0;
error:
dev->base = NULL;
return err;
}
static int goldfish_pipe_remove(struct platform_device *pdev)
{
struct goldfish_pipe_dev *dev = pipe_dev;
misc_deregister(&goldfish_pipe_device);
dev->base = NULL;
return 0;
}
static const struct acpi_device_id goldfish_pipe_acpi_match[] = {
{ "GFSH0003", 0 },
{ },
};
MODULE_DEVICE_TABLE(acpi, goldfish_pipe_acpi_match);
static const struct of_device_id goldfish_pipe_of_match[] = {
{ .compatible = "google,android-pipe", },
{},
};
MODULE_DEVICE_TABLE(of, goldfish_pipe_of_match);
static struct platform_driver goldfish_pipe = {
.probe = goldfish_pipe_probe,
.remove = goldfish_pipe_remove,
.driver = {
.name = "goldfish_pipe",
.owner = THIS_MODULE,
.of_match_table = goldfish_pipe_of_match,
.acpi_match_table = ACPI_PTR(goldfish_pipe_acpi_match),
}
};
module_platform_driver(goldfish_pipe);
MODULE_AUTHOR("David Turner <digit@google.com>");
MODULE_LICENSE("GPL");