// SPDX-License-Identifier: GPL-2.0
#include <linux/vmalloc.h>
#include "null_blk.h"

/* Shift converting zone_size in MB to 512 B sectors (1 MB = 2^11 sectors). */
#define ZONE_SIZE_SHIFT		11
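
/* Return the index of the zone containing sector @sect. */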
static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
{
	return sect >> ilog2(dev->zone_size_sects);
}
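
/*
 * Check the zoned-mode parameters and allocate the zone array, with every
 * zone initially empty and its write pointer at the zone start.
 */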
int null_zone_init(struct nullb_device *dev)
{
	sector_t dev_size = (sector_t)dev->size * 1024 * 1024;
	sector_t sector = 0;
	unsigned int i;

	if (!is_power_of_2(dev->zone_size)) {
		pr_err("null_blk: zone_size must be power-of-two\n");
		return -EINVAL;
	}

	dev->zone_size_sects = dev->zone_size << ZONE_SIZE_SHIFT;
	dev->nr_zones = dev_size >>
			(SECTOR_SHIFT + ilog2(dev->zone_size_sects));
	dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct blk_zone),
				    GFP_KERNEL | __GFP_ZERO);
	if (!dev->zones)
		return -ENOMEM;

	for (i = 0; i < dev->nr_zones; i++) {
		struct blk_zone *zone = &dev->zones[i];

		zone->start = zone->wp = sector;
		zone->len = dev->zone_size_sects;
		zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
		zone->cond = BLK_ZONE_COND_EMPTY;

		sector += dev->zone_size_sects;
	}

	return 0;
}
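
/* Free the zone array allocated by null_zone_init(). */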
void null_zone_exit(struct nullb_device *dev)
{
	kvfree(dev->zones);
}
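
/*
 * Copy the report header followed by up to @nr_zones zone descriptors into
 * the bio's payload, mapping each page segment in turn.
 */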
static void null_zone_fill_bio(struct nullb_device *dev, struct bio *bio,
			       unsigned int zno, unsigned int nr_zones)
{
	struct blk_zone_report_hdr *hdr = NULL;
	struct bio_vec bvec;
	struct bvec_iter iter;
	void *addr;
	unsigned int zones_to_cpy;

	bio_for_each_segment(bvec, bio, iter) {
		addr = kmap_atomic(bvec.bv_page);

		zones_to_cpy = bvec.bv_len / sizeof(struct blk_zone);

		/* The first blk_zone slot of the payload holds the header. */
		if (!hdr) {
			hdr = (struct blk_zone_report_hdr *)addr;
			hdr->nr_zones = nr_zones;
			zones_to_cpy--;
			addr += sizeof(struct blk_zone_report_hdr);
		}

		zones_to_cpy = min_t(unsigned int, zones_to_cpy, nr_zones);

		memcpy(addr, &dev->zones[zno],
			zones_to_cpy * sizeof(struct blk_zone));

		kunmap_atomic(addr);

		nr_zones -= zones_to_cpy;
		zno += zones_to_cpy;

		if (!nr_zones)
			break;
	}
}
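
/*
 * Handle a REQ_OP_ZONE_REPORT bio: report the zones from the bio's start
 * sector up to the end of the device or the payload capacity, whichever
 * comes first.
 */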
blk_status_t null_zone_report(struct nullb *nullb, struct bio *bio)
{
	struct nullb_device *dev = nullb->dev;
	unsigned int zno = null_zone_no(dev, bio->bi_iter.bi_sector);
	unsigned int nr_zones = dev->nr_zones - zno;
	unsigned int max_zones;

	/* One blk_zone slot of the payload is consumed by the header. */
	max_zones = (bio->bi_iter.bi_size / sizeof(struct blk_zone)) - 1;
	nr_zones = min_t(unsigned int, nr_zones, max_zones);

	null_zone_fill_bio(nullb->dev, bio, zno, nr_zones);

	return BLK_STS_OK;
}
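
/*
 * Apply zoned-device write rules: fail writes to full zones and writes not
 * at the write pointer, then advance the write pointer and update the zone
 * condition.
 */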
void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
		     unsigned int nr_sectors)
{
	struct nullb_device *dev = cmd->nq->dev;
	unsigned int zno = null_zone_no(dev, sector);
	struct blk_zone *zone = &dev->zones[zno];

	switch (zone->cond) {
	case BLK_ZONE_COND_FULL:
		/* Cannot write to a full zone */
		cmd->error = BLK_STS_IOERR;
		break;
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_IMP_OPEN:
		/* Writes must be at the write pointer position */
		if (sector != zone->wp) {
			cmd->error = BLK_STS_IOERR;
			break;
		}

		if (zone->cond == BLK_ZONE_COND_EMPTY)
			zone->cond = BLK_ZONE_COND_IMP_OPEN;

		zone->wp += nr_sectors;
		if (zone->wp == zone->start + zone->len)
			zone->cond = BLK_ZONE_COND_FULL;
		break;
	default:
		/* Invalid zone condition */
		cmd->error = BLK_STS_IOERR;
		break;
	}
}
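
/*
 * Handle a REQ_OP_ZONE_RESET bio: return the zone containing @sector to
 * the empty condition and rewind its write pointer.
 */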
void null_zone_reset(struct nullb_cmd *cmd, sector_t sector)
{
	struct nullb_device *dev = cmd->nq->dev;
	unsigned int zno = null_zone_no(dev, sector);
	struct blk_zone *zone = &dev->zones[zno];

	zone->cond = BLK_ZONE_COND_EMPTY;
	zone->wp = zone->start;
}