/*
 * dfu_nand.c -- DFU for NAND routines.
 *
 * Copyright (C) 2012-2013 Texas Instruments, Inc.
 *
 * Based on dfu_mmc.c which is:
 * Copyright (C) 2012 Samsung Electronics
 * author: Lukasz Majewski <l.majewski@samsung.com>
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <malloc.h>
#include <errno.h>
#include <div64.h>
#include <dfu.h>
#include <linux/mtd/mtd.h>
#include <jffs2/load_kernel.h>
#include <nand.h>

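/*
 * nand_block_op() - common read/write helper for raw NAND access.
 *
 * Maps the DFU offset onto the NAND area configured in @dfu, skipping over
 * bad blocks via the *_skip_bad() helpers, and (for writes) erasing the
 * target range before programming it. A NULL @buf is the DFU core's query
 * for the total size of the area, which is returned through @len.
 */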
static int nand_block_op(enum dfu_op op, struct dfu_entity *dfu,
			 u64 offset, void *buf, long *len)
{
	loff_t start, lim;
	size_t count, actual;
	int ret;
	struct mtd_info *mtd;

	/* if buf == NULL return total size of the area */
	if (buf == NULL) {
		*len = dfu->data.nand.size;
		return 0;
	}

	start = dfu->data.nand.start + offset + dfu->bad_skip;
	lim = dfu->data.nand.start + dfu->data.nand.size - start;
	count = *len;

	if (nand_curr_device < 0 ||
	    nand_curr_device >= CONFIG_SYS_MAX_NAND_DEVICE ||
	    !nand_info[nand_curr_device]->name) {
		printf("%s: invalid nand device\n", __func__);
		return -1;
	}

	mtd = nand_info[nand_curr_device];

	if (op == DFU_OP_READ) {
		ret = nand_read_skip_bad(mtd, start, &count, &actual,
					 lim, buf);
	} else {
		nand_erase_options_t opts;

		memset(&opts, 0, sizeof(opts));
		opts.offset = start;
		opts.length = count;
		opts.spread = 1;
		opts.quiet = 1;
		opts.lim = lim;
		/* first erase */
		ret = nand_erase_opts(mtd, &opts);
		if (ret)
			return ret;
		/* then write */
		ret = nand_write_skip_bad(mtd, start, &count, &actual,
					  lim, buf, WITH_WR_VERIFY);
	}

	if (ret != 0) {
		printf("%s: nand_%s_skip_bad call failed at %llx!\n",
		       __func__, op == DFU_OP_READ ? "read" : "write",
		       start);
		return ret;
	}

	/*
	 * Find out where we stopped writing data. This can be deeper into
	 * the NAND than we expected due to having to skip bad blocks. So
	 * we must take this into account for the next write, if any.
	 */
	if (actual > count)
		dfu->bad_skip += actual - count;

	return ret;
}

static inline int nand_block_write(struct dfu_entity *dfu,
				   u64 offset, void *buf, long *len)
{
	return nand_block_op(DFU_OP_WRITE, dfu, offset, buf, len);
}

static inline int nand_block_read(struct dfu_entity *dfu,
				  u64 offset, void *buf, long *len)
{
	return nand_block_op(DFU_OP_READ, dfu, offset, buf, len);
}

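/*
 * Only the DFU_RAW_ADDR layout is handled in the read/write paths below;
 * "part" and "partubi" entities are converted to raw start/size values in
 * dfu_fill_entity_nand(), so they end up here as well. Any other layout is
 * rejected with a message.
 */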
static int dfu_write_medium_nand(struct dfu_entity *dfu,
				 u64 offset, void *buf, long *len)
{
	int ret = -1;

	switch (dfu->layout) {
	case DFU_RAW_ADDR:
		ret = nand_block_write(dfu, offset, buf, len);
		break;
	default:
		printf("%s: Layout (%s) not (yet) supported!\n", __func__,
		       dfu_get_layout(dfu->layout));
	}

	return ret;
}

long dfu_get_medium_size_nand(struct dfu_entity *dfu)
{
	return dfu->data.nand.size;
}

static int dfu_read_medium_nand(struct dfu_entity *dfu, u64 offset, void *buf,
				long *len)
{
	int ret = -1;

	switch (dfu->layout) {
	case DFU_RAW_ADDR:
		ret = nand_block_read(dfu, offset, buf, len);
		break;
	default:
		printf("%s: Layout (%s) not (yet) supported!\n", __func__,
		       dfu_get_layout(dfu->layout));
	}

	return ret;
}

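/*
 * Manifestation hook: for "partubi" entities the remainder of the partition
 * is erased after the image has been written, so the sectors not covered by
 * the downloaded image are left in the erased state that UBI expects when it
 * later attaches to the partition (see also the extended poll timeout below).
 */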
static int dfu_flush_medium_nand(struct dfu_entity *dfu)
{
	int ret = 0;
	u64 off;

	/* in case of a ubi partition, erase the rest of the partition */
	if (dfu->data.nand.ubi) {
		struct mtd_info *mtd;
		nand_erase_options_t opts;

		if (nand_curr_device < 0 ||
		    nand_curr_device >= CONFIG_SYS_MAX_NAND_DEVICE ||
		    !nand_info[nand_curr_device]->name) {
			printf("%s: invalid nand device\n", __func__);
			return -1;
		}

		mtd = nand_info[nand_curr_device];

		memset(&opts, 0, sizeof(opts));
		off = dfu->offset;
		if ((off & (mtd->erasesize - 1)) != 0) {
			/*
			 * The last write ended with an unaligned length, so
			 * its erase block has already been erased; start
			 * erasing from the next one.
			 */
			off = off & ~((mtd->erasesize - 1));
			off += mtd->erasesize;
		}

		opts.offset = dfu->data.nand.start + off +
			      dfu->bad_skip;
		opts.length = dfu->data.nand.start +
			      dfu->data.nand.size - opts.offset;
		ret = nand_erase_opts(mtd, &opts);
		if (ret != 0)
			printf("Erase failed: %d\n", ret);
	}

	return ret;
}

unsigned int dfu_polltimeout_nand(struct dfu_entity *dfu)
{
	/*
	 * Currently, a non-zero Poll Timeout is only needed on NAND
	 * ubi partitions, as the unused sectors need an erase.
	 */
	if (dfu->data.nand.ubi)
		return DFU_MANIFEST_POLL_TIMEOUT;

	return DFU_DEFAULT_POLL_TIMEOUT;
}

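/*
 * Parse the NAND portion of a dfu_alt_info entity string. Three forms are
 * accepted, matching the parsing below:
 *
 *   raw <start_hex> <size_hex>   - absolute offset and length on the device
 *   part <dev> <part_id>         - mtdparts partition (part_id is 1-based)
 *   partubi <dev> <part_id>      - as "part", but the remainder of the
 *                                  partition is erased on flush for UBI use
 *
 * For example (values are illustrative only): "raw 0x80000 0x200000" or
 * "part 0 2".
 */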
int dfu_fill_entity_nand(struct dfu_entity *dfu, char *devstr, char *s)
{
	char *st;
	int ret, dev, part;

	dfu->data.nand.ubi = 0;
	dfu->dev_type = DFU_DEV_NAND;
	st = strsep(&s, " ");
	if (!strcmp(st, "raw")) {
		dfu->layout = DFU_RAW_ADDR;
		dfu->data.nand.start = simple_strtoul(s, &s, 16);
		s++;
		dfu->data.nand.size = simple_strtoul(s, &s, 16);
	} else if ((!strcmp(st, "part")) || (!strcmp(st, "partubi"))) {
		char mtd_id[32];
		struct mtd_device *mtd_dev;
		u8 part_num;
		struct part_info *pi;

		dfu->layout = DFU_RAW_ADDR;

		dev = simple_strtoul(s, &s, 10);
		s++;
		part = simple_strtoul(s, &s, 10);

		sprintf(mtd_id, "%s%d,%d", "nand", dev, part - 1);
		printf("using id '%s'\n", mtd_id);

		mtdparts_init();

		ret = find_dev_and_part(mtd_id, &mtd_dev, &part_num, &pi);
		if (ret != 0) {
			printf("Could not locate '%s'\n", mtd_id);
			return -1;
		}

		dfu->data.nand.start = pi->offset;
		dfu->data.nand.size = pi->size;
		if (!strcmp(st, "partubi"))
			dfu->data.nand.ubi = 1;
	} else {
		printf("%s: Memory layout (%s) not supported!\n", __func__, st);
		return -1;
	}

	dfu->get_medium_size = dfu_get_medium_size_nand;
	dfu->read_medium = dfu_read_medium_nand;
	dfu->write_medium = dfu_write_medium_nand;
	dfu->flush_medium = dfu_flush_medium_nand;
	dfu->poll_timeout = dfu_polltimeout_nand;

	/* initial state */
	dfu->inited = 0;

	return 0;
}