author      Heiko Schocher          2015-10-22 06:19:21 +0200
committer   Heiko Schocher          2015-10-26 09:22:36 +0100
commit      0195a7bb36a0abc71145da419001377bf56662fd (patch)
tree        bca19320118d4cbeb770d94f6b1e9d3ad91dc151 /fs/ubifs/io.c
parent      5219db8ae802210730b348a888474efc2f9bf0a4 (diff)
ubi,ubifs: sync with linux v4.2
sync with linux v4.2
commit 64291f7db5bd8150a74ad2036f1037e6a0428df2
Author: Linus Torvalds <torvalds@linux-foundation.org>
Date: Sun Aug 30 11:34:09 2015 -0700
Linux 4.2
This update is needed because, as it turned out, fastmap
was in an experimental/broken state in kernel v3.15, which
was the last kernel base for U-Boot.
Signed-off-by: Heiko Schocher <hs@denx.de>
Tested-by: Ezequiel Garcia <ezequiel@vanguardiasur.com.ar>
Diffstat (limited to 'fs/ubifs/io.c')
-rw-r--r--   fs/ubifs/io.c   68
1 file changed, 39 insertions(+), 29 deletions(-)
diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
index bdccdc45e11..51a95bb8552 100644
--- a/fs/ubifs/io.c
+++ b/fs/ubifs/io.c
@@ -79,7 +79,7 @@ void ubifs_ro_mode(struct ubifs_info *c, int err)
                 c->ro_error = 1;
                 c->no_chk_data_crc = 0;
                 c->vfs_sb->s_flags |= MS_RDONLY;
-                ubifs_warn("switched to read-only mode, error %d", err);
+                ubifs_warn(c, "switched to read-only mode, error %d", err);
                 dump_stack();
         }
 }
@@ -101,7 +101,7 @@ int ubifs_leb_read(const struct ubifs_info *c, int lnum, void *buf, int offs,
          * @even_ebadmsg is true.
          */
         if (err && (err != -EBADMSG || even_ebadmsg)) {
-                ubifs_err("reading %d bytes from LEB %d:%d failed, error %d",
+                ubifs_err(c, "reading %d bytes from LEB %d:%d failed, error %d",
                           len, lnum, offs, err);
                 dump_stack();
         }
@@ -118,10 +118,12 @@ int ubifs_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs,
                 return -EROFS;
         if (!dbg_is_tst_rcvry(c))
                 err = ubi_leb_write(c->ubi, lnum, buf, offs, len);
+#ifndef __UBOOT__
         else
                 err = dbg_leb_write(c, lnum, buf, offs, len);
+#endif
         if (err) {
-                ubifs_err("writing %d bytes to LEB %d:%d failed, error %d",
+                ubifs_err(c, "writing %d bytes to LEB %d:%d failed, error %d",
                           len, lnum, offs, err);
                 ubifs_ro_mode(c, err);
                 dump_stack();
@@ -138,10 +140,12 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
                 return -EROFS;
         if (!dbg_is_tst_rcvry(c))
                 err = ubi_leb_change(c->ubi, lnum, buf, len);
+#ifndef __UBOOT__
         else
                 err = dbg_leb_change(c, lnum, buf, len);
+#endif
         if (err) {
-                ubifs_err("changing %d bytes in LEB %d failed, error %d",
+                ubifs_err(c, "changing %d bytes in LEB %d failed, error %d",
                           len, lnum, err);
                 ubifs_ro_mode(c, err);
                 dump_stack();
@@ -158,10 +162,12 @@ int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
                 return -EROFS;
         if (!dbg_is_tst_rcvry(c))
                 err = ubi_leb_unmap(c->ubi, lnum);
+#ifndef __UBOOT__
         else
                 err = dbg_leb_unmap(c, lnum);
+#endif
         if (err) {
-                ubifs_err("unmap LEB %d failed, error %d", lnum, err);
+                ubifs_err(c, "unmap LEB %d failed, error %d", lnum, err);
                 ubifs_ro_mode(c, err);
                 dump_stack();
         }
@@ -177,10 +183,12 @@ int ubifs_leb_map(struct ubifs_info *c, int lnum)
                 return -EROFS;
         if (!dbg_is_tst_rcvry(c))
                 err = ubi_leb_map(c->ubi, lnum);
+#ifndef __UBOOT__
         else
                 err = dbg_leb_map(c, lnum);
+#endif
         if (err) {
-                ubifs_err("mapping LEB %d failed, error %d", lnum, err);
+                ubifs_err(c, "mapping LEB %d failed, error %d", lnum, err);
                 ubifs_ro_mode(c, err);
                 dump_stack();
         }
@@ -193,7 +201,7 @@ int ubifs_is_mapped(const struct ubifs_info *c, int lnum)
 
         err = ubi_is_mapped(c->ubi, lnum);
         if (err < 0) {
-                ubifs_err("ubi_is_mapped failed for LEB %d, error %d",
+                ubifs_err(c, "ubi_is_mapped failed for LEB %d, error %d",
                           lnum, err);
                 dump_stack();
         }
@@ -241,7 +249,7 @@ int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
         magic = le32_to_cpu(ch->magic);
         if (magic != UBIFS_NODE_MAGIC) {
                 if (!quiet)
-                        ubifs_err("bad magic %#08x, expected %#08x",
+                        ubifs_err(c, "bad magic %#08x, expected %#08x",
                                   magic, UBIFS_NODE_MAGIC);
                 err = -EUCLEAN;
                 goto out;
@@ -250,7 +258,7 @@ int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
         type = ch->node_type;
         if (type < 0 || type >= UBIFS_NODE_TYPES_CNT) {
                 if (!quiet)
-                        ubifs_err("bad node type %d", type);
+                        ubifs_err(c, "bad node type %d", type);
                 goto out;
         }
 
@@ -273,7 +281,7 @@ int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
         node_crc = le32_to_cpu(ch->crc);
         if (crc != node_crc) {
                 if (!quiet)
-                        ubifs_err("bad CRC: calculated %#08x, read %#08x",
+                        ubifs_err(c, "bad CRC: calculated %#08x, read %#08x",
                                   crc, node_crc);
                 err = -EUCLEAN;
                 goto out;
@@ -283,10 +291,10 @@ int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
 
 out_len:
         if (!quiet)
-                ubifs_err("bad node length %d", node_len);
+                ubifs_err(c, "bad node length %d", node_len);
 out:
         if (!quiet) {
-                ubifs_err("bad node at LEB %d:%d", lnum, offs);
+                ubifs_err(c, "bad node at LEB %d:%d", lnum, offs);
                 ubifs_dump_node(c, buf);
                 dump_stack();
         }
@@ -349,11 +357,11 @@ static unsigned long long next_sqnum(struct ubifs_info *c)
 
         if (unlikely(sqnum >= SQNUM_WARN_WATERMARK)) {
                 if (sqnum >= SQNUM_WATERMARK) {
-                        ubifs_err("sequence number overflow %llu, end of life",
+                        ubifs_err(c, "sequence number overflow %llu, end of life",
                                   sqnum);
                         ubifs_ro_mode(c, -EINVAL);
                 }
-                ubifs_warn("running out of sequence numbers, end of life soon");
+                ubifs_warn(c, "running out of sequence numbers, end of life soon");
         }
 
         return sqnum;
@@ -426,7 +434,7 @@ void ubifs_prep_grp_node(struct ubifs_info *c, void *node, int len, int last)
 #ifndef __UBOOT__
 /**
  * wbuf_timer_callback - write-buffer timer callback function.
- * @data: timer data (write-buffer descriptor)
+ * @timer: timer data (write-buffer descriptor)
  *
  * This function is called when the write-buffer timer expires.
  */
@@ -635,7 +643,7 @@ int ubifs_bg_wbufs_sync(struct ubifs_info *c)
                 err = ubifs_wbuf_sync_nolock(wbuf);
                 mutex_unlock(&wbuf->io_mutex);
                 if (err) {
-                        ubifs_err("cannot sync write-buffer, error %d", err);
+                        ubifs_err(c, "cannot sync write-buffer, error %d", err);
                         ubifs_ro_mode(c, err);
                         goto out_timers;
                 }
@@ -832,7 +840,7 @@ exit:
         return 0;
 
 out:
-        ubifs_err("cannot write %d bytes to LEB %d:%d, error %d",
+        ubifs_err(c, "cannot write %d bytes to LEB %d:%d, error %d",
                   len, wbuf->lnum, wbuf->offs, err);
         ubifs_dump_node(c, buf);
         dump_stack();
@@ -932,27 +940,27 @@ int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len,
         }
 
         if (type != ch->node_type) {
-                ubifs_err("bad node type (%d but expected %d)",
+                ubifs_err(c, "bad node type (%d but expected %d)",
                           ch->node_type, type);
                 goto out;
         }
 
         err = ubifs_check_node(c, buf, lnum, offs, 0, 0);
         if (err) {
-                ubifs_err("expected node type %d", type);
+                ubifs_err(c, "expected node type %d", type);
                 return err;
         }
 
         rlen = le32_to_cpu(ch->len);
         if (rlen != len) {
-                ubifs_err("bad node length %d, expected %d", rlen, len);
+                ubifs_err(c, "bad node length %d, expected %d", rlen, len);
                 goto out;
         }
 
         return 0;
 
 out:
-        ubifs_err("bad node at LEB %d:%d", lnum, offs);
+        ubifs_err(c, "bad node at LEB %d:%d", lnum, offs);
         ubifs_dump_node(c, buf);
         dump_stack();
         return -EINVAL;
@@ -988,30 +996,32 @@ int ubifs_read_node(const struct ubifs_info *c, void *buf, int type, int len,
                 return err;
 
         if (type != ch->node_type) {
-                ubifs_err("bad node type (%d but expected %d)",
-                          ch->node_type, type);
+                ubifs_errc(c, "bad node type (%d but expected %d)",
+                           ch->node_type, type);
                 goto out;
         }
 
         err = ubifs_check_node(c, buf, lnum, offs, 0, 0);
         if (err) {
-                ubifs_err("expected node type %d", type);
+                ubifs_errc(c, "expected node type %d", type);
                 return err;
         }
 
         l = le32_to_cpu(ch->len);
         if (l != len) {
-                ubifs_err("bad node length %d, expected %d", l, len);
+                ubifs_errc(c, "bad node length %d, expected %d", l, len);
                 goto out;
         }
 
         return 0;
 
 out:
-        ubifs_err("bad node at LEB %d:%d, LEB mapping status %d", lnum, offs,
-                  ubi_is_mapped(c->ubi, lnum));
-        ubifs_dump_node(c, buf);
-        dump_stack();
+        ubifs_errc(c, "bad node at LEB %d:%d, LEB mapping status %d", lnum,
+                   offs, ubi_is_mapped(c->ubi, lnum));
+        if (!c->probing) {
+                ubifs_dump_node(c, buf);
+                dump_stack();
+        }
         return -EINVAL;
 }
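
The common pattern in these hunks is that the UBIFS message helpers now take the struct ubifs_info pointer as their first argument, so a message can identify the UBI volume it refers to, and ubifs_read_node() switches to the quieter ubifs_errc() path that stays silent while the filesystem is only being probed (c->probing). The standalone C sketch below only illustrates that calling convention; ubifs_info_sketch, err_msg() and errc_msg() are hypothetical stand-ins, not the real fs/ubifs helpers.

/*
 * Illustrative sketch only -- not the fs/ubifs implementation.
 * It mimics the convention introduced by this sync: every message
 * helper receives the filesystem context so the output can name the
 * UBI volume, and the "errc" variant drops the message while the
 * filesystem is merely being probed, as in ubifs_read_node() above.
 */
#include <stdarg.h>
#include <stdio.h>

struct ubifs_info_sketch {      /* reduced stand-in for struct ubifs_info */
        int ubi_num;            /* UBI device number */
        int vol_id;             /* UBI volume id */
        int probing;            /* non-zero while the fs is only probed */
};

static void err_msg(const struct ubifs_info_sketch *c, const char *fmt, ...)
{
        va_list ap;

        /* Prefix the message with the volume this fs instance lives on. */
        fprintf(stderr, "UBIFS error (ubi%d:%d): ", c->ubi_num, c->vol_id);
        va_start(ap, fmt);
        vfprintf(stderr, fmt, ap);
        va_end(ap);
        fputc('\n', stderr);
}

/* Quiet variant: suppress the message during a probe-only mount attempt. */
static void errc_msg(const struct ubifs_info_sketch *c, const char *fmt, ...)
{
        va_list ap;

        if (c->probing)
                return;
        fprintf(stderr, "UBIFS error (ubi%d:%d): ", c->ubi_num, c->vol_id);
        va_start(ap, fmt);
        vfprintf(stderr, fmt, ap);
        va_end(ap);
        fputc('\n', stderr);
}

int main(void)
{
        struct ubifs_info_sketch c = { .ubi_num = 0, .vol_id = 1, .probing = 0 };

        err_msg(&c, "bad node length %d, expected %d", 24, 32);
        c.probing = 1;
        errc_msg(&c, "bad node at LEB %d:%d", 5, 4096);  /* suppressed */
        return 0;
}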