author		Anton Salikhmetov		2010-12-16 18:08:38 +0200
committer	Christoph Hellwig		2010-12-16 18:08:45 +0100
commit		2753cc281c9a0e8a0a45ee2b8110866a9fe63bdd (patch)
tree		d28794ef990637cfcd734a8b467f119e6d69dac1 /fs/hfsplus/bnode.c
parent		596276c3571e2108f4b336be545ece2eacf3da59 (diff)
hfsplus: over 80 character lines clean-up
Match the coding style's line length limit where checkpatch.pl
reported over-80-character-line warnings.
Signed-off-by: Anton Salikhmetov <alexo@tuxera.com>
Signed-off-by: Christoph Hellwig <hch@tuxera.com>
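
The wrapping convention applied throughout this patch can be illustrated with a small, self-contained sketch (not taken from the kernel tree; the helper name and its arguments, report_node, tree_name, node_id and record_count, are hypothetical): when a call would run past 80 columns, it is split after a comma and the remaining arguments are carried onto an indented continuation line, leaving behaviour unchanged.

#include <stdio.h>

/* Hypothetical helper, used only to show the continuation-line style
 * the patch applies to memmove(), printk(), hfs_bnode_write(),
 * wait_event() and dprint() calls that exceeded 80 columns.
 */
static void report_node(const char *tree_name, unsigned int node_id,
			unsigned int record_count)
{
	/* Arguments that would overflow the 80-column limit are wrapped
	 * after a comma and indented on the following line.
	 */
	printf("%s: node %u holds %u records\n",
	       tree_name, node_id,
	       record_count);
}

int main(void)
{
	report_node("catalog", 42, 7);
	return 0;
}

Running scripts/checkpatch.pl with its --file option over fs/hfsplus/bnode.c is the usual way to surface the over-80-column warnings that motivated this clean-up.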
Diffstat (limited to 'fs/hfsplus/bnode.c')
-rw-r--r--	fs/hfsplus/bnode.c	41
1 file changed, 28 insertions, 13 deletions
diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
index c8aa1659b838..cf7dc8dac766 100644
--- a/fs/hfsplus/bnode.c
+++ b/fs/hfsplus/bnode.c
@@ -212,7 +212,8 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
 				dst_page--;
 			}
 			src -= len;
-			memmove(kmap(*dst_page) + src, kmap(*src_page) + src, len);
+			memmove(kmap(*dst_page) + src,
+				kmap(*src_page) + src, len);
 			kunmap(*src_page);
 			set_page_dirty(*dst_page);
 			kunmap(*dst_page);
@@ -250,14 +251,16 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
 
 		if (src == dst) {
 			l = min(len, (int)PAGE_CACHE_SIZE - src);
-			memmove(kmap(*dst_page) + src, kmap(*src_page) + src, l);
+			memmove(kmap(*dst_page) + src,
+				kmap(*src_page) + src, l);
 			kunmap(*src_page);
 			set_page_dirty(*dst_page);
 			kunmap(*dst_page);
 
 			while ((len -= l) != 0) {
 				l = min(len, (int)PAGE_CACHE_SIZE);
-				memmove(kmap(*++dst_page), kmap(*++src_page), l);
+				memmove(kmap(*++dst_page),
+					kmap(*++src_page), l);
 				kunmap(*src_page);
 				set_page_dirty(*dst_page);
 				kunmap(*dst_page);
@@ -268,7 +271,8 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
 			do {
 				src_ptr = kmap(*src_page) + src;
 				dst_ptr = kmap(*dst_page) + dst;
-				if (PAGE_CACHE_SIZE - src < PAGE_CACHE_SIZE - dst) {
+				if (PAGE_CACHE_SIZE - src <
+						PAGE_CACHE_SIZE - dst) {
 					l = PAGE_CACHE_SIZE - src;
 					src = 0;
 					dst += l;
@@ -340,7 +344,8 @@ void hfs_bnode_unlink(struct hfs_bnode *node)
 			return;
 		tmp->next = node->next;
 		cnid = cpu_to_be32(tmp->next);
-		hfs_bnode_write(tmp, &cnid, offsetof(struct hfs_bnode_desc, next), 4);
+		hfs_bnode_write(tmp, &cnid,
+			offsetof(struct hfs_bnode_desc, next), 4);
 		hfs_bnode_put(tmp);
 	} else if (node->type == HFS_NODE_LEAF)
 		tree->leaf_head = node->next;
@@ -351,7 +356,8 @@ void hfs_bnode_unlink(struct hfs_bnode *node)
 			return;
 		tmp->prev = node->prev;
 		cnid = cpu_to_be32(tmp->prev);
-		hfs_bnode_write(tmp, &cnid, offsetof(struct hfs_bnode_desc, prev), 4);
+		hfs_bnode_write(tmp, &cnid,
+			offsetof(struct hfs_bnode_desc, prev), 4);
 		hfs_bnode_put(tmp);
 	} else if (node->type == HFS_NODE_LEAF)
 		tree->leaf_tail = node->prev;
@@ -379,7 +385,9 @@ struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
 	struct hfs_bnode *node;
 
 	if (cnid >= tree->node_count) {
-		printk(KERN_ERR "hfs: request for non-existent node %d in B*Tree\n", cnid);
+		printk(KERN_ERR "hfs: request for non-existent node "
+				"%d in B*Tree\n",
+			cnid);
 		return NULL;
 	}
 
@@ -402,7 +410,9 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
 	loff_t off;
 
 	if (cnid >= tree->node_count) {
-		printk(KERN_ERR "hfs: request for non-existent node %d in B*Tree\n", cnid);
+		printk(KERN_ERR "hfs: request for non-existent node "
+				"%d in B*Tree\n",
+			cnid);
 		return NULL;
 	}
 
@@ -429,7 +439,8 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
 	} else {
 		spin_unlock(&tree->hash_lock);
 		kfree(node);
-		wait_event(node2->lock_wq, !test_bit(HFS_BNODE_NEW, &node2->flags));
+		wait_event(node2->lock_wq,
+			!test_bit(HFS_BNODE_NEW, &node2->flags));
 		return node2;
 	}
 	spin_unlock(&tree->hash_lock);
@@ -483,7 +494,8 @@ struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
 	if (node) {
 		hfs_bnode_get(node);
 		spin_unlock(&tree->hash_lock);
-		wait_event(node->lock_wq, !test_bit(HFS_BNODE_NEW, &node->flags));
+		wait_event(node->lock_wq,
+			!test_bit(HFS_BNODE_NEW, &node->flags));
 		if (test_bit(HFS_BNODE_ERROR, &node->flags))
 			goto node_error;
 		return node;
@@ -497,7 +509,8 @@ struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
 	if (!test_bit(HFS_BNODE_NEW, &node->flags))
 		return node;
 
-	desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) + node->page_offset);
+	desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) +
+			node->page_offset);
 	node->prev = be32_to_cpu(desc->prev);
 	node->next = be32_to_cpu(desc->next);
 	node->num_recs = be16_to_cpu(desc->num_recs);
@@ -607,7 +620,8 @@ void hfs_bnode_get(struct hfs_bnode *node)
 	if (node) {
 		atomic_inc(&node->refcnt);
 		dprint(DBG_BNODE_REFS, "get_node(%d:%d): %d\n",
-			node->tree->cnid, node->this, atomic_read(&node->refcnt));
+			node->tree->cnid, node->this,
+			atomic_read(&node->refcnt));
 	}
 }
 
@@ -619,7 +633,8 @@ void hfs_bnode_put(struct hfs_bnode *node)
 		int i;
 
 		dprint(DBG_BNODE_REFS, "put_node(%d:%d): %d\n",
-			node->tree->cnid, node->this, atomic_read(&node->refcnt));
+			node->tree->cnid, node->this,
+			atomic_read(&node->refcnt));
 		BUG_ON(!atomic_read(&node->refcnt));
 		if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
 			return;