author | Matias Bjørling | 2015-11-02 17:12:27 +0100
committer | Jens Axboe | 2015-11-03 09:53:24 -0700
commit | b7ceb7d50048d0dd4830f106f0fb7f5424031598 (patch)
tree | 63907b30b02b8ca1cb6c6ea108114a696a6ffff2 /drivers/lightnvm/rrpc.c
parent | ca0640850e43f5f80c6029e2895b119b705f23bd (diff)
lightnvm: refactor phys addrs type to u64
For cases where CONFIG_LBDAF is not set, struct ppa_addr exceeds its backing
type on 32-bit architectures: ppa_addr requires a 64-bit integer to hold the
generic ppa format. We therefore refactor it to u64 and replace the sector_t
usages with u64 for physical addresses.
Signed-off-by: Matias Bjørling <m@bjorling.me>
Signed-off-by: Jens Axboe <axboe@fb.com>
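To illustrate the overflow the commit message describes, here is a minimal
user-space sketch (not kernel code; the 32-bit stand-in type and the sample
address are assumptions for demonstration) of how a 64-bit generic ppa value
is truncated when stored in a 32-bit sector_t, as happens on 32-bit builds
without CONFIG_LBDAF:

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/*
 * Demonstration only: on a 32-bit kernel without CONFIG_LBDAF, sector_t is
 * an unsigned long (32 bits). uint32_t stands in for that narrow sector_t
 * here so the truncation shows regardless of the host architecture.
 */
typedef uint32_t narrow_sector_t;

int main(void)
{
	/* Hypothetical packed generic ppa value that needs more than 32 bits. */
	uint64_t ppa = 0x0000000123456789ULL;

	narrow_sector_t as_sector = (narrow_sector_t)ppa; /* upper 32 bits are lost */
	uint64_t as_u64 = ppa;                            /* full value preserved   */

	printf("u64:             0x%016" PRIx64 "\n", as_u64);
	printf("32-bit sector_t: 0x%08" PRIx32 "\n", as_sector);
	return 0;
}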
Diffstat (limited to 'drivers/lightnvm/rrpc.c')
-rw-r--r-- | drivers/lightnvm/rrpc.c | 17
1 file changed, 8 insertions(+), 9 deletions(-)
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index 22fcd629565d..64a888a5e9b3 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -116,15 +116,14 @@ static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk)
 	return (rblk->next_page == rrpc->dev->pgs_per_blk);
 }
 
-static sector_t block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
+static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
 {
 	struct nvm_block *blk = rblk->parent;
 
 	return blk->id * rrpc->dev->pgs_per_blk;
 }
 
-static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev,
-								sector_t addr)
+static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr)
 {
 	struct ppa_addr paddr;
 
@@ -231,7 +230,7 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
 	struct page *page;
 	int slot;
 	int nr_pgs_per_blk = rrpc->dev->pgs_per_blk;
-	sector_t phys_addr;
+	u64 phys_addr;
 	DECLARE_COMPLETION_ONSTACK(wait);
 
 	if (bitmap_full(rblk->invalid_pages, nr_pgs_per_blk))
@@ -464,7 +463,7 @@ static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
 }
 
 static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
-					struct rrpc_block *rblk, sector_t paddr)
+					struct rrpc_block *rblk, u64 paddr)
 {
 	struct rrpc_addr *gp;
 	struct rrpc_rev_addr *rev;
@@ -486,9 +485,9 @@ static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
 	return gp;
 }
 
-static sector_t rrpc_alloc_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
+static u64 rrpc_alloc_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
 {
-	sector_t addr = ADDR_EMPTY;
+	u64 addr = ADDR_EMPTY;
 
 	spin_lock(&rblk->lock);
 	if (block_is_full(rrpc, rblk))
@@ -516,7 +515,7 @@ static struct rrpc_addr *rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
 	struct rrpc_lun *rlun;
 	struct rrpc_block *rblk;
 	struct nvm_lun *lun;
-	sector_t paddr;
+	u64 paddr;
 
 	rlun = rrpc_get_lun_rr(rrpc, is_gc);
 	lun = rlun->parent;
@@ -1144,7 +1143,7 @@ static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
 	struct nvm_dev *dev = rrpc->dev;
 	int offset;
 	struct rrpc_addr *laddr;
-	sector_t paddr, pladdr;
+	u64 paddr, pladdr;
 
 	for (offset = 0; offset < dev->pgs_per_blk; offset++) {
 		paddr = block_to_addr(rrpc, rblk) + offset;
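For context on why a full u64 is needed: the sketch below packs a linear page
address (the kind of value block_to_addr() returns) into a ppa-style bitfield
union. The field names, widths, and geometry here are hypothetical stand-ins,
not the real struct ppa_addr layout from include/linux/lightnvm.h; they only
show that the packed generic format occupies the whole 64-bit word that
rrpc_ppa_to_gaddr() now receives as u64.

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/*
 * Hypothetical ppa-style layout for illustration only; the real
 * struct ppa_addr uses different field names and widths. The
 * uint64_t bitfields rely on a GCC/Clang extension, as the kernel does.
 */
struct demo_ppa {
	union {
		struct {
			uint64_t pg  : 16;	/* page within block  */
			uint64_t blk : 16;	/* block within plane */
			uint64_t pl  : 8;	/* plane within LUN   */
			uint64_t lun : 8;	/* LUN within channel */
			uint64_t ch  : 16;	/* channel            */
		} g;
		uint64_t ppa;			/* whole packed address */
	};
};

/* Split a linear page address (block_to_addr()-style value) into block
 * and page fields, assuming a fixed pages-per-block geometry. */
static struct demo_ppa demo_linear_to_generic(uint64_t linear, uint32_t pgs_per_blk)
{
	struct demo_ppa p = { .ppa = 0 };

	p.g.pg  = linear % pgs_per_blk;
	p.g.blk = linear / pgs_per_blk;
	return p;
}

int main(void)
{
	/* Linear address 1000 * 256 + 21 maps to block 1000, page 21. */
	struct demo_ppa p = demo_linear_to_generic(1000ULL * 256 + 21, 256);

	printf("packed ppa = 0x%016" PRIx64 " (blk=%u pg=%u)\n",
	       p.ppa, (unsigned)p.g.blk, (unsigned)p.g.pg);
	return 0;
}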