Diffstat (limited to 'drivers/mmc')
-rw-r--r--  drivers/mmc/card/block.c          |  82
-rw-r--r--  drivers/mmc/card/mmc_test.c       | 811
-rw-r--r--  drivers/mmc/card/queue.c          |  19
-rw-r--r--  drivers/mmc/core/core.c           | 346
-rw-r--r--  drivers/mmc/core/core.h           |   2
-rw-r--r--  drivers/mmc/core/mmc.c            |  47
-rw-r--r--  drivers/mmc/core/sd.c             |  82
-rw-r--r--  drivers/mmc/core/sd_ops.c         |  48
-rw-r--r--  drivers/mmc/core/sd_ops.h         |   1
-rw-r--r--  drivers/mmc/host/mmc_spi.c        |   1
-rw-r--r--  drivers/mmc/host/omap_hsmmc.c     |  13
-rw-r--r--  drivers/mmc/host/sdhci-of-core.c  |   8
12 files changed, 1447 insertions, 13 deletions
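Taken together, the core additions below give drivers a small capability-query-then-erase API. As a quick orientation before the hunks — a minimal sketch modelled on the new mmc_blk_issue_discard_rq() path, not code taken from the patch itself; header locations are assumed, since the include/linux/mmc changes fall outside this diffstat:

	/*
	 * Illustrative caller of the erase API added by this series.
	 * Mirrors the block driver's discard path shown below.
	 */
	#include <linux/mmc/core.h>
	#include <linux/mmc/card.h>
	#include <linux/mmc/host.h>

	static int example_discard(struct mmc_card *card, unsigned int from,
				   unsigned int nr)
	{
		unsigned int arg;
		int err;

		mmc_claim_host(card->host);	/* erase commands need exclusive access */

		if (!mmc_can_erase(card)) {
			err = -EOPNOTSUPP;	/* host or card lacks erase support */
			goto out;
		}

		/* Trim works on write blocks; plain erase on whole erase groups */
		arg = mmc_can_trim(card) ? MMC_TRIM_ARG : MMC_ERASE_ARG;

		err = mmc_erase(card, from, nr, arg);	/* 'from'/'nr' in 512-byte sectors */
	out:
		mmc_release_host(card->host);
		return err;
	}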
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index 8433cde29c8b..d545f79f6000 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -247,7 +247,76 @@ static u32 get_card_status(struct mmc_card *card, struct request *req) return cmd.resp[0]; } -static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) +static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) +{ + struct mmc_blk_data *md = mq->data; + struct mmc_card *card = md->queue.card; + unsigned int from, nr, arg; + int err = 0; + + mmc_claim_host(card->host); + + if (!mmc_can_erase(card)) { + err = -EOPNOTSUPP; + goto out; + } + + from = blk_rq_pos(req); + nr = blk_rq_sectors(req); + + if (mmc_can_trim(card)) + arg = MMC_TRIM_ARG; + else + arg = MMC_ERASE_ARG; + + err = mmc_erase(card, from, nr, arg); +out: + spin_lock_irq(&md->lock); + __blk_end_request(req, err, blk_rq_bytes(req)); + spin_unlock_irq(&md->lock); + + mmc_release_host(card->host); + + return err ? 0 : 1; +} + +static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq, + struct request *req) +{ + struct mmc_blk_data *md = mq->data; + struct mmc_card *card = md->queue.card; + unsigned int from, nr, arg; + int err = 0; + + mmc_claim_host(card->host); + + if (!mmc_can_secure_erase_trim(card)) { + err = -EOPNOTSUPP; + goto out; + } + + from = blk_rq_pos(req); + nr = blk_rq_sectors(req); + + if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr)) + arg = MMC_SECURE_TRIM1_ARG; + else + arg = MMC_SECURE_ERASE_ARG; + + err = mmc_erase(card, from, nr, arg); + if (!err && arg == MMC_SECURE_TRIM1_ARG) + err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG); +out: + spin_lock_irq(&md->lock); + __blk_end_request(req, err, blk_rq_bytes(req)); + spin_unlock_irq(&md->lock); + + mmc_release_host(card->host); + + return err ? 0 : 1; +} + +static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req) { struct mmc_blk_data *md = mq->data; struct mmc_card *card = md->queue.card; @@ -475,6 +544,17 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) return 0; } +static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) +{ + if (req->cmd_flags & REQ_DISCARD) { + if (req->cmd_flags & REQ_SECURE) + return mmc_blk_issue_secdiscard_rq(mq, req); + else + return mmc_blk_issue_discard_rq(mq, req); + } else { + return mmc_blk_issue_rw_rq(mq, req); + } +} static inline int mmc_blk_readonly(struct mmc_card *card) { diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c index 445d7db2277e..5dd8576b5c18 100644 --- a/drivers/mmc/card/mmc_test.c +++ b/drivers/mmc/card/mmc_test.c @@ -16,6 +16,7 @@ #include <linux/slab.h> #include <linux/scatterlist.h> +#include <linux/swap.h> /* For nr_free_buffer_pages() */ #define RESULT_OK 0 #define RESULT_FAIL 1 @@ -25,6 +26,60 @@ #define BUFFER_ORDER 2 #define BUFFER_SIZE (PAGE_SIZE << BUFFER_ORDER) +/* + * Limit the test area size to the maximum MMC HC erase group size. Note that + * the maximum SD allocation unit size is just 4MiB. + */ +#define TEST_AREA_MAX_SIZE (128 * 1024 * 1024) + +/** + * struct mmc_test_pages - pages allocated by 'alloc_pages()'. + * @page: first page in the allocation + * @order: order of the number of pages allocated + */ +struct mmc_test_pages { + struct page *page; + unsigned int order; +}; + +/** + * struct mmc_test_mem - allocated memory. 
+ * @arr: array of allocations
+ * @cnt: number of allocations
+ */
+struct mmc_test_mem {
+	struct mmc_test_pages *arr;
+	unsigned int cnt;
+};
+
+/**
+ * struct mmc_test_area - information for performance tests.
+ * @max_sz: test area size (in bytes)
+ * @dev_addr: address on card at which to do performance tests
+ * @max_segs: maximum segments in scatterlist @sg
+ * @blocks: number of (512 byte) blocks currently mapped by @sg
+ * @sg_len: length of currently mapped scatterlist @sg
+ * @mem: allocated memory
+ * @sg: scatterlist
+ */
+struct mmc_test_area {
+	unsigned long max_sz;
+	unsigned int dev_addr;
+	unsigned int max_segs;
+	unsigned int blocks;
+	unsigned int sg_len;
+	struct mmc_test_mem *mem;
+	struct scatterlist *sg;
+};
+
+/**
+ * struct mmc_test_card - test information.
+ * @card: card under test
+ * @scratch: transfer buffer
+ * @buffer: transfer buffer
+ * @highmem: buffer for highmem tests
+ * @area: information for performance tests
+ */
 struct mmc_test_card {
 	struct mmc_card	*card;
@@ -33,6 +88,7 @@ struct mmc_test_card {
 #ifdef CONFIG_HIGHMEM
 	struct page *highmem;
 #endif
+	struct mmc_test_area area;
 };
 
 /*******************************************************************/
@@ -97,6 +153,12 @@ static void mmc_test_prepare_mrq(struct mmc_test_card *test,
 	mmc_set_data_timeout(mrq->data, test->card);
 }
 
+static int mmc_test_busy(struct mmc_command *cmd)
+{
+	return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
+		(R1_CURRENT_STATE(cmd->resp[0]) == 7);
+}
+
 /*
  * Wait for the card to finish the busy state
  */
@@ -117,13 +179,13 @@ static int mmc_test_wait_busy(struct mmc_test_card *test)
 		if (ret)
 			break;
 
-		if (!busy && !(cmd.resp[0] & R1_READY_FOR_DATA)) {
+		if (!busy && mmc_test_busy(&cmd)) {
 			busy = 1;
 			printk(KERN_INFO "%s: Warning: Host did not "
 				"wait for busy state to end.\n",
 				mmc_hostname(test->card->host));
 		}
-	} while (!(cmd.resp[0] & R1_READY_FOR_DATA));
+	} while (mmc_test_busy(&cmd));
 
 	return ret;
 }
@@ -170,6 +232,248 @@ static int mmc_test_buffer_transfer(struct mmc_test_card *test,
 	return 0;
 }
 
+static void mmc_test_free_mem(struct mmc_test_mem *mem)
+{
+	if (!mem)
+		return;
+	while (mem->cnt--)
+		__free_pages(mem->arr[mem->cnt].page,
+			mem->arr[mem->cnt].order);
+	kfree(mem->arr);
+	kfree(mem);
+}
+
+/*
+ * Allocate a lot of memory, preferably max_sz but at least min_sz. In case
+ * there isn't much memory do not exceed 1/16th total lowmem pages.
+ */ +static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz, + unsigned long max_sz) +{ + unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE); + unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE); + unsigned long page_cnt = 0; + unsigned long limit = nr_free_buffer_pages() >> 4; + struct mmc_test_mem *mem; + + if (max_page_cnt > limit) + max_page_cnt = limit; + if (max_page_cnt < min_page_cnt) + max_page_cnt = min_page_cnt; + + mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL); + if (!mem) + return NULL; + + mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_page_cnt, + GFP_KERNEL); + if (!mem->arr) + goto out_free; + + while (max_page_cnt) { + struct page *page; + unsigned int order; + gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN | + __GFP_NORETRY; + + order = get_order(max_page_cnt << PAGE_SHIFT); + while (1) { + page = alloc_pages(flags, order); + if (page || !order) + break; + order -= 1; + } + if (!page) { + if (page_cnt < min_page_cnt) + goto out_free; + break; + } + mem->arr[mem->cnt].page = page; + mem->arr[mem->cnt].order = order; + mem->cnt += 1; + if (max_page_cnt <= (1UL << order)) + break; + max_page_cnt -= 1UL << order; + page_cnt += 1UL << order; + } + + return mem; + +out_free: + mmc_test_free_mem(mem); + return NULL; +} + +/* + * Map memory into a scatterlist. Optionally allow the same memory to be + * mapped more than once. + */ +static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long sz, + struct scatterlist *sglist, int repeat, + unsigned int max_segs, unsigned int *sg_len) +{ + struct scatterlist *sg = NULL; + unsigned int i; + + sg_init_table(sglist, max_segs); + + *sg_len = 0; + do { + for (i = 0; i < mem->cnt; i++) { + unsigned long len = PAGE_SIZE << mem->arr[i].order; + + if (sz < len) + len = sz; + if (sg) + sg = sg_next(sg); + else + sg = sglist; + if (!sg) + return -EINVAL; + sg_set_page(sg, mem->arr[i].page, len, 0); + sz -= len; + *sg_len += 1; + if (!sz) + break; + } + } while (sz && repeat); + + if (sz) + return -EINVAL; + + if (sg) + sg_mark_end(sg); + + return 0; +} + +/* + * Map memory into a scatterlist so that no pages are contiguous. Allow the + * same memory to be mapped more than once. + */ +static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem, + unsigned long sz, + struct scatterlist *sglist, + unsigned int max_segs, + unsigned int *sg_len) +{ + struct scatterlist *sg = NULL; + unsigned int i = mem->cnt, cnt; + unsigned long len; + void *base, *addr, *last_addr = NULL; + + sg_init_table(sglist, max_segs); + + *sg_len = 0; + while (sz && i) { + base = page_address(mem->arr[--i].page); + cnt = 1 << mem->arr[i].order; + while (sz && cnt) { + addr = base + PAGE_SIZE * --cnt; + if (last_addr && last_addr + PAGE_SIZE == addr) + continue; + last_addr = addr; + len = PAGE_SIZE; + if (sz < len) + len = sz; + if (sg) + sg = sg_next(sg); + else + sg = sglist; + if (!sg) + return -EINVAL; + sg_set_page(sg, virt_to_page(addr), len, 0); + sz -= len; + *sg_len += 1; + } + } + + if (sg) + sg_mark_end(sg); + + return 0; +} + +/* + * Calculate transfer rate in bytes per second. + */ +static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts) +{ + uint64_t ns; + + ns = ts->tv_sec; + ns *= 1000000000; + ns += ts->tv_nsec; + + bytes *= 1000000000; + + while (ns > UINT_MAX) { + bytes >>= 1; + ns >>= 1; + } + + if (!ns) + return 0; + + do_div(bytes, (uint32_t)ns); + + return bytes; +} + +/* + * Print the transfer rate. 
+ */ +static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes, + struct timespec *ts1, struct timespec *ts2) +{ + unsigned int rate, sectors = bytes >> 9; + struct timespec ts; + + ts = timespec_sub(*ts2, *ts1); + + rate = mmc_test_rate(bytes, &ts); + + printk(KERN_INFO "%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu " + "seconds (%u kB/s, %u KiB/s)\n", + mmc_hostname(test->card->host), sectors, sectors >> 1, + (sectors == 1 ? ".5" : ""), (unsigned long)ts.tv_sec, + (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024); +} + +/* + * Print the average transfer rate. + */ +static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes, + unsigned int count, struct timespec *ts1, + struct timespec *ts2) +{ + unsigned int rate, sectors = bytes >> 9; + uint64_t tot = bytes * count; + struct timespec ts; + + ts = timespec_sub(*ts2, *ts1); + + rate = mmc_test_rate(tot, &ts); + + printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took " + "%lu.%09lu seconds (%u kB/s, %u KiB/s)\n", + mmc_hostname(test->card->host), count, sectors, count, + sectors >> 1, (sectors == 1 ? ".5" : ""), + (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec, + rate / 1000, rate / 1024); +} + +/* + * Return the card size in sectors. + */ +static unsigned int mmc_test_capacity(struct mmc_card *card) +{ + if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) + return card->ext_csd.sectors; + else + return card->csd.capacity << (card->csd.read_blkbits - 9); +} + /*******************************************************************/ /* Test preparation and cleanup */ /*******************************************************************/ @@ -893,8 +1197,419 @@ static int mmc_test_multi_read_high(struct mmc_test_card *test) return 0; } +#else + +static int mmc_test_no_highmem(struct mmc_test_card *test) +{ + printk(KERN_INFO "%s: Highmem not configured - test skipped\n", + mmc_hostname(test->card->host)); + return 0; +} + #endif /* CONFIG_HIGHMEM */ +/* + * Map sz bytes so that it can be transferred. + */ +static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz, + int max_scatter) +{ + struct mmc_test_area *t = &test->area; + + t->blocks = sz >> 9; + + if (max_scatter) { + return mmc_test_map_sg_max_scatter(t->mem, sz, t->sg, + t->max_segs, &t->sg_len); + } else { + return mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs, + &t->sg_len); + } +} + +/* + * Transfer bytes mapped by mmc_test_area_map(). + */ +static int mmc_test_area_transfer(struct mmc_test_card *test, + unsigned int dev_addr, int write) +{ + struct mmc_test_area *t = &test->area; + + return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr, + t->blocks, 512, write); +} + +/* + * Map and transfer bytes. + */ +static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz, + unsigned int dev_addr, int write, int max_scatter, + int timed) +{ + struct timespec ts1, ts2; + int ret; + + ret = mmc_test_area_map(test, sz, max_scatter); + if (ret) + return ret; + + if (timed) + getnstimeofday(&ts1); + + ret = mmc_test_area_transfer(test, dev_addr, write); + if (ret) + return ret; + + if (timed) + getnstimeofday(&ts2); + + if (timed) + mmc_test_print_rate(test, sz, &ts1, &ts2); + + return 0; +} + +/* + * Write the test area entirely. + */ +static int mmc_test_area_fill(struct mmc_test_card *test) +{ + return mmc_test_area_io(test, test->area.max_sz, test->area.dev_addr, + 1, 0, 0); +} + +/* + * Erase the test area entirely. 
+ */
+static int mmc_test_area_erase(struct mmc_test_card *test)
+{
+	struct mmc_test_area *t = &test->area;
+
+	if (!mmc_can_erase(test->card))
+		return 0;
+
+	return mmc_erase(test->card, t->dev_addr, test->area.max_sz >> 9,
+			 MMC_ERASE_ARG);
+}
+
+/*
+ * Cleanup struct mmc_test_area.
+ */
+static int mmc_test_area_cleanup(struct mmc_test_card *test)
+{
+	struct mmc_test_area *t = &test->area;
+
+	kfree(t->sg);
+	mmc_test_free_mem(t->mem);
+
+	return 0;
+}
+
+/*
+ * Initialize an area for testing large transfers. The size of the area is the
+ * preferred erase size which is a good size for optimal transfer speed. Note
+ * that it is typically 4MiB for modern cards. The test area is set to the
+ * middle of the card because cards may have different characteristics at the
+ * front (for FAT file system optimization). Optionally, the area is erased
+ * (if the card supports it) which may improve write performance. Optionally,
+ * the area is filled with data for subsequent read tests.
+ */
+static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
+{
+	struct mmc_test_area *t = &test->area;
+	unsigned long min_sz = 64 * 1024;
+	int ret;
+
+	ret = mmc_test_set_blksize(test, 512);
+	if (ret)
+		return ret;
+
+	if (test->card->pref_erase > TEST_AREA_MAX_SIZE >> 9)
+		t->max_sz = TEST_AREA_MAX_SIZE;
+	else
+		t->max_sz = (unsigned long)test->card->pref_erase << 9;
+	/*
+	 * Try to allocate enough memory for the whole area. Less is OK
+	 * because the same memory can be mapped into the scatterlist more than
+	 * once.
+	 */
+	t->mem = mmc_test_alloc_mem(min_sz, t->max_sz);
+	if (!t->mem)
+		return -ENOMEM;
+
+	t->max_segs = DIV_ROUND_UP(t->max_sz, PAGE_SIZE);
+	t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL);
+	if (!t->sg) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+
+	t->dev_addr = mmc_test_capacity(test->card) / 2;
+	t->dev_addr -= t->dev_addr % (t->max_sz >> 9);
+
+	if (erase) {
+		ret = mmc_test_area_erase(test);
+		if (ret)
+			goto out_free;
+	}
+
+	if (fill) {
+		ret = mmc_test_area_fill(test);
+		if (ret)
+			goto out_free;
+	}
+
+	return 0;
+
+out_free:
+	mmc_test_area_cleanup(test);
+	return ret;
+}
+
+/*
+ * Prepare for large transfers. Do not erase the test area.
+ */
+static int mmc_test_area_prepare(struct mmc_test_card *test)
+{
+	return mmc_test_area_init(test, 0, 0);
+}
+
+/*
+ * Prepare for large transfers. Do erase the test area.
+ */
+static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
+{
+	return mmc_test_area_init(test, 1, 0);
+}
+
+/*
+ * Prepare for large transfers. Erase and fill the test area.
+ */
+static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
+{
+	return mmc_test_area_init(test, 1, 1);
+}
+
+/*
+ * Test best-case performance. Best-case performance is expected from
+ * a single large transfer.
+ *
+ * An additional option (max_scatter) allows the measurement of the same
+ * transfer but with no contiguous pages in the scatter list. This tests
+ * the efficiency of DMA to handle scattered pages.
+ */
+static int mmc_test_best_performance(struct mmc_test_card *test, int write,
+				     int max_scatter)
+{
+	return mmc_test_area_io(test, test->area.max_sz, test->area.dev_addr,
+				write, max_scatter, 1);
+}
+
+/*
+ * Best-case read performance.
+ */
+static int mmc_test_best_read_performance(struct mmc_test_card *test)
+{
+	return mmc_test_best_performance(test, 0, 0);
+}
+
+/*
+ * Best-case write performance.
+ */ +static int mmc_test_best_write_performance(struct mmc_test_card *test) +{ + return mmc_test_best_performance(test, 1, 0); +} + +/* + * Best-case read performance into scattered pages. + */ +static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test) +{ + return mmc_test_best_performance(test, 0, 1); +} + +/* + * Best-case write performance from scattered pages. + */ +static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test) +{ + return mmc_test_best_performance(test, 1, 1); +} + +/* + * Single read performance by transfer size. + */ +static int mmc_test_profile_read_perf(struct mmc_test_card *test) +{ + unsigned long sz; + unsigned int dev_addr; + int ret; + + for (sz = 512; sz < test->area.max_sz; sz <<= 1) { + dev_addr = test->area.dev_addr + (sz >> 9); + ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1); + if (ret) + return ret; + } + dev_addr = test->area.dev_addr; + return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1); +} + +/* + * Single write performance by transfer size. + */ +static int mmc_test_profile_write_perf(struct mmc_test_card *test) +{ + unsigned long sz; + unsigned int dev_addr; + int ret; + + ret = mmc_test_area_erase(test); + if (ret) + return ret; + for (sz = 512; sz < test->area.max_sz; sz <<= 1) { + dev_addr = test->area.dev_addr + (sz >> 9); + ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1); + if (ret) + return ret; + } + ret = mmc_test_area_erase(test); + if (ret) + return ret; + dev_addr = test->area.dev_addr; + return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1); +} + +/* + * Single trim performance by transfer size. + */ +static int mmc_test_profile_trim_perf(struct mmc_test_card *test) +{ + unsigned long sz; + unsigned int dev_addr; + struct timespec ts1, ts2; + int ret; + + if (!mmc_can_trim(test->card)) + return RESULT_UNSUP_CARD; + + if (!mmc_can_erase(test->card)) + return RESULT_UNSUP_HOST; + + for (sz = 512; sz < test->area.max_sz; sz <<= 1) { + dev_addr = test->area.dev_addr + (sz >> 9); + getnstimeofday(&ts1); + ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG); + if (ret) + return ret; + getnstimeofday(&ts2); + mmc_test_print_rate(test, sz, &ts1, &ts2); + } + dev_addr = test->area.dev_addr; + getnstimeofday(&ts1); + ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG); + if (ret) + return ret; + getnstimeofday(&ts2); + mmc_test_print_rate(test, sz, &ts1, &ts2); + return 0; +} + +/* + * Consecutive read performance by transfer size. + */ +static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test) +{ + unsigned long sz; + unsigned int dev_addr, i, cnt; + struct timespec ts1, ts2; + int ret; + + for (sz = 512; sz <= test->area.max_sz; sz <<= 1) { + cnt = test->area.max_sz / sz; + dev_addr = test->area.dev_addr; + getnstimeofday(&ts1); + for (i = 0; i < cnt; i++) { + ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0); + if (ret) + return ret; + dev_addr += (sz >> 9); + } + getnstimeofday(&ts2); + mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2); + } + return 0; +} + +/* + * Consecutive write performance by transfer size. 
+ */ +static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test) +{ + unsigned long sz; + unsigned int dev_addr, i, cnt; + struct timespec ts1, ts2; + int ret; + + for (sz = 512; sz <= test->area.max_sz; sz <<= 1) { + ret = mmc_test_area_erase(test); + if (ret) + return ret; + cnt = test->area.max_sz / sz; + dev_addr = test->area.dev_addr; + getnstimeofday(&ts1); + for (i = 0; i < cnt; i++) { + ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0); + if (ret) + return ret; + dev_addr += (sz >> 9); + } + getnstimeofday(&ts2); + mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2); + } + return 0; +} + +/* + * Consecutive trim performance by transfer size. + */ +static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test) +{ + unsigned long sz; + unsigned int dev_addr, i, cnt; + struct timespec ts1, ts2; + int ret; + + if (!mmc_can_trim(test->card)) + return RESULT_UNSUP_CARD; + + if (!mmc_can_erase(test->card)) + return RESULT_UNSUP_HOST; + + for (sz = 512; sz <= test->area.max_sz; sz <<= 1) { + ret = mmc_test_area_erase(test); + if (ret) + return ret; + ret = mmc_test_area_fill(test); + if (ret) + return ret; + cnt = test->area.max_sz / sz; + dev_addr = test->area.dev_addr; + getnstimeofday(&ts1); + for (i = 0; i < cnt; i++) { + ret = mmc_erase(test->card, dev_addr, sz >> 9, + MMC_TRIM_ARG); + if (ret) + return ret; + dev_addr += (sz >> 9); + } + getnstimeofday(&ts2); + mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2); + } + return 0; +} + static const struct mmc_test_case mmc_test_cases[] = { { .name = "Basic write (no data verification)", @@ -1040,8 +1755,100 @@ static const struct mmc_test_case mmc_test_cases[] = { .cleanup = mmc_test_cleanup, }, +#else + + { + .name = "Highmem write", + .run = mmc_test_no_highmem, + }, + + { + .name = "Highmem read", + .run = mmc_test_no_highmem, + }, + + { + .name = "Multi-block highmem write", + .run = mmc_test_no_highmem, + }, + + { + .name = "Multi-block highmem read", + .run = mmc_test_no_highmem, + }, + #endif /* CONFIG_HIGHMEM */ + { + .name = "Best-case read performance", + .prepare = mmc_test_area_prepare_fill, + .run = mmc_test_best_read_performance, + .cleanup = mmc_test_area_cleanup, + }, + + { + .name = "Best-case write performance", + .prepare = mmc_test_area_prepare_erase, + .run = mmc_test_best_write_performance, + .cleanup = mmc_test_area_cleanup, + }, + + { + .name = "Best-case read performance into scattered pages", + .prepare = mmc_test_area_prepare_fill, + .run = mmc_test_best_read_perf_max_scatter, + .cleanup = mmc_test_area_cleanup, + }, + + { + .name = "Best-case write performance from scattered pages", + .prepare = mmc_test_area_prepare_erase, + .run = mmc_test_best_write_perf_max_scatter, + .cleanup = mmc_test_area_cleanup, + }, + + { + .name = "Single read performance by transfer size", + .prepare = mmc_test_area_prepare_fill, + .run = mmc_test_profile_read_perf, + .cleanup = mmc_test_area_cleanup, + }, + + { + .name = "Single write performance by transfer size", + .prepare = mmc_test_area_prepare, + .run = mmc_test_profile_write_perf, + .cleanup = mmc_test_area_cleanup, + }, + + { + .name = "Single trim performance by transfer size", + .prepare = mmc_test_area_prepare_fill, + .run = mmc_test_profile_trim_perf, + .cleanup = mmc_test_area_cleanup, + }, + + { + .name = "Consecutive read performance by transfer size", + .prepare = mmc_test_area_prepare_fill, + .run = mmc_test_profile_seq_read_perf, + .cleanup = mmc_test_area_cleanup, + }, + + { + .name = "Consecutive write performance by transfer size", + 
.prepare = mmc_test_area_prepare, + .run = mmc_test_profile_seq_write_perf, + .cleanup = mmc_test_area_cleanup, + }, + + { + .name = "Consecutive trim performance by transfer size", + .prepare = mmc_test_area_prepare, + .run = mmc_test_profile_seq_trim_perf, + .cleanup = mmc_test_area_cleanup, + }, + }; static DEFINE_MUTEX(mmc_test_lock); diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c index c77eb49eda0e..e876678176be 100644 --- a/drivers/mmc/card/queue.c +++ b/drivers/mmc/card/queue.c @@ -30,9 +30,9 @@ static int mmc_prep_request(struct request_queue *q, struct request *req) { /* - * We only like normal block requests. + * We only like normal block requests and discards. */ - if (req->cmd_type != REQ_TYPE_FS) { + if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) { blk_dump_rq_flags(req, "MMC bad request"); return BLKPREP_KILL; } @@ -130,6 +130,21 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock blk_queue_prep_rq(mq->queue, mmc_prep_request); blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN); queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue); + if (mmc_can_erase(card)) { + queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mq->queue); + mq->queue->limits.max_discard_sectors = UINT_MAX; + if (card->erased_byte == 0) + mq->queue->limits.discard_zeroes_data = 1; + if (!mmc_can_trim(card) && is_power_of_2(card->erase_size)) { + mq->queue->limits.discard_granularity = + card->erase_size << 9; + mq->queue->limits.discard_alignment = + card->erase_size << 9; + } + if (mmc_can_secure_erase_trim(card)) + queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, + mq->queue); + } #ifdef CONFIG_MMC_BLOCK_BOUNCE if (host->max_hw_segs == 1) { diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 83240faa1dc8..5db49b124ffa 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -1050,6 +1050,352 @@ void mmc_detect_change(struct mmc_host *host, unsigned long delay) EXPORT_SYMBOL(mmc_detect_change); +void mmc_init_erase(struct mmc_card *card) +{ + unsigned int sz; + + if (is_power_of_2(card->erase_size)) + card->erase_shift = ffs(card->erase_size) - 1; + else + card->erase_shift = 0; + + /* + * It is possible to erase an arbitrarily large area of an SD or MMC + * card. That is not desirable because it can take a long time + * (minutes) potentially delaying more important I/O, and also the + * timeout calculations become increasingly hugely over-estimated. + * Consequently, 'pref_erase' is defined as a guide to limit erases + * to that size and alignment. + * + * For SD cards that define Allocation Unit size, limit erases to one + * Allocation Unit at a time. For MMC cards that define High Capacity + * Erase Size, whether it is switched on or not, limit to that size. + * Otherwise just have a stab at a good value. For modern cards it + * will end up being 4MiB. Note that if the value is too small, it + * can end up taking longer to erase. 
+ */ + if (mmc_card_sd(card) && card->ssr.au) { + card->pref_erase = card->ssr.au; + card->erase_shift = ffs(card->ssr.au) - 1; + } else if (card->ext_csd.hc_erase_size) { + card->pref_erase = card->ext_csd.hc_erase_size; + } else { + sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11; + if (sz < 128) + card->pref_erase = 512 * 1024 / 512; + else if (sz < 512) + card->pref_erase = 1024 * 1024 / 512; + else if (sz < 1024) + card->pref_erase = 2 * 1024 * 1024 / 512; + else + card->pref_erase = 4 * 1024 * 1024 / 512; + if (card->pref_erase < card->erase_size) + card->pref_erase = card->erase_size; + else { + sz = card->pref_erase % card->erase_size; + if (sz) + card->pref_erase += card->erase_size - sz; + } + } +} + +static void mmc_set_mmc_erase_timeout(struct mmc_card *card, + struct mmc_command *cmd, + unsigned int arg, unsigned int qty) +{ + unsigned int erase_timeout; + + if (card->ext_csd.erase_group_def & 1) { + /* High Capacity Erase Group Size uses HC timeouts */ + if (arg == MMC_TRIM_ARG) + erase_timeout = card->ext_csd.trim_timeout; + else + erase_timeout = card->ext_csd.hc_erase_timeout; + } else { + /* CSD Erase Group Size uses write timeout */ + unsigned int mult = (10 << card->csd.r2w_factor); + unsigned int timeout_clks = card->csd.tacc_clks * mult; + unsigned int timeout_us; + + /* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */ + if (card->csd.tacc_ns < 1000000) + timeout_us = (card->csd.tacc_ns * mult) / 1000; + else + timeout_us = (card->csd.tacc_ns / 1000) * mult; + + /* + * ios.clock is only a target. The real clock rate might be + * less but not that much less, so fudge it by multiplying by 2. + */ + timeout_clks <<= 1; + timeout_us += (timeout_clks * 1000) / + (card->host->ios.clock / 1000); + + erase_timeout = timeout_us / 1000; + + /* + * Theoretically, the calculation could underflow so round up + * to 1ms in that case. + */ + if (!erase_timeout) + erase_timeout = 1; + } + + /* Multiplier for secure operations */ + if (arg & MMC_SECURE_ARGS) { + if (arg == MMC_SECURE_ERASE_ARG) + erase_timeout *= card->ext_csd.sec_erase_mult; + else + erase_timeout *= card->ext_csd.sec_trim_mult; + } + + erase_timeout *= qty; + + /* + * Ensure at least a 1 second timeout for SPI as per + * 'mmc_set_data_timeout()' + */ + if (mmc_host_is_spi(card->host) && erase_timeout < 1000) + erase_timeout = 1000; + + cmd->erase_timeout = erase_timeout; +} + +static void mmc_set_sd_erase_timeout(struct mmc_card *card, + struct mmc_command *cmd, unsigned int arg, + unsigned int qty) +{ + if (card->ssr.erase_timeout) { + /* Erase timeout specified in SD Status Register (SSR) */ + cmd->erase_timeout = card->ssr.erase_timeout * qty + + card->ssr.erase_offset; + } else { + /* + * Erase timeout not specified in SD Status Register (SSR) so + * use 250ms per write block. + */ + cmd->erase_timeout = 250 * qty; + } + + /* Must not be less than 1 second */ + if (cmd->erase_timeout < 1000) + cmd->erase_timeout = 1000; +} + +static void mmc_set_erase_timeout(struct mmc_card *card, + struct mmc_command *cmd, unsigned int arg, + unsigned int qty) +{ + if (mmc_card_sd(card)) + mmc_set_sd_erase_timeout(card, cmd, arg, qty); + else + mmc_set_mmc_erase_timeout(card, cmd, arg, qty); +} + +static int mmc_do_erase(struct mmc_card *card, unsigned int from, + unsigned int to, unsigned int arg) +{ + struct mmc_command cmd; + unsigned int qty = 0; + int err; + + /* + * qty is used to calculate the erase timeout which depends on how many + * erase groups (or allocation units in SD terminology) are affected. 
+	 * We count erasing part of an erase group as one erase group.
+	 * For SD, the allocation units are always a power of 2. For MMC, the
+	 * erase group size is almost certainly also a power of 2, but the
+	 * JEDEC standard does not seem to insist on that, so we fall back to
+	 * division in that case. SD may not specify an allocation unit size,
+	 * in which case the timeout is based on the number of write blocks.
+	 *
+	 * Note that the timeout for secure trim 2 will only be correct if the
+	 * number of erase groups specified is the same as the total of all
+	 * preceding secure trim 1 commands. Since the power may have been
+	 * lost since the secure trim 1 commands occurred, it is generally
+	 * impossible to calculate the secure trim 2 timeout correctly.
+	 */
+	if (card->erase_shift)
+		qty += ((to >> card->erase_shift) -
+			(from >> card->erase_shift)) + 1;
+	else if (mmc_card_sd(card))
+		qty += to - from + 1;
+	else
+		qty += ((to / card->erase_size) -
+			(from / card->erase_size)) + 1;
+
+	if (!mmc_card_blockaddr(card)) {
+		from <<= 9;
+		to <<= 9;
+	}
+
+	memset(&cmd, 0, sizeof(struct mmc_command));
+	if (mmc_card_sd(card))
+		cmd.opcode = SD_ERASE_WR_BLK_START;
+	else
+		cmd.opcode = MMC_ERASE_GROUP_START;
+	cmd.arg = from;
+	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+	err = mmc_wait_for_cmd(card->host, &cmd, 0);
+	if (err) {
+		printk(KERN_ERR "mmc_erase: group start error %d, "
+		       "status %#x\n", err, cmd.resp[0]);
+		err = -EINVAL;
+		goto out;
+	}
+
+	memset(&cmd, 0, sizeof(struct mmc_command));
+	if (mmc_card_sd(card))
+		cmd.opcode = SD_ERASE_WR_BLK_END;
+	else
+		cmd.opcode = MMC_ERASE_GROUP_END;
+	cmd.arg = to;
+	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+	err = mmc_wait_for_cmd(card->host, &cmd, 0);
+	if (err) {
+		printk(KERN_ERR "mmc_erase: group end error %d, status %#x\n",
+		       err, cmd.resp[0]);
+		err = -EINVAL;
+		goto out;
+	}
+
+	memset(&cmd, 0, sizeof(struct mmc_command));
+	cmd.opcode = MMC_ERASE;
+	cmd.arg = arg;
+	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+	mmc_set_erase_timeout(card, &cmd, arg, qty);
+	err = mmc_wait_for_cmd(card->host, &cmd, 0);
+	if (err) {
+		printk(KERN_ERR "mmc_erase: erase error %d, status %#x\n",
+		       err, cmd.resp[0]);
+		err = -EIO;
+		goto out;
+	}
+
+	if (mmc_host_is_spi(card->host))
+		goto out;
+
+	do {
+		memset(&cmd, 0, sizeof(struct mmc_command));
+		cmd.opcode = MMC_SEND_STATUS;
+		cmd.arg = card->rca << 16;
+		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+		/* Do not retry else we can't see errors */
+		err = mmc_wait_for_cmd(card->host, &cmd, 0);
+		if (err || (cmd.resp[0] & 0xFDF92000)) {
+			printk(KERN_ERR "error %d requesting status %#x\n",
+				err, cmd.resp[0]);
+			err = -EIO;
+			goto out;
+		}
+	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
+		 R1_CURRENT_STATE(cmd.resp[0]) == 7);
+out:
+	return err;
+}
+
+/**
+ * mmc_erase - erase sectors.
+ * @card: card to erase
+ * @from: first sector to erase
+ * @nr: number of sectors to erase
+ * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
+ *
+ * Caller must claim host before calling this function.
+ */ +int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr, + unsigned int arg) +{ + unsigned int rem, to = from + nr; + + if (!(card->host->caps & MMC_CAP_ERASE) || + !(card->csd.cmdclass & CCC_ERASE)) + return -EOPNOTSUPP; + + if (!card->erase_size) + return -EOPNOTSUPP; + + if (mmc_card_sd(card) && arg != MMC_ERASE_ARG) + return -EOPNOTSUPP; + + if ((arg & MMC_SECURE_ARGS) && + !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)) + return -EOPNOTSUPP; + + if ((arg & MMC_TRIM_ARGS) && + !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)) + return -EOPNOTSUPP; + + if (arg == MMC_SECURE_ERASE_ARG) { + if (from % card->erase_size || nr % card->erase_size) + return -EINVAL; + } + + if (arg == MMC_ERASE_ARG) { + rem = from % card->erase_size; + if (rem) { + rem = card->erase_size - rem; + from += rem; + if (nr > rem) + nr -= rem; + else + return 0; + } + rem = nr % card->erase_size; + if (rem) + nr -= rem; + } + + if (nr == 0) + return 0; + + to = from + nr; + + if (to <= from) + return -EINVAL; + + /* 'from' and 'to' are inclusive */ + to -= 1; + + return mmc_do_erase(card, from, to, arg); +} +EXPORT_SYMBOL(mmc_erase); + +int mmc_can_erase(struct mmc_card *card) +{ + if ((card->host->caps & MMC_CAP_ERASE) && + (card->csd.cmdclass & CCC_ERASE) && card->erase_size) + return 1; + return 0; +} +EXPORT_SYMBOL(mmc_can_erase); + +int mmc_can_trim(struct mmc_card *card) +{ + if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) + return 1; + return 0; +} +EXPORT_SYMBOL(mmc_can_trim); + +int mmc_can_secure_erase_trim(struct mmc_card *card) +{ + if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) + return 1; + return 0; +} +EXPORT_SYMBOL(mmc_can_secure_erase_trim); + +int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from, + unsigned int nr) +{ + if (!card->erase_size) + return 0; + if (from % card->erase_size || nr % card->erase_size) + return 0; + return 1; +} +EXPORT_SYMBOL(mmc_erase_group_aligned); void mmc_rescan(struct work_struct *work) { diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h index a811c52a1659..9d9eef50e5d1 100644 --- a/drivers/mmc/core/core.h +++ b/drivers/mmc/core/core.h @@ -29,6 +29,8 @@ struct mmc_bus_ops { void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops); void mmc_detach_bus(struct mmc_host *host); +void mmc_init_erase(struct mmc_card *card); + void mmc_set_chip_select(struct mmc_host *host, int mode); void mmc_set_clock(struct mmc_host *host, unsigned int hz); void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode); diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index ccba3869c029..6909a54c39be 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -108,13 +108,23 @@ static int mmc_decode_cid(struct mmc_card *card) return 0; } +static void mmc_set_erase_size(struct mmc_card *card) +{ + if (card->ext_csd.erase_group_def & 1) + card->erase_size = card->ext_csd.hc_erase_size; + else + card->erase_size = card->csd.erase_size; + + mmc_init_erase(card); +} + /* * Given a 128-bit response, decode to our card CSD structure. 
*/ static int mmc_decode_csd(struct mmc_card *card) { struct mmc_csd *csd = &card->csd; - unsigned int e, m; + unsigned int e, m, a, b; u32 *resp = card->raw_csd; /* @@ -152,6 +162,13 @@ static int mmc_decode_csd(struct mmc_card *card) csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4); csd->write_partial = UNSTUFF_BITS(resp, 21, 1); + if (csd->write_blkbits >= 9) { + a = UNSTUFF_BITS(resp, 42, 5); + b = UNSTUFF_BITS(resp, 37, 5); + csd->erase_size = (a + 1) * (b + 1); + csd->erase_size <<= csd->write_blkbits - 9; + } + return 0; } @@ -261,8 +278,30 @@ static int mmc_read_ext_csd(struct mmc_card *card) if (sa_shift > 0 && sa_shift <= 0x17) card->ext_csd.sa_timeout = 1 << ext_csd[EXT_CSD_S_A_TIMEOUT]; + card->ext_csd.erase_group_def = + ext_csd[EXT_CSD_ERASE_GROUP_DEF]; + card->ext_csd.hc_erase_timeout = 300 * + ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]; + card->ext_csd.hc_erase_size = + ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10; + } + + if (card->ext_csd.rev >= 4) { + card->ext_csd.sec_trim_mult = + ext_csd[EXT_CSD_SEC_TRIM_MULT]; + card->ext_csd.sec_erase_mult = + ext_csd[EXT_CSD_SEC_ERASE_MULT]; + card->ext_csd.sec_feature_support = + ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]; + card->ext_csd.trim_timeout = 300 * + ext_csd[EXT_CSD_TRIM_MULT]; } + if (ext_csd[EXT_CSD_ERASED_MEM_CONT]) + card->erased_byte = 0xFF; + else + card->erased_byte = 0x0; + out: kfree(ext_csd); @@ -274,6 +313,8 @@ MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1], MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1], card->raw_csd[2], card->raw_csd[3]); MMC_DEV_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year); +MMC_DEV_ATTR(erase_size, "%u\n", card->erase_size << 9); +MMC_DEV_ATTR(preferred_erase_size, "%u\n", card->pref_erase << 9); MMC_DEV_ATTR(fwrev, "0x%x\n", card->cid.fwrev); MMC_DEV_ATTR(hwrev, "0x%x\n", card->cid.hwrev); MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid); @@ -285,6 +326,8 @@ static struct attribute *mmc_std_attrs[] = { &dev_attr_cid.attr, &dev_attr_csd.attr, &dev_attr_date.attr, + &dev_attr_erase_size.attr, + &dev_attr_preferred_erase_size.attr, &dev_attr_fwrev.attr, &dev_attr_hwrev.attr, &dev_attr_manfid.attr, @@ -421,6 +464,8 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, err = mmc_read_ext_csd(card); if (err) goto free_card; + /* Erase size depends on CSD and Extended CSD */ + mmc_set_erase_size(card); } /* diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index e6d7d9fab446..0f5241085557 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -119,6 +119,13 @@ static int mmc_decode_csd(struct mmc_card *card) csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3); csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4); csd->write_partial = UNSTUFF_BITS(resp, 21, 1); + + if (UNSTUFF_BITS(resp, 46, 1)) { + csd->erase_size = 1; + } else if (csd->write_blkbits >= 9) { + csd->erase_size = UNSTUFF_BITS(resp, 39, 7) + 1; + csd->erase_size <<= csd->write_blkbits - 9; + } break; case 1: /* @@ -147,6 +154,7 @@ static int mmc_decode_csd(struct mmc_card *card) csd->r2w_factor = 4; /* Unused */ csd->write_blkbits = 9; csd->write_partial = 0; + csd->erase_size = 1; break; default: printk(KERN_ERR "%s: unrecognised CSD structure version %d\n", @@ -154,6 +162,8 @@ static int mmc_decode_csd(struct mmc_card *card) return -EINVAL; } + card->erase_size = csd->erase_size; + return 0; } @@ -179,10 +189,68 @@ static int mmc_decode_scr(struct mmc_card *card) scr->sda_vsn = UNSTUFF_BITS(resp, 56, 4); scr->bus_widths = UNSTUFF_BITS(resp, 48, 4); + if 
(UNSTUFF_BITS(resp, 55, 1))
+		card->erased_byte = 0xFF;
+	else
+		card->erased_byte = 0x0;
+
 	return 0;
 }
 
 /*
+ * Fetch and process SD Status register.
+ */
+static int mmc_read_ssr(struct mmc_card *card)
+{
+	unsigned int au, es, et, eo;
+	int err, i;
+	u32 *ssr;
+
+	if (!(card->csd.cmdclass & CCC_APP_SPEC)) {
+		printk(KERN_WARNING "%s: card lacks mandatory SD Status "
+			"function.\n", mmc_hostname(card->host));
+		return 0;
+	}
+
+	ssr = kmalloc(64, GFP_KERNEL);
+	if (!ssr)
+		return -ENOMEM;
+
+	err = mmc_app_sd_status(card, ssr);
+	if (err) {
+		printk(KERN_WARNING "%s: problem reading SD Status "
+			"register.\n", mmc_hostname(card->host));
+		err = 0;
+		goto out;
+	}
+
+	for (i = 0; i < 16; i++)
+		ssr[i] = be32_to_cpu(ssr[i]);
+
+	/*
+	 * UNSTUFF_BITS only works with four u32s so we have to offset the
+	 * bitfield positions accordingly.
+	 */
+	au = UNSTUFF_BITS(ssr, 428 - 384, 4);
+	if (au > 0 && au <= 9) {
+		card->ssr.au = 1 << (au + 4);
+		es = UNSTUFF_BITS(ssr, 408 - 384, 16);
+		et = UNSTUFF_BITS(ssr, 402 - 384, 6);
+		eo = UNSTUFF_BITS(ssr, 400 - 384, 2);
+		if (es && et) {
+			card->ssr.erase_timeout = (et * 1000) / es;
+			card->ssr.erase_offset = eo * 1000;
+		}
+	} else {
+		printk(KERN_WARNING "%s: SD Status: Invalid Allocation Unit "
+			"size.\n", mmc_hostname(card->host));
+	}
+out:
+	kfree(ssr);
+	return err;
+}
+
+/*
  * Fetches and decodes switch information
  */
 static int mmc_read_switch(struct mmc_card *card)
@@ -289,6 +357,8 @@ MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
 		card->raw_csd[2], card->raw_csd[3]);
 MMC_DEV_ATTR(scr, "%08x%08x\n", card->raw_scr[0], card->raw_scr[1]);
 MMC_DEV_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year);
+MMC_DEV_ATTR(erase_size, "%u\n", card->erase_size << 9);
+MMC_DEV_ATTR(preferred_erase_size, "%u\n", card->pref_erase << 9);
 MMC_DEV_ATTR(fwrev, "0x%x\n", card->cid.fwrev);
 MMC_DEV_ATTR(hwrev, "0x%x\n", card->cid.hwrev);
 MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
@@ -302,6 +372,8 @@ static struct attribute *sd_std_attrs[] = {
 	&dev_attr_csd.attr,
 	&dev_attr_scr.attr,
 	&dev_attr_date.attr,
+	&dev_attr_erase_size.attr,
+	&dev_attr_preferred_erase_size.attr,
 	&dev_attr_fwrev.attr,
 	&dev_attr_hwrev.attr,
 	&dev_attr_manfid.attr,
@@ -397,6 +469,16 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
 		return err;
 
 	/*
+	 * Fetch and process SD Status register.
+	 */
+	err = mmc_read_ssr(card);
+	if (err)
+		return err;
+
+	/* Erase init depends on CSD and SSR */
+	mmc_init_erase(card);
+
+	/*
 	 * Fetch switch information from card.
*/ err = mmc_read_switch(card); diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c index 63772e7e7608..797cdb5887fd 100644 --- a/drivers/mmc/core/sd_ops.c +++ b/drivers/mmc/core/sd_ops.c @@ -346,3 +346,51 @@ int mmc_sd_switch(struct mmc_card *card, int mode, int group, return 0; } +int mmc_app_sd_status(struct mmc_card *card, void *ssr) +{ + int err; + struct mmc_request mrq; + struct mmc_command cmd; + struct mmc_data data; + struct scatterlist sg; + + BUG_ON(!card); + BUG_ON(!card->host); + BUG_ON(!ssr); + + /* NOTE: caller guarantees ssr is heap-allocated */ + + err = mmc_app_cmd(card->host, card); + if (err) + return err; + + memset(&mrq, 0, sizeof(struct mmc_request)); + memset(&cmd, 0, sizeof(struct mmc_command)); + memset(&data, 0, sizeof(struct mmc_data)); + + mrq.cmd = &cmd; + mrq.data = &data; + + cmd.opcode = SD_APP_SD_STATUS; + cmd.arg = 0; + cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_ADTC; + + data.blksz = 64; + data.blocks = 1; + data.flags = MMC_DATA_READ; + data.sg = &sg; + data.sg_len = 1; + + sg_init_one(&sg, ssr, 64); + + mmc_set_data_timeout(&data, card); + + mmc_wait_for_req(card->host, &mrq); + + if (cmd.error) + return cmd.error; + if (data.error) + return data.error; + + return 0; +} diff --git a/drivers/mmc/core/sd_ops.h b/drivers/mmc/core/sd_ops.h index 9742d8a30664..ffc2305d905f 100644 --- a/drivers/mmc/core/sd_ops.h +++ b/drivers/mmc/core/sd_ops.h @@ -19,6 +19,7 @@ int mmc_send_relative_addr(struct mmc_host *host, unsigned int *rca); int mmc_app_send_scr(struct mmc_card *card, u32 *scr); int mmc_sd_switch(struct mmc_card *card, int mode, int group, u8 value, u8 *resp); +int mmc_app_sd_status(struct mmc_card *card, void *ssr); #endif diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index 7b0f3ef50f96..1145ea0792e6 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c @@ -1536,6 +1536,7 @@ static int __devexit mmc_spi_remove(struct spi_device *spi) #if defined(CONFIG_OF) static struct of_device_id mmc_spi_of_match_table[] __devinitdata = { { .compatible = "mmc-spi-slot", }, + {}, }; #endif diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index dc57ef6aef4f..4a8776f8afdd 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -28,6 +28,7 @@ #include <linux/clk.h> #include <linux/mmc/host.h> #include <linux/mmc/core.h> +#include <linux/mmc/mmc.h> #include <linux/io.h> #include <linux/semaphore.h> #include <linux/gpio.h> @@ -78,6 +79,7 @@ #define INT_EN_MASK 0x307F0033 #define BWR_ENABLE (1 << 4) #define BRR_ENABLE (1 << 5) +#define DTO_ENABLE (1 << 20) #define INIT_STREAM (1 << 1) #define DP_SELECT (1 << 21) #define DDIR (1 << 4) @@ -523,7 +525,8 @@ static void omap_hsmmc_stop_clock(struct omap_hsmmc_host *host) dev_dbg(mmc_dev(host->mmc), "MMC Clock is not stoped\n"); } -static void omap_hsmmc_enable_irq(struct omap_hsmmc_host *host) +static void omap_hsmmc_enable_irq(struct omap_hsmmc_host *host, + struct mmc_command *cmd) { unsigned int irq_mask; @@ -532,6 +535,10 @@ static void omap_hsmmc_enable_irq(struct omap_hsmmc_host *host) else irq_mask = INT_EN_MASK; + /* Disable timeout for erases */ + if (cmd->opcode == MMC_ERASE) + irq_mask &= ~DTO_ENABLE; + OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); OMAP_HSMMC_WRITE(host->base, ISE, irq_mask); OMAP_HSMMC_WRITE(host->base, IE, irq_mask); @@ -782,7 +789,7 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd, mmc_hostname(host->mmc), cmd->opcode, cmd->arg); host->cmd = cmd; - 
omap_hsmmc_enable_irq(host);
+	omap_hsmmc_enable_irq(host, cmd);
 	host->response_busy = 0;
 	if (cmd->flags & MMC_RSP_PRESENT) {
@@ -2107,7 +2114,7 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
 	mmc->max_seg_size = mmc->max_req_size;
 
 	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
-		     MMC_CAP_WAIT_WHILE_BUSY;
+		     MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE;
 
 	switch (mmc_slot(host).wires) {
 	case 8:
diff --git a/drivers/mmc/host/sdhci-of-core.c b/drivers/mmc/host/sdhci-of-core.c
index dd1bdd168e66..c51b71174c1d 100644
--- a/drivers/mmc/host/sdhci-of-core.c
+++ b/drivers/mmc/host/sdhci-of-core.c
@@ -85,14 +85,14 @@ void sdhci_be32bs_writeb(struct sdhci_host *host, u8 val, int reg)
 
 #ifdef CONFIG_PM
 
-static int sdhci_of_suspend(struct of_device *ofdev, pm_message_t state)
+static int sdhci_of_suspend(struct platform_device *ofdev, pm_message_t state)
 {
 	struct sdhci_host *host = dev_get_drvdata(&ofdev->dev);
 
 	return mmc_suspend_host(host->mmc);
 }
 
-static int sdhci_of_resume(struct of_device *ofdev)
+static int sdhci_of_resume(struct platform_device *ofdev)
 {
 	struct sdhci_host *host = dev_get_drvdata(&ofdev->dev);
 
@@ -115,7 +115,7 @@ static bool __devinit sdhci_of_wp_inverted(struct device_node *np)
 	return machine_is(mpc837x_rdb) || machine_is(mpc837x_mds);
 }
 
-static int __devinit sdhci_of_probe(struct of_device *ofdev,
+static int __devinit sdhci_of_probe(struct platform_device *ofdev,
 				    const struct of_device_id *match)
 {
 	struct device_node *np = ofdev->dev.of_node;
@@ -183,7 +183,7 @@ err_addr_map:
 	return ret;
 }
 
-static int __devexit sdhci_of_remove(struct of_device *ofdev)
 {
 	struct sdhci_host *host = dev_get_drvdata(&ofdev->dev);
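For a concrete sense of the SSR-based timeout above: mmc_read_ssr() stores a per-allocation-unit erase time plus a fixed offset, and mmc_set_sd_erase_timeout() scales the per-unit cost by the number of units being erased. A standalone sketch of the same arithmetic, using made-up register values (real cards report their own):

	/*
	 * Worked example of the SD erase-timeout arithmetic from
	 * mmc_read_ssr() and mmc_set_sd_erase_timeout().
	 * All field values here are hypothetical.
	 */
	#include <stdio.h>

	int main(void)
	{
		unsigned int au = 7;  /* AU_SIZE code: AU = 1 << (7 + 4) = 2048 sectors (1 MiB) */
		unsigned int es = 2;  /* ERASE_SIZE: ERASE_TIMEOUT covers 2 AUs */
		unsigned int et = 3;  /* ERASE_TIMEOUT: 3 seconds for ERASE_SIZE AUs */
		unsigned int eo = 1;  /* ERASE_OFFSET: 1 second of fixed overhead */
		unsigned int qty = 4; /* erasing a range spanning 4 AUs */

		/* As in mmc_read_ssr(): per-AU cost and fixed offset, in ms */
		unsigned int erase_timeout = (et * 1000) / es;	/* 1500 ms per AU */
		unsigned int erase_offset = eo * 1000;		/* 1000 ms */

		/* As in mmc_set_sd_erase_timeout(): scale by quantity, clamp to 1 s */
		unsigned int timeout = erase_timeout * qty + erase_offset;
		if (timeout < 1000)
			timeout = 1000;

		printf("AU = %u sectors, erase timeout = %u ms\n",
		       1u << (au + 4), timeout);	/* prints 2048 sectors, 7000 ms */
		return 0;
	}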