path: root/drivers/mmc/mmc_block.c
author    Pierre Ossman    2006-10-04 02:15:41 -0700
committer Linus Torvalds   2006-10-04 07:55:15 -0700
commit    176f00ffed3ef94a198326fbf6a5db64f1cf73ad (patch)
tree      b436c7dad050c7c86333953c3371a55ac472e795 /drivers/mmc/mmc_block.c
parent    7104e2d5a85b4b786d6a63568beffe1e185547bb (diff)
[PATCH] mmc: properly use the new multi block-write error handling
Use the new multi block-write error reporting flag and properly tell the
block layer how much data was transferred before the error.

Signed-off-by: Pierre Ossman <drzeus@drzeus.cx>
Cc: Russell King <rmk@arm.linux.org.uk>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'drivers/mmc/mmc_block.c')
-rw-r--r--  drivers/mmc/mmc_block.c  |  24
1 file changed, 17 insertions(+), 7 deletions(-)
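In outline, the new error path acknowledges the blocks that were written successfully before failing the rest of the request. The sketch below is illustrative only, not part of the patch: the helper name mmc_blk_finish_failed_rq is hypothetical, and it assumes the 2.6-era block layer API used by this driver (end_that_request_chunk() taking the request, an uptodate flag and a byte count) together with the struct definitions already present in drivers/mmc/mmc_block.c.

/*
 * Illustrative sketch only -- not part of the patch.  Assumes the
 * 2.6-era block API and the types defined in drivers/mmc/mmc_block.c;
 * the helper name mmc_blk_finish_failed_rq is hypothetical.
 */
static void mmc_blk_finish_failed_rq(struct mmc_blk_data *md,
                                     struct request *req,
                                     struct mmc_blk_request *brq)
{
        struct mmc_card *card = md->queue.card;
        int ret = 1;

        /*
         * On a failed write, hosts that set MMC_CAP_MULTIWRITE report an
         * accurate bytes_xfered, so first acknowledge the blocks that did
         * reach the card.  Reads are never partially completed here.
         */
        if (rq_data_dir(req) != READ &&
            (card->host->caps & MMC_CAP_MULTIWRITE)) {
                spin_lock_irq(&md->lock);
                ret = end_that_request_chunk(req, 1, brq->data.bytes_xfered);
                spin_unlock_irq(&md->lock);
        }

        /* Fail whatever remains of the request, chunk by chunk. */
        spin_lock_irq(&md->lock);
        while (ret)
                ret = end_that_request_chunk(req, 0,
                                             req->current_nr_sectors << 9);
        spin_unlock_irq(&md->lock);
}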
diff --git a/drivers/mmc/mmc_block.c b/drivers/mmc/mmc_block.c
index db0e8ad439a5..c1293f1bda87 100644
--- a/drivers/mmc/mmc_block.c
+++ b/drivers/mmc/mmc_block.c
@@ -158,13 +158,13 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 {
         struct mmc_blk_data *md = mq->data;
         struct mmc_card *card = md->queue.card;
+        struct mmc_blk_request brq;
         int ret;
 
         if (mmc_card_claim_host(card))
                 goto cmd_err;
 
         do {
-                struct mmc_blk_request brq;
                 struct mmc_command cmd;
                 u32 readcmd, writecmd;
 
@@ -278,17 +278,27 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 cmd_err:
         mmc_card_release_host(card);
 
+        ret = 1;
+
         /*
-         * This is a little draconian, but until we get proper
-         * error handling sorted out here, its the best we can
-         * do - especially as some hosts have no idea how much
-         * data was transferred before the error occurred.
+         * For writes and where the host claims to support proper
+         * error reporting, we first ok the successful blocks.
+         *
+         * For reads we just fail the entire chunk as that should
+         * be safe in all cases.
          */
+        if (rq_data_dir(req) != READ &&
+            (card->host->caps & MMC_CAP_MULTIWRITE)) {
+                spin_lock_irq(&md->lock);
+                ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered);
+                spin_unlock_irq(&md->lock);
+        }
+
         spin_lock_irq(&md->lock);
-        do {
+        while (ret) {
                 ret = end_that_request_chunk(req, 0,
                                 req->current_nr_sectors << 9);
-        } while (ret);
+        }
 
         add_disk_randomness(req->rq_disk);
         blkdev_dequeue_request(req);