From 929e27252e8ca69363f81f26af5eaba62cb4c572 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Wed, 30 Mar 2011 12:22:23 +0200
Subject: ide: ide_requeue_and_plug() reinstate "always plug" behaviour

We see stalls if we don't always ensure that the queue gets run
again. Even if rq == NULL, we could have other pending requests
in the queue.

Signed-off-by: Jens Axboe
---
 drivers/ide/ide-io.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'drivers')

diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 0e406d73b2c8..ca27d3090aeb 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -570,8 +570,7 @@ void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
 	/* Use 3ms as that was the old plug delay */
-	if (rq)
-		blk_delay_queue(q, 3);
+	blk_delay_queue(q, 3);
 }
 
 static int drive_is_ready(ide_drive_t *drive)
-- 
cgit v1.2.3


From a63a5cf84dac7a23a57c800eea5734701e7d3c04 Mon Sep 17 00:00:00 2001
From: Mike Snitzer
Date: Fri, 1 Apr 2011 21:02:31 +0200
Subject: dm: improve block integrity support

The current block integrity (DIF/DIX) support in DM is verifying that
all devices' integrity profiles match during DM device resume (which
is past the point of no return). To some degree that is unavoidable
(stacked DM devices force this late checking). But for most DM
devices (which aren't stacking on other DM devices) the ideal time to
verify all integrity profiles match is during table load.

Introduce the notion of an "initialized" integrity profile: a profile
that was blk_integrity_register()'d with a non-NULL 'blk_integrity'
template. Add blk_integrity_is_initialized() to allow checking if a
profile was initialized.

Update DM integrity support to:
- check all devices with _initialized_ integrity profiles match
  during table load; uninitialized profiles (e.g. for underlying DM
  device(s) of a stacked DM device) are ignored.
- disallow a table load that would result in an integrity profile that
  conflicts with a DM device's existing (in-use) integrity profile
- avoid clearing an existing integrity profile
- validate all integrity profiles match during resume; but if they
  don't all we can do is report the mismatch (during resume we're
  past the point of no return)

Signed-off-by: Mike Snitzer
Cc: Martin K. Petersen
Signed-off-by: Jens Axboe
---
 block/blk-integrity.c  |  12 +++++-
 drivers/md/dm-table.c  | 114 ++++++++++++++++++++++++++++++++++---------------
 include/linux/blkdev.h |   2 +
 3 files changed, 93 insertions(+), 35 deletions(-)

(limited to 'drivers')

diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index 54bcba6c02a7..129b9e209a3b 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -30,6 +30,8 @@
 
 static struct kmem_cache *integrity_cachep;
 
+static const char *bi_unsupported_name = "unsupported";
+
 /**
  * blk_rq_count_integrity_sg - Count number of integrity scatterlist elements
  * @q:		request queue
@@ -358,6 +360,14 @@ static struct kobj_type integrity_ktype = {
 	.release	= blk_integrity_release,
 };
 
+bool blk_integrity_is_initialized(struct gendisk *disk)
+{
+	struct blk_integrity *bi = blk_get_integrity(disk);
+
+	return (bi && bi->name && strcmp(bi->name, bi_unsupported_name) != 0);
+}
+EXPORT_SYMBOL(blk_integrity_is_initialized);
+
 /**
  * blk_integrity_register - Register a gendisk as being integrity-capable
  * @disk:	struct gendisk pointer to make integrity-aware
@@ -407,7 +417,7 @@ int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
 		bi->get_tag_fn = template->get_tag_fn;
 		bi->tag_size = template->tag_size;
 	} else
-		bi->name = "unsupported";
+		bi->name = bi_unsupported_name;
 
 	return 0;
 }
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 416d4e258df6..cb8380c9767f 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -926,21 +926,81 @@ static int dm_table_build_index(struct dm_table *t)
 	return r;
 }
 
+/*
+ * Get a disk whose integrity profile reflects the table's profile.
+ * If %match_all is true, all devices' profiles must match.
+ * If %match_all is false, all devices must at least have an
+ * allocated integrity profile; but uninitialized is ok.
+ * Returns NULL if integrity support was inconsistent or unavailable.
+ */
+static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t,
+						    bool match_all)
+{
+	struct list_head *devices = dm_table_get_devices(t);
+	struct dm_dev_internal *dd = NULL;
+	struct gendisk *prev_disk = NULL, *template_disk = NULL;
+
+	list_for_each_entry(dd, devices, list) {
+		template_disk = dd->dm_dev.bdev->bd_disk;
+		if (!blk_get_integrity(template_disk))
+			goto no_integrity;
+		if (!match_all && !blk_integrity_is_initialized(template_disk))
+			continue; /* skip uninitialized profiles */
+		else if (prev_disk &&
+			 blk_integrity_compare(prev_disk, template_disk) < 0)
+			goto no_integrity;
+		prev_disk = template_disk;
+	}
+
+	return template_disk;
+
+no_integrity:
+	if (prev_disk)
+		DMWARN("%s: integrity not set: %s and %s profile mismatch",
+		       dm_device_name(t->md),
+		       prev_disk->disk_name,
+		       template_disk->disk_name);
+	return NULL;
+}
+
 /*
  * Register the mapped device for blk_integrity support if
- * the underlying devices support it.
+ * the underlying devices have an integrity profile. But all devices
+ * may not have matching profiles (checking all devices isn't reliable
+ * during table load because this table may use other DM device(s) which
+ * must be resumed before they will have an initialized integrity profile).
+ * Stacked DM devices force a 2 stage integrity profile validation:
+ * 1 - during load, validate all initialized integrity profiles match
+ * 2 - during resume, validate all integrity profiles match
  */
 static int dm_table_prealloc_integrity(struct dm_table *t, struct mapped_device *md)
 {
-	struct list_head *devices = dm_table_get_devices(t);
-	struct dm_dev_internal *dd;
+	struct gendisk *template_disk = NULL;
 
-	list_for_each_entry(dd, devices, list)
-		if (bdev_get_integrity(dd->dm_dev.bdev)) {
-			t->integrity_supported = 1;
-			return blk_integrity_register(dm_disk(md), NULL);
-		}
+	template_disk = dm_table_get_integrity_disk(t, false);
+	if (!template_disk)
+		return 0;
+	if (!blk_integrity_is_initialized(dm_disk(md))) {
+		t->integrity_supported = 1;
+		return blk_integrity_register(dm_disk(md), NULL);
+	}
+
+	/*
+	 * If DM device already has an initialized integrity
+	 * profile the new profile should not conflict.
+	 */
+	if (blk_integrity_is_initialized(template_disk) &&
+	    blk_integrity_compare(dm_disk(md), template_disk) < 0) {
+		DMWARN("%s: conflict with existing integrity profile: "
+		       "%s profile mismatch",
+		       dm_device_name(t->md),
+		       template_disk->disk_name);
+		return 1;
+	}
+
+	/* Preserve existing initialized integrity profile */
+	t->integrity_supported = 1;
 
 	return 0;
 }
@@ -1094,41 +1154,27 @@ combine_limits:
 
 /*
  * Set the integrity profile for this device if all devices used have
- * matching profiles.
+ * matching profiles. We're quite deep in the resume path but still
+ * don't know if all devices (particularly DM devices this device
+ * may be stacked on) have matching profiles. Even if the profiles
+ * don't match we have no way to fail (to resume) at this point.
  */
 static void dm_table_set_integrity(struct dm_table *t)
 {
-	struct list_head *devices = dm_table_get_devices(t);
-	struct dm_dev_internal *prev = NULL, *dd = NULL;
+	struct gendisk *template_disk = NULL;
 
 	if (!blk_get_integrity(dm_disk(t->md)))
 		return;
 
-	list_for_each_entry(dd, devices, list) {
-		if (prev &&
-		    blk_integrity_compare(prev->dm_dev.bdev->bd_disk,
-					  dd->dm_dev.bdev->bd_disk) < 0) {
-			DMWARN("%s: integrity not set: %s and %s mismatch",
-			       dm_device_name(t->md),
-			       prev->dm_dev.bdev->bd_disk->disk_name,
-			       dd->dm_dev.bdev->bd_disk->disk_name);
-			goto no_integrity;
-		}
-		prev = dd;
+	template_disk = dm_table_get_integrity_disk(t, true);
+	if (!template_disk &&
+	    blk_integrity_is_initialized(dm_disk(t->md))) {
+		DMWARN("%s: device no longer has a valid integrity profile",
+		       dm_device_name(t->md));
+		return;
 	}
-
-	if (!prev || !bdev_get_integrity(prev->dm_dev.bdev))
-		goto no_integrity;
-
 	blk_integrity_register(dm_disk(t->md),
-			       bdev_get_integrity(prev->dm_dev.bdev));
-
-	return;
-
-no_integrity:
-	blk_integrity_register(dm_disk(t->md), NULL);
-
-	return;
+			       blk_get_integrity(template_disk));
 }
 
 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 16a902f099ac..32176cc8e715 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1206,6 +1206,7 @@ struct blk_integrity {
	struct kobject		kobj;
 };
 
+extern bool blk_integrity_is_initialized(struct gendisk *);
 extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
 extern void blk_integrity_unregister(struct gendisk *);
 extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
@@ -1262,6 +1263,7 @@ queue_max_integrity_segments(struct request_queue *q)
 #define queue_max_integrity_segments(a)	(0)
 #define blk_integrity_merge_rq(a, b, c)	(0)
 #define blk_integrity_merge_bio(a, b, c)	(0)
+#define blk_integrity_is_initialized(a)	(0)
 
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
 
-- 
cgit v1.2.3


From 782b86e2656762382ae1c2686d8d5c91f7d5eacf Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Tue, 5 Apr 2011 03:29:57 +0200
Subject: ide: always ensure that blk_delay_queue() is called if we have pending IO

Just because we are not requeuing a request does not mean that some aren't
pending. So always issue a blk_delay_queue() if either we are requeueing
OR there's pending IO.

This fixes a boot problem for some IDE boxes.

Signed-off-by: Jens Axboe
---
 drivers/ide/ide-io.c | 42 +++++++++++++++++++++---------------------
 1 file changed, 21 insertions(+), 21 deletions(-)

(limited to 'drivers')

diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index ca27d3090aeb..177db6d5b2f5 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -430,6 +430,26 @@ static inline void ide_unlock_host(struct ide_host *host)
 	}
 }
 
+static void __ide_requeue_and_plug(struct request_queue *q, struct request *rq)
+{
+	if (rq)
+		blk_requeue_request(q, rq);
+	if (rq || blk_peek_request(q)) {
+		/* Use 3ms as that was the old plug delay */
+		blk_delay_queue(q, 3);
+	}
+}
+
+void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
+{
+	struct request_queue *q = drive->queue;
+	unsigned long flags;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	__ide_requeue_and_plug(q, rq);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
 /*
  * Issue a new request to a device.
  */
@@ -550,27 +570,7 @@ plug_device:
 		ide_unlock_host(host);
 plug_device_2:
 	spin_lock_irq(q->queue_lock);
-
-	if (rq) {
-		blk_requeue_request(q, rq);
-		blk_delay_queue(q, queue_run_ms);
-	}
-}
-
-void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
-{
-	struct request_queue *q = drive->queue;
-	unsigned long flags;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-
-	if (rq)
-		blk_requeue_request(q, rq);
-
-	spin_unlock_irqrestore(q->queue_lock, flags);
-
-	/* Use 3ms as that was the old plug delay */
-	blk_delay_queue(q, 3);
+	__ide_requeue_and_plug(q, rq);
 }
 
 static int drive_is_ready(ide_drive_t *drive)
-- 
cgit v1.2.3
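
The rule the two IDE patches above converge on (run the queue again whenever a request was requeued or anything else is still pending, rather than only when rq != NULL) can be modelled outside the kernel. Below is a minimal user-space sketch, not kernel code: fake_queue, queue_needs_delay_kick() and the pending counter are invented stand-ins for the request queue, __ide_requeue_and_plug() and blk_peek_request().

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-in for a request queue: just a count of pending requests. */
struct fake_queue {
	int pending;
};

/*
 * Mirrors the predicate in __ide_requeue_and_plug(): schedule a delayed
 * queue run when a request was requeued OR anything else is still queued.
 * The old code only checked rq, which is the stall the patches fix.
 */
static bool queue_needs_delay_kick(const struct fake_queue *q, bool requeued_rq)
{
	return requeued_rq || q->pending > 0;
}

int main(void)
{
	struct fake_queue q = { .pending = 2 };

	/* rq == NULL, but two requests are still pending: kick the queue. */
	printf("kick: %s\n", queue_needs_delay_kick(&q, false) ? "yes" : "no");

	q.pending = 0;
	/* nothing requeued and nothing pending: no delayed run is needed. */
	printf("kick: %s\n", queue_needs_delay_kick(&q, false) ? "yes" : "no");
	return 0;
}

With rq == NULL and two requests still queued, the old rq-only check would have skipped blk_delay_queue() and stalled; the combined predicate still kicks the queue.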
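
The two-stage integrity validation described in the dm commit message above can be modelled the same way. This is a sketch under invented names: fake_disk, get_integrity_disk() and the profile strings are hypothetical stand-ins for gendisk, dm_table_get_integrity_disk() and the blk_integrity profile, and strcmp() stands in for blk_integrity_compare(); the real logic is in the drivers/md/dm-table.c hunks above.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Invented stand-in for a member device and its integrity profile. */
struct fake_disk {
	const char *name;
	const char *profile;	/* NULL, "unsupported" (uninitialized), or a profile name */
};

static bool profile_is_initialized(const struct fake_disk *d)
{
	/* models blk_integrity_is_initialized(): allocated and not "unsupported" */
	return d->profile && strcmp(d->profile, "unsupported") != 0;
}

/*
 * Models dm_table_get_integrity_disk(): return a disk whose profile can act
 * as the table's template, or NULL if support is inconsistent.  With
 * match_all == false (table load) uninitialized profiles are skipped; with
 * match_all == true (resume) every profile must match.
 */
static const struct fake_disk *get_integrity_disk(const struct fake_disk *disks,
						  size_t n, bool match_all)
{
	const struct fake_disk *prev = NULL, *tmpl = NULL;
	size_t i;

	for (i = 0; i < n; i++) {
		tmpl = &disks[i];
		if (!tmpl->profile)
			return NULL;		/* no integrity support at all */
		if (!match_all && !profile_is_initialized(tmpl))
			continue;		/* stage 1: ignore uninitialized */
		if (prev && strcmp(prev->profile, tmpl->profile) != 0)
			return NULL;		/* profiles conflict */
		prev = tmpl;
	}
	return tmpl;
}

int main(void)
{
	/* one initialized member plus a stacked DM member that is not yet resumed */
	const struct fake_disk disks[] = {
		{ "sda",  "T10-DIF-TYPE1-CRC" },
		{ "dm-1", "unsupported" },
	};
	const struct fake_disk *d;

	d = get_integrity_disk(disks, 2, false);	/* stage 1: table load */
	printf("load:   %s\n", d ? "consistent" : "inconsistent");

	d = get_integrity_disk(disks, 2, true);		/* stage 2: resume */
	printf("resume: %s\n", d ? "consistent" : "inconsistent");
	return 0;
}

At table load the uninitialized "dm-1" profile is skipped, so the table is accepted; at resume the same set of devices is reported as inconsistent, which matches the commit message's note that during resume all that can be done is report the mismatch.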