author		Martin K. Petersen	2009-11-10 11:50:21 +0100
committer	Jens Axboe		2009-11-10 11:50:21 +0100
commit		86b37281411cf1e9bc0a6b5406c45edb7bd9ea5d
tree		729db57dd52054af1bc16b4afb131093dfc9d255 /block/blk-settings.c
parent		cf7c25cf91f632a3528669fc0876e1fc8355ff9b
block: Expose discard granularity
While SSDs track block usage on a per-sector basis, RAID arrays often
have allocation blocks that are bigger. Allow the discard granularity
and alignment to be set and teach the topology stacking logic how to
handle them.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
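A low-level driver that knows its device's internal allocation unit can
advertise it through the new queue_limits fields when it configures its
request queue. The fragment below is only an illustrative sketch:
my_disk_set_discard_limits() and the 64KB/8MB figures are invented for
the example; the discard_granularity and discard_alignment members come
from this patch, and blk_queue_max_discard_sectors() is assumed to be
the existing helper for capping discard request size.

#include <linux/blkdev.h>

/* Hypothetical driver hook: advertise discard topology for a device
 * that unmaps space in 64KB allocation blocks starting at LBA 0. */
static void my_disk_set_discard_limits(struct request_queue *q)
{
	q->limits.discard_granularity = 64 * 1024;	/* bytes */
	q->limits.discard_alignment = 0;		/* offset of first block */

	/* Allow up to 8MB (in 512-byte sectors) per discard request */
	blk_queue_max_discard_sectors(q, (8 * 1024 * 1024) >> 9);
}

A stacking driver (DM/MD) does not set these by hand; it inherits and
combines them from its component devices via blk_stack_limits(), as the
hunks below show.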
Diffstat (limited to 'block/blk-settings.c')
-rw-r--r--	block/blk-settings.c	46
1 file changed, 36 insertions, 10 deletions
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 66d4aa8799b7..7f986cafacd5 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -96,7 +96,10 @@ void blk_set_default_limits(struct queue_limits *lim)
 	lim->max_segment_size = MAX_SEGMENT_SIZE;
 	lim->max_sectors = BLK_DEF_MAX_SECTORS;
 	lim->max_hw_sectors = INT_MAX;
-	lim->max_discard_sectors = SAFE_MAX_SECTORS;
+	lim->max_discard_sectors = 0;
+	lim->discard_granularity = 0;
+	lim->discard_alignment = 0;
+	lim->discard_misaligned = 0;
 	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
 	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
 	lim->alignment_offset = 0;
@@ -488,6 +491,16 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 }
 EXPORT_SYMBOL(blk_queue_stack_limits);
 
+static unsigned int lcm(unsigned int a, unsigned int b)
+{
+	if (a && b)
+		return (a * b) / gcd(a, b);
+	else if (b)
+		return b;
+
+	return a;
+}
+
 /**
  * blk_stack_limits - adjust queue_limits for stacked devices
  * @t:	the stacking driver limits (top)
@@ -502,6 +515,10 @@ EXPORT_SYMBOL(blk_queue_stack_limits);
 int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 		     sector_t offset)
 {
+	int ret;
+
+	ret = 0;
+
 	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
 	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
 	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
@@ -531,7 +548,13 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 	if (offset &&
 	    (offset & (b->physical_block_size - 1)) != b->alignment_offset) {
 		t->misaligned = 1;
-		return -1;
+		ret = -1;
+	}
+
+	if (offset &&
+	    (offset & (b->discard_granularity - 1)) != b->discard_alignment) {
+		t->discard_misaligned = 1;
+		ret = -1;
 	}
 
 	/* If top has no alignment offset, inherit from bottom */
@@ -539,23 +562,26 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 	if (!t->alignment_offset)
 		t->alignment_offset =
 			b->alignment_offset & (b->physical_block_size - 1);
 
+	if (!t->discard_alignment)
+		t->discard_alignment =
+			b->discard_alignment & (b->discard_granularity - 1);
+
 	/* Top device aligned on logical block boundary? */
 	if (t->alignment_offset & (t->logical_block_size - 1)) {
 		t->misaligned = 1;
-		return -1;
+		ret = -1;
 	}
 
-	/* Find lcm() of optimal I/O size */
-	if (t->io_opt && b->io_opt)
-		t->io_opt = (t->io_opt * b->io_opt) / gcd(t->io_opt, b->io_opt);
-	else if (b->io_opt)
-		t->io_opt = b->io_opt;
+	/* Find lcm() of optimal I/O size and granularity */
+	t->io_opt = lcm(t->io_opt, b->io_opt);
+	t->discard_granularity = lcm(t->discard_granularity,
+				     b->discard_granularity);
 
 	/* Verify that optimal I/O size is a multiple of io_min */
 	if (t->io_min && t->io_opt % t->io_min)
-		return -1;
+		ret = -1;
 
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL(blk_stack_limits);
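To make the stacking arithmetic concrete, here is a rough, self-contained
user-space sketch that mirrors the patch's lcm() helper and its
power-of-two mask checks. The struct discard_limits and stack_discard()
names and the 64KB/4KB figures are invented for illustration; they are
not kernel interfaces.

#include <stdio.h>

struct discard_limits {
	unsigned int granularity;	/* discard allocation unit, bytes */
	unsigned int alignment;		/* offset of first unit, bytes */
	int misaligned;
};

static unsigned int gcd(unsigned int a, unsigned int b)
{
	while (b) {
		unsigned int r = a % b;
		a = b;
		b = r;
	}
	return a;
}

/* Same convention as the patch's lcm(): 0 means "no preference" */
static unsigned int lcm(unsigned int a, unsigned int b)
{
	if (a && b)
		return (a * b) / gcd(a, b);
	else if (b)
		return b;
	return a;
}

/* Combine bottom device 'b' into top device 't', stacked at byte 'offset' */
static void stack_discard(struct discard_limits *t,
			  const struct discard_limits *b,
			  unsigned long long offset)
{
	/* Mask test as in the patch; assumes a power-of-two granularity */
	if (offset && (offset & (b->granularity - 1)) != b->alignment)
		t->misaligned = 1;

	if (!t->alignment)
		t->alignment = b->alignment & (b->granularity - 1);

	t->granularity = lcm(t->granularity, b->granularity);
}

int main(void)
{
	struct discard_limits top  = { 0 };			/* e.g. a DM/MD device */
	struct discard_limits raid = { .granularity = 65536 };	/* 64KB allocation blocks */
	struct discard_limits ssd  = { .granularity = 4096 };	/* 4KB blocks */

	stack_discard(&top, &raid, 0);	/* inherits 64KB */
	stack_discard(&top, &ssd, 0);	/* lcm(64KB, 4KB) = 64KB */
	printf("stacked granularity: %u bytes, misaligned: %d\n",
	       top.granularity, top.misaligned);
	return 0;
}

As in blk_stack_limits(), a granularity of 0 means "no preference": the
top device inherits the first non-zero granularity it sees and widens it
to the least common multiple as further devices are stacked.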