author     Martin K. Petersen    2010-02-26 00:20:39 -0500
committer  Jens Axboe            2010-02-26 13:58:08 +0100
commit     8a78362c4eefc1deddbefe2c7f38aabbc2429d6b (patch)
tree       c095d95af1aec0f9cee5975b1dcdc6bc1d17d401 /drivers
parent     086fa5ff0854c676ec333760f4c0154b3b242616 (diff)
block: Consolidate phys_segment and hw_segment limits
Except for SCSI, no device drivers distinguish between physical and
hardware segment limits. Consolidate the two into a single segment
limit.
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'drivers')
28 files changed, 33 insertions, 62 deletions
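The conversion below is mechanical: each pair of blk_queue_max_phys_segments()/blk_queue_max_hw_segments() calls collapses into a single blk_queue_max_segments() call. A minimal sketch of the same conversion for a hypothetical driver follows (the mydrv names and the MYDRV_SG_LIMIT value are illustrative assumptions, not code from this commit); where the two old limits differed, the new single limit should be their minimum, as the scsi_lib.c hunk below does with min_t():

    #include <linux/blkdev.h>

    /* Illustrative scatter-gather limit for a hypothetical driver. */
    #define MYDRV_SG_LIMIT 64

    static void mydrv_setup_queue(struct request_queue *q)
    {
            /* Before this patch the same limit was expressed twice:
             *   blk_queue_max_phys_segments(q, MYDRV_SG_LIMIT);
             *   blk_queue_max_hw_segments(q, MYDRV_SG_LIMIT);
             * Now a single call sets the consolidated limit.
             */
            blk_queue_max_segments(q, MYDRV_SG_LIMIT);
    }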
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 0c82d335c55d..684fe04dbbb7 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -772,7 +772,7 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
 	}

 	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
-	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
+	blk_queue_max_segments(sdev->request_queue, sg_tablesize);
 	ata_port_printk(ap, KERN_INFO,
 		"DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
 		(unsigned long long)*ap->host->dev->dma_mask,
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 1c0cd35e1913..459f1bc25a7b 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -2534,7 +2534,7 @@ static bool DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller)
   blk_queue_bounce_limit(RequestQueue, Controller->BounceBufferLimit);
   RequestQueue->queuedata = Controller;
-  blk_queue_max_hw_segments(RequestQueue, Controller->DriverScatterGatherLimit);
-  blk_queue_max_phys_segments(RequestQueue, Controller->DriverScatterGatherLimit);
+  blk_queue_max_segments(RequestQueue, Controller->DriverScatterGatherLimit);
   blk_queue_max_hw_sectors(RequestQueue, Controller->MaxBlocksPerCommand);
   disk->queue = RequestQueue;
   sprintf(disk->disk_name, "rd/c%dd%d", Controller->ControllerNumber, n);
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 030e52d72254..a29e69418a03 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1797,10 +1797,7 @@ static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
 	blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask);

 	/* This is a hardware imposed limit. */
-	blk_queue_max_hw_segments(disk->queue, h->maxsgentries);
-
-	/* This is a limit in the driver and could be eliminated. */
-	blk_queue_max_phys_segments(disk->queue, h->maxsgentries);
+	blk_queue_max_segments(disk->queue, h->maxsgentries);

 	blk_queue_max_hw_sectors(disk->queue, h->cciss_max_sectors);
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index 6422651ec364..91d11631cec9 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -448,11 +448,8 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
 	blk_queue_bounce_limit(q, hba[i]->pci_dev->dma_mask);

 	/* This is a hardware imposed limit. */
-	blk_queue_max_hw_segments(q, SG_MAX);
+	blk_queue_max_segments(q, SG_MAX);

-	/* This is a driver limit and could be eliminated. */
-	blk_queue_max_phys_segments(q, SG_MAX);
-
 	init_timer(&hba[i]->timer);
 	hba[i]->timer.expires = jiffies + IDA_TIMER;
 	hba[i]->timer.data = (unsigned long)hba[i];
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 9b55e64196fc..4df3b40b1057 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -710,8 +710,7 @@ void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_seg_s) __mu
 	max_seg_s = min(queue_max_sectors(b) * queue_logical_block_size(b), max_seg_s);

 	blk_queue_max_hw_sectors(q, max_seg_s >> 9);
-	blk_queue_max_phys_segments(q, max_segments ? max_segments : MAX_PHYS_SEGMENTS);
-	blk_queue_max_hw_segments(q, max_segments ? max_segments : MAX_HW_SEGMENTS);
+	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
 	blk_queue_max_segment_size(q, max_seg_s);
 	blk_queue_logical_block_size(q, 512);
 	blk_queue_segment_boundary(q, PAGE_SIZE-1);
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index ea54ea393553..ddb4f9abd480 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -956,8 +956,7 @@ static int __init pf_init(void)
 		return -ENOMEM;
 	}

-	blk_queue_max_phys_segments(pf_queue, cluster);
-	blk_queue_max_hw_segments(pf_queue, cluster);
+	blk_queue_max_segments(pf_queue, cluster);

 	for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
 		struct gendisk *disk = pf->disk;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 6e1daa24da2f..b72935b8f203 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -950,14 +950,14 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
 static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
 {
 	if ((pd->settings.size << 9) / CD_FRAMESIZE
-	    <= queue_max_phys_segments(q)) {
+	    <= queue_max_segments(q)) {
 		/*
 		 * The cdrom device can handle one segment/frame
 		 */
 		clear_bit(PACKET_MERGE_SEGS, &pd->flags);
 		return 0;
 	} else if ((pd->settings.size << 9) / PAGE_SIZE
-		   <= queue_max_phys_segments(q)) {
+		   <= queue_max_segments(q)) {
 		/*
 		 * We can handle this case at the expense of some extra memory
 		 * copies during write operations
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index 9cd1a4a542b8..bc95469d33c1 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -482,8 +482,7 @@ static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev)
 	blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH,
 			  ps3disk_prepare_flush);

-	blk_queue_max_phys_segments(queue, -1);
-	blk_queue_max_hw_segments(queue, -1);
+	blk_queue_max_segments(queue, -1);
 	blk_queue_max_segment_size(queue, dev->bounce_size);

 	gendisk = alloc_disk(PS3DISK_MINORS);
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index 6416b262934b..83ebb390b164 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -751,8 +751,7 @@ static int __devinit ps3vram_probe(struct ps3_system_bus_device *dev)
 	priv->queue = queue;
 	queue->queuedata = dev;
 	blk_queue_make_request(queue, ps3vram_make_request);
-	blk_queue_max_phys_segments(queue, MAX_PHYS_SEGMENTS);
-	blk_queue_max_hw_segments(queue, MAX_HW_SEGMENTS);
+	blk_queue_max_segments(queue, BLK_MAX_SEGMENTS);
 	blk_queue_max_segment_size(queue, BLK_MAX_SEGMENT_SIZE);
 	blk_queue_max_hw_sectors(queue, BLK_SAFE_MAX_SECTORS);
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index dd30cddd0f7f..48e8fee9f2d4 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -691,8 +691,7 @@ static int probe_disk(struct vdc_port *port)
 	port->disk = g;

-	blk_queue_max_hw_segments(q, port->ring_cookies);
-	blk_queue_max_phys_segments(q, port->ring_cookies);
+	blk_queue_max_segments(q, port->ring_cookies);
 	blk_queue_max_hw_sectors(q, port->max_xfer_size);
 	g->major = vdc_major;
 	g->first_minor = port->vio.vdev->dev_no << PARTITION_SHIFT;
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 7bd7b2f83116..b70f0fca9a42 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -1518,8 +1518,7 @@ static int carm_init_disks(struct carm_host *host)
 			break;
 		}
 		disk->queue = q;
-		blk_queue_max_hw_segments(q, CARM_MAX_REQ_SG);
-		blk_queue_max_phys_segments(q, CARM_MAX_REQ_SG);
+		blk_queue_max_segments(q, CARM_MAX_REQ_SG);
 		blk_queue_segment_boundary(q, CARM_SG_BOUNDARY);
 		q->queuedata = port;
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index 352ea24d66e8..2e889838e819 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -2320,8 +2320,7 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum)
 	disk->queue = q;

 	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
-	blk_queue_max_hw_segments(q, UB_MAX_REQ_SG);
-	blk_queue_max_phys_segments(q, UB_MAX_REQ_SG);
+	blk_queue_max_segments(q, UB_MAX_REQ_SG);
 	blk_queue_segment_boundary(q, 0xffffffff);	/* Dubious. */
 	blk_queue_max_hw_sectors(q, UB_MAX_SECTORS);
 	blk_queue_logical_block_size(q, lun->capacity.bsize);
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
index d44ece7d7b7c..c12b31362ac6 100644
--- a/drivers/block/viodasd.c
+++ b/drivers/block/viodasd.c
@@ -471,8 +471,7 @@ retry:
 	}

 	d->disk = g;
-	blk_queue_max_hw_segments(q, VIOMAXBLOCKDMA);
-	blk_queue_max_phys_segments(q, VIOMAXBLOCKDMA);
+	blk_queue_max_segments(q, VIOMAXBLOCKDMA);
 	blk_queue_max_hw_sectors(q, VIODASD_MAXSECTORS);
 	g->major = VIODASD_MAJOR;
 	g->first_minor = dev_no << PARTITION_SHIFT;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index f9861aaa1fef..9c09694b2520 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -353,8 +353,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
 	blk_queue_max_segment_size(rq, PAGE_SIZE);

 	/* Ensure a merged request will fit in a single I/O ring slot. */
-	blk_queue_max_phys_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
-	blk_queue_max_hw_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
+	blk_queue_max_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);

 	/* Make sure buffer addresses are sector-aligned. */
 	blk_queue_dma_alignment(rq, 511);
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index e789e6c9a422..03c71f7698cb 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -741,7 +741,7 @@ static int __devinit probe_gdrom_setupqueue(void)
 {
 	blk_queue_logical_block_size(gd.gdrom_rq, GDROM_HARD_SECTOR);
 	/* using DMA so memory will need to be contiguous */
-	blk_queue_max_hw_segments(gd.gdrom_rq, 1);
+	blk_queue_max_segments(gd.gdrom_rq, 1);
 	/* set a large max size to get most from DMA */
 	blk_queue_max_segment_size(gd.gdrom_rq, 0x40000);
 	gd.disk->queue = gd.gdrom_rq;
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index b1dfd23eb832..cc435be0bc13 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -616,8 +616,7 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	gendisk->first_minor = deviceno;
 	strncpy(gendisk->disk_name, c->name, sizeof(gendisk->disk_name));
-	blk_queue_max_hw_segments(q, 1);
-	blk_queue_max_phys_segments(q, 1);
+	blk_queue_max_segments(q, 1);
 	blk_queue_max_hw_sectors(q, 4096 / 512);
 	gendisk->queue = q;
 	gendisk->fops = &viocd_fops;
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 1ec8b31277bd..f8c1ae6ad74c 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -790,8 +790,7 @@ static int ide_init_queue(ide_drive_t *drive)
 		max_sg_entries >>= 1;
 #endif /* CONFIG_PCI */

-	blk_queue_max_hw_segments(q, max_sg_entries);
-	blk_queue_max_phys_segments(q, max_sg_entries);
+	blk_queue_max_segments(q, max_sg_entries);

 	/* assign drive queue */
 	drive->queue = q;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index ceb24afdc147..509c8f3dd9a5 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3739,7 +3739,7 @@ static int bio_fits_rdev(struct bio *bi)
 	if ((bi->bi_size>>9) > queue_max_sectors(q))
 		return 0;
 	blk_recount_segments(q, bi);
-	if (bi->bi_phys_segments > queue_max_phys_segments(q))
+	if (bi->bi_phys_segments > queue_max_segments(q))
 		return 0;

 	if (q->merge_bvec_fn)
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index 44d4178c4c15..972b87069d55 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -1227,8 +1227,7 @@ static int mspro_block_init_disk(struct memstick_dev *card)
 	blk_queue_bounce_limit(msb->queue, limit);
 	blk_queue_max_hw_sectors(msb->queue, MSPRO_BLOCK_MAX_PAGES);
-	blk_queue_max_phys_segments(msb->queue, MSPRO_BLOCK_MAX_SEGS);
-	blk_queue_max_hw_segments(msb->queue, MSPRO_BLOCK_MAX_SEGS);
+	blk_queue_max_segments(msb->queue, MSPRO_BLOCK_MAX_SEGS);
 	blk_queue_max_segment_size(msb->queue,
 				   MSPRO_BLOCK_MAX_PAGES * msb->page_size);
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index d033cfdb516f..2658b1484a2c 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -1065,9 +1065,8 @@ static int i2o_block_probe(struct device *dev)
 	queue = gd->queue;
 	queue->queuedata = i2o_blk_dev;

-	blk_queue_max_phys_segments(queue, I2O_MAX_PHYS_SEGMENTS);
 	blk_queue_max_hw_sectors(queue, max_sectors);
-	blk_queue_max_hw_segments(queue, i2o_sg_tablesize(c, body_size));
+	blk_queue_max_segments(queue, i2o_sg_tablesize(c, body_size));

 	osm_debug("max sectors = %d\n", queue->max_sectors);
 	osm_debug("phys segments = %d\n", queue->max_phys_segments);
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 09b633d5657b..381fe032caa1 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -155,8 +155,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 		if (mq->bounce_buf) {
 			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
 			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
-			blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
-			blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
+			blk_queue_max_segments(mq->queue, bouncesz / 512);
 			blk_queue_max_segment_size(mq->queue, bouncesz);

 			mq->sg = kmalloc(sizeof(struct scatterlist),
@@ -182,8 +181,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 		blk_queue_bounce_limit(mq->queue, limit);
 		blk_queue_max_hw_sectors(mq->queue,
 			min(host->max_blk_count, host->max_req_size / 512));
-		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
-		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
+		blk_queue_max_segments(mq->queue, host->max_hw_segs);
 		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

 		mq->sg = kmalloc(sizeof(struct scatterlist) *
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 14b1e25b9dcf..8831e9308d05 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2130,8 +2130,7 @@ static void dasd_setup_queue(struct dasd_block *block)
 	blk_queue_logical_block_size(block->request_queue, block->bp_block);
 	max = block->base->discipline->max_blocks << block->s2b_shift;
 	blk_queue_max_hw_sectors(block->request_queue, max);
-	blk_queue_max_phys_segments(block->request_queue, -1L);
-	blk_queue_max_hw_segments(block->request_queue, -1L);
+	blk_queue_max_segments(block->request_queue, -1L);
 	/* with page sized segments we can translate each segement into
 	 * one idaw/tidaw */
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index 509ed056fddd..097da8ce6be6 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -223,8 +223,7 @@ tapeblock_setup_device(struct tape_device * device)
 	blk_queue_logical_block_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE);
 	blk_queue_max_hw_sectors(blkdat->request_queue, TAPEBLOCK_MAX_SEC);
-	blk_queue_max_phys_segments(blkdat->request_queue, -1L);
-	blk_queue_max_hw_segments(blkdat->request_queue, -1L);
+	blk_queue_max_segments(blkdat->request_queue, -1L);
 	blk_queue_max_segment_size(blkdat->request_queue, -1L);
 	blk_queue_segment_boundary(blkdat->request_queue, -1L);
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 87b536a97cb4..732f6d35b4a8 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -4195,7 +4195,7 @@ static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
 		if (tgt->service_parms.class3_parms[0] & 0x80000000)
 			rport->supported_classes |= FC_COS_CLASS3;
 		if (rport->rqst_q)
-			blk_queue_max_hw_segments(rport->rqst_q, 1);
+			blk_queue_max_segments(rport->rqst_q, 1);
 	} else
 		tgt_dbg(tgt, "rport add failed\n");
 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
@@ -4669,7 +4669,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	}

 	if (shost_to_fc_host(shost)->rqst_q)
-		blk_queue_max_hw_segments(shost_to_fc_host(shost)->rqst_q, 1);
+		blk_queue_max_segments(shost_to_fc_host(shost)->rqst_q, 1);
 	dev_set_drvdata(dev, vhost);
 	spin_lock(&ibmvfc_driver_lock);
 	list_add_tail(&vhost->queue, &ibmvfc_head);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index ac3cca74bdfb..f8fbf47377ae 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1624,8 +1624,8 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
 	/*
 	 * this limit is imposed by hardware restrictions
 	 */
-	blk_queue_max_hw_segments(q, shost->sg_tablesize);
-	blk_queue_max_phys_segments(q, SCSI_MAX_SG_CHAIN_SEGMENTS);
+	blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
+					SCSI_MAX_SG_CHAIN_SEGMENTS));

 	blk_queue_max_hw_sectors(q, shost->max_sectors);
 	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 040f751809ea..c996d98636f3 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -287,8 +287,7 @@ sg_open(struct inode *inode, struct file *filp)
 	if (list_empty(&sdp->sfds)) {	/* no existing opens on this device */
 		sdp->sgdebug = 0;
 		q = sdp->device->request_queue;
-		sdp->sg_tablesize = min(queue_max_hw_segments(q),
-					queue_max_phys_segments(q));
+		sdp->sg_tablesize = queue_max_segments(q);
 	}
 	if ((sfp = sg_add_sfp(sdp, dev)))
 		filp->private_data = sfp;
@@ -1376,8 +1375,7 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
 	sdp->device = scsidp;
 	INIT_LIST_HEAD(&sdp->sfds);
 	init_waitqueue_head(&sdp->o_excl_wait);
-	sdp->sg_tablesize = min(queue_max_hw_segments(q),
-				queue_max_phys_segments(q));
+	sdp->sg_tablesize = queue_max_segments(q);
 	sdp->index = k;
 	kref_init(&sdp->d_ref);
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index d04ea9a6f673..f67d1a159aad 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -3983,8 +3983,7 @@ static int st_probe(struct device *dev)
 		return -ENODEV;
 	}

-	i = min(queue_max_hw_segments(SDp->request_queue),
-		queue_max_phys_segments(SDp->request_queue));
+	i = queue_max_segments(SDp->request_queue);
 	if (st_max_sg_segs < i)
 		i = st_max_sg_segs;
 	buffer = new_tape_buffer((SDp->host)->unchecked_isa_dma, i);
diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c
index 62b282844a53..45d908114d11 100644
--- a/drivers/staging/hv/blkvsc_drv.c
+++ b/drivers/staging/hv/blkvsc_drv.c
@@ -363,10 +363,7 @@ static int blkvsc_probe(struct device *device)
 	blkdev->gd->queue = blk_init_queue(blkvsc_request, &blkdev->lock);

 	blk_queue_max_segment_size(blkdev->gd->queue, PAGE_SIZE);
-	blk_queue_max_phys_segments(blkdev->gd->queue,
-			MAX_MULTIPAGE_BUFFER_COUNT);
-	blk_queue_max_hw_segments(blkdev->gd->queue,
-			MAX_MULTIPAGE_BUFFER_COUNT);
+	blk_queue_max_segments(blkdev->gd->queue, MAX_MULTIPAGE_BUFFER_COUNT);
 	blk_queue_segment_boundary(blkdev->gd->queue, PAGE_SIZE-1);
 	blk_queue_bounce_limit(blkdev->gd->queue, BLK_BOUNCE_ANY);
 	blk_queue_dma_alignment(blkdev->gd->queue, 511);
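On the reader side, consumers that previously took the minimum of the two queue limits by hand (as the sg and st hunks above did) now query a single accessor. A small sketch under the same assumptions, with mydrv_sg_tablesize() as a hypothetical helper, not a kernel function:

    /* Size a scatter-gather table from the queue's single consolidated
     * limit; this replaces the old pattern
     * min(queue_max_hw_segments(q), queue_max_phys_segments(q)).
     */
    static unsigned short mydrv_sg_tablesize(struct request_queue *q)
    {
            return queue_max_segments(q);
    }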