author	Martin K. Petersen	2022-03-02 00:35:52 -0500
committer	Martin K. Petersen	2022-05-02 16:59:11 -0400
commit	7fb019c46eeea4e3cc3ddfd3e01a24e610f34fac (patch)
tree	3afda5ac90e05694f52eba36cd14270a50691fef
parent	e38d9e83a376923454af599b2add53e71cd7508a (diff)
scsi: sd: Switch to using scsi_device VPD pages
Use the VPD pages already provided by the SCSI midlayer. No need to
request them individually in the SCSI disk driver.

Link: https://lore.kernel.org/r/20220302053559.32147-8-martin.petersen@oracle.com
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
-rw-r--r--	drivers/scsi/sd.c	80
1 files changed, 40 insertions, 40 deletions
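For context, the access pattern this patch switches the sd driver to is the RCU-protected VPD page cache kept on the scsi_device by the midlayer. Below is a minimal, illustrative sketch of that pattern; sd_example_opt_xfer() is a hypothetical helper invented for this note and is not part of the patch, while vpd_pgb0, the struct scsi_vpd len/data fields, rcu_dereference() and get_unaligned_be32() are used exactly as in the diff that follows (declarations come from scsi/scsi_device.h, linux/rcupdate.h and asm/unaligned.h).

	/*
	 * Sketch only (not part of this patch): read a field from the cached
	 * Block Limits VPD page (0xB0) published by the SCSI midlayer.
	 * sd_example_opt_xfer() is a made-up name for illustration.
	 */
	static unsigned int sd_example_opt_xfer(struct scsi_device *sdev)
	{
		struct scsi_vpd *vpd;
		unsigned int opt_xfer_blocks = 0;

		rcu_read_lock();
		vpd = rcu_dereference(sdev->vpd_pgb0);	/* Block Limits VPD */
		if (vpd && vpd->len >= 16)
			opt_xfer_blocks = get_unaligned_be32(&vpd->data[12]);
		rcu_read_unlock();

		return opt_xfer_blocks;
	}

The cached pages are owned by the midlayer and published via RCU, so a reader only needs the rcu_read_lock()/rcu_read_unlock() pair plus a length check; the per-call kmalloc() and scsi_get_vpd_page() round trip that the old code performed goes away, as the diff below shows.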
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 027738af5a22..1e580e9e4a37 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2844,39 +2844,39 @@ static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
 static void sd_read_block_limits(struct scsi_disk *sdkp)
 {
 	unsigned int sector_sz = sdkp->device->sector_size;
-	const int vpd_len = 64;
-	unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL);
+	struct scsi_vpd *vpd;
-	if (!buffer ||
-	    /* Block Limits VPD */
-	    scsi_get_vpd_page(sdkp->device, 0xb0, buffer, vpd_len))
+	rcu_read_lock();
+
+	vpd = rcu_dereference(sdkp->device->vpd_pgb0);
+	if (!vpd || vpd->len < 16)
 		goto out;
 	blk_queue_io_min(sdkp->disk->queue,
-			 get_unaligned_be16(&buffer[6]) * sector_sz);
+			 get_unaligned_be16(&vpd->data[6]) * sector_sz);
-	sdkp->max_xfer_blocks = get_unaligned_be32(&buffer[8]);
-	sdkp->opt_xfer_blocks = get_unaligned_be32(&buffer[12]);
+	sdkp->max_xfer_blocks = get_unaligned_be32(&vpd->data[8]);
+	sdkp->opt_xfer_blocks = get_unaligned_be32(&vpd->data[12]);
-	if (buffer[3] == 0x3c) {
+	if (vpd->len >= 64) {
 		unsigned int lba_count, desc_count;
-		sdkp->max_ws_blocks = (u32)get_unaligned_be64(&buffer[36]);
+		sdkp->max_ws_blocks = (u32)get_unaligned_be64(&vpd->data[36]);
 		if (!sdkp->lbpme)
 			goto out;
-		lba_count = get_unaligned_be32(&buffer[20]);
-		desc_count = get_unaligned_be32(&buffer[24]);
+		lba_count = get_unaligned_be32(&vpd->data[20]);
+		desc_count = get_unaligned_be32(&vpd->data[24]);
 		if (lba_count && desc_count)
 			sdkp->max_unmap_blocks = lba_count;
-		sdkp->unmap_granularity = get_unaligned_be32(&buffer[28]);
+		sdkp->unmap_granularity = get_unaligned_be32(&vpd->data[28]);
-		if (buffer[32] & 0x80)
+		if (vpd->data[32] & 0x80)
 			sdkp->unmap_alignment =
-				get_unaligned_be32(&buffer[32]) & ~(1 << 31);
+				get_unaligned_be32(&vpd->data[32]) & ~(1 << 31);
 		if (!sdkp->lbpvpd) { /* LBP VPD page not provided */
@@ -2898,7 +2898,7 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
 	}
  out:
-	kfree(buffer);
+	rcu_read_unlock();
 }
/**
@@ -2908,18 +2908,21 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
 static void sd_read_block_characteristics(struct scsi_disk *sdkp)
 {
 	struct request_queue *q = sdkp->disk->queue;
-	unsigned char *buffer;
+	struct scsi_vpd *vpd;
 	u16 rot;
-	const int vpd_len = 64;
+	u8 zoned;
-	buffer = kmalloc(vpd_len, GFP_KERNEL);
+	rcu_read_lock();
+	vpd = rcu_dereference(sdkp->device->vpd_pgb1);
-	if (!buffer ||
-	    /* Block Device Characteristics VPD */
-	    scsi_get_vpd_page(sdkp->device, 0xb1, buffer, vpd_len))
-		goto out;
+	if (!vpd || vpd->len < 8) {
+		rcu_read_unlock();
+		return;
+	}
-	rot = get_unaligned_be16(&buffer[4]);
+	rot = get_unaligned_be16(&vpd->data[4]);
+	zoned = (vpd->data[8] >> 4) & 3;
+	rcu_read_unlock();
 	if (rot == 1) {
 		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
@@ -2930,7 +2933,7 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
 		/* Host-managed */
 		blk_queue_set_zoned(sdkp->disk, BLK_ZONED_HM);
 	} else {
-		sdkp->zoned = (buffer[8] >> 4) & 3;
+		sdkp->zoned = zoned;
 		if (sdkp->zoned == 1) {
 			/* Host-aware */
 			blk_queue_set_zoned(sdkp->disk, BLK_ZONED_HA);
@@ -2941,7 +2944,7 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
 	}
 	if (!sdkp->first_scan)
-		goto out;
+		return;
 	if (blk_queue_is_zoned(q)) {
 		sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n",
@@ -2954,9 +2957,6 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
 		sd_printk(KERN_NOTICE, sdkp,
 			  "Drive-managed SMR disk\n");
 	}
-
- out:
-	kfree(buffer);
}
/**
@@ -2965,24 +2965,24 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
  */
 static void sd_read_block_provisioning(struct scsi_disk *sdkp)
 {
-	unsigned char *buffer;
-	const int vpd_len = 8;
+	struct scsi_vpd *vpd;
 	if (sdkp->lbpme == 0)
 		return;
-	buffer = kmalloc(vpd_len, GFP_KERNEL);
+	rcu_read_lock();
+	vpd = rcu_dereference(sdkp->device->vpd_pgb2);
-	if (!buffer || scsi_get_vpd_page(sdkp->device, 0xb2, buffer, vpd_len))
-		goto out;
+	if (!vpd || vpd->len < 8) {
+		rcu_read_unlock();
+		return;
+	}
 	sdkp->lbpvpd	= 1;
-	sdkp->lbpu	= (buffer[5] >> 7) & 1;	/* UNMAP */
-	sdkp->lbpws	= (buffer[5] >> 6) & 1;	/* WRITE SAME(16) with UNMAP */
-	sdkp->lbpws10	= (buffer[5] >> 5) & 1;	/* WRITE SAME(10) with UNMAP */
-
- out:
-	kfree(buffer);
+	sdkp->lbpu	= (vpd->data[5] >> 7) & 1;	/* UNMAP */
+	sdkp->lbpws	= (vpd->data[5] >> 6) & 1;	/* WRITE SAME(16) w/ UNMAP */
+	sdkp->lbpws10	= (vpd->data[5] >> 5) & 1;	/* WRITE SAME(10) w/ UNMAP */
+	rcu_read_unlock();
}
static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)