author     Linus Torvalds  2021-06-30 12:21:16 -0700
committer  Linus Torvalds  2021-06-30 12:21:16 -0700
commit     440462198d9c45e48f2d8d9b18c5702d92282f46 (patch)
tree       9aab5db02f35d0cf9034108116b6a483147791ad
parent     df668a5fe461bb9d7e899c538acc7197746038f4 (diff)
parent     5ed9b357024dc43f75099f597187df05bcd5173c (diff)
Merge tag 'for-5.14/drivers-2021-06-29' of git://git.kernel.dk/linux-block
Pull block driver updates from Jens Axboe:
"Pretty calm round, mostly just NVMe and a bit of MD:
- NVMe updates (via Christoph)
- improve the APST configuration algorithm (Alexey Bogoslavsky)
- look for StorageD3Enable on companion ACPI device
(Mario Limonciello)
- allow selecting the network interface for TCP connections
(Martin Belanger)
- misc cleanups (Amit Engel, Chaitanya Kulkarni, Colin Ian King,
Christoph)
- move the ACPI StorageD3 code to drivers/acpi/ and add quirks
for certain AMD CPUs (Mario Limonciello)
- zoned device support for nvmet (Chaitanya Kulkarni)
- fix the rules for changing the serial number in nvmet
(Noam Gottlieb)
- various small fixes and cleanups (Dan Carpenter, JK Kim,
Chaitanya Kulkarni, Hannes Reinecke, Wesley Sheng, Geert
Uytterhoeven, Daniel Wagner)
 - MD updates (via Song)
- iostats rewrite (Guoqing Jiang)
- raid5 lock contention optimization (Gal Ofri)
- Fall through warning fix (Gustavo)
- Misc fixes (Gustavo, Jiapeng)"
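[Editor's note: one item above that is easy to picture outside the kernel is the new NVMe/TCP host_iface connect option, which pins a controller connection to a chosen network interface. Below is a minimal userspace C sketch of the classic socket mechanism an option like this maps onto, Linux's SO_BINDTODEVICE; the interface name "eth0" is illustrative and nothing in the sketch is taken from the merge itself.]

/* Pin a TCP socket's traffic to one network interface (Linux-specific;
 * requires CAP_NET_RAW). */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	const char ifname[] = "eth0";	/* illustrative interface name */

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	/* All traffic on this socket now egresses via ifname. */
	if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, ifname,
		       strlen(ifname)) < 0)
		perror("setsockopt(SO_BINDTODEVICE)");
	close(fd);
	return 0;
}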
* tag 'for-5.14/drivers-2021-06-29' of git://git.kernel.dk/linux-block: (78 commits)
nvmet: use NVMET_MAX_NAMESPACES to set nn value
loop: Fix missing discard support when using LOOP_CONFIGURE
nvme.h: add missing nvme_lba_range_type endianness annotations
nvme: remove zeroout memset call for struct
nvme-pci: remove zeroout memset call for struct
nvmet: remove zeroout memset call for struct
nvmet: add ZBD over ZNS backend support
nvmet: add Command Set Identifier support
nvmet: add nvmet_req_bio put helper for backends
nvmet: add req cns error complete helper
block: export blk_next_bio()
nvmet: remove local variable
nvmet: use nvme status value directly
nvmet: use u32 type for the local variable nsid
nvmet: use u32 for nvmet_subsys max_nsid
nvmet: use req->cmd directly in file-ns fast path
nvmet: use req->cmd directly in bdev-ns fast path
nvmet: make ver stable once connection established
nvmet: allow mn change if subsys not discovered
nvmet: make sn stable once connection was established
...
54 files changed, 1467 insertions, 519 deletions
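[Editor's note: one recurring pattern in the cleanups below is the rsxx change that replaces a hand-rolled sizing macro with struct_size(), which computes a header-plus-flexible-array allocation size and, in the kernel helper, also saturates on arithmetic overflow. A minimal userspace C sketch of the underlying pattern, with illustrative names:]

#include <stdio.h>
#include <stdlib.h>

struct tracker { int id; void *priv; };

struct tracker_list {
	int head;
	struct tracker list[];	/* flexible array member */
};

int main(void)
{
	size_t n = 256;
	/* sizeof(*tl) counts only the fixed header; the trailing array is
	 * added on. struct_size(tl, list, n) encapsulates exactly this sum. */
	struct tracker_list *tl = malloc(sizeof(*tl) + n * sizeof(tl->list[0]));

	if (!tl)
		return 1;
	tl->head = 0;
	printf("allocated room for %zu trackers\n", n);
	free(tl);
	return 0;
}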
diff --git a/block/blk-lib.c b/block/blk-lib.c index 7b256131b20b..9f09beadcbe3 100644 --- a/block/blk-lib.c +++ b/block/blk-lib.c @@ -21,6 +21,7 @@ struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp) return new; } +EXPORT_SYMBOL_GPL(blk_next_bio); int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, int flags, diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index 675a69de516f..0028b6b51c87 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -1368,4 +1368,36 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on) return 1; } EXPORT_SYMBOL_GPL(acpi_dev_pm_attach); + +/** + * acpi_storage_d3 - Check if D3 should be used in the suspend path + * @dev: Device to check + * + * Return %true if the platform firmware wants @dev to be programmed + * into D3hot or D3cold (if supported) in the suspend path, or %false + * when there is no specific preference. On some platforms, if this + * hint is ignored, @dev may remain unresponsive after suspending the + * platform as a whole. + * + * Although the property has storage in the name it actually is + * applied to the PCIe slot and plugging in a non-storage device the + * same platform restrictions will likely apply. + */ +bool acpi_storage_d3(struct device *dev) +{ + struct acpi_device *adev = ACPI_COMPANION(dev); + u8 val; + + if (force_storage_d3()) + return true; + + if (!adev) + return false; + if (fwnode_property_read_u8(acpi_fwnode_handle(adev), "StorageD3Enable", + &val)) + return false; + return val == 1; +} +EXPORT_SYMBOL_GPL(acpi_storage_d3); + #endif /* CONFIG_PM */ diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index b1d2cc342014..d91b560e8867 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -234,6 +234,15 @@ static inline int suspend_nvs_save(void) { return 0; } static inline void suspend_nvs_restore(void) {} #endif +#ifdef CONFIG_X86 +bool force_storage_d3(void); +#else +static inline bool force_storage_d3(void) +{ + return false; +} +#endif + /*-------------------------------------------------------------------------- Device properties -------------------------------------------------------------------------- */ diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c index bdc1ba00aee9..f22f23933063 100644 --- a/drivers/acpi/x86/utils.c +++ b/drivers/acpi/x86/utils.c @@ -135,3 +135,28 @@ bool acpi_device_always_present(struct acpi_device *adev) return ret; } + +/* + * AMD systems from Renoir and Lucienne *require* that the NVME controller + * is put into D3 over a Modern Standby / suspend-to-idle cycle. + * + * This is "typically" accomplished using the `StorageD3Enable` + * property in the _DSD that is checked via the `acpi_storage_d3` function + * but this property was introduced after many of these systems launched + * and most OEM systems don't have it in their BIOS. + * + * The Microsoft documentation for StorageD3Enable mentioned that Windows has + * a hardcoded allowlist for D3 support, which was used for these platforms. + * + * This allows quirking on Linux in a similar fashion. 
+ */ +static const struct x86_cpu_id storage_d3_cpu_ids[] = { + X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 96, NULL), /* Renoir */ + X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 104, NULL), /* Lucienne */ + {} +}; + +bool force_storage_d3(void) +{ + return x86_match_cpu(storage_d3_cpu_ids); +} diff --git a/drivers/block/aoe/aoechr.c b/drivers/block/aoe/aoechr.c index ab41be625a53..8eea2529da20 100644 --- a/drivers/block/aoe/aoechr.c +++ b/drivers/block/aoe/aoechr.c @@ -140,10 +140,8 @@ bail: spin_unlock_irqrestore(&emsgs_lock, flags); } mp = kmemdup(msg, n, GFP_ATOMIC); - if (mp == NULL) { - printk(KERN_ERR "aoe: allocation failure, len=%ld\n", n); + if (!mp) goto bail; - } em->msg = mp; em->flags |= EMFL_VALID; diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 69284ebba786..1f740e42e457 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -3770,10 +3770,8 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in } new_net_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL); - if (!new_net_conf) { - drbd_err(connection, "Allocation of new net_conf failed\n"); + if (!new_net_conf) goto disconnect; - } mutex_lock(&connection->data.mutex); mutex_lock(&connection->resource->conf_update); @@ -4020,10 +4018,8 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i if (verify_tfm || csums_tfm) { new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL); - if (!new_net_conf) { - drbd_err(device, "Allocation of new net_conf failed\n"); + if (!new_net_conf) goto disconnect; - } *new_net_conf = *old_net_conf; @@ -4161,7 +4157,6 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL); if (!new_disk_conf) { - drbd_err(device, "Allocation of new disk_conf failed\n"); put_ldev(device); return -ENOMEM; } @@ -4288,10 +4283,8 @@ static int receive_uuids(struct drbd_connection *connection, struct packet_info device = peer_device->device; p_uuid = kmalloc_array(UI_EXTENDED_SIZE, sizeof(*p_uuid), GFP_NOIO); - if (!p_uuid) { - drbd_err(device, "kmalloc of p_uuid failed\n"); + if (!p_uuid) return false; - } for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++) p_uuid[i] = be64_to_cpu(p->uuid[i]); @@ -5484,8 +5477,7 @@ static int drbd_do_auth(struct drbd_connection *connection) } peers_ch = kmalloc(pi.size, GFP_NOIO); - if (peers_ch == NULL) { - drbd_err(connection, "kmalloc of peers_ch failed\n"); + if (!peers_ch) { rv = -1; goto fail; } @@ -5504,8 +5496,7 @@ static int drbd_do_auth(struct drbd_connection *connection) resp_size = crypto_shash_digestsize(connection->cram_hmac_tfm); response = kmalloc(resp_size, GFP_NOIO); - if (response == NULL) { - drbd_err(connection, "kmalloc of response failed\n"); + if (!response) { rv = -1; goto fail; } @@ -5552,8 +5543,7 @@ static int drbd_do_auth(struct drbd_connection *connection) } right_response = kmalloc(resp_size, GFP_NOIO); - if (right_response == NULL) { - drbd_err(connection, "kmalloc of right_response failed\n"); + if (!right_response) { rv = -1; goto fail; } diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index cbed9776f285..87460e0e5c72 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -2123,6 +2123,7 @@ static void format_interrupt(void) switch (interpret_errors()) { case 1: cont->error(); + break; case 2: break; case 0: @@ -2330,7 +2331,6 @@ static void rw_interrupt(void) if (!drive_state[current_drive].first_read_date) 
drive_state[current_drive].first_read_date = jiffies; - nr_sectors = 0; ssize = DIV_ROUND_UP(1 << raw_cmd->cmd[SIZECODE], 4); if (reply_buffer[ST1] & ST1_EOC) diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 36e4312f6381..cc0e8c39a48b 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1234,6 +1234,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode, blk_queue_physical_block_size(lo->lo_queue, bsize); blk_queue_io_min(lo->lo_queue, bsize); + loop_config_discard(lo); loop_update_rotational(lo); loop_update_dio(lo); loop_sysfs_init(lo); diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 589cb0f1e030..ff3e7b3f5ad8 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -2238,7 +2238,6 @@ static ssize_t show_device_status(struct device_driver *drv, char *buf) static ssize_t mtip_hw_read_device_status(struct file *f, char __user *ubuf, size_t len, loff_t *offset) { - struct driver_data *dd = (struct driver_data *)f->private_data; int size = *offset; char *buf; int rv = 0; @@ -2247,11 +2246,8 @@ static ssize_t mtip_hw_read_device_status(struct file *f, char __user *ubuf, return 0; buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL); - if (!buf) { - dev_err(&dd->pdev->dev, - "Memory allocation: status buffer\n"); + if (!buf) return -ENOMEM; - } size += show_device_status(NULL, buf); @@ -2277,11 +2273,8 @@ static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf, return 0; buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL); - if (!buf) { - dev_err(&dd->pdev->dev, - "Memory allocation: register buffer\n"); + if (!buf) return -ENOMEM; - } size += sprintf(&buf[size], "H/ S ACTive : [ 0x"); @@ -2343,11 +2336,8 @@ static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf, return 0; buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL); - if (!buf) { - dev_err(&dd->pdev->dev, - "Memory allocation: flag buffer\n"); + if (!buf) return -ENOMEM; - } size += sprintf(&buf[size], "Flag-port : [ %08lX ]\n", dd->port->flags); @@ -2884,11 +2874,8 @@ static int mtip_hw_init(struct driver_data *dd) dd->port = kzalloc_node(sizeof(struct mtip_port), GFP_KERNEL, dd->numa_node); - if (!dd->port) { - dev_err(&dd->pdev->dev, - "Memory allocation: port structure\n"); + if (!dd->port) return -ENOMEM; - } /* Continue workqueue setup */ for (i = 0; i < MTIP_MAX_SLOT_GROUPS; i++) @@ -4002,11 +3989,8 @@ static int mtip_pci_probe(struct pci_dev *pdev, cpu_to_node(raw_smp_processor_id()), raw_smp_processor_id()); dd = kzalloc_node(sizeof(struct driver_data), GFP_KERNEL, my_node); - if (dd == NULL) { - dev_err(&pdev->dev, - "Unable to allocate memory for driver data\n"); + if (!dd) return -ENOMEM; - } /* Attach the private data to this PCI device. 
*/ pci_set_drvdata(pdev, dd); diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c index 0574f4495755..ed182f3dd054 100644 --- a/drivers/block/rsxx/dma.c +++ b/drivers/block/rsxx/dma.c @@ -74,9 +74,6 @@ struct dma_tracker { struct rsxx_dma *dma; }; -#define DMA_TRACKER_LIST_SIZE8 (sizeof(struct dma_tracker_list) + \ - (sizeof(struct dma_tracker) * RSXX_MAX_OUTSTANDING_CMDS)) - struct dma_tracker_list { spinlock_t lock; int head; @@ -808,7 +805,8 @@ static int rsxx_dma_ctrl_init(struct pci_dev *dev, memset(&ctrl->stats, 0, sizeof(ctrl->stats)); - ctrl->trackers = vmalloc(DMA_TRACKER_LIST_SIZE8); + ctrl->trackers = vmalloc(struct_size(ctrl->trackers, list, + RSXX_MAX_OUTSTANDING_CMDS)); if (!ctrl->trackers) return -ENOMEM; diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c index c53b38578bb7..c4631e684386 100644 --- a/drivers/block/sunvdc.c +++ b/drivers/block/sunvdc.c @@ -981,9 +981,8 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) } port = kzalloc(sizeof(*port), GFP_KERNEL); - err = -ENOMEM; if (!port) { - printk(KERN_ERR PFX "Cannot allocate vdc_port.\n"); + err = -ENOMEM; goto err_out_release_mdesc; } diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c index f01f860b0e62..7b54353ee92b 100644 --- a/drivers/block/sx8.c +++ b/drivers/block/sx8.c @@ -1420,8 +1420,6 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) host = kzalloc(sizeof(*host), GFP_KERNEL); if (!host) { - printk(KERN_ERR DRV_NAME "(%s): memory alloc failure\n", - pci_name(pdev)); rc = -ENOMEM; goto err_out_regions; } diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c index a8968d9e759b..4eef218108c6 100644 --- a/drivers/block/z2ram.c +++ b/drivers/block/z2ram.c @@ -236,11 +236,8 @@ static int z2_open(struct block_device *bdev, fmode_t mode) case Z2MINOR_Z2ONLY: z2ram_map = kmalloc(max_z2_map, GFP_KERNEL); - if (z2ram_map == NULL) { - printk(KERN_ERR DEVICE_NAME - ": cannot get mem for z2ram_map\n"); + if (!z2ram_map) goto err_out; - } get_z2ram(); @@ -253,11 +250,8 @@ static int z2_open(struct block_device *bdev, fmode_t mode) case Z2MINOR_CHIPONLY: z2ram_map = kmalloc(max_chip_map, GFP_KERNEL); - if (z2ram_map == NULL) { - printk(KERN_ERR DEVICE_NAME - ": cannot get mem for z2ram_map\n"); + if (!z2ram_map) goto err_out; - } get_chipram(); diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index f2014385d48b..0602e82a9516 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig @@ -47,7 +47,7 @@ config MD_AUTODETECT If unsure, say Y. config MD_LINEAR - tristate "Linear (append) mode" + tristate "Linear (append) mode (deprecated)" depends on BLK_DEV_MD help If you say Y here, then your multiple devices driver will be able to @@ -158,7 +158,7 @@ config MD_RAID456 If unsure, say Y. config MD_MULTIPATH - tristate "Multipath I/O support" + tristate "Multipath I/O support (deprecated)" depends on BLK_DEV_MD help MD_MULTIPATH provides a simple multi-path personality for use @@ -169,7 +169,7 @@ config MD_MULTIPATH If unsure, say N. 
config MD_FAULTY - tristate "Faulty test module for MD" + tristate "Faulty test module for MD (deprecated)" depends on BLK_DEV_MD help The "faulty" module allows for a block device that occasionally returns diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c index ea3130e11680..e29c6298ef5c 100644 --- a/drivers/md/md-bitmap.c +++ b/drivers/md/md-bitmap.c @@ -2616,7 +2616,7 @@ static struct attribute *md_bitmap_attrs[] = { &max_backlog_used.attr, NULL }; -struct attribute_group md_bitmap_group = { +const struct attribute_group md_bitmap_group = { .name = "bitmap", .attrs = md_bitmap_attrs, }; diff --git a/drivers/md/md-faulty.c b/drivers/md/md-faulty.c index fda4cb3f936f..c0dc6f2ef4a3 100644 --- a/drivers/md/md-faulty.c +++ b/drivers/md/md-faulty.c @@ -357,7 +357,7 @@ static void raid_exit(void) module_init(raid_init); module_exit(raid_exit); MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Fault injection personality for MD"); +MODULE_DESCRIPTION("Fault injection personality for MD (deprecated)"); MODULE_ALIAS("md-personality-10"); /* faulty */ MODULE_ALIAS("md-faulty"); MODULE_ALIAS("md-level--5"); diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c index 63ed8329a98d..1ff51647a682 100644 --- a/drivers/md/md-linear.c +++ b/drivers/md/md-linear.c @@ -312,7 +312,7 @@ static void linear_exit (void) module_init(linear_init); module_exit(linear_exit); MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Linear device concatenation personality for MD"); +MODULE_DESCRIPTION("Linear device concatenation personality for MD (deprecated)"); MODULE_ALIAS("md-personality-1"); /* LINEAR - deprecated*/ MODULE_ALIAS("md-linear"); MODULE_ALIAS("md-level--1"); diff --git a/drivers/md/md-multipath.c b/drivers/md/md-multipath.c index 776bbe542db5..e7d6486f090f 100644 --- a/drivers/md/md-multipath.c +++ b/drivers/md/md-multipath.c @@ -471,7 +471,7 @@ static void __exit multipath_exit (void) module_init(multipath_init); module_exit(multipath_exit); MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("simple multi-path personality for MD"); +MODULE_DESCRIPTION("simple multi-path personality for MD (deprecated)"); MODULE_ALIAS("md-personality-7"); /* MULTIPATH */ MODULE_ALIAS("md-multipath"); MODULE_ALIAS("md-level--4"); diff --git a/drivers/md/md.c b/drivers/md/md.c index d806be8cc210..ae8fe54ea358 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -441,30 +441,6 @@ check_suspended: } EXPORT_SYMBOL(md_handle_request); -struct md_io { - struct mddev *mddev; - bio_end_io_t *orig_bi_end_io; - void *orig_bi_private; - struct block_device *orig_bi_bdev; - unsigned long start_time; -}; - -static void md_end_io(struct bio *bio) -{ - struct md_io *md_io = bio->bi_private; - struct mddev *mddev = md_io->mddev; - - bio_end_io_acct_remapped(bio, md_io->start_time, md_io->orig_bi_bdev); - - bio->bi_end_io = md_io->orig_bi_end_io; - bio->bi_private = md_io->orig_bi_private; - - mempool_free(md_io, &mddev->md_io_pool); - - if (bio->bi_end_io) - bio->bi_end_io(bio); -} - static blk_qc_t md_submit_bio(struct bio *bio) { const int rw = bio_data_dir(bio); @@ -489,21 +465,6 @@ static blk_qc_t md_submit_bio(struct bio *bio) return BLK_QC_T_NONE; } - if (bio->bi_end_io != md_end_io) { - struct md_io *md_io; - - md_io = mempool_alloc(&mddev->md_io_pool, GFP_NOIO); - md_io->mddev = mddev; - md_io->orig_bi_end_io = bio->bi_end_io; - md_io->orig_bi_private = bio->bi_private; - md_io->orig_bi_bdev = bio->bi_bdev; - - bio->bi_end_io = md_end_io; - bio->bi_private = md_io; - - md_io->start_time = bio_start_io_acct(bio); - } - /* bio could 
be mergeable after passing to underlayer */ bio->bi_opf &= ~REQ_NOMERGE; @@ -824,7 +785,7 @@ out_free_new: return ERR_PTR(error); } -static struct attribute_group md_redundancy_group; +static const struct attribute_group md_redundancy_group; void mddev_unlock(struct mddev *mddev) { @@ -841,7 +802,7 @@ void mddev_unlock(struct mddev *mddev) * test it under the same mutex to ensure its correct value * is seen. */ - struct attribute_group *to_remove = mddev->to_remove; + const struct attribute_group *to_remove = mddev->to_remove; mddev->to_remove = NULL; mddev->sysfs_active = 1; mutex_unlock(&mddev->reconfig_mutex); @@ -2379,7 +2340,15 @@ int md_integrity_register(struct mddev *mddev) bdev_get_integrity(reference->bdev)); pr_debug("md: data integrity enabled on %s\n", mdname(mddev)); - if (bioset_integrity_create(&mddev->bio_set, BIO_POOL_SIZE)) { + if (bioset_integrity_create(&mddev->bio_set, BIO_POOL_SIZE) || + (mddev->level != 1 && mddev->level != 10 && + bioset_integrity_create(&mddev->io_acct_set, BIO_POOL_SIZE))) { + /* + * No need to handle the failure of bioset_integrity_create, + * because the function is called by md_run() -> pers->run(), + * md_run calls bioset_exit -> bioset_integrity_free in case + * of failure case. + */ pr_err("md: failed to create integrity pool for %s\n", mdname(mddev)); return -EINVAL; @@ -5538,7 +5507,7 @@ static struct attribute *md_redundancy_attrs[] = { &md_degraded.attr, NULL, }; -static struct attribute_group md_redundancy_group = { +static const struct attribute_group md_redundancy_group = { .name = NULL, .attrs = md_redundancy_attrs, }; @@ -5606,7 +5575,8 @@ static void md_free(struct kobject *ko) bioset_exit(&mddev->bio_set); bioset_exit(&mddev->sync_set); - mempool_exit(&mddev->md_io_pool); + if (mddev->level != 1 && mddev->level != 10) + bioset_exit(&mddev->io_acct_set); kfree(mddev); } @@ -5703,11 +5673,6 @@ static int md_alloc(dev_t dev, char *name) */ mddev->hold_active = UNTIL_STOP; - error = mempool_init_kmalloc_pool(&mddev->md_io_pool, BIO_POOL_SIZE, - sizeof(struct md_io)); - if (error) - goto abort; - error = -ENOMEM; disk = blk_alloc_disk(NUMA_NO_NODE); if (!disk) @@ -5900,7 +5865,14 @@ int md_run(struct mddev *mddev) if (!bioset_initialized(&mddev->sync_set)) { err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); if (err) - return err; + goto exit_bio_set; + } + if (mddev->level != 1 && mddev->level != 10 && + !bioset_initialized(&mddev->io_acct_set)) { + err = bioset_init(&mddev->io_acct_set, BIO_POOL_SIZE, + offsetof(struct md_io_acct, bio_clone), 0); + if (err) + goto exit_sync_set; } spin_lock(&pers_lock); @@ -6028,6 +6000,7 @@ int md_run(struct mddev *mddev) blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue); else blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue); + blk_queue_flag_set(QUEUE_FLAG_IO_STAT, mddev->queue); } if (pers->sync_request) { if (mddev->kobj.sd && @@ -6077,8 +6050,12 @@ bitmap_abort: module_put(pers->owner); md_bitmap_destroy(mddev); abort: - bioset_exit(&mddev->bio_set); + if (mddev->level != 1 && mddev->level != 10) + bioset_exit(&mddev->io_acct_set); +exit_sync_set: bioset_exit(&mddev->sync_set); +exit_bio_set: + bioset_exit(&mddev->bio_set); return err; } EXPORT_SYMBOL_GPL(md_run); @@ -6302,6 +6279,8 @@ void md_stop(struct mddev *mddev) __md_stop(mddev); bioset_exit(&mddev->bio_set); bioset_exit(&mddev->sync_set); + if (mddev->level != 1 && mddev->level != 10) + bioset_exit(&mddev->io_acct_set); } EXPORT_SYMBOL_GPL(md_stop); @@ -8606,6 +8585,41 @@ void md_submit_discard_bio(struct 
mddev *mddev, struct md_rdev *rdev, } EXPORT_SYMBOL_GPL(md_submit_discard_bio); +static void md_end_io_acct(struct bio *bio) +{ + struct md_io_acct *md_io_acct = bio->bi_private; + struct bio *orig_bio = md_io_acct->orig_bio; + + orig_bio->bi_status = bio->bi_status; + + bio_end_io_acct(orig_bio, md_io_acct->start_time); + bio_put(bio); + bio_endio(orig_bio); +} + +/* + * Used by personalities that don't already clone the bio and thus can't + * easily add the timestamp to their extended bio structure. + */ +void md_account_bio(struct mddev *mddev, struct bio **bio) +{ + struct md_io_acct *md_io_acct; + struct bio *clone; + + if (!blk_queue_io_stat((*bio)->bi_bdev->bd_disk->queue)) + return; + + clone = bio_clone_fast(*bio, GFP_NOIO, &mddev->io_acct_set); + md_io_acct = container_of(clone, struct md_io_acct, bio_clone); + md_io_acct->orig_bio = *bio; + md_io_acct->start_time = bio_start_io_acct(*bio); + + clone->bi_end_io = md_end_io_acct; + clone->bi_private = md_io_acct; + *bio = clone; +} +EXPORT_SYMBOL_GPL(md_account_bio); + /* md_allow_write(mddev) * Calling this ensures that the array is marked 'active' so that writes * may proceed without blocking. It is important to call this before diff --git a/drivers/md/md.h b/drivers/md/md.h index a88086d4110c..832547cf038f 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -481,13 +481,13 @@ struct mddev { atomic_t max_corr_read_errors; /* max read retries */ struct list_head all_mddevs; - struct attribute_group *to_remove; + const struct attribute_group *to_remove; struct bio_set bio_set; struct bio_set sync_set; /* for sync operations like * metadata and bitmap writes */ - mempool_t md_io_pool; + struct bio_set io_acct_set; /* for raid0 and raid5 io accounting */ /* Generic flush handling. * The last to finish preflush schedules a worker to submit @@ -613,7 +613,7 @@ struct md_sysfs_entry { ssize_t (*show)(struct mddev *, char *); ssize_t (*store)(struct mddev *, const char *, size_t); }; -extern struct attribute_group md_bitmap_group; +extern const struct attribute_group md_bitmap_group; static inline struct kernfs_node *sysfs_get_dirent_safe(struct kernfs_node *sd, char *name) { @@ -684,6 +684,12 @@ struct md_thread { void *private; }; +struct md_io_acct { + struct bio *orig_bio; + unsigned long start_time; + struct bio bio_clone; +}; + #define THREAD_WAKEUP 0 static inline void safe_put_page(struct page *p) @@ -715,6 +721,7 @@ extern void md_error(struct mddev *mddev, struct md_rdev *rdev); extern void md_finish_reshape(struct mddev *mddev); void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev, struct bio *bio, sector_t start, sector_t size); +void md_account_bio(struct mddev *mddev, struct bio **bio); extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio); extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev, diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index e5d7411cba9b..62c8b6adac70 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -546,6 +546,9 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio) bio = split; } + if (bio->bi_pool != &mddev->bio_set) + md_account_bio(mddev, &bio); + orig_sector = sector; zone = find_zone(mddev->private, §or); switch (conf->layout) { diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index ced076ba560e..51f2547c2007 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -300,6 +300,8 @@ static void call_bio_endio(struct r1bio *r1_bio) if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) bio->bi_status = 
BLK_STS_IOERR; + if (blk_queue_io_stat(bio->bi_bdev->bd_disk->queue)) + bio_end_io_acct(bio, r1_bio->start_time); bio_endio(bio); } @@ -1210,7 +1212,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio, const unsigned long do_sync = (bio->bi_opf & REQ_SYNC); int max_sectors; int rdisk; - bool print_msg = !!r1_bio; + bool r1bio_existed = !!r1_bio; char b[BDEVNAME_SIZE]; /* @@ -1220,7 +1222,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio, */ gfp_t gfp = r1_bio ? (GFP_NOIO | __GFP_HIGH) : GFP_NOIO; - if (print_msg) { + if (r1bio_existed) { /* Need to get the block device name carefully */ struct md_rdev *rdev; rcu_read_lock(); @@ -1252,7 +1254,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio, if (rdisk < 0) { /* couldn't find anywhere to read from */ - if (print_msg) { + if (r1bio_existed) { pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n", mdname(mddev), b, @@ -1263,7 +1265,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio, } mirror = conf->mirrors + rdisk; - if (print_msg) + if (r1bio_existed) pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %s\n", mdname(mddev), (unsigned long long)r1_bio->sector, @@ -1292,6 +1294,9 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio, r1_bio->read_disk = rdisk; + if (!r1bio_existed && blk_queue_io_stat(bio->bi_bdev->bd_disk->queue)) + r1_bio->start_time = bio_start_io_acct(bio); + read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set); r1_bio->bios[rdisk] = read_bio; @@ -1461,6 +1466,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, r1_bio->sectors = max_sectors; } + if (blk_queue_io_stat(bio->bi_bdev->bd_disk->queue)) + r1_bio->start_time = bio_start_io_acct(bio); atomic_set(&r1_bio->remaining, 1); atomic_set(&r1_bio->behind_remaining, 0); diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h index b7eb09e8c025..ccf10e59b116 100644 --- a/drivers/md/raid1.h +++ b/drivers/md/raid1.h @@ -158,6 +158,7 @@ struct r1bio { sector_t sector; int sectors; unsigned long state; + unsigned long start_time; struct mddev *mddev; /* * original bio going to /dev/mdx diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 13f5e6b2a73d..16977e8e075d 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -297,6 +297,8 @@ static void raid_end_bio_io(struct r10bio *r10_bio) if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) bio->bi_status = BLK_STS_IOERR; + if (blk_queue_io_stat(bio->bi_bdev->bd_disk->queue)) + bio_end_io_acct(bio, r10_bio->start_time); bio_endio(bio); /* * Wake up any possible resync thread that waits for the device @@ -1184,6 +1186,8 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio, } slot = r10_bio->read_slot; + if (blk_queue_io_stat(bio->bi_bdev->bd_disk->queue)) + r10_bio->start_time = bio_start_io_acct(bio); read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set); r10_bio->devs[slot].bio = read_bio; @@ -1483,6 +1487,8 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio, r10_bio->master_bio = bio; } + if (blk_queue_io_stat(bio->bi_bdev->bd_disk->queue)) + r10_bio->start_time = bio_start_io_acct(bio); atomic_set(&r10_bio->remaining, 1); md_bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0); diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h index 1461fd55311b..c34bb196790e 100644 --- a/drivers/md/raid10.h +++ b/drivers/md/raid10.h @@ -124,6 +124,7 @@ struct r10bio { sector_t sector; /* 
virtual sector number */ int sectors; unsigned long state; + unsigned long start_time; struct mddev *mddev; /* * original bio going to /dev/mdx diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 7d4ff8a5c55e..b8436e4930ed 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -5362,11 +5362,13 @@ static struct bio *remove_bio_from_retry(struct r5conf *conf, */ static void raid5_align_endio(struct bio *bi) { - struct bio* raid_bi = bi->bi_private; + struct md_io_acct *md_io_acct = bi->bi_private; + struct bio *raid_bi = md_io_acct->orig_bio; struct mddev *mddev; struct r5conf *conf; struct md_rdev *rdev; blk_status_t error = bi->bi_status; + unsigned long start_time = md_io_acct->start_time; bio_put(bi); @@ -5378,6 +5380,8 @@ static void raid5_align_endio(struct bio *bi) rdev_dec_pending(rdev, conf->mddev); if (!error) { + if (blk_queue_io_stat(raid_bi->bi_bdev->bd_disk->queue)) + bio_end_io_acct(raid_bi, start_time); bio_endio(raid_bi); if (atomic_dec_and_test(&conf->active_aligned_reads)) wake_up(&conf->wait_for_quiescent); @@ -5396,6 +5400,8 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio) struct md_rdev *rdev; sector_t sector, end_sector, first_bad; int bad_sectors, dd_idx; + struct md_io_acct *md_io_acct; + bool did_inc; if (!in_chunk_boundary(mddev, raid_bio)) { pr_debug("%s: non aligned\n", __func__); @@ -5425,29 +5431,46 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio) atomic_inc(&rdev->nr_pending); rcu_read_unlock(); - align_bio = bio_clone_fast(raid_bio, GFP_NOIO, &mddev->bio_set); - bio_set_dev(align_bio, rdev->bdev); - align_bio->bi_end_io = raid5_align_endio; - align_bio->bi_private = raid_bio; - align_bio->bi_iter.bi_sector = sector; - - raid_bio->bi_next = (void *)rdev; - - if (is_badblock(rdev, sector, bio_sectors(align_bio), &first_bad, + if (is_badblock(rdev, sector, bio_sectors(raid_bio), &first_bad, &bad_sectors)) { - bio_put(align_bio); + bio_put(raid_bio); rdev_dec_pending(rdev, mddev); return 0; } + align_bio = bio_clone_fast(raid_bio, GFP_NOIO, &mddev->io_acct_set); + md_io_acct = container_of(align_bio, struct md_io_acct, bio_clone); + raid_bio->bi_next = (void *)rdev; + if (blk_queue_io_stat(raid_bio->bi_bdev->bd_disk->queue)) + md_io_acct->start_time = bio_start_io_acct(raid_bio); + md_io_acct->orig_bio = raid_bio; + + bio_set_dev(align_bio, rdev->bdev); + align_bio->bi_end_io = raid5_align_endio; + align_bio->bi_private = md_io_acct; + align_bio->bi_iter.bi_sector = sector; + /* No reshape active, so we can trust rdev->data_offset */ align_bio->bi_iter.bi_sector += rdev->data_offset; - spin_lock_irq(&conf->device_lock); - wait_event_lock_irq(conf->wait_for_quiescent, conf->quiesce == 0, - conf->device_lock); - atomic_inc(&conf->active_aligned_reads); - spin_unlock_irq(&conf->device_lock); + did_inc = false; + if (conf->quiesce == 0) { + atomic_inc(&conf->active_aligned_reads); + did_inc = true; + } + /* need a memory barrier to detect the race with raid5_quiesce() */ + if (!did_inc || smp_load_acquire(&conf->quiesce) != 0) { + /* quiesce is in progress, so we need to undo io activation and wait + * for it to finish + */ + if (did_inc && atomic_dec_and_test(&conf->active_aligned_reads)) + wake_up(&conf->wait_for_quiescent); + spin_lock_irq(&conf->device_lock); + wait_event_lock_irq(conf->wait_for_quiescent, conf->quiesce == 0, + conf->device_lock); + atomic_inc(&conf->active_aligned_reads); + spin_unlock_irq(&conf->device_lock); + } if (mddev->gendisk) trace_block_bio_remap(align_bio, 
disk_devt(mddev->gendisk), @@ -5796,6 +5819,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi) last_sector = bio_end_sector(bi); bi->bi_next = NULL; + md_account_bio(mddev, &bi); prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); for (; logical_sector < last_sector; logical_sector += RAID5_STRIPE_SECTORS(conf)) { int previous; @@ -6928,7 +6952,7 @@ static struct attribute *raid5_attrs[] = { &ppl_write_hint.attr, NULL, }; -static struct attribute_group raid5_attrs_group = { +static const struct attribute_group raid5_attrs_group = { .name = NULL, .attrs = raid5_attrs, }; @@ -8334,7 +8358,10 @@ static void raid5_quiesce(struct mddev *mddev, int quiesce) * active stripes can drain */ r5c_flush_cache(conf, INT_MAX); - conf->quiesce = 2; + /* need a memory barrier to make sure read_one_chunk() sees + * quiesce started and reverts to slow (locked) path. + */ + smp_store_release(&conf->quiesce, 2); wait_event_cmd(conf->wait_for_quiescent, atomic_read(&conf->active_stripes) == 0 && atomic_read(&conf->active_aligned_reads) == 0, diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig index 494675aeaaad..c3f3d77f1aac 100644 --- a/drivers/nvme/host/Kconfig +++ b/drivers/nvme/host/Kconfig @@ -21,7 +21,7 @@ config NVME_MULTIPATH help This option enables support for multipath access to NVMe subsystems. If this option is enabled only a single - /dev/nvmeXnY device will show up for each NVMe namespaces, + /dev/nvmeXnY device will show up for each NVMe namespace, even if it is accessible through multiple controllers. config NVME_HWMON diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 01889a8abb6b..80c656dcbbac 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -57,6 +57,26 @@ static bool force_apst; module_param(force_apst, bool, 0644); MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off"); +static unsigned long apst_primary_timeout_ms = 100; +module_param(apst_primary_timeout_ms, ulong, 0644); +MODULE_PARM_DESC(apst_primary_timeout_ms, + "primary APST timeout in ms"); + +static unsigned long apst_secondary_timeout_ms = 2000; +module_param(apst_secondary_timeout_ms, ulong, 0644); +MODULE_PARM_DESC(apst_secondary_timeout_ms, + "secondary APST timeout in ms"); + +static unsigned long apst_primary_latency_tol_us = 15000; +module_param(apst_primary_latency_tol_us, ulong, 0644); +MODULE_PARM_DESC(apst_primary_latency_tol_us, + "primary APST latency tolerance in us"); + +static unsigned long apst_secondary_latency_tol_us = 100000; +module_param(apst_secondary_latency_tol_us, ulong, 0644); +MODULE_PARM_DESC(apst_secondary_latency_tol_us, + "secondary APST latency tolerance in us"); + static bool streams; module_param(streams, bool, 0644); MODULE_PARM_DESC(streams, "turn on support for Streams write directives"); @@ -701,9 +721,7 @@ EXPORT_SYMBOL_GPL(__nvme_check_ready); static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable) { - struct nvme_command c; - - memset(&c, 0, sizeof(c)); + struct nvme_command c = { }; c.directive.opcode = nvme_admin_directive_send; c.directive.nsid = cpu_to_le32(NVME_NSID_ALL); @@ -728,9 +746,8 @@ static int nvme_enable_streams(struct nvme_ctrl *ctrl) static int nvme_get_stream_params(struct nvme_ctrl *ctrl, struct streams_directive_params *s, u32 nsid) { - struct nvme_command c; + struct nvme_command c = { }; - memset(&c, 0, sizeof(c)); memset(s, 0, sizeof(*s)); c.directive.opcode = nvme_admin_directive_recv; @@ -1440,10 +1457,9 @@ static int 
nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid, unsigned int dword11, void *buffer, size_t buflen, u32 *result) { union nvme_result res = { 0 }; - struct nvme_command c; + struct nvme_command c = { }; int ret; - memset(&c, 0, sizeof(c)); c.features.opcode = op; c.features.fid = cpu_to_le32(fid); c.features.dword11 = cpu_to_le32(dword11); @@ -1522,36 +1538,6 @@ static void nvme_enable_aen(struct nvme_ctrl *ctrl) queue_work(nvme_wq, &ctrl->async_event_work); } -/* - * Issue ioctl requests on the first available path. Note that unlike normal - * block layer requests we will not retry failed request on another controller. - */ -struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk, - struct nvme_ns_head **head, int *srcu_idx) -{ -#ifdef CONFIG_NVME_MULTIPATH - if (disk->fops == &nvme_ns_head_ops) { - struct nvme_ns *ns; - - *head = disk->private_data; - *srcu_idx = srcu_read_lock(&(*head)->srcu); - ns = nvme_find_path(*head); - if (!ns) - srcu_read_unlock(&(*head)->srcu, *srcu_idx); - return ns; - } -#endif - *head = NULL; - *srcu_idx = -1; - return disk->private_data; -} - -void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx) -{ - if (head) - srcu_read_unlock(&head->srcu, idx); -} - static int nvme_ns_open(struct nvme_ns *ns) { @@ -1601,9 +1587,8 @@ int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo) static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type, u32 max_integrity_segments) { - struct blk_integrity integrity; + struct blk_integrity integrity = { }; - memset(&integrity, 0, sizeof(integrity)); switch (pi_type) { case NVME_NS_DPS_PI_TYPE3: integrity.profile = &t10_pi_type3_crc; @@ -1948,30 +1933,45 @@ static char nvme_pr_type(enum pr_type type) } }; +static int nvme_send_ns_head_pr_command(struct block_device *bdev, + struct nvme_command *c, u8 data[16]) +{ + struct nvme_ns_head *head = bdev->bd_disk->private_data; + int srcu_idx = srcu_read_lock(&head->srcu); + struct nvme_ns *ns = nvme_find_path(head); + int ret = -EWOULDBLOCK; + + if (ns) { + c->common.nsid = cpu_to_le32(ns->head->ns_id); + ret = nvme_submit_sync_cmd(ns->queue, c, data, 16); + } + srcu_read_unlock(&head->srcu, srcu_idx); + return ret; +} + +static int nvme_send_ns_pr_command(struct nvme_ns *ns, struct nvme_command *c, + u8 data[16]) +{ + c->common.nsid = cpu_to_le32(ns->head->ns_id); + return nvme_submit_sync_cmd(ns->queue, c, data, 16); +} + static int nvme_pr_command(struct block_device *bdev, u32 cdw10, u64 key, u64 sa_key, u8 op) { - struct nvme_ns_head *head = NULL; - struct nvme_ns *ns; - struct nvme_command c; - int srcu_idx, ret; + struct nvme_command c = { }; u8 data[16] = { 0, }; - ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx); - if (unlikely(!ns)) - return -EWOULDBLOCK; - put_unaligned_le64(key, &data[0]); put_unaligned_le64(sa_key, &data[8]); - memset(&c, 0, sizeof(c)); c.common.opcode = op; - c.common.nsid = cpu_to_le32(ns->head->ns_id); c.common.cdw10 = cpu_to_le32(cdw10); - ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16); - nvme_put_ns_from_disk(head, srcu_idx); - return ret; + if (IS_ENABLED(CONFIG_NVME_MULTIPATH) && + bdev->bd_disk->fops == &nvme_ns_head_ops) + return nvme_send_ns_head_pr_command(bdev, &c, data); + return nvme_send_ns_pr_command(bdev->bd_disk->private_data, &c, data); } static int nvme_pr_register(struct block_device *bdev, u64 old, @@ -2036,9 +2036,8 @@ int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len, bool send) { struct nvme_ctrl *ctrl = data; - struct nvme_command cmd; + 
struct nvme_command cmd = { }; - memset(&cmd, 0, sizeof(cmd)); if (send) cmd.common.opcode = nvme_admin_security_send; else @@ -2053,6 +2052,17 @@ int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len, EXPORT_SYMBOL_GPL(nvme_sec_submit); #endif /* CONFIG_BLK_SED_OPAL */ +#ifdef CONFIG_BLK_DEV_ZONED +static int nvme_report_zones(struct gendisk *disk, sector_t sector, + unsigned int nr_zones, report_zones_cb cb, void *data) +{ + return nvme_ns_report_zones(disk->private_data, sector, nr_zones, cb, + data); +} +#else +#define nvme_report_zones NULL +#endif /* CONFIG_BLK_DEV_ZONED */ + static const struct block_device_operations nvme_bdev_ops = { .owner = THIS_MODULE, .ioctl = nvme_ioctl, @@ -2218,13 +2228,53 @@ static int nvme_configure_acre(struct nvme_ctrl *ctrl) } /* + * The function checks whether the given total (exlat + enlat) latency of + * a power state allows the latter to be used as an APST transition target. + * It does so by comparing the latency to the primary and secondary latency + * tolerances defined by module params. If there's a match, the corresponding + * timeout value is returned and the matching tolerance index (1 or 2) is + * reported. + */ +static bool nvme_apst_get_transition_time(u64 total_latency, + u64 *transition_time, unsigned *last_index) +{ + if (total_latency <= apst_primary_latency_tol_us) { + if (*last_index == 1) + return false; + *last_index = 1; + *transition_time = apst_primary_timeout_ms; + return true; + } + if (apst_secondary_timeout_ms && + total_latency <= apst_secondary_latency_tol_us) { + if (*last_index <= 2) + return false; + *last_index = 2; + *transition_time = apst_secondary_timeout_ms; + return true; + } + return false; +} + +/* * APST (Autonomous Power State Transition) lets us program a table of power * state transitions that the controller will perform automatically. - * We configure it with a simple heuristic: we are willing to spend at most 2% - * of the time transitioning between power states. Therefore, when running in - * any given state, we will enter the next lower-power non-operational state - * after waiting 50 * (enlat + exlat) microseconds, as long as that state's exit - * latency is under the requested maximum latency. + * + * Depending on module params, one of the two supported techniques will be used: + * + * - If the parameters provide explicit timeouts and tolerances, they will be + * used to build a table with up to 2 non-operational states to transition to. + * The default parameter values were selected based on the values used by + * Microsoft's and Intel's NVMe drivers. Yet, since we don't implement dynamic + * regeneration of the APST table in the event of switching between external + * and battery power, the timeouts and tolerances reflect a compromise + * between values used by Microsoft for AC and battery scenarios. + * - If not, we'll configure the table with a simple heuristic: we are willing + * to spend at most 2% of the time transitioning between power states. + * Therefore, when running in any given state, we will enter the next + * lower-power non-operational state after waiting 50 * (enlat + exlat) + * microseconds, as long as that state's exit latency is under the requested + * maximum latency. * * We will not autonomously enter any non-operational state for which the total * latency exceeds ps_max_latency_us. 
@@ -2240,6 +2290,7 @@ static int nvme_configure_apst(struct nvme_ctrl *ctrl) int max_ps = -1; int state; int ret; + unsigned last_lt_index = UINT_MAX; /* * If APST isn't supported or if we haven't been initialized yet, @@ -2298,13 +2349,19 @@ static int nvme_configure_apst(struct nvme_ctrl *ctrl) le32_to_cpu(ctrl->psd[state].entry_lat); /* - * This state is good. Use it as the APST idle target for - * higher power states. + * This state is good. It can be used as the APST idle target + * for higher power states. */ - transition_ms = total_latency_us + 19; - do_div(transition_ms, 20); - if (transition_ms > (1 << 24) - 1) - transition_ms = (1 << 24) - 1; + if (apst_primary_timeout_ms && apst_primary_latency_tol_us) { + if (!nvme_apst_get_transition_time(total_latency_us, + &transition_ms, &last_lt_index)) + continue; + } else { + transition_ms = total_latency_us + 19; + do_div(transition_ms, 20); + if (transition_ms > (1 << 24) - 1) + transition_ms = (1 << 24) - 1; + } target = cpu_to_le64((state << 3) | (transition_ms << 8)); if (max_ps == -1) @@ -4068,6 +4125,11 @@ static int nvme_class_uevent(struct device *dev, struct kobj_uevent_env *env) ret = add_uevent_var(env, "NVME_HOST_TRADDR=%s", opts->host_traddr ?: "none"); + if (ret) + return ret; + + ret = add_uevent_var(env, "NVME_HOST_IFACE=%s", + opts->host_iface ?: "none"); } return ret; } diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index 34a84d2086c7..1e6a7cc056ca 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c @@ -112,6 +112,9 @@ int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size) if (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR) len += scnprintf(buf + len, size - len, "%shost_traddr=%s", (len) ? "," : "", ctrl->opts->host_traddr); + if (ctrl->opts->mask & NVMF_OPT_HOST_IFACE) + len += scnprintf(buf + len, size - len, "%shost_iface=%s", + (len) ? "," : "", ctrl->opts->host_iface); len += scnprintf(buf + len, size - len, "\n"); return len; @@ -187,11 +190,10 @@ EXPORT_SYMBOL_GPL(nvmf_reg_read32); */ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val) { - struct nvme_command cmd; + struct nvme_command cmd = { }; union nvme_result res; int ret; - memset(&cmd, 0, sizeof(cmd)); cmd.prop_get.opcode = nvme_fabrics_command; cmd.prop_get.fctype = nvme_fabrics_type_property_get; cmd.prop_get.attrib = 1; @@ -233,10 +235,9 @@ EXPORT_SYMBOL_GPL(nvmf_reg_read64); */ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val) { - struct nvme_command cmd; + struct nvme_command cmd = { }; int ret; - memset(&cmd, 0, sizeof(cmd)); cmd.prop_set.opcode = nvme_fabrics_command; cmd.prop_set.fctype = nvme_fabrics_type_property_set; cmd.prop_set.attrib = 0; @@ -254,28 +255,23 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val) EXPORT_SYMBOL_GPL(nvmf_reg_write32); /** - * nvmf_log_connect_error() - Error-parsing-diagnostic print - * out function for connect() errors. - * - * @ctrl: the specific /dev/nvmeX device that had the error. - * - * @errval: Error code to be decoded in a more human-friendly - * printout. - * - * @offset: For use with the NVMe error code NVME_SC_CONNECT_INVALID_PARAM. - * - * @cmd: This is the SQE portion of a submission capsule. - * - * @data: This is the "Data" portion of a submission capsule. + * nvmf_log_connect_error() - Error-parsing-diagnostic print out function for + * connect() errors. + * @ctrl: The specific /dev/nvmeX device that had the error. + * @errval: Error code to be decoded in a more human-friendly + * printout. 
+ * @offset: For use with the NVMe error code + * NVME_SC_CONNECT_INVALID_PARAM. + * @cmd: This is the SQE portion of a submission capsule. + * @data: This is the "Data" portion of a submission capsule. */ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl, int errval, int offset, struct nvme_command *cmd, struct nvmf_connect_data *data) { - int err_sctype = errval & (~NVME_SC_DNR); + int err_sctype = errval & ~NVME_SC_DNR; switch (err_sctype) { - case (NVME_SC_CONNECT_INVALID_PARAM): if (offset >> 16) { char *inv_data = "Connect Invalid Data Parameter"; @@ -318,35 +314,30 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl, } } break; - case NVME_SC_CONNECT_INVALID_HOST: dev_err(ctrl->device, "Connect for subsystem %s is not allowed, hostnqn: %s\n", data->subsysnqn, data->hostnqn); break; - case NVME_SC_CONNECT_CTRL_BUSY: dev_err(ctrl->device, "Connect command failed: controller is busy or not available\n"); break; - case NVME_SC_CONNECT_FORMAT: dev_err(ctrl->device, "Connect incompatible format: %d", cmd->connect.recfmt); break; - case NVME_SC_HOST_PATH_ERROR: dev_err(ctrl->device, "Connect command failed: host path error\n"); break; - default: dev_err(ctrl->device, "Connect command failed, error wo/DNR bit: %d\n", err_sctype); break; - } /* switch (err_sctype) */ + } } /** @@ -371,12 +362,11 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl, */ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl) { - struct nvme_command cmd; + struct nvme_command cmd = { }; union nvme_result res; struct nvmf_connect_data *data; int ret; - memset(&cmd, 0, sizeof(cmd)); cmd.connect.opcode = nvme_fabrics_command; cmd.connect.fctype = nvme_fabrics_type_connect; cmd.connect.qid = 0; @@ -439,12 +429,11 @@ EXPORT_SYMBOL_GPL(nvmf_connect_admin_queue); */ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid, bool poll) { - struct nvme_command cmd; + struct nvme_command cmd = { }; struct nvmf_connect_data *data; union nvme_result res; int ret; - memset(&cmd, 0, sizeof(cmd)); cmd.connect.opcode = nvme_fabrics_command; cmd.connect.fctype = nvme_fabrics_type_connect; cmd.connect.qid = cpu_to_le16(qid); @@ -550,6 +539,7 @@ static const match_table_t opt_tokens = { { NVMF_OPT_KATO, "keep_alive_tmo=%d" }, { NVMF_OPT_HOSTNQN, "hostnqn=%s" }, { NVMF_OPT_HOST_TRADDR, "host_traddr=%s" }, + { NVMF_OPT_HOST_IFACE, "host_iface=%s" }, { NVMF_OPT_HOST_ID, "hostid=%s" }, { NVMF_OPT_DUP_CONNECT, "duplicate_connect" }, { NVMF_OPT_DISABLE_SQFLOW, "disable_sqflow" }, @@ -759,6 +749,15 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, kfree(opts->host_traddr); opts->host_traddr = p; break; + case NVMF_OPT_HOST_IFACE: + p = match_strdup(args); + if (!p) { + ret = -ENOMEM; + goto out; + } + kfree(opts->host_iface); + opts->host_iface = p; + break; case NVMF_OPT_HOST_ID: p = match_strdup(args); if (!p) { @@ -943,6 +942,7 @@ void nvmf_free_options(struct nvmf_ctrl_options *opts) kfree(opts->trsvcid); kfree(opts->subsysnqn); kfree(opts->host_traddr); + kfree(opts->host_iface); kfree(opts); } EXPORT_SYMBOL_GPL(nvmf_free_options); diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h index d7f7974dc208..c31dad69a773 100644 --- a/drivers/nvme/host/fabrics.h +++ b/drivers/nvme/host/fabrics.h @@ -66,6 +66,7 @@ enum { NVMF_OPT_NR_POLL_QUEUES = 1 << 18, NVMF_OPT_TOS = 1 << 19, NVMF_OPT_FAIL_FAST_TMO = 1 << 20, + NVMF_OPT_HOST_IFACE = 1 << 21, }; /** @@ -83,7 +84,9 @@ enum { * @trsvcid: The transport-specific TRSVCID field for a port on the * subsystem which is adding a controller. 
* @host_traddr: A transport-specific field identifying the NVME host port - * to use for the connection to the controller. + * to use for the connection to the controller. + * @host_iface: A transport-specific field identifying the NVME host + * interface to use for the connection to the controller. * @queue_size: Number of IO queue elements. * @nr_io_queues: Number of controller IO queues that will be established. * @reconnect_delay: Time between two consecutive reconnect attempts. @@ -108,6 +111,7 @@ struct nvmf_ctrl_options { char *traddr; char *trsvcid; char *host_traddr; + char *host_iface; size_t queue_size; unsigned int nr_io_queues; unsigned int reconnect_delay; diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index f183f9fa03d0..7600863f7752 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -3112,7 +3112,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) } /* FC-NVME supports normal SGL Data Block Descriptors */ - if (!(ctrl->ctrl.sgls & ((1 << 0) | (1 << 1)))) { + if (!nvme_ctrl_sgl_supported(&ctrl->ctrl)) { dev_err(ctrl->ctrl.device, "Mandatory sgls are not supported!\n"); ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR; diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c index 9557ead02de1..d93928d1e5bd 100644 --- a/drivers/nvme/host/ioctl.c +++ b/drivers/nvme/host/ioctl.c @@ -177,6 +177,20 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) metadata, meta_len, lower_32_bits(io.slba), NULL, 0); } +static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl, + struct nvme_ns *ns, __u32 nsid) +{ + if (ns && nsid != ns->head->ns_id) { + dev_err(ctrl->device, + "%s: nsid (%u) in cmd does not match nsid (%u)" + "of namespace\n", + current->comm, nsid, ns->head->ns_id); + return false; + } + + return true; +} + static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns, struct nvme_passthru_cmd __user *ucmd) { @@ -192,12 +206,8 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns, return -EFAULT; if (cmd.flags) return -EINVAL; - if (ns && cmd.nsid != ns->head->ns_id) { - dev_err(ctrl->device, - "%s: nsid (%u) in cmd does not match nsid (%u) of namespace\n", - current->comm, cmd.nsid, ns->head->ns_id); + if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid)) return -EINVAL; - } memset(&c, 0, sizeof(c)); c.common.opcode = cmd.opcode; @@ -242,12 +252,8 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns, return -EFAULT; if (cmd.flags) return -EINVAL; - if (ns && cmd.nsid != ns->head->ns_id) { - dev_err(ctrl->device, - "%s: nsid (%u) in cmd does not match nsid (%u) of namespace\n", - current->comm, cmd.nsid, ns->head->ns_id); + if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid)) return -EINVAL; - } memset(&c, 0, sizeof(c)); c.common.opcode = cmd.opcode; @@ -372,12 +378,13 @@ long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg) #ifdef CONFIG_NVME_MULTIPATH static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd, void __user *argp, struct nvme_ns_head *head, int srcu_idx) + __releases(&head->srcu) { struct nvme_ctrl *ctrl = ns->ctrl; int ret; nvme_get_ctrl(ns->ctrl); - nvme_put_ns_from_disk(head, srcu_idx); + srcu_read_unlock(&head->srcu, srcu_idx); ret = nvme_ctrl_ioctl(ns->ctrl, cmd, argp); nvme_put_ctrl(ctrl); @@ -387,14 +394,15 @@ static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd, int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { - struct 
nvme_ns_head *head = NULL; + struct nvme_ns_head *head = bdev->bd_disk->private_data; void __user *argp = (void __user *)arg; struct nvme_ns *ns; - int srcu_idx, ret; + int srcu_idx, ret = -EWOULDBLOCK; - ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx); - if (unlikely(!ns)) - return -EWOULDBLOCK; + srcu_idx = srcu_read_lock(&head->srcu); + ns = nvme_find_path(head); + if (!ns) + goto out_unlock; /* * Handle ioctls that apply to the controller instead of the namespace @@ -402,12 +410,11 @@ int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode, * deadlock when deleting namespaces using the passthrough interface. */ if (is_ctrl_ioctl(cmd)) - ret = nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx); - else { - ret = nvme_ns_ioctl(ns, cmd, argp); - nvme_put_ns_from_disk(head, srcu_idx); - } + return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx); + ret = nvme_ns_ioctl(ns, cmd, argp); +out_unlock: + srcu_read_unlock(&head->srcu, srcu_idx); return ret; } @@ -419,21 +426,19 @@ long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd, container_of(cdev, struct nvme_ns_head, cdev); void __user *argp = (void __user *)arg; struct nvme_ns *ns; - int srcu_idx, ret; + int srcu_idx, ret = -EWOULDBLOCK; srcu_idx = srcu_read_lock(&head->srcu); ns = nvme_find_path(head); - if (!ns) { - srcu_read_unlock(&head->srcu, srcu_idx); - return -EWOULDBLOCK; - } + if (!ns) + goto out_unlock; if (is_ctrl_ioctl(cmd)) return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx); ret = nvme_ns_ioctl(ns, cmd, argp); - nvme_put_ns_from_disk(head, srcu_idx); - +out_unlock: + srcu_read_unlock(&head->srcu, srcu_idx); return ret; } #endif /* CONFIG_NVME_MULTIPATH */ diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index b5fbdb416022..0ea5298469c3 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -349,6 +349,25 @@ static void nvme_ns_head_release(struct gendisk *disk, fmode_t mode) nvme_put_ns_head(disk->private_data); } +#ifdef CONFIG_BLK_DEV_ZONED +static int nvme_ns_head_report_zones(struct gendisk *disk, sector_t sector, + unsigned int nr_zones, report_zones_cb cb, void *data) +{ + struct nvme_ns_head *head = disk->private_data; + struct nvme_ns *ns; + int srcu_idx, ret = -EWOULDBLOCK; + + srcu_idx = srcu_read_lock(&head->srcu); + ns = nvme_find_path(head); + if (ns) + ret = nvme_ns_report_zones(ns, sector, nr_zones, cb, data); + srcu_read_unlock(&head->srcu, srcu_idx); + return ret; +} +#else +#define nvme_ns_head_report_zones NULL +#endif /* CONFIG_BLK_DEV_ZONED */ + const struct block_device_operations nvme_ns_head_ops = { .owner = THIS_MODULE, .submit_bio = nvme_ns_head_submit_bio, @@ -356,7 +375,7 @@ const struct block_device_operations nvme_ns_head_ops = { .release = nvme_ns_head_release, .ioctl = nvme_ns_head_ioctl, .getgeo = nvme_getgeo, - .report_zones = nvme_report_zones, + .report_zones = nvme_ns_head_report_zones, .pr_ops = &nvme_pr_ops, }; @@ -416,11 +435,6 @@ static void nvme_requeue_work(struct work_struct *work) next = bio->bi_next; bio->bi_next = NULL; - /* - * Reset disk to the mpath node and resubmit to select a new - * path. 
- */ - bio_set_dev(bio, head->disk->part0); submit_bio_noacct(bio); } } @@ -779,6 +793,13 @@ int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) !(ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA)) return 0; + if (!ctrl->max_namespaces || + ctrl->max_namespaces > le32_to_cpu(id->nn)) { + dev_err(ctrl->device, + "Invalid MNAN value %u\n", ctrl->max_namespaces); + return -EINVAL; + } + ctrl->anacap = id->anacap; ctrl->anatt = id->anatt; ctrl->nanagrpid = le32_to_cpu(id->nanagrpid); diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 0015860ec12b..75420ceacc10 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -674,9 +674,6 @@ int nvme_delete_ctrl(struct nvme_ctrl *ctrl); void nvme_queue_scan(struct nvme_ctrl *ctrl); int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi, void *log, size_t size, u64 offset); -struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk, - struct nvme_ns_head **head, int *srcu_idx); -void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx); bool nvme_tryget_ns_head(struct nvme_ns_head *head); void nvme_put_ns_head(struct nvme_ns_head *head); int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device, @@ -697,6 +694,7 @@ extern const struct attribute_group *nvme_ns_id_attr_groups[]; extern const struct pr_ops nvme_pr_ops; extern const struct block_device_operations nvme_ns_head_ops; +struct nvme_ns *nvme_find_path(struct nvme_ns_head *head); #ifdef CONFIG_NVME_MULTIPATH static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl) { @@ -718,7 +716,6 @@ void nvme_mpath_uninit(struct nvme_ctrl *ctrl); void nvme_mpath_stop(struct nvme_ctrl *ctrl); bool nvme_mpath_clear_current_path(struct nvme_ns *ns); void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl); -struct nvme_ns *nvme_find_path(struct nvme_ns_head *head); static inline void nvme_mpath_check_last_path(struct nvme_ns *ns) { @@ -810,17 +807,14 @@ static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys) #endif /* CONFIG_NVME_MULTIPATH */ int nvme_revalidate_zones(struct nvme_ns *ns); +int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector, + unsigned int nr_zones, report_zones_cb cb, void *data); #ifdef CONFIG_BLK_DEV_ZONED int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf); -int nvme_report_zones(struct gendisk *disk, sector_t sector, - unsigned int nr_zones, report_zones_cb cb, void *data); - blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req, struct nvme_command *cmnd, enum nvme_zone_mgmt_action action); #else -#define nvme_report_zones NULL - static inline blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req, struct nvme_command *cmnd, enum nvme_zone_mgmt_action action) @@ -875,6 +869,11 @@ static inline void nvme_hwmon_exit(struct nvme_ctrl *ctrl) } #endif +static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl) +{ + return ctrl->sgls & ((1 << 0) | (1 << 1)); +} + u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode); void nvme_execute_passthru_rq(struct request *rq); diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index a29b170701fc..d3c5086673bc 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -307,13 +307,12 @@ static void nvme_dbbuf_free(struct nvme_queue *nvmeq) static void nvme_dbbuf_set(struct nvme_dev *dev) { - struct nvme_command c; + struct nvme_command c = { }; unsigned int i; if (!dev->dbbuf_dbs) return; - memset(&c, 0, sizeof(c)); 
c.dbbuf.opcode = nvme_admin_dbbuf; c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr); c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr); @@ -536,7 +535,7 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req) avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg); - if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1)))) + if (!nvme_ctrl_sgl_supported(&dev->ctrl)) return false; if (!iod->nvmeq->qid) return false; @@ -559,7 +558,6 @@ static void nvme_free_prps(struct nvme_dev *dev, struct request *req) dma_pool_free(dev->prp_page_pool, prp_list, dma_addr); dma_addr = next_dma_addr; } - } static void nvme_free_sgls(struct nvme_dev *dev, struct request *req) @@ -576,7 +574,6 @@ static void nvme_free_sgls(struct nvme_dev *dev, struct request *req) dma_pool_free(dev->prp_page_pool, sg_list, dma_addr); dma_addr = next_dma_addr; } - } static void nvme_unmap_sg(struct nvme_dev *dev, struct request *req) @@ -855,7 +852,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, &cmnd->rw, &bv); if (iod->nvmeq->qid && sgl_threshold && - dev->ctrl.sgls & ((1 << 0) | (1 << 1))) + nvme_ctrl_sgl_supported(&dev->ctrl)) return nvme_setup_sgl_simple(dev, req, &cmnd->rw, &bv); } @@ -1032,7 +1029,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx) static inline void nvme_update_cq_head(struct nvme_queue *nvmeq) { - u16 tmp = nvmeq->cq_head + 1; + u32 tmp = nvmeq->cq_head + 1; if (tmp == nvmeq->q_depth) { nvmeq->cq_head = 0; @@ -1114,9 +1111,8 @@ static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl) { struct nvme_dev *dev = to_nvme_dev(ctrl); struct nvme_queue *nvmeq = &dev->queues[0]; - struct nvme_command c; + struct nvme_command c = { }; - memset(&c, 0, sizeof(c)); c.common.opcode = nvme_admin_async_event; c.common.command_id = NVME_AQ_BLK_MQ_DEPTH; nvme_submit_cmd(nvmeq, &c, true); @@ -1124,9 +1120,8 @@ static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl) static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id) { - struct nvme_command c; + struct nvme_command c = { }; - memset(&c, 0, sizeof(c)); c.delete_queue.opcode = opcode; c.delete_queue.qid = cpu_to_le16(id); @@ -1136,7 +1131,7 @@ static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id) static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid, struct nvme_queue *nvmeq, s16 vector) { - struct nvme_command c; + struct nvme_command c = { }; int flags = NVME_QUEUE_PHYS_CONTIG; if (!test_bit(NVMEQ_POLLED, &nvmeq->flags)) @@ -1146,7 +1141,6 @@ static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid, * Note: we (ab)use the fact that the prp fields survive if no data * is attached to the request. */ - memset(&c, 0, sizeof(c)); c.create_cq.opcode = nvme_admin_create_cq; c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr); c.create_cq.cqid = cpu_to_le16(qid); @@ -1161,7 +1155,7 @@ static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid, struct nvme_queue *nvmeq) { struct nvme_ctrl *ctrl = &dev->ctrl; - struct nvme_command c; + struct nvme_command c = { }; int flags = NVME_QUEUE_PHYS_CONTIG; /* @@ -1176,7 +1170,6 @@ static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid, * Note: we (ab)use the fact that the prp fields survive if no data * is attached to the request. 
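 	 * (Concretely: the queue creation commands below carry no data
 	 * buffer, so the code can store the queue's own DMA address
 	 * directly in prp1, e.g. c.create_sq.prp1 =
 	 * cpu_to_le64(nvmeq->sq_dma_addr).)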
*/ - memset(&c, 0, sizeof(c)); c.create_sq.opcode = nvme_admin_create_sq; c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr); c.create_sq.sqid = cpu_to_le16(qid); @@ -1257,7 +1250,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) struct nvme_queue *nvmeq = iod->nvmeq; struct nvme_dev *dev = nvmeq->dev; struct request *abort_req; - struct nvme_command cmd; + struct nvme_command cmd = { }; u32 csts = readl(dev->bar + NVME_REG_CSTS); /* If PCI error recovery process is happening, we cannot reset or @@ -1337,7 +1330,6 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) } iod->aborted = 1; - memset(&cmd, 0, sizeof(cmd)); cmd.abort.opcode = nvme_admin_abort_cmd; cmd.abort.cid = req->tag; cmd.abort.sqid = cpu_to_le16(nvmeq->qid); @@ -1888,10 +1880,9 @@ static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits) { u32 host_mem_size = dev->host_mem_size >> NVME_CTRL_PAGE_SHIFT; u64 dma_addr = dev->host_mem_descs_dma; - struct nvme_command c; + struct nvme_command c = { }; int ret; - memset(&c, 0, sizeof(c)); c.features.opcode = nvme_admin_set_features; c.features.fid = cpu_to_le32(NVME_FEAT_HOST_MEM_BUF); c.features.dword11 = cpu_to_le32(bits); @@ -2265,9 +2256,8 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode) { struct request_queue *q = nvmeq->dev->ctrl.admin_q; struct request *req; - struct nvme_command cmd; + struct nvme_command cmd = { }; - memset(&cmd, 0, sizeof(cmd)); cmd.delete_queue.opcode = opcode; cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid); @@ -2828,54 +2818,6 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev) return 0; } -#ifdef CONFIG_ACPI -static bool nvme_acpi_storage_d3(struct pci_dev *dev) -{ - struct acpi_device *adev; - struct pci_dev *root; - acpi_handle handle; - acpi_status status; - u8 val; - - /* - * Look for _DSD property specifying that the storage device on the port - * must use D3 to support deep platform power savings during - * suspend-to-idle. - */ - root = pcie_find_root_port(dev); - if (!root) - return false; - - adev = ACPI_COMPANION(&root->dev); - if (!adev) - return false; - - /* - * The property is defined in the PXSX device for South complex ports - * and in the PEGP device for North complex ports. - */ - status = acpi_get_handle(adev->handle, "PXSX", &handle); - if (ACPI_FAILURE(status)) { - status = acpi_get_handle(adev->handle, "PEGP", &handle); - if (ACPI_FAILURE(status)) - return false; - } - - if (acpi_bus_get_device(handle, &adev)) - return false; - - if (fwnode_property_read_u8(acpi_fwnode_handle(adev), "StorageD3Enable", - &val)) - return false; - return val == 1; -} -#else -static inline bool nvme_acpi_storage_d3(struct pci_dev *dev) -{ - return false; -} -#endif /* CONFIG_ACPI */ - static void nvme_async_probe(void *data, async_cookie_t cookie) { struct nvme_dev *dev = data; @@ -2925,7 +2867,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) quirks |= check_vendor_combination_bug(pdev); - if (!noacpi && nvme_acpi_storage_d3(pdev)) { + if (!noacpi && acpi_storage_d3(&pdev->dev)) { /* * Some systems use a bios work around to ask for D3 on * platforms that support kernel managed suspend. 
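Before the remaining driver updates, here is a minimal sketch of how a PCI storage driver might consult the new acpi_storage_d3() helper in its probe path, mirroring the nvme_probe() hunk above. The driver name, init function, and quirk flag are hypothetical placeholders; only acpi_storage_d3() itself comes from this series.

#include <linux/acpi.h>
#include <linux/bits.h>
#include <linux/pci.h>

#define MYDRV_QUIRK_SIMPLE_SUSPEND	BIT(0)	/* hypothetical quirk flag */

static int mydrv_init(struct pci_dev *pdev, unsigned long quirks); /* hypothetical */

static int mydrv_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	unsigned long quirks = 0;

	/*
	 * Honor the firmware's StorageD3Enable hint for suspend-to-idle,
	 * the same way nvme_probe() does above.
	 */
	if (acpi_storage_d3(&pdev->dev))
		quirks |= MYDRV_QUIRK_SIMPLE_SUSPEND;

	return mydrv_init(pdev, quirks);
}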
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 4697a94c0945..a9e70cefd7ed 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -1088,7 +1088,7 @@ static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl) static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new) { - int ret = -EINVAL; + int ret; bool changed; ret = nvme_rdma_configure_admin_queue(ctrl, new); diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 34f4b3402f7c..c7bd37103cf4 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -123,6 +123,7 @@ struct nvme_tcp_ctrl { struct blk_mq_tag_set admin_tag_set; struct sockaddr_storage addr; struct sockaddr_storage src_addr; + struct net_device *ndev; struct nvme_ctrl ctrl; struct work_struct err_work; @@ -1455,6 +1456,20 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, } } + if (nctrl->opts->mask & NVMF_OPT_HOST_IFACE) { + char *iface = nctrl->opts->host_iface; + sockptr_t optval = KERNEL_SOCKPTR(iface); + + ret = sock_setsockopt(queue->sock, SOL_SOCKET, SO_BINDTODEVICE, + optval, strlen(iface)); + if (ret) { + dev_err(nctrl->device, + "failed to bind to interface %s queue %d err %d\n", + iface, qid, ret); + goto err_sock; + } + } + queue->hdr_digest = nctrl->opts->hdr_digest; queue->data_digest = nctrl->opts->data_digest; if (queue->hdr_digest || queue->data_digest) { @@ -1973,11 +1988,13 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new) return ret; if (ctrl->icdoff) { + ret = -EOPNOTSUPP; dev_err(ctrl->device, "icdoff is not supported!\n"); goto destroy_admin; } - if (!(ctrl->sgls & ((1 << 0) | (1 << 1)))) { + if (!nvme_ctrl_sgl_supported(ctrl)) { + ret = -EOPNOTSUPP; dev_err(ctrl->device, "Mandatory sgls are not supported!\n"); goto destroy_admin; } @@ -2515,6 +2532,16 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev, } } + if (opts->mask & NVMF_OPT_HOST_IFACE) { + ctrl->ndev = dev_get_by_name(&init_net, opts->host_iface); + if (!ctrl->ndev) { + pr_err("invalid interface passed: %s\n", + opts->host_iface); + ret = -ENODEV; + goto out_free_ctrl; + } + } + if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) { ret = -EALREADY; goto out_free_ctrl; @@ -2571,7 +2598,7 @@ static struct nvmf_transport_ops nvme_tcp_transport = { NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO | NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST | NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES | - NVMF_OPT_TOS, + NVMF_OPT_TOS | NVMF_OPT_HOST_IFACE, .create_ctrl = nvme_tcp_create_ctrl, }; diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c index 475dd45c3db4..d95010481fce 100644 --- a/drivers/nvme/host/zns.c +++ b/drivers/nvme/host/zns.c @@ -171,8 +171,8 @@ static int nvme_zone_parse_entry(struct nvme_ns *ns, return cb(&zone, idx, data); } -static int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector, - unsigned int nr_zones, report_zones_cb cb, void *data) +int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector, + unsigned int nr_zones, report_zones_cb cb, void *data) { struct nvme_zone_report *report; struct nvme_command c = { }; @@ -180,6 +180,9 @@ static int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector, unsigned int nz, i; size_t buflen; + if (ns->head->ids.csi != NVME_CSI_ZNS) + return -EINVAL; + report = nvme_zns_alloc_report_buffer(ns, nr_zones, &buflen); if (!report) return -ENOMEM; @@ -227,26 +230,6 @@ out_free: return ret; } -int nvme_report_zones(struct gendisk *disk, sector_t sector, - unsigned int nr_zones, 
report_zones_cb cb, void *data) -{ - struct nvme_ns_head *head = NULL; - struct nvme_ns *ns; - int srcu_idx, ret; - - ns = nvme_get_ns_from_disk(disk, &head, &srcu_idx); - if (unlikely(!ns)) - return -EWOULDBLOCK; - - if (ns->head->ids.csi == NVME_CSI_ZNS) - ret = nvme_ns_report_zones(ns, sector, nr_zones, cb, data); - else - ret = -EINVAL; - nvme_put_ns_from_disk(head, srcu_idx); - - return ret; -} - blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req, struct nvme_command *c, enum nvme_zone_mgmt_action action) { diff --git a/drivers/nvme/target/Makefile b/drivers/nvme/target/Makefile index ebf91fc4c72e..9837e580fa7e 100644 --- a/drivers/nvme/target/Makefile +++ b/drivers/nvme/target/Makefile @@ -12,6 +12,7 @@ obj-$(CONFIG_NVME_TARGET_TCP) += nvmet-tcp.o nvmet-y += core.o configfs.o admin-cmd.o fabrics-cmd.o \ discovery.o io-cmd-file.o io-cmd-bdev.o nvmet-$(CONFIG_NVME_TARGET_PASSTHRU) += passthru.o +nvmet-$(CONFIG_BLK_DEV_ZONED) += zns.o nvme-loop-y += loop.o nvmet-rdma-y += rdma.o nvmet-fc-y += fc.o diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index dcd49a72f2f3..0cb98f2bbc8c 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -162,15 +162,8 @@ out: nvmet_req_complete(req, status); } -static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req) +static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log) { - u16 status = NVME_SC_INTERNAL; - struct nvme_effects_log *log; - - log = kzalloc(sizeof(*log), GFP_KERNEL); - if (!log) - goto out; - log->acs[nvme_admin_get_log_page] = cpu_to_le32(1 << 0); log->acs[nvme_admin_identify] = cpu_to_le32(1 << 0); log->acs[nvme_admin_abort_cmd] = cpu_to_le32(1 << 0); @@ -184,9 +177,45 @@ static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req) log->iocs[nvme_cmd_flush] = cpu_to_le32(1 << 0); log->iocs[nvme_cmd_dsm] = cpu_to_le32(1 << 0); log->iocs[nvme_cmd_write_zeroes] = cpu_to_le32(1 << 0); +} - status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log)); +static void nvmet_get_cmd_effects_zns(struct nvme_effects_log *log) +{ + log->iocs[nvme_cmd_zone_append] = cpu_to_le32(1 << 0); + log->iocs[nvme_cmd_zone_mgmt_send] = cpu_to_le32(1 << 0); + log->iocs[nvme_cmd_zone_mgmt_recv] = cpu_to_le32(1 << 0); +} + +static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req) +{ + struct nvme_effects_log *log; + u16 status = NVME_SC_SUCCESS; + log = kzalloc(sizeof(*log), GFP_KERNEL); + if (!log) { + status = NVME_SC_INTERNAL; + goto out; + } + + switch (req->cmd->get_log_page.csi) { + case NVME_CSI_NVM: + nvmet_get_cmd_effects_nvm(log); + break; + case NVME_CSI_ZNS: + if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) { + status = NVME_SC_INVALID_IO_CMD_SET; + goto free; + } + nvmet_get_cmd_effects_nvm(log); + nvmet_get_cmd_effects_zns(log); + break; + default: + status = NVME_SC_INVALID_LOG_PAGE; + goto free; + } + + status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log)); +free: kfree(log); out: nvmet_req_complete(req, status); @@ -313,22 +342,6 @@ static void nvmet_execute_get_log_page(struct nvmet_req *req) nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR); } -static u16 nvmet_set_model_number(struct nvmet_subsys *subsys) -{ - u16 status = 0; - - mutex_lock(&subsys->lock); - if (!subsys->model_number) { - subsys->model_number = - kstrdup(NVMET_DEFAULT_CTRL_MODEL, GFP_KERNEL); - if (!subsys->model_number) - status = NVME_SC_INTERNAL; - } - mutex_unlock(&subsys->lock); - - return status; -} - static void 
nvmet_execute_identify_ctrl(struct nvmet_req *req) { struct nvmet_ctrl *ctrl = req->sq->ctrl; @@ -337,14 +350,10 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req) u32 cmd_capsule_size; u16 status = 0; - /* - * If there is no model number yet, set it now. It will then remain - * stable for the life time of the subsystem. - */ - if (!subsys->model_number) { - status = nvmet_set_model_number(subsys); - if (status) - goto out; + if (!subsys->subsys_discovered) { + mutex_lock(&subsys->lock); + subsys->subsys_discovered = true; + mutex_unlock(&subsys->lock); } id = kzalloc(sizeof(*id), GFP_KERNEL); @@ -357,9 +366,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req) id->vid = 0; id->ssvid = 0; - memset(id->sn, ' ', sizeof(id->sn)); - bin2hex(id->sn, &ctrl->subsys->serial, - min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2)); + memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE); memcpy_and_pad(id->mn, sizeof(id->mn), subsys->model_number, strlen(subsys->model_number), ' '); memcpy_and_pad(id->fr, sizeof(id->fr), @@ -415,7 +422,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req) /* no enforcement soft-limit for maxcmd - pick arbitrary high value */ id->maxcmd = cpu_to_le16(NVMET_MAX_CMD); - id->nn = cpu_to_le32(ctrl->subsys->max_nsid); + id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES); id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES); id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM | NVME_CTRL_ONCS_WRITE_ZEROES); @@ -635,6 +642,12 @@ static void nvmet_execute_identify_desclist(struct nvmet_req *req) goto out; } + status = nvmet_copy_ns_identifier(req, NVME_NIDT_CSI, + NVME_NIDT_CSI_LEN, + &req->ns->csi, &off); + if (status) + goto out; + if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off, off) != NVME_IDENTIFY_DATA_SIZE - off) status = NVME_SC_INTERNAL | NVME_SC_DNR; @@ -643,6 +656,23 @@ out: nvmet_req_complete(req, status); } +static bool nvmet_handle_identify_desclist(struct nvmet_req *req) +{ + switch (req->cmd->identify.csi) { + case NVME_CSI_NVM: + nvmet_execute_identify_desclist(req); + return true; + case NVME_CSI_ZNS: + if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) { + nvmet_execute_identify_desclist(req); + return true; + } + return false; + default: + return false; + } +} + static void nvmet_execute_identify(struct nvmet_req *req) { if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE)) @@ -650,19 +680,54 @@ static void nvmet_execute_identify(struct nvmet_req *req) switch (req->cmd->identify.cns) { case NVME_ID_CNS_NS: - return nvmet_execute_identify_ns(req); + switch (req->cmd->identify.csi) { + case NVME_CSI_NVM: + return nvmet_execute_identify_ns(req); + default: + break; + } + break; + case NVME_ID_CNS_CS_NS: + if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) { + switch (req->cmd->identify.csi) { + case NVME_CSI_ZNS: + return nvmet_execute_identify_cns_cs_ns(req); + default: + break; + } + } + break; case NVME_ID_CNS_CTRL: - return nvmet_execute_identify_ctrl(req); + switch (req->cmd->identify.csi) { + case NVME_CSI_NVM: + return nvmet_execute_identify_ctrl(req); + } + break; + case NVME_ID_CNS_CS_CTRL: + if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) { + switch (req->cmd->identify.csi) { + case NVME_CSI_ZNS: + return nvmet_execute_identify_cns_cs_ctrl(req); + default: + break; + } + } + break; case NVME_ID_CNS_NS_ACTIVE_LIST: - return nvmet_execute_identify_nslist(req); + switch (req->cmd->identify.csi) { + case NVME_CSI_NVM: + return nvmet_execute_identify_nslist(req); + default: + break; + } + break; case NVME_ID_CNS_NS_DESC_LIST: - return 
nvmet_execute_identify_desclist(req); + if (nvmet_handle_identify_desclist(req) == true) + return; + break; } - pr_debug("unhandled identify cns %d on qid %d\n", - req->cmd->identify.cns, req->sq->qid); - req->error_loc = offsetof(struct nvme_identify, cns); - nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR); + nvmet_req_cns_error_complete(req); } /* diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c index 65a0cf99f557..273555127188 100644 --- a/drivers/nvme/target/configfs.c +++ b/drivers/nvme/target/configfs.c @@ -1007,13 +1007,26 @@ static ssize_t nvmet_subsys_attr_version_show(struct config_item *item, NVME_MINOR(subsys->ver)); } -static ssize_t nvmet_subsys_attr_version_store(struct config_item *item, - const char *page, size_t count) +static ssize_t +nvmet_subsys_attr_version_store_locked(struct nvmet_subsys *subsys, + const char *page, size_t count) { - struct nvmet_subsys *subsys = to_subsys(item); int major, minor, tertiary = 0; int ret; + if (subsys->subsys_discovered) { + if (NVME_TERTIARY(subsys->ver)) + pr_err("Can't set version number. %llu.%llu.%llu is already assigned\n", + NVME_MAJOR(subsys->ver), + NVME_MINOR(subsys->ver), + NVME_TERTIARY(subsys->ver)); + else + pr_err("Can't set version number. %llu.%llu is already assigned\n", + NVME_MAJOR(subsys->ver), + NVME_MINOR(subsys->ver)); + return -EINVAL; + } + /* passthru subsystems use the underlying controller's version */ if (nvmet_passthru_ctrl(subsys)) return -EINVAL; @@ -1022,35 +1035,84 @@ static ssize_t nvmet_subsys_attr_version_store(struct config_item *item, if (ret != 2 && ret != 3) return -EINVAL; - down_write(&nvmet_config_sem); subsys->ver = NVME_VS(major, minor, tertiary); - up_write(&nvmet_config_sem); return count; } + +static ssize_t nvmet_subsys_attr_version_store(struct config_item *item, + const char *page, size_t count) +{ + struct nvmet_subsys *subsys = to_subsys(item); + ssize_t ret; + + down_write(&nvmet_config_sem); + mutex_lock(&subsys->lock); + ret = nvmet_subsys_attr_version_store_locked(subsys, page, count); + mutex_unlock(&subsys->lock); + up_write(&nvmet_config_sem); + + return ret; +} CONFIGFS_ATTR(nvmet_subsys_, attr_version); +/* See Section 1.5 of NVMe 1.4 */ +static bool nvmet_is_ascii(const char c) +{ + return c >= 0x20 && c <= 0x7e; +} + static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item, char *page) { struct nvmet_subsys *subsys = to_subsys(item); - return snprintf(page, PAGE_SIZE, "%llx\n", subsys->serial); + return snprintf(page, PAGE_SIZE, "%s\n", subsys->serial); } -static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item, - const char *page, size_t count) +static ssize_t +nvmet_subsys_attr_serial_store_locked(struct nvmet_subsys *subsys, + const char *page, size_t count) { - u64 serial; + int pos, len = strcspn(page, "\n"); - if (sscanf(page, "%llx\n", &serial) != 1) + if (subsys->subsys_discovered) { + pr_err("Can't set serial number. 
%s is already assigned\n",
+			subsys->serial);
+		return -EINVAL;
+	}
+
+	if (!len || len > NVMET_SN_MAX_SIZE) {
+		pr_err("Serial Number cannot be empty or exceed %d bytes\n",
+		       NVMET_SN_MAX_SIZE);
+		return -EINVAL;
+	}
+
+	for (pos = 0; pos < len; pos++) {
+		if (!nvmet_is_ascii(page[pos])) {
+			pr_err("Serial Number must contain only ASCII characters\n");
+			return -EINVAL;
+		}
+	}
+
+	memcpy_and_pad(subsys->serial, NVMET_SN_MAX_SIZE, page, len, ' ');
+
+	return count;
+}
+
+static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item,
+					      const char *page, size_t count)
+{
+	struct nvmet_subsys *subsys = to_subsys(item);
+	ssize_t ret;
 
 	down_write(&nvmet_config_sem);
-	to_subsys(item)->serial = serial;
+	mutex_lock(&subsys->lock);
+	ret = nvmet_subsys_attr_serial_store_locked(subsys, page, count);
+	mutex_unlock(&subsys->lock);
 	up_write(&nvmet_config_sem);
 
-	return count;
+	return ret;
 }
 CONFIGFS_ATTR(nvmet_subsys_, attr_serial);
@@ -1118,20 +1180,8 @@ static ssize_t nvmet_subsys_attr_model_show(struct config_item *item,
 					    char *page)
 {
 	struct nvmet_subsys *subsys = to_subsys(item);
-	int ret;
-
-	mutex_lock(&subsys->lock);
-	ret = snprintf(page, PAGE_SIZE, "%s\n", subsys->model_number ?
-		       subsys->model_number : NVMET_DEFAULT_CTRL_MODEL);
-	mutex_unlock(&subsys->lock);
-
-	return ret;
-}
-
-/* See Section 1.5 of NVMe 1.4 */
-static bool nvmet_is_ascii(const char c)
-{
-	return c >= 0x20 && c <= 0x7e;
+	return snprintf(page, PAGE_SIZE, "%s\n", subsys->model_number);
 }
 
 static ssize_t nvmet_subsys_attr_model_store_locked(struct nvmet_subsys *subsys,
@@ -1139,7 +1189,7 @@ static ssize_t nvmet_subsys_attr_model_store_locked(struct nvmet_subsys *subsys,
 {
 	int pos = 0, len;
 
-	if (subsys->model_number) {
+	if (subsys->subsys_discovered) {
 		pr_err("Can't set model number. 
%s is already assigned\n", subsys->model_number); return -EINVAL; diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index b20b8d0a1144..ac7210a3ea1c 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -16,6 +16,7 @@ #include "nvmet.h" struct workqueue_struct *buffered_io_wq; +struct workqueue_struct *zbd_wq; static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX]; static DEFINE_IDA(cntlid_ida); @@ -43,43 +44,34 @@ DECLARE_RWSEM(nvmet_ana_sem); inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno) { - u16 status; - switch (errno) { case 0: - status = NVME_SC_SUCCESS; - break; + return NVME_SC_SUCCESS; case -ENOSPC: req->error_loc = offsetof(struct nvme_rw_command, length); - status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR; - break; + return NVME_SC_CAP_EXCEEDED | NVME_SC_DNR; case -EREMOTEIO: req->error_loc = offsetof(struct nvme_rw_command, slba); - status = NVME_SC_LBA_RANGE | NVME_SC_DNR; - break; + return NVME_SC_LBA_RANGE | NVME_SC_DNR; case -EOPNOTSUPP: req->error_loc = offsetof(struct nvme_common_command, opcode); switch (req->cmd->common.opcode) { case nvme_cmd_dsm: case nvme_cmd_write_zeroes: - status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR; - break; + return NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR; default: - status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; } break; case -ENODATA: req->error_loc = offsetof(struct nvme_rw_command, nsid); - status = NVME_SC_ACCESS_DENIED; - break; + return NVME_SC_ACCESS_DENIED; case -EIO: fallthrough; default: req->error_loc = offsetof(struct nvme_common_command, opcode); - status = NVME_SC_INTERNAL | NVME_SC_DNR; + return NVME_SC_INTERNAL | NVME_SC_DNR; } - - return status; } u16 nvmet_report_invalid_opcode(struct nvmet_req *req) @@ -122,11 +114,11 @@ u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len) return 0; } -static unsigned int nvmet_max_nsid(struct nvmet_subsys *subsys) +static u32 nvmet_max_nsid(struct nvmet_subsys *subsys) { - unsigned long nsid = 0; struct nvmet_ns *cur; unsigned long idx; + u32 nsid = 0; xa_for_each(&subsys->namespaces, idx, cur) nsid = cur->nsid; @@ -141,14 +133,13 @@ static u32 nvmet_async_event_result(struct nvmet_async_event *aen) static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl) { - u16 status = NVME_SC_INTERNAL | NVME_SC_DNR; struct nvmet_req *req; mutex_lock(&ctrl->lock); while (ctrl->nr_async_event_cmds) { req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds]; mutex_unlock(&ctrl->lock); - nvmet_req_complete(req, status); + nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR); mutex_lock(&ctrl->lock); } mutex_unlock(&ctrl->lock); @@ -412,7 +403,6 @@ void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl) pr_debug("ctrl %d start keep-alive timer for %d secs\n", ctrl->cntlid, ctrl->kato); - INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer); schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ); } @@ -693,6 +683,7 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid) uuid_gen(&ns->uuid); ns->buffered_io = false; + ns->csi = NVME_CSI_NVM; return ns; } @@ -895,10 +886,18 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req) return ret; } - if (req->ns->file) - return nvmet_file_parse_io_cmd(req); - - return nvmet_bdev_parse_io_cmd(req); + switch (req->ns->csi) { + case NVME_CSI_NVM: + if (req->ns->file) + return nvmet_file_parse_io_cmd(req); + return nvmet_bdev_parse_io_cmd(req); + case NVME_CSI_ZNS: + if 
(IS_ENABLED(CONFIG_BLK_DEV_ZONED)) + return nvmet_bdev_zns_parse_io_cmd(req); + return NVME_SC_INVALID_IO_CMD_SET; + default: + return NVME_SC_INVALID_IO_CMD_SET; + } } bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, @@ -1119,6 +1118,17 @@ static inline u8 nvmet_cc_iocqes(u32 cc) return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf; } +static inline bool nvmet_css_supported(u8 cc_css) +{ + switch (cc_css <<= NVME_CC_CSS_SHIFT) { + case NVME_CC_CSS_NVM: + case NVME_CC_CSS_CSI: + return true; + default: + return false; + } +} + static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl) { lockdep_assert_held(&ctrl->lock); @@ -1138,7 +1148,7 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl) if (nvmet_cc_mps(ctrl->cc) != 0 || nvmet_cc_ams(ctrl->cc) != 0 || - nvmet_cc_css(ctrl->cc) != 0) { + !nvmet_css_supported(nvmet_cc_css(ctrl->cc))) { ctrl->csts = NVME_CSTS_CFS; return; } @@ -1189,6 +1199,8 @@ static void nvmet_init_cap(struct nvmet_ctrl *ctrl) { /* command sets supported: NVMe command set: */ ctrl->cap = (1ULL << 37); + /* Controller supports one or more I/O Command Sets */ + ctrl->cap |= (1ULL << 43); /* CC.EN timeout in 500msec units: */ ctrl->cap |= (15ULL << 24); /* maximum queue entries supported: */ @@ -1358,6 +1370,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn, INIT_LIST_HEAD(&ctrl->async_events); INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL); INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler); + INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer); memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE); memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE); @@ -1499,6 +1512,8 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn, enum nvme_subsys_type type) { struct nvmet_subsys *subsys; + char serial[NVMET_SN_MAX_SIZE / 2]; + int ret; subsys = kzalloc(sizeof(*subsys), GFP_KERNEL); if (!subsys) @@ -1506,7 +1521,14 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn, subsys->ver = NVMET_DEFAULT_VS; /* generate a random serial number as our controllers are ephemeral: */ - get_random_bytes(&subsys->serial, sizeof(subsys->serial)); + get_random_bytes(&serial, sizeof(serial)); + bin2hex(subsys->serial, &serial, sizeof(serial)); + + subsys->model_number = kstrdup(NVMET_DEFAULT_CTRL_MODEL, GFP_KERNEL); + if (!subsys->model_number) { + ret = -ENOMEM; + goto free_subsys; + } switch (type) { case NVME_NQN_NVME: @@ -1517,15 +1539,15 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn, break; default: pr_err("%s: Unknown Subsystem type - %d\n", __func__, type); - kfree(subsys); - return ERR_PTR(-EINVAL); + ret = -EINVAL; + goto free_mn; } subsys->type = type; subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE, GFP_KERNEL); if (!subsys->subsysnqn) { - kfree(subsys); - return ERR_PTR(-ENOMEM); + ret = -ENOMEM; + goto free_mn; } subsys->cntlid_min = NVME_CNTLID_MIN; subsys->cntlid_max = NVME_CNTLID_MAX; @@ -1537,6 +1559,12 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn, INIT_LIST_HEAD(&subsys->hosts); return subsys; + +free_mn: + kfree(subsys->model_number); +free_subsys: + kfree(subsys); + return ERR_PTR(ret); } static void nvmet_subsys_free(struct kref *ref) @@ -1575,11 +1603,15 @@ static int __init nvmet_init(void) nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1; + zbd_wq = alloc_workqueue("nvmet-zbd-wq", WQ_MEM_RECLAIM, 0); + if (!zbd_wq) + return -ENOMEM; + buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq", WQ_MEM_RECLAIM, 0); if (!buffered_io_wq) { error = -ENOMEM; - goto out; + goto 
out_free_zbd_work_queue; } error = nvmet_init_discovery(); @@ -1595,7 +1627,8 @@ out_exit_discovery: nvmet_exit_discovery(); out_free_work_queue: destroy_workqueue(buffered_io_wq); -out: +out_free_zbd_work_queue: + destroy_workqueue(zbd_wq); return error; } @@ -1605,6 +1638,7 @@ static void __exit nvmet_exit(void) nvmet_exit_discovery(); ida_destroy(&cntlid_ida); destroy_workqueue(buffered_io_wq); + destroy_workqueue(zbd_wq); BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024); BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024); diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c index fc3645fc2c24..7aa62bc6ae84 100644 --- a/drivers/nvme/target/discovery.c +++ b/drivers/nvme/target/discovery.c @@ -244,7 +244,6 @@ static void nvmet_execute_disc_identify(struct nvmet_req *req) { struct nvmet_ctrl *ctrl = req->sq->ctrl; struct nvme_id_ctrl *id; - const char model[] = "Linux"; u16 status = 0; if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE)) @@ -262,11 +261,10 @@ static void nvmet_execute_disc_identify(struct nvmet_req *req) goto out; } - memset(id->sn, ' ', sizeof(id->sn)); - bin2hex(id->sn, &ctrl->subsys->serial, - min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2)); + memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE); memset(id->fr, ' ', sizeof(id->fr)); - memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' '); + memcpy_and_pad(id->mn, sizeof(id->mn), ctrl->subsys->model_number, + strlen(ctrl->subsys->model_number), ' '); memcpy_and_pad(id->fr, sizeof(id->fr), UTS_RELEASE, strlen(UTS_RELEASE), ' '); diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c index 19e113240fff..22b5108168a6 100644 --- a/drivers/nvme/target/fc.c +++ b/drivers/nvme/target/fc.c @@ -2511,13 +2511,6 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, int ret; /* - * if there is no nvmet mapping to the targetport there - * shouldn't be requests. just terminate them. - */ - if (!tgtport->pe) - goto transport_error; - - /* * Fused commands are currently not supported in the linux * implementation. 
* @@ -2544,7 +2537,8 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, fod->req.cmd = &fod->cmdiubuf.sqe; fod->req.cqe = &fod->rspiubuf.cqe; - fod->req.port = tgtport->pe->port; + if (tgtport->pe) + fod->req.port = tgtport->pe->port; /* clear any response payload */ memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf)); diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c index 429263ca9b97..0fc2781ab970 100644 --- a/drivers/nvme/target/io-cmd-bdev.c +++ b/drivers/nvme/target/io-cmd-bdev.c @@ -47,6 +47,14 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id) id->nows = to0based(ql->io_opt / ql->logical_block_size); } +void nvmet_bdev_ns_disable(struct nvmet_ns *ns) +{ + if (ns->bdev) { + blkdev_put(ns->bdev, FMODE_WRITE | FMODE_READ); + ns->bdev = NULL; + } +} + static void nvmet_bdev_ns_enable_integrity(struct nvmet_ns *ns) { struct blk_integrity *bi = bdev_get_integrity(ns->bdev); @@ -86,15 +94,15 @@ int nvmet_bdev_ns_enable(struct nvmet_ns *ns) if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY_T10)) nvmet_bdev_ns_enable_integrity(ns); - return 0; -} - -void nvmet_bdev_ns_disable(struct nvmet_ns *ns) -{ - if (ns->bdev) { - blkdev_put(ns->bdev, FMODE_WRITE | FMODE_READ); - ns->bdev = NULL; + if (bdev_is_zoned(ns->bdev)) { + if (!nvmet_bdev_zns_enable(ns)) { + nvmet_bdev_ns_disable(ns); + return -EINVAL; + } + ns->csi = NVME_CSI_ZNS; } + + return 0; } void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns) @@ -102,7 +110,7 @@ void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns) ns->size = i_size_read(ns->bdev->bd_inode); } -static u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts) +u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts) { u16 status = NVME_SC_SUCCESS; @@ -164,8 +172,7 @@ static void nvmet_bio_done(struct bio *bio) struct nvmet_req *req = bio->bi_private; nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status)); - if (bio != &req->b.inline_bio) - bio_put(bio); + nvmet_req_bio_put(req, bio); } #ifdef CONFIG_BLK_DEV_INTEGRITY @@ -174,11 +181,10 @@ static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio, { struct blk_integrity *bi; struct bio_integrity_payload *bip; - struct block_device *bdev = req->ns->bdev; int rc; size_t resid, len; - bi = bdev_get_integrity(bdev); + bi = bdev_get_integrity(req->ns->bdev); if (unlikely(!bi)) { pr_err("Unable to locate bio_integrity\n"); return -ENODEV; @@ -430,9 +436,7 @@ static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req) u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req) { - struct nvme_command *cmd = req->cmd; - - switch (cmd->common.opcode) { + switch (req->cmd->common.opcode) { case nvme_cmd_read: case nvme_cmd_write: req->execute = nvmet_bdev_execute_rw; diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c index 7fdbdc496597..1dd1a0fe2e81 100644 --- a/drivers/nvme/target/io-cmd-file.c +++ b/drivers/nvme/target/io-cmd-file.c @@ -385,9 +385,7 @@ static void nvmet_file_execute_write_zeroes(struct nvmet_req *req) u16 nvmet_file_parse_io_cmd(struct nvmet_req *req) { - struct nvme_command *cmd = req->cmd; - - switch (cmd->common.opcode) { + switch (req->cmd->common.opcode) { case nvme_cmd_read: case nvme_cmd_write: req->execute = nvmet_file_execute_rw; diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index 53aea9a8056e..06dd3d537f07 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h @@ -28,6 +28,7 @@ #define NVMET_NO_ERROR_LOC ((u16)-1) #define 
NVMET_DEFAULT_CTRL_MODEL "Linux" #define NVMET_MN_MAX_SIZE 40 +#define NVMET_SN_MAX_SIZE 20 /* * Supported optional AENs: @@ -82,6 +83,7 @@ struct nvmet_ns { struct pci_dev *p2p_dev; int pi_type; int metadata_size; + u8 csi; }; static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item) @@ -217,7 +219,7 @@ struct nvmet_subsys { struct xarray namespaces; unsigned int nr_namespaces; - unsigned int max_nsid; + u32 max_nsid; u16 cntlid_min; u16 cntlid_max; @@ -229,7 +231,8 @@ struct nvmet_subsys { u16 max_qid; u64 ver; - u64 serial; + char serial[NVMET_SN_MAX_SIZE]; + bool subsys_discovered; char *subsysnqn; bool pi_support; @@ -247,6 +250,10 @@ struct nvmet_subsys { unsigned int admin_timeout; unsigned int io_timeout; #endif /* CONFIG_NVME_TARGET_PASSTHRU */ + +#ifdef CONFIG_BLK_DEV_ZONED + u8 zasl; +#endif /* CONFIG_BLK_DEV_ZONED */ }; static inline struct nvmet_subsys *to_subsys(struct config_item *item) @@ -332,6 +339,12 @@ struct nvmet_req { struct work_struct work; bool use_workqueue; } p; +#ifdef CONFIG_BLK_DEV_ZONED + struct { + struct bio inline_bio; + struct work_struct zmgmt_work; + } z; +#endif /* CONFIG_BLK_DEV_ZONED */ }; int sg_cnt; int metadata_sg_cnt; @@ -351,6 +364,7 @@ struct nvmet_req { }; extern struct workqueue_struct *buffered_io_wq; +extern struct workqueue_struct *zbd_wq; static inline void nvmet_set_result(struct nvmet_req *req, u32 result) { @@ -400,6 +414,7 @@ u16 nvmet_parse_connect_cmd(struct nvmet_req *req); void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id); u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req); u16 nvmet_file_parse_io_cmd(struct nvmet_req *req); +u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req); u16 nvmet_parse_admin_cmd(struct nvmet_req *req); u16 nvmet_parse_discovery_cmd(struct nvmet_req *req); u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req); @@ -527,6 +542,14 @@ void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid); void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns); int nvmet_file_ns_revalidate(struct nvmet_ns *ns); void nvmet_ns_revalidate(struct nvmet_ns *ns); +u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts); + +bool nvmet_bdev_zns_enable(struct nvmet_ns *ns); +void nvmet_execute_identify_cns_cs_ctrl(struct nvmet_req *req); +void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req); +void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req); +void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req); +void nvmet_bdev_execute_zone_append(struct nvmet_req *req); static inline u32 nvmet_rw_data_len(struct nvmet_req *req) { @@ -622,4 +645,18 @@ static inline bool nvmet_use_inline_bvec(struct nvmet_req *req) req->sg_cnt <= NVMET_MAX_INLINE_BIOVEC; } +static inline void nvmet_req_cns_error_complete(struct nvmet_req *req) +{ + pr_debug("unhandled identify cns %d on qid %d\n", + req->cmd->identify.cns, req->sq->qid); + req->error_loc = offsetof(struct nvme_identify, cns); + nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR); +} + +static inline void nvmet_req_bio_put(struct nvmet_req *req, struct bio *bio) +{ + if (bio != &req->b.inline_bio) + bio_put(bio); +} + #endif /* _NVMET_H */ diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c index 39b1473f7204..fced52de33ce 100644 --- a/drivers/nvme/target/passthru.c +++ b/drivers/nvme/target/passthru.c @@ -206,8 +206,7 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq) for_each_sg(req->sg, sg, req->sg_cnt, i) { if (bio_add_pc_page(rq->q, bio, sg_page(sg), 
sg->length, sg->offset) < sg->length) { - if (bio != &req->p.inline_bio) - bio_put(bio); + nvmet_req_bio_put(req, bio); return -EINVAL; } } diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 7d607f435e36..891174ccd44b 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -1257,7 +1257,7 @@ out_err: static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue) { - struct ib_qp_init_attr qp_attr; + struct ib_qp_init_attr qp_attr = { }; struct nvmet_rdma_device *ndev = queue->dev; int nr_cqe, ret, i, factor; @@ -1275,7 +1275,6 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue) goto out; } - memset(&qp_attr, 0, sizeof(qp_attr)); qp_attr.qp_context = queue; qp_attr.event_handler = nvmet_rdma_qp_event; qp_attr.send_cq = queue->cq; diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c new file mode 100644 index 000000000000..17f8b7a45f21 --- /dev/null +++ b/drivers/nvme/target/zns.c @@ -0,0 +1,615 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * NVMe ZNS-ZBD command implementation. + * Copyright (C) 2021 Western Digital Corporation or its affiliates. + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/nvme.h> +#include <linux/blkdev.h> +#include "nvmet.h" + +/* + * We set the Memory Page Size Minimum (MPSMIN) for target controller to 0 + * which gets added by 12 in the nvme_enable_ctrl() which results in 2^12 = 4k + * as page_shift value. When calculating the ZASL use shift by 12. + */ +#define NVMET_MPSMIN_SHIFT 12 + +static inline u8 nvmet_zasl(unsigned int zone_append_sects) +{ + /* + * Zone Append Size Limit (zasl) is expressed as a power of 2 value + * with the minimum memory page size (i.e. 12) as unit. + */ + return ilog2(zone_append_sects >> (NVMET_MPSMIN_SHIFT - 9)); +} + +static int validate_conv_zones_cb(struct blk_zone *z, + unsigned int i, void *data) +{ + if (z->type == BLK_ZONE_TYPE_CONVENTIONAL) + return -EOPNOTSUPP; + return 0; +} + +bool nvmet_bdev_zns_enable(struct nvmet_ns *ns) +{ + struct request_queue *q = ns->bdev->bd_disk->queue; + u8 zasl = nvmet_zasl(queue_max_zone_append_sectors(q)); + struct gendisk *bd_disk = ns->bdev->bd_disk; + int ret; + + if (ns->subsys->zasl) { + if (ns->subsys->zasl > zasl) + return false; + } + ns->subsys->zasl = zasl; + + /* + * Generic zoned block devices may have a smaller last zone which is + * not supported by ZNS. Exclude zoned drives that have such smaller + * last zone. + */ + if (get_capacity(bd_disk) & (bdev_zone_sectors(ns->bdev) - 1)) + return false; + /* + * ZNS does not define a conventional zone type. If the underlying + * device has a bitmap set indicating the existence of conventional + * zones, reject the device. Otherwise, use report zones to detect if + * the device has conventional zones. 
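+	 * (The conv_zones_bitmap test is the fast path; the
+	 * blkdev_report_zones() pass walks every zone and
+	 * validate_conv_zones_cb() fails the enable with -EOPNOTSUPP
+	 * on the first conventional zone it finds.)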
+ */ + if (ns->bdev->bd_disk->queue->conv_zones_bitmap) + return false; + + ret = blkdev_report_zones(ns->bdev, 0, blkdev_nr_zones(bd_disk), + validate_conv_zones_cb, NULL); + if (ret < 0) + return false; + + ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev)); + + return true; +} + +void nvmet_execute_identify_cns_cs_ctrl(struct nvmet_req *req) +{ + u8 zasl = req->sq->ctrl->subsys->zasl; + struct nvmet_ctrl *ctrl = req->sq->ctrl; + struct nvme_id_ctrl_zns *id; + u16 status; + + id = kzalloc(sizeof(*id), GFP_KERNEL); + if (!id) { + status = NVME_SC_INTERNAL; + goto out; + } + + if (ctrl->ops->get_mdts) + id->zasl = min_t(u8, ctrl->ops->get_mdts(ctrl), zasl); + else + id->zasl = zasl; + + status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id)); + + kfree(id); +out: + nvmet_req_complete(req, status); +} + +void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req) +{ + struct nvme_id_ns_zns *id_zns; + u64 zsze; + u16 status; + + if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) { + req->error_loc = offsetof(struct nvme_identify, nsid); + status = NVME_SC_INVALID_NS | NVME_SC_DNR; + goto out; + } + + id_zns = kzalloc(sizeof(*id_zns), GFP_KERNEL); + if (!id_zns) { + status = NVME_SC_INTERNAL; + goto out; + } + + status = nvmet_req_find_ns(req); + if (status) { + status = NVME_SC_INTERNAL; + goto done; + } + + if (!bdev_is_zoned(req->ns->bdev)) { + req->error_loc = offsetof(struct nvme_identify, nsid); + status = NVME_SC_INVALID_NS | NVME_SC_DNR; + goto done; + } + + nvmet_ns_revalidate(req->ns); + zsze = (bdev_zone_sectors(req->ns->bdev) << 9) >> + req->ns->blksize_shift; + id_zns->lbafe[0].zsze = cpu_to_le64(zsze); + id_zns->mor = cpu_to_le32(bdev_max_open_zones(req->ns->bdev)); + id_zns->mar = cpu_to_le32(bdev_max_active_zones(req->ns->bdev)); + +done: + status = nvmet_copy_to_sgl(req, 0, id_zns, sizeof(*id_zns)); + kfree(id_zns); +out: + nvmet_req_complete(req, status); +} + +static u16 nvmet_bdev_validate_zone_mgmt_recv(struct nvmet_req *req) +{ + sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba); + u32 out_bufsize = (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2; + + if (sect >= get_capacity(req->ns->bdev->bd_disk)) { + req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, slba); + return NVME_SC_LBA_RANGE | NVME_SC_DNR; + } + + if (out_bufsize < sizeof(struct nvme_zone_report)) { + req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, numd); + return NVME_SC_INVALID_FIELD | NVME_SC_DNR; + } + + if (req->cmd->zmr.zra != NVME_ZRA_ZONE_REPORT) { + req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, zra); + return NVME_SC_INVALID_FIELD | NVME_SC_DNR; + } + + switch (req->cmd->zmr.pr) { + case 0: + case 1: + break; + default: + req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, pr); + return NVME_SC_INVALID_FIELD | NVME_SC_DNR; + } + + switch (req->cmd->zmr.zrasf) { + case NVME_ZRASF_ZONE_REPORT_ALL: + case NVME_ZRASF_ZONE_STATE_EMPTY: + case NVME_ZRASF_ZONE_STATE_IMP_OPEN: + case NVME_ZRASF_ZONE_STATE_EXP_OPEN: + case NVME_ZRASF_ZONE_STATE_CLOSED: + case NVME_ZRASF_ZONE_STATE_FULL: + case NVME_ZRASF_ZONE_STATE_READONLY: + case NVME_ZRASF_ZONE_STATE_OFFLINE: + break; + default: + req->error_loc = + offsetof(struct nvme_zone_mgmt_recv_cmd, zrasf); + return NVME_SC_INVALID_FIELD | NVME_SC_DNR; + } + + return NVME_SC_SUCCESS; +} + +struct nvmet_report_zone_data { + struct nvmet_req *req; + u64 out_buf_offset; + u64 out_nr_zones; + u64 nr_zones; + u8 zrasf; +}; + +static int nvmet_bdev_report_zone_cb(struct blk_zone *z, unsigned i, void *d) +{ 
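+	/*
+	 * Translate the command's ZRASF state filter into the matching
+	 * block layer zone condition so the filter check below can
+	 * compare z->cond directly.
+	 */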
+ static const unsigned int nvme_zrasf_to_blk_zcond[] = { + [NVME_ZRASF_ZONE_STATE_EMPTY] = BLK_ZONE_COND_EMPTY, + [NVME_ZRASF_ZONE_STATE_IMP_OPEN] = BLK_ZONE_COND_IMP_OPEN, + [NVME_ZRASF_ZONE_STATE_EXP_OPEN] = BLK_ZONE_COND_EXP_OPEN, + [NVME_ZRASF_ZONE_STATE_CLOSED] = BLK_ZONE_COND_CLOSED, + [NVME_ZRASF_ZONE_STATE_READONLY] = BLK_ZONE_COND_READONLY, + [NVME_ZRASF_ZONE_STATE_FULL] = BLK_ZONE_COND_FULL, + [NVME_ZRASF_ZONE_STATE_OFFLINE] = BLK_ZONE_COND_OFFLINE, + }; + struct nvmet_report_zone_data *rz = d; + + if (rz->zrasf != NVME_ZRASF_ZONE_REPORT_ALL && + z->cond != nvme_zrasf_to_blk_zcond[rz->zrasf]) + return 0; + + if (rz->nr_zones < rz->out_nr_zones) { + struct nvme_zone_descriptor zdesc = { }; + u16 status; + + zdesc.zcap = nvmet_sect_to_lba(rz->req->ns, z->capacity); + zdesc.zslba = nvmet_sect_to_lba(rz->req->ns, z->start); + zdesc.wp = nvmet_sect_to_lba(rz->req->ns, z->wp); + zdesc.za = z->reset ? 1 << 2 : 0; + zdesc.zs = z->cond << 4; + zdesc.zt = z->type; + + status = nvmet_copy_to_sgl(rz->req, rz->out_buf_offset, &zdesc, + sizeof(zdesc)); + if (status) + return -EINVAL; + + rz->out_buf_offset += sizeof(zdesc); + } + + rz->nr_zones++; + + return 0; +} + +static unsigned long nvmet_req_nr_zones_from_slba(struct nvmet_req *req) +{ + unsigned int sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba); + + return blkdev_nr_zones(req->ns->bdev->bd_disk) - + (sect >> ilog2(bdev_zone_sectors(req->ns->bdev))); +} + +static unsigned long get_nr_zones_from_buf(struct nvmet_req *req, u32 bufsize) +{ + if (bufsize <= sizeof(struct nvme_zone_report)) + return 0; + + return (bufsize - sizeof(struct nvme_zone_report)) / + sizeof(struct nvme_zone_descriptor); +} + +static void nvmet_bdev_zone_zmgmt_recv_work(struct work_struct *w) +{ + struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work); + sector_t start_sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba); + unsigned long req_slba_nr_zones = nvmet_req_nr_zones_from_slba(req); + u32 out_bufsize = (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2; + __le64 nr_zones; + u16 status; + int ret; + struct nvmet_report_zone_data rz_data = { + .out_nr_zones = get_nr_zones_from_buf(req, out_bufsize), + /* leave the place for report zone header */ + .out_buf_offset = sizeof(struct nvme_zone_report), + .zrasf = req->cmd->zmr.zrasf, + .nr_zones = 0, + .req = req, + }; + + status = nvmet_bdev_validate_zone_mgmt_recv(req); + if (status) + goto out; + + if (!req_slba_nr_zones) { + status = NVME_SC_SUCCESS; + goto out; + } + + ret = blkdev_report_zones(req->ns->bdev, start_sect, req_slba_nr_zones, + nvmet_bdev_report_zone_cb, &rz_data); + if (ret < 0) { + status = NVME_SC_INTERNAL; + goto out; + } + + /* + * When partial bit is set nr_zones must indicate the number of zone + * descriptors actually transferred. 
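+	 * For example, if 100 zones match the filter but the output
+	 * buffer only has room for 64 descriptors, a partial report
+	 * returns nr_zones = 64 rather than the total match count.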
+ */ + if (req->cmd->zmr.pr) + rz_data.nr_zones = min(rz_data.nr_zones, rz_data.out_nr_zones); + + nr_zones = cpu_to_le64(rz_data.nr_zones); + status = nvmet_copy_to_sgl(req, 0, &nr_zones, sizeof(nr_zones)); + +out: + nvmet_req_complete(req, status); +} + +void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req) +{ + INIT_WORK(&req->z.zmgmt_work, nvmet_bdev_zone_zmgmt_recv_work); + queue_work(zbd_wq, &req->z.zmgmt_work); +} + +static inline enum req_opf zsa_req_op(u8 zsa) +{ + switch (zsa) { + case NVME_ZONE_OPEN: + return REQ_OP_ZONE_OPEN; + case NVME_ZONE_CLOSE: + return REQ_OP_ZONE_CLOSE; + case NVME_ZONE_FINISH: + return REQ_OP_ZONE_FINISH; + case NVME_ZONE_RESET: + return REQ_OP_ZONE_RESET; + default: + return REQ_OP_LAST; + } +} + +static u16 blkdev_zone_mgmt_errno_to_nvme_status(int ret) +{ + switch (ret) { + case 0: + return NVME_SC_SUCCESS; + case -EINVAL: + case -EIO: + return NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR; + default: + return NVME_SC_INTERNAL; + } +} + +struct nvmet_zone_mgmt_send_all_data { + unsigned long *zbitmap; + struct nvmet_req *req; +}; + +static int zmgmt_send_scan_cb(struct blk_zone *z, unsigned i, void *d) +{ + struct nvmet_zone_mgmt_send_all_data *data = d; + + switch (zsa_req_op(data->req->cmd->zms.zsa)) { + case REQ_OP_ZONE_OPEN: + switch (z->cond) { + case BLK_ZONE_COND_CLOSED: + break; + default: + return 0; + } + break; + case REQ_OP_ZONE_CLOSE: + switch (z->cond) { + case BLK_ZONE_COND_IMP_OPEN: + case BLK_ZONE_COND_EXP_OPEN: + break; + default: + return 0; + } + break; + case REQ_OP_ZONE_FINISH: + switch (z->cond) { + case BLK_ZONE_COND_IMP_OPEN: + case BLK_ZONE_COND_EXP_OPEN: + case BLK_ZONE_COND_CLOSED: + break; + default: + return 0; + } + break; + default: + return -EINVAL; + } + + set_bit(i, data->zbitmap); + + return 0; +} + +static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req) +{ + struct block_device *bdev = req->ns->bdev; + unsigned int nr_zones = blkdev_nr_zones(bdev->bd_disk); + struct request_queue *q = bdev_get_queue(bdev); + struct bio *bio = NULL; + sector_t sector = 0; + int ret; + struct nvmet_zone_mgmt_send_all_data d = { + .req = req, + }; + + d.zbitmap = kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(*(d.zbitmap)), + GFP_NOIO, q->node); + if (!d.zbitmap) { + ret = -ENOMEM; + goto out; + } + + /* Scan and build bitmap of the eligible zones */ + ret = blkdev_report_zones(bdev, 0, nr_zones, zmgmt_send_scan_cb, &d); + if (ret != nr_zones) { + if (ret > 0) + ret = -EIO; + goto out; + } else { + /* We scanned all the zones */ + ret = 0; + } + + while (sector < get_capacity(bdev->bd_disk)) { + if (test_bit(blk_queue_zone_no(q, sector), d.zbitmap)) { + bio = blk_next_bio(bio, 0, GFP_KERNEL); + bio->bi_opf = zsa_req_op(req->cmd->zms.zsa) | REQ_SYNC; + bio->bi_iter.bi_sector = sector; + bio_set_dev(bio, bdev); + /* This may take a while, so be nice to others */ + cond_resched(); + } + sector += blk_queue_zone_sectors(q); + } + + if (bio) { + ret = submit_bio_wait(bio); + bio_put(bio); + } + +out: + kfree(d.zbitmap); + + return blkdev_zone_mgmt_errno_to_nvme_status(ret); +} + +static u16 nvmet_bdev_execute_zmgmt_send_all(struct nvmet_req *req) +{ + int ret; + + switch (zsa_req_op(req->cmd->zms.zsa)) { + case REQ_OP_ZONE_RESET: + ret = blkdev_zone_mgmt(req->ns->bdev, REQ_OP_ZONE_RESET, 0, + get_capacity(req->ns->bdev->bd_disk), + GFP_KERNEL); + if (ret < 0) + return blkdev_zone_mgmt_errno_to_nvme_status(ret); + break; + case REQ_OP_ZONE_OPEN: + case REQ_OP_ZONE_CLOSE: + case REQ_OP_ZONE_FINISH: + return 
nvmet_bdev_zone_mgmt_emulate_all(req);
+	default:
+		/* this is needed to quiet compiler warning */
+		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa);
+		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+	}
+
+	return NVME_SC_SUCCESS;
+}
+
+static void nvmet_bdev_zmgmt_send_work(struct work_struct *w)
+{
+	struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work);
+	sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zms.slba);
+	enum req_opf op = zsa_req_op(req->cmd->zms.zsa);
+	struct block_device *bdev = req->ns->bdev;
+	sector_t zone_sectors = bdev_zone_sectors(bdev);
+	u16 status = NVME_SC_SUCCESS;
+	int ret;
+
+	if (op == REQ_OP_LAST) {
+		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa);
+		status = NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR;
+		goto out;
+	}
+
+	/* when the select-all bit is set, the slba field is ignored */
+	if (req->cmd->zms.select_all) {
+		status = nvmet_bdev_execute_zmgmt_send_all(req);
+		goto out;
+	}
+
+	if (sect >= get_capacity(bdev->bd_disk)) {
+		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba);
+		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
+		goto out;
+	}
+
+	if (sect & (zone_sectors - 1)) {
+		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba);
+		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		goto out;
+	}
+
+	ret = blkdev_zone_mgmt(bdev, op, sect, zone_sectors, GFP_KERNEL);
+	if (ret < 0)
+		status = blkdev_zone_mgmt_errno_to_nvme_status(ret);
+
+out:
+	nvmet_req_complete(req, status);
+}
+
+void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req)
+{
+	INIT_WORK(&req->z.zmgmt_work, nvmet_bdev_zmgmt_send_work);
+	queue_work(zbd_wq, &req->z.zmgmt_work);
+}
+
+static void nvmet_bdev_zone_append_bio_done(struct bio *bio)
+{
+	struct nvmet_req *req = bio->bi_private;
+
+	if (bio->bi_status == BLK_STS_OK) {
+		req->cqe->result.u64 =
+			nvmet_sect_to_lba(req->ns, bio->bi_iter.bi_sector);
+	}
+
+	nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
+	nvmet_req_bio_put(req, bio);
+}
+
+void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
+{
+	sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
+	u16 status = NVME_SC_SUCCESS;
+	unsigned int total_len = 0;
+	struct scatterlist *sg;
+	struct bio *bio;
+	int sg_cnt;
+
+	/* Request is completed on a length mismatch in nvmet_check_transfer_len() */
+	if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req)))
+		return;
+
+	if (!req->sg_cnt) {
+		nvmet_req_complete(req, 0);
+		return;
+	}
+
+	if (sect >= get_capacity(req->ns->bdev->bd_disk)) {
+		req->error_loc = offsetof(struct nvme_rw_command, slba);
+		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
+		goto out;
+	}
+
+	if (sect & (bdev_zone_sectors(req->ns->bdev) - 1)) {
+		req->error_loc = offsetof(struct nvme_rw_command, slba);
+		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		goto out;
+	}
+
+	if (nvmet_use_inline_bvec(req)) {
+		bio = &req->z.inline_bio;
+		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
+	} else {
+		bio = bio_alloc(GFP_KERNEL, req->sg_cnt);
+	}
+
+	bio->bi_opf = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE;
+	bio->bi_end_io = nvmet_bdev_zone_append_bio_done;
+	bio_set_dev(bio, req->ns->bdev);
+	bio->bi_iter.bi_sector = sect;
+	bio->bi_private = req;
+	if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
+		bio->bi_opf |= REQ_FUA;
+
+	for_each_sg(req->sg, sg, req->sg_cnt, sg_cnt) {
+		struct page *p = sg_page(sg);
+		unsigned int l = sg->length;
+		unsigned int o = sg->offset;
+		unsigned int ret;
+
+		ret = bio_add_zone_append_page(bio, p, l, o);
+		if (ret != sg->length) { 
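+			/*
+			 * A short add means bio_add_zone_append_page()
+			 * could not take the whole segment (e.g. a
+			 * zone-append size limit was reached); fail the
+			 * command rather than submit a partial append.
+			 */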
+ status = NVME_SC_INTERNAL; + goto out_put_bio; + } + total_len += sg->length; + } + + if (total_len != nvmet_rw_data_len(req)) { + status = NVME_SC_INTERNAL | NVME_SC_DNR; + goto out_put_bio; + } + + submit_bio(bio); + return; + +out_put_bio: + nvmet_req_bio_put(req, bio); +out: + nvmet_req_complete(req, status); +} + +u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req) +{ + struct nvme_command *cmd = req->cmd; + + switch (cmd->common.opcode) { + case nvme_cmd_zone_append: + req->execute = nvmet_bdev_execute_zone_append; + return 0; + case nvme_cmd_zone_mgmt_recv: + req->execute = nvmet_bdev_execute_zone_mgmt_recv; + return 0; + case nvme_cmd_zone_mgmt_send: + req->execute = nvmet_bdev_execute_zone_mgmt_send; + return 0; + default: + return nvmet_bdev_parse_io_cmd(req); + } +} diff --git a/fs/pstore/Kconfig b/fs/pstore/Kconfig index 8adabde685f1..328da35da390 100644 --- a/fs/pstore/Kconfig +++ b/fs/pstore/Kconfig @@ -173,6 +173,7 @@ config PSTORE_BLK tristate "Log panic/oops to a block device" depends on PSTORE depends on BLOCK + depends on BROKEN select PSTORE_ZONE default n help diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 83726f607be1..c8ec7803b1b6 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -1005,6 +1005,7 @@ int acpi_dev_resume(struct device *dev); int acpi_subsys_runtime_suspend(struct device *dev); int acpi_subsys_runtime_resume(struct device *dev); int acpi_dev_pm_attach(struct device *dev, bool power_on); +bool acpi_storage_d3(struct device *dev); #else static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; } static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; } @@ -1012,6 +1013,10 @@ static inline int acpi_dev_pm_attach(struct device *dev, bool power_on) { return 0; } +static inline bool acpi_storage_d3(struct device *dev) +{ + return false; +} #endif #if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP) diff --git a/include/linux/bio.h b/include/linux/bio.h index d2b98efb5cc5..2203b686e1f0 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -818,4 +818,6 @@ static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb) bio->bi_opf |= REQ_NOWAIT; } +struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp); + #endif /* __LINUX_BIO_H */ diff --git a/include/linux/nvme.h b/include/linux/nvme.h index edcbd60b88b9..b7c4c4130b65 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -636,8 +636,8 @@ struct nvme_lba_range_type { __u8 type; __u8 attributes; __u8 rsvd2[14]; - __u64 slba; - __u64 nlb; + __le64 slba; + __le64 nlb; __u8 guid[16]; __u8 rsvd48[16]; }; @@ -944,6 +944,13 @@ struct nvme_zone_mgmt_recv_cmd { enum { NVME_ZRA_ZONE_REPORT = 0, NVME_ZRASF_ZONE_REPORT_ALL = 0, + NVME_ZRASF_ZONE_STATE_EMPTY = 0x01, + NVME_ZRASF_ZONE_STATE_IMP_OPEN = 0x02, + NVME_ZRASF_ZONE_STATE_EXP_OPEN = 0x03, + NVME_ZRASF_ZONE_STATE_CLOSED = 0x04, + NVME_ZRASF_ZONE_STATE_READONLY = 0x05, + NVME_ZRASF_ZONE_STATE_FULL = 0x06, + NVME_ZRASF_ZONE_STATE_OFFLINE = 0x07, NVME_REPORT_ZONE_PARTIAL = 1, }; @@ -1504,6 +1511,7 @@ enum { NVME_SC_NS_WRITE_PROTECTED = 0x20, NVME_SC_CMD_INTERRUPTED = 0x21, NVME_SC_TRANSIENT_TR_ERR = 0x22, + NVME_SC_INVALID_IO_CMD_SET = 0x2C, NVME_SC_LBA_RANGE = 0x80, NVME_SC_CAP_EXCEEDED = 0x81, |