author     Linus Torvalds  2021-09-09 11:48:27 -0700
committer  Linus Torvalds  2021-09-09 11:48:27 -0700
commit     70868a180501d17fea58153c649d56bc18435315
tree       6615036e49c6d86375bcf84edcb436de47667412 /drivers
parent     2e5fd489a4e5fcc97b035c03ace724c1d481a4c1
parent     2b922a9d064f8e86b53b04f5819917b7a04142ed
Merge tag 'cxl-for-5.15' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl
Pull CXL (Compute Express Link) updates from Dan Williams:
- Fix detection of CXL host bridges to filter out disabled ACPI0016
devices in the ACPI DSDT.
- Fix kernel lockdown integration to disable raw commands when raw PCI
access is disabled.
- Fix a broken debug message.
- Add support for "Get Partition Info", i.e. enumerate the split
  between volatile and persistent capacity on bi-modal CXL memory
  expanders (see the usage sketch after this list).
- Re-factor the core by subject area. This is a work in progress.
- Prepare libnvdimm to understand CXL labels in addition to EFI labels.
This is a work in progress.
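
Per the "Get Partition Info" item above, the resulting split is surfaced to
userspace as per-memdev sysfs attributes: the new "ram" and "pmem" attribute
groups added in drivers/cxl/core/memdev.c each expose a read-only "size" file,
reported in bytes. The mailbox reports capacities in multiples of 256MB, so an
active_volatile_cap of 16, for example, becomes 16 * 256MiB = 4GiB after the
CXL_CAPACITY_MULTIPLIER conversion in the pci.c hunks below. A minimal
userspace sketch follows; the sysfs mount point and the "mem0" device name are
assumptions for illustration, not taken from this patch:

/* Sketch: read the volatile/persistent capacity split of a CXL memdev.
 * The /sys mount point and "mem0" name are illustrative assumptions. */
#include <stdio.h>

static unsigned long long read_size(const char *path)
{
	unsigned long long val = 0;
	FILE *f = fopen(path, "r");

	if (!f)
		return 0;
	/* the driver emits these attributes in hex ("%#llx") */
	if (fscanf(f, "%llx", &val) != 1)
		val = 0;
	fclose(f);
	return val;
}

int main(void)
{
	unsigned long long ram = read_size("/sys/bus/cxl/devices/mem0/ram/size");
	unsigned long long pmem = read_size("/sys/bus/cxl/devices/mem0/pmem/size");

	printf("mem0: volatile %llu bytes, persistent %llu bytes\n", ram, pmem);
	return 0;
}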
* tag 'cxl-for-5.15' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl: (25 commits)
cxl/registers: Fix Documentation warning
cxl/pmem: Fix Documentation warning
cxl/uapi: Fix defined but not used warnings
cxl/pci: Fix debug message in cxl_probe_regs()
cxl/pci: Fix lockdown level
cxl/acpi: Do not add DSDT disabled ACPI0016 host bridge ports
libnvdimm/labels: Add claim class helpers
libnvdimm/labels: Add type-guid helpers
libnvdimm/labels: Add blk special cases for nlabel and position helpers
libnvdimm/labels: Add blk isetcookie set / validation helpers
libnvdimm/labels: Add a checksum calculation helper
libnvdimm/labels: Introduce label setter helpers
libnvdimm/labels: Add isetcookie validation helper
libnvdimm/labels: Introduce getters for namespace label fields
cxl/mem: Adjust ram/pmem range to represent DPA ranges
cxl/mem: Account for partitionable space in ram/pmem ranges
cxl/pci: Store memory capacity values
cxl/pci: Simplify register setup
cxl/pci: Ignore unknown register block types
cxl/core: Move memdev management to core
...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/cxl/Makefile                                       |   4
-rw-r--r--  drivers/cxl/acpi.c                                         |  12
-rw-r--r--  drivers/cxl/core/Makefile                                  |   8
-rw-r--r--  drivers/cxl/core/bus.c (renamed from drivers/cxl/core.c)   | 464
-rw-r--r--  drivers/cxl/core/core.h                                    |  20
-rw-r--r--  drivers/cxl/core/memdev.c                                  | 246
-rw-r--r--  drivers/cxl/core/pmem.c                                    | 230
-rw-r--r--  drivers/cxl/core/regs.c                                    | 249
-rw-r--r--  drivers/cxl/cxl.h                                          |   1
-rw-r--r--  drivers/cxl/cxlmem.h (renamed from drivers/cxl/mem.h)      |  35
-rw-r--r--  drivers/cxl/pci.c                                          | 439
-rw-r--r--  drivers/cxl/pci.h                                          |   1
-rw-r--r--  drivers/cxl/pmem.c                                         |   2
-rw-r--r--  drivers/nvdimm/label.c                                     | 256
-rw-r--r--  drivers/nvdimm/label.h                                     |   1
-rw-r--r--  drivers/nvdimm/namespace_devs.c                            | 113
-rw-r--r--  drivers/nvdimm/nd.h                                        | 150
17 files changed, 1344 insertions, 887 deletions
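
One structural note before the diff: the refactor moves the core objects into
drivers/cxl/core/, and the Makefile hunk below moves the
-DDEFAULT_SYMBOL_NAMESPACE=CXL flag along with it, so every EXPORT_SYMBOL_GPL()
in the core lands in the "CXL" symbol namespace. Any module that calls those
exports has to import that namespace, as the in-tree cxl_pci/cxl_acpi/cxl_pmem
drivers already do. A minimal sketch of a hypothetical consumer (not part of
this patch):

/* Sketch: hypothetical module consuming CXL core exports. Symbols built
 * with DEFAULT_SYMBOL_NAMESPACE=CXL only resolve after importing the
 * namespace (5.15-era unquoted-token form of the macro). */
#include <linux/module.h>

MODULE_IMPORT_NS(CXL);

static int __init example_init(void)
{
	/* would call CXL-namespace exports such as devm_cxl_add_memdev() here */
	return 0;
}
module_init(example_init);

static void __exit example_exit(void)
{
}
module_exit(example_exit);

MODULE_LICENSE("GPL v2");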
diff --git a/drivers/cxl/Makefile b/drivers/cxl/Makefile index 32954059b37b..d1aaabc940f3 100644 --- a/drivers/cxl/Makefile +++ b/drivers/cxl/Makefile @@ -1,11 +1,9 @@ # SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_CXL_BUS) += cxl_core.o +obj-$(CONFIG_CXL_BUS) += core/ obj-$(CONFIG_CXL_MEM) += cxl_pci.o obj-$(CONFIG_CXL_ACPI) += cxl_acpi.o obj-$(CONFIG_CXL_PMEM) += cxl_pmem.o -ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=CXL -cxl_core-y := core.o cxl_pci-y := pci.o cxl_acpi-y := acpi.o cxl_pmem-y := pmem.o diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c index 8ae89273f58e..54e9d4d2cf5f 100644 --- a/drivers/cxl/acpi.c +++ b/drivers/cxl/acpi.c @@ -243,6 +243,9 @@ static struct acpi_device *to_cxl_host_bridge(struct device *dev) { struct acpi_device *adev = to_acpi_device(dev); + if (!acpi_pci_find_root(adev->handle)) + return NULL; + if (strcmp(acpi_device_hid(adev), "ACPI0016") == 0) return adev; return NULL; @@ -266,10 +269,6 @@ static int add_host_bridge_uport(struct device *match, void *arg) if (!bridge) return 0; - pci_root = acpi_pci_find_root(bridge->handle); - if (!pci_root) - return -ENXIO; - dport = find_dport_by_dev(root_port, match); if (!dport) { dev_dbg(host, "host bridge expected and not found\n"); @@ -282,6 +281,11 @@ static int add_host_bridge_uport(struct device *match, void *arg) return PTR_ERR(port); dev_dbg(host, "%s: add: %s\n", dev_name(match), dev_name(&port->dev)); + /* + * Note that this lookup already succeeded in + * to_cxl_host_bridge(), so no need to check for failure here + */ + pci_root = acpi_pci_find_root(bridge->handle); ctx = (struct cxl_walk_context){ .dev = host, .root = pci_root->bus, diff --git a/drivers/cxl/core/Makefile b/drivers/cxl/core/Makefile new file mode 100644 index 000000000000..0fdbf3c6ac1a --- /dev/null +++ b/drivers/cxl/core/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_CXL_BUS) += cxl_core.o + +ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=CXL -I$(srctree)/drivers/cxl +cxl_core-y := bus.o +cxl_core-y += pmem.o +cxl_core-y += regs.o +cxl_core-y += memdev.o diff --git a/drivers/cxl/core.c b/drivers/cxl/core/bus.c index 2b90b7c3b9d7..267d8042bec2 100644 --- a/drivers/cxl/core.c +++ b/drivers/cxl/core/bus.c @@ -6,14 +6,22 @@ #include <linux/pci.h> #include <linux/slab.h> #include <linux/idr.h> -#include "cxl.h" -#include "mem.h" +#include <cxlmem.h> +#include <cxl.h> +#include "core.h" /** * DOC: cxl core * - * The CXL core provides a sysfs hierarchy for control devices and a rendezvous - * point for cross-device interleave coordination through cxl ports. + * The CXL core provides a set of interfaces that can be consumed by CXL aware + * drivers. The interfaces allow for creation, modification, and destruction of + * regions, memory devices, ports, and decoders. CXL aware drivers must register + * with the CXL core via these interfaces in order to be able to participate in + * cross-device interleave coordination. The CXL core also establishes and + * maintains the bridge to the nvdimm subsystem. + * + * CXL core introduces sysfs hierarchy to control the devices that are + * instantiated by the core. 
*/ static DEFINE_IDA(cxl_port_ida); @@ -30,7 +38,7 @@ static struct attribute *cxl_base_attributes[] = { NULL, }; -static struct attribute_group cxl_base_attribute_group = { +struct attribute_group cxl_base_attribute_group = { .attrs = cxl_base_attributes, }; @@ -507,11 +515,6 @@ err: return ERR_PTR(rc); } -static void unregister_dev(void *dev) -{ - device_unregister(dev); -} - struct cxl_decoder * devm_cxl_add_decoder(struct device *host, struct cxl_port *port, int nr_targets, resource_size_t base, resource_size_t len, @@ -536,7 +539,7 @@ devm_cxl_add_decoder(struct device *host, struct cxl_port *port, int nr_targets, if (rc) goto err; - rc = devm_add_action_or_reset(host, unregister_dev, dev); + rc = devm_add_action_or_reset(host, unregister_cxl_dev, dev); if (rc) return ERR_PTR(rc); return cxld; @@ -548,429 +551,6 @@ err: EXPORT_SYMBOL_GPL(devm_cxl_add_decoder); /** - * cxl_probe_component_regs() - Detect CXL Component register blocks - * @dev: Host device of the @base mapping - * @base: Mapping containing the HDM Decoder Capability Header - * @map: Map object describing the register block information found - * - * See CXL 2.0 8.2.4 Component Register Layout and Definition - * See CXL 2.0 8.2.5.5 CXL Device Register Interface - * - * Probe for component register information and return it in map object. - */ -void cxl_probe_component_regs(struct device *dev, void __iomem *base, - struct cxl_component_reg_map *map) -{ - int cap, cap_count; - u64 cap_array; - - *map = (struct cxl_component_reg_map) { 0 }; - - /* - * CXL.cache and CXL.mem registers are at offset 0x1000 as defined in - * CXL 2.0 8.2.4 Table 141. - */ - base += CXL_CM_OFFSET; - - cap_array = readq(base + CXL_CM_CAP_HDR_OFFSET); - - if (FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, cap_array) != CM_CAP_HDR_CAP_ID) { - dev_err(dev, - "Couldn't locate the CXL.cache and CXL.mem capability array header./n"); - return; - } - - /* It's assumed that future versions will be backward compatible */ - cap_count = FIELD_GET(CXL_CM_CAP_HDR_ARRAY_SIZE_MASK, cap_array); - - for (cap = 1; cap <= cap_count; cap++) { - void __iomem *register_block; - u32 hdr; - int decoder_cnt; - u16 cap_id, offset; - u32 length; - - hdr = readl(base + cap * 0x4); - - cap_id = FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, hdr); - offset = FIELD_GET(CXL_CM_CAP_PTR_MASK, hdr); - register_block = base + offset; - - switch (cap_id) { - case CXL_CM_CAP_CAP_ID_HDM: - dev_dbg(dev, "found HDM decoder capability (0x%x)\n", - offset); - - hdr = readl(register_block); - - decoder_cnt = cxl_hdm_decoder_count(hdr); - length = 0x20 * decoder_cnt + 0x10; - - map->hdm_decoder.valid = true; - map->hdm_decoder.offset = CXL_CM_OFFSET + offset; - map->hdm_decoder.size = length; - break; - default: - dev_dbg(dev, "Unknown CM cap ID: %d (0x%x)\n", cap_id, - offset); - break; - } - } -} -EXPORT_SYMBOL_GPL(cxl_probe_component_regs); - -static void cxl_nvdimm_bridge_release(struct device *dev) -{ - struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev); - - kfree(cxl_nvb); -} - -static const struct attribute_group *cxl_nvdimm_bridge_attribute_groups[] = { - &cxl_base_attribute_group, - NULL, -}; - -static const struct device_type cxl_nvdimm_bridge_type = { - .name = "cxl_nvdimm_bridge", - .release = cxl_nvdimm_bridge_release, - .groups = cxl_nvdimm_bridge_attribute_groups, -}; - -struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev) -{ - if (dev_WARN_ONCE(dev, dev->type != &cxl_nvdimm_bridge_type, - "not a cxl_nvdimm_bridge device\n")) - return NULL; - return container_of(dev, struct 
cxl_nvdimm_bridge, dev); -} -EXPORT_SYMBOL_GPL(to_cxl_nvdimm_bridge); - -static struct cxl_nvdimm_bridge * -cxl_nvdimm_bridge_alloc(struct cxl_port *port) -{ - struct cxl_nvdimm_bridge *cxl_nvb; - struct device *dev; - - cxl_nvb = kzalloc(sizeof(*cxl_nvb), GFP_KERNEL); - if (!cxl_nvb) - return ERR_PTR(-ENOMEM); - - dev = &cxl_nvb->dev; - cxl_nvb->port = port; - cxl_nvb->state = CXL_NVB_NEW; - device_initialize(dev); - device_set_pm_not_required(dev); - dev->parent = &port->dev; - dev->bus = &cxl_bus_type; - dev->type = &cxl_nvdimm_bridge_type; - - return cxl_nvb; -} - -static void unregister_nvb(void *_cxl_nvb) -{ - struct cxl_nvdimm_bridge *cxl_nvb = _cxl_nvb; - bool flush; - - /* - * If the bridge was ever activated then there might be in-flight state - * work to flush. Once the state has been changed to 'dead' then no new - * work can be queued by user-triggered bind. - */ - device_lock(&cxl_nvb->dev); - flush = cxl_nvb->state != CXL_NVB_NEW; - cxl_nvb->state = CXL_NVB_DEAD; - device_unlock(&cxl_nvb->dev); - - /* - * Even though the device core will trigger device_release_driver() - * before the unregister, it does not know about the fact that - * cxl_nvdimm_bridge_driver defers ->remove() work. So, do the driver - * release not and flush it before tearing down the nvdimm device - * hierarchy. - */ - device_release_driver(&cxl_nvb->dev); - if (flush) - flush_work(&cxl_nvb->state_work); - device_unregister(&cxl_nvb->dev); -} - -struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host, - struct cxl_port *port) -{ - struct cxl_nvdimm_bridge *cxl_nvb; - struct device *dev; - int rc; - - if (!IS_ENABLED(CONFIG_CXL_PMEM)) - return ERR_PTR(-ENXIO); - - cxl_nvb = cxl_nvdimm_bridge_alloc(port); - if (IS_ERR(cxl_nvb)) - return cxl_nvb; - - dev = &cxl_nvb->dev; - rc = dev_set_name(dev, "nvdimm-bridge"); - if (rc) - goto err; - - rc = device_add(dev); - if (rc) - goto err; - - rc = devm_add_action_or_reset(host, unregister_nvb, cxl_nvb); - if (rc) - return ERR_PTR(rc); - - return cxl_nvb; - -err: - put_device(dev); - return ERR_PTR(rc); -} -EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm_bridge); - -static void cxl_nvdimm_release(struct device *dev) -{ - struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev); - - kfree(cxl_nvd); -} - -static const struct attribute_group *cxl_nvdimm_attribute_groups[] = { - &cxl_base_attribute_group, - NULL, -}; - -static const struct device_type cxl_nvdimm_type = { - .name = "cxl_nvdimm", - .release = cxl_nvdimm_release, - .groups = cxl_nvdimm_attribute_groups, -}; - -bool is_cxl_nvdimm(struct device *dev) -{ - return dev->type == &cxl_nvdimm_type; -} -EXPORT_SYMBOL_GPL(is_cxl_nvdimm); - -struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev) -{ - if (dev_WARN_ONCE(dev, !is_cxl_nvdimm(dev), - "not a cxl_nvdimm device\n")) - return NULL; - return container_of(dev, struct cxl_nvdimm, dev); -} -EXPORT_SYMBOL_GPL(to_cxl_nvdimm); - -static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd) -{ - struct cxl_nvdimm *cxl_nvd; - struct device *dev; - - cxl_nvd = kzalloc(sizeof(*cxl_nvd), GFP_KERNEL); - if (!cxl_nvd) - return ERR_PTR(-ENOMEM); - - dev = &cxl_nvd->dev; - cxl_nvd->cxlmd = cxlmd; - device_initialize(dev); - device_set_pm_not_required(dev); - dev->parent = &cxlmd->dev; - dev->bus = &cxl_bus_type; - dev->type = &cxl_nvdimm_type; - - return cxl_nvd; -} - -int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd) -{ - struct cxl_nvdimm *cxl_nvd; - struct device *dev; - int rc; - - cxl_nvd = cxl_nvdimm_alloc(cxlmd); - if (IS_ERR(cxl_nvd)) - 
return PTR_ERR(cxl_nvd); - - dev = &cxl_nvd->dev; - rc = dev_set_name(dev, "pmem%d", cxlmd->id); - if (rc) - goto err; - - rc = device_add(dev); - if (rc) - goto err; - - dev_dbg(host, "%s: register %s\n", dev_name(dev->parent), - dev_name(dev)); - - return devm_add_action_or_reset(host, unregister_dev, dev); - -err: - put_device(dev); - return rc; -} -EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm); - -/** - * cxl_probe_device_regs() - Detect CXL Device register blocks - * @dev: Host device of the @base mapping - * @base: Mapping of CXL 2.0 8.2.8 CXL Device Register Interface - * @map: Map object describing the register block information found - * - * Probe for device register information and return it in map object. - */ -void cxl_probe_device_regs(struct device *dev, void __iomem *base, - struct cxl_device_reg_map *map) -{ - int cap, cap_count; - u64 cap_array; - - *map = (struct cxl_device_reg_map){ 0 }; - - cap_array = readq(base + CXLDEV_CAP_ARRAY_OFFSET); - if (FIELD_GET(CXLDEV_CAP_ARRAY_ID_MASK, cap_array) != - CXLDEV_CAP_ARRAY_CAP_ID) - return; - - cap_count = FIELD_GET(CXLDEV_CAP_ARRAY_COUNT_MASK, cap_array); - - for (cap = 1; cap <= cap_count; cap++) { - u32 offset, length; - u16 cap_id; - - cap_id = FIELD_GET(CXLDEV_CAP_HDR_CAP_ID_MASK, - readl(base + cap * 0x10)); - offset = readl(base + cap * 0x10 + 0x4); - length = readl(base + cap * 0x10 + 0x8); - - switch (cap_id) { - case CXLDEV_CAP_CAP_ID_DEVICE_STATUS: - dev_dbg(dev, "found Status capability (0x%x)\n", offset); - - map->status.valid = true; - map->status.offset = offset; - map->status.size = length; - break; - case CXLDEV_CAP_CAP_ID_PRIMARY_MAILBOX: - dev_dbg(dev, "found Mailbox capability (0x%x)\n", offset); - map->mbox.valid = true; - map->mbox.offset = offset; - map->mbox.size = length; - break; - case CXLDEV_CAP_CAP_ID_SECONDARY_MAILBOX: - dev_dbg(dev, "found Secondary Mailbox capability (0x%x)\n", offset); - break; - case CXLDEV_CAP_CAP_ID_MEMDEV: - dev_dbg(dev, "found Memory Device capability (0x%x)\n", offset); - map->memdev.valid = true; - map->memdev.offset = offset; - map->memdev.size = length; - break; - default: - if (cap_id >= 0x8000) - dev_dbg(dev, "Vendor cap ID: %#x offset: %#x\n", cap_id, offset); - else - dev_dbg(dev, "Unknown cap ID: %#x offset: %#x\n", cap_id, offset); - break; - } - } -} -EXPORT_SYMBOL_GPL(cxl_probe_device_regs); - -static void __iomem *devm_cxl_iomap_block(struct device *dev, - resource_size_t addr, - resource_size_t length) -{ - void __iomem *ret_val; - struct resource *res; - - res = devm_request_mem_region(dev, addr, length, dev_name(dev)); - if (!res) { - resource_size_t end = addr + length - 1; - - dev_err(dev, "Failed to request region %pa-%pa\n", &addr, &end); - return NULL; - } - - ret_val = devm_ioremap(dev, addr, length); - if (!ret_val) - dev_err(dev, "Failed to map region %pr\n", res); - - return ret_val; -} - -int cxl_map_component_regs(struct pci_dev *pdev, - struct cxl_component_regs *regs, - struct cxl_register_map *map) -{ - struct device *dev = &pdev->dev; - resource_size_t phys_addr; - resource_size_t length; - - phys_addr = pci_resource_start(pdev, map->barno); - phys_addr += map->block_offset; - - phys_addr += map->component_map.hdm_decoder.offset; - length = map->component_map.hdm_decoder.size; - regs->hdm_decoder = devm_cxl_iomap_block(dev, phys_addr, length); - if (!regs->hdm_decoder) - return -ENOMEM; - - return 0; -} -EXPORT_SYMBOL_GPL(cxl_map_component_regs); - -int cxl_map_device_regs(struct pci_dev *pdev, - struct cxl_device_regs *regs, - struct cxl_register_map 
*map) -{ - struct device *dev = &pdev->dev; - resource_size_t phys_addr; - - phys_addr = pci_resource_start(pdev, map->barno); - phys_addr += map->block_offset; - - if (map->device_map.status.valid) { - resource_size_t addr; - resource_size_t length; - - addr = phys_addr + map->device_map.status.offset; - length = map->device_map.status.size; - regs->status = devm_cxl_iomap_block(dev, addr, length); - if (!regs->status) - return -ENOMEM; - } - - if (map->device_map.mbox.valid) { - resource_size_t addr; - resource_size_t length; - - addr = phys_addr + map->device_map.mbox.offset; - length = map->device_map.mbox.size; - regs->mbox = devm_cxl_iomap_block(dev, addr, length); - if (!regs->mbox) - return -ENOMEM; - } - - if (map->device_map.memdev.valid) { - resource_size_t addr; - resource_size_t length; - - addr = phys_addr + map->device_map.memdev.offset; - length = map->device_map.memdev.size; - regs->memdev = devm_cxl_iomap_block(dev, addr, length); - if (!regs->memdev) - return -ENOMEM; - } - - return 0; -} -EXPORT_SYMBOL_GPL(cxl_map_device_regs); - -/** * __cxl_driver_register - register a driver for the cxl bus * @cxl_drv: cxl driver structure to attach * @owner: owning module/driver @@ -1053,12 +633,26 @@ EXPORT_SYMBOL_GPL(cxl_bus_type); static __init int cxl_core_init(void) { - return bus_register(&cxl_bus_type); + int rc; + + rc = cxl_memdev_init(); + if (rc) + return rc; + + rc = bus_register(&cxl_bus_type); + if (rc) + goto err; + return 0; + +err: + cxl_memdev_exit(); + return rc; } static void cxl_core_exit(void) { bus_unregister(&cxl_bus_type); + cxl_memdev_exit(); } module_init(cxl_core_init); diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h new file mode 100644 index 000000000000..036a3c8106b4 --- /dev/null +++ b/drivers/cxl/core/core.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2020 Intel Corporation. */ + +#ifndef __CXL_CORE_H__ +#define __CXL_CORE_H__ + +extern const struct device_type cxl_nvdimm_bridge_type; +extern const struct device_type cxl_nvdimm_type; + +extern struct attribute_group cxl_base_attribute_group; + +static inline void unregister_cxl_dev(void *dev) +{ + device_unregister(dev); +} + +int cxl_memdev_init(void); +void cxl_memdev_exit(void); + +#endif /* __CXL_CORE_H__ */ diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c new file mode 100644 index 000000000000..a9c317e32010 --- /dev/null +++ b/drivers/cxl/core/memdev.c @@ -0,0 +1,246 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2020 Intel Corporation. 
*/ + +#include <linux/device.h> +#include <linux/slab.h> +#include <linux/idr.h> +#include <linux/pci.h> +#include <cxlmem.h> +#include "core.h" + +/* + * An entire PCI topology full of devices should be enough for any + * config + */ +#define CXL_MEM_MAX_DEVS 65536 + +static int cxl_mem_major; +static DEFINE_IDA(cxl_memdev_ida); + +static void cxl_memdev_release(struct device *dev) +{ + struct cxl_memdev *cxlmd = to_cxl_memdev(dev); + + ida_free(&cxl_memdev_ida, cxlmd->id); + kfree(cxlmd); +} + +static char *cxl_memdev_devnode(struct device *dev, umode_t *mode, kuid_t *uid, + kgid_t *gid) +{ + return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev)); +} + +static ssize_t firmware_version_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct cxl_memdev *cxlmd = to_cxl_memdev(dev); + struct cxl_mem *cxlm = cxlmd->cxlm; + + return sysfs_emit(buf, "%.16s\n", cxlm->firmware_version); +} +static DEVICE_ATTR_RO(firmware_version); + +static ssize_t payload_max_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct cxl_memdev *cxlmd = to_cxl_memdev(dev); + struct cxl_mem *cxlm = cxlmd->cxlm; + + return sysfs_emit(buf, "%zu\n", cxlm->payload_size); +} +static DEVICE_ATTR_RO(payload_max); + +static ssize_t label_storage_size_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct cxl_memdev *cxlmd = to_cxl_memdev(dev); + struct cxl_mem *cxlm = cxlmd->cxlm; + + return sysfs_emit(buf, "%zu\n", cxlm->lsa_size); +} +static DEVICE_ATTR_RO(label_storage_size); + +static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct cxl_memdev *cxlmd = to_cxl_memdev(dev); + struct cxl_mem *cxlm = cxlmd->cxlm; + unsigned long long len = range_len(&cxlm->ram_range); + + return sysfs_emit(buf, "%#llx\n", len); +} + +static struct device_attribute dev_attr_ram_size = + __ATTR(size, 0444, ram_size_show, NULL); + +static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct cxl_memdev *cxlmd = to_cxl_memdev(dev); + struct cxl_mem *cxlm = cxlmd->cxlm; + unsigned long long len = range_len(&cxlm->pmem_range); + + return sysfs_emit(buf, "%#llx\n", len); +} + +static struct device_attribute dev_attr_pmem_size = + __ATTR(size, 0444, pmem_size_show, NULL); + +static struct attribute *cxl_memdev_attributes[] = { + &dev_attr_firmware_version.attr, + &dev_attr_payload_max.attr, + &dev_attr_label_storage_size.attr, + NULL, +}; + +static struct attribute *cxl_memdev_pmem_attributes[] = { + &dev_attr_pmem_size.attr, + NULL, +}; + +static struct attribute *cxl_memdev_ram_attributes[] = { + &dev_attr_ram_size.attr, + NULL, +}; + +static struct attribute_group cxl_memdev_attribute_group = { + .attrs = cxl_memdev_attributes, +}; + +static struct attribute_group cxl_memdev_ram_attribute_group = { + .name = "ram", + .attrs = cxl_memdev_ram_attributes, +}; + +static struct attribute_group cxl_memdev_pmem_attribute_group = { + .name = "pmem", + .attrs = cxl_memdev_pmem_attributes, +}; + +static const struct attribute_group *cxl_memdev_attribute_groups[] = { + &cxl_memdev_attribute_group, + &cxl_memdev_ram_attribute_group, + &cxl_memdev_pmem_attribute_group, + NULL, +}; + +static const struct device_type cxl_memdev_type = { + .name = "cxl_memdev", + .release = cxl_memdev_release, + .devnode = cxl_memdev_devnode, + .groups = cxl_memdev_attribute_groups, +}; + +static void cxl_memdev_unregister(void *_cxlmd) +{ + struct cxl_memdev *cxlmd = _cxlmd; + struct device *dev = &cxlmd->dev; 
+ struct cdev *cdev = &cxlmd->cdev; + const struct cdevm_file_operations *cdevm_fops; + + cdevm_fops = container_of(cdev->ops, typeof(*cdevm_fops), fops); + cdevm_fops->shutdown(dev); + + cdev_device_del(&cxlmd->cdev, dev); + put_device(dev); +} + +static struct cxl_memdev *cxl_memdev_alloc(struct cxl_mem *cxlm, + const struct file_operations *fops) +{ + struct pci_dev *pdev = cxlm->pdev; + struct cxl_memdev *cxlmd; + struct device *dev; + struct cdev *cdev; + int rc; + + cxlmd = kzalloc(sizeof(*cxlmd), GFP_KERNEL); + if (!cxlmd) + return ERR_PTR(-ENOMEM); + + rc = ida_alloc_range(&cxl_memdev_ida, 0, CXL_MEM_MAX_DEVS, GFP_KERNEL); + if (rc < 0) + goto err; + cxlmd->id = rc; + + dev = &cxlmd->dev; + device_initialize(dev); + dev->parent = &pdev->dev; + dev->bus = &cxl_bus_type; + dev->devt = MKDEV(cxl_mem_major, cxlmd->id); + dev->type = &cxl_memdev_type; + device_set_pm_not_required(dev); + + cdev = &cxlmd->cdev; + cdev_init(cdev, fops); + return cxlmd; + +err: + kfree(cxlmd); + return ERR_PTR(rc); +} + +struct cxl_memdev * +devm_cxl_add_memdev(struct device *host, struct cxl_mem *cxlm, + const struct cdevm_file_operations *cdevm_fops) +{ + struct cxl_memdev *cxlmd; + struct device *dev; + struct cdev *cdev; + int rc; + + cxlmd = cxl_memdev_alloc(cxlm, &cdevm_fops->fops); + if (IS_ERR(cxlmd)) + return cxlmd; + + dev = &cxlmd->dev; + rc = dev_set_name(dev, "mem%d", cxlmd->id); + if (rc) + goto err; + + /* + * Activate ioctl operations, no cxl_memdev_rwsem manipulation + * needed as this is ordered with cdev_add() publishing the device. + */ + cxlmd->cxlm = cxlm; + + cdev = &cxlmd->cdev; + rc = cdev_device_add(cdev, dev); + if (rc) + goto err; + + rc = devm_add_action_or_reset(host, cxl_memdev_unregister, cxlmd); + if (rc) + return ERR_PTR(rc); + return cxlmd; + +err: + /* + * The cdev was briefly live, shutdown any ioctl operations that + * saw that state. + */ + cdevm_fops->shutdown(dev); + put_device(dev); + return ERR_PTR(rc); +} +EXPORT_SYMBOL_GPL(devm_cxl_add_memdev); + +__init int cxl_memdev_init(void) +{ + dev_t devt; + int rc; + + rc = alloc_chrdev_region(&devt, 0, CXL_MEM_MAX_DEVS, "cxl"); + if (rc) + return rc; + + cxl_mem_major = MAJOR(devt); + + return 0; +} + +void cxl_memdev_exit(void) +{ + unregister_chrdev_region(MKDEV(cxl_mem_major, 0), CXL_MEM_MAX_DEVS); +} diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c new file mode 100644 index 000000000000..d24570f5b8ba --- /dev/null +++ b/drivers/cxl/core/pmem.c @@ -0,0 +1,230 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2020 Intel Corporation. */ +#include <linux/device.h> +#include <linux/slab.h> +#include <cxlmem.h> +#include <cxl.h> +#include "core.h" + +/** + * DOC: cxl pmem + * + * The core CXL PMEM infrastructure supports persistent memory + * provisioning and serves as a bridge to the LIBNVDIMM subsystem. A CXL + * 'bridge' device is added at the root of a CXL device topology if + * platform firmware advertises at least one persistent memory capable + * CXL window. That root-level bridge corresponds to a LIBNVDIMM 'bus' + * device. Then for each cxl_memdev in the CXL device topology a bridge + * device is added to host a LIBNVDIMM dimm object. When these bridges + * are registered native LIBNVDIMM uapis are translated to CXL + * operations, for example, namespace label access commands. 
+ */ + +static void cxl_nvdimm_bridge_release(struct device *dev) +{ + struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev); + + kfree(cxl_nvb); +} + +static const struct attribute_group *cxl_nvdimm_bridge_attribute_groups[] = { + &cxl_base_attribute_group, + NULL, +}; + +const struct device_type cxl_nvdimm_bridge_type = { + .name = "cxl_nvdimm_bridge", + .release = cxl_nvdimm_bridge_release, + .groups = cxl_nvdimm_bridge_attribute_groups, +}; + +struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev) +{ + if (dev_WARN_ONCE(dev, dev->type != &cxl_nvdimm_bridge_type, + "not a cxl_nvdimm_bridge device\n")) + return NULL; + return container_of(dev, struct cxl_nvdimm_bridge, dev); +} +EXPORT_SYMBOL_GPL(to_cxl_nvdimm_bridge); + +static struct cxl_nvdimm_bridge * +cxl_nvdimm_bridge_alloc(struct cxl_port *port) +{ + struct cxl_nvdimm_bridge *cxl_nvb; + struct device *dev; + + cxl_nvb = kzalloc(sizeof(*cxl_nvb), GFP_KERNEL); + if (!cxl_nvb) + return ERR_PTR(-ENOMEM); + + dev = &cxl_nvb->dev; + cxl_nvb->port = port; + cxl_nvb->state = CXL_NVB_NEW; + device_initialize(dev); + device_set_pm_not_required(dev); + dev->parent = &port->dev; + dev->bus = &cxl_bus_type; + dev->type = &cxl_nvdimm_bridge_type; + + return cxl_nvb; +} + +static void unregister_nvb(void *_cxl_nvb) +{ + struct cxl_nvdimm_bridge *cxl_nvb = _cxl_nvb; + bool flush; + + /* + * If the bridge was ever activated then there might be in-flight state + * work to flush. Once the state has been changed to 'dead' then no new + * work can be queued by user-triggered bind. + */ + device_lock(&cxl_nvb->dev); + flush = cxl_nvb->state != CXL_NVB_NEW; + cxl_nvb->state = CXL_NVB_DEAD; + device_unlock(&cxl_nvb->dev); + + /* + * Even though the device core will trigger device_release_driver() + * before the unregister, it does not know about the fact that + * cxl_nvdimm_bridge_driver defers ->remove() work. So, do the driver + * release not and flush it before tearing down the nvdimm device + * hierarchy. 
+ */ + device_release_driver(&cxl_nvb->dev); + if (flush) + flush_work(&cxl_nvb->state_work); + device_unregister(&cxl_nvb->dev); +} + +/** + * devm_cxl_add_nvdimm_bridge() - add the root of a LIBNVDIMM topology + * @host: platform firmware root device + * @port: CXL port at the root of a CXL topology + * + * Return: bridge device that can host cxl_nvdimm objects + */ +struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host, + struct cxl_port *port) +{ + struct cxl_nvdimm_bridge *cxl_nvb; + struct device *dev; + int rc; + + if (!IS_ENABLED(CONFIG_CXL_PMEM)) + return ERR_PTR(-ENXIO); + + cxl_nvb = cxl_nvdimm_bridge_alloc(port); + if (IS_ERR(cxl_nvb)) + return cxl_nvb; + + dev = &cxl_nvb->dev; + rc = dev_set_name(dev, "nvdimm-bridge"); + if (rc) + goto err; + + rc = device_add(dev); + if (rc) + goto err; + + rc = devm_add_action_or_reset(host, unregister_nvb, cxl_nvb); + if (rc) + return ERR_PTR(rc); + + return cxl_nvb; + +err: + put_device(dev); + return ERR_PTR(rc); +} +EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm_bridge); + +static void cxl_nvdimm_release(struct device *dev) +{ + struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev); + + kfree(cxl_nvd); +} + +static const struct attribute_group *cxl_nvdimm_attribute_groups[] = { + &cxl_base_attribute_group, + NULL, +}; + +const struct device_type cxl_nvdimm_type = { + .name = "cxl_nvdimm", + .release = cxl_nvdimm_release, + .groups = cxl_nvdimm_attribute_groups, +}; + +bool is_cxl_nvdimm(struct device *dev) +{ + return dev->type == &cxl_nvdimm_type; +} +EXPORT_SYMBOL_GPL(is_cxl_nvdimm); + +struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev) +{ + if (dev_WARN_ONCE(dev, !is_cxl_nvdimm(dev), + "not a cxl_nvdimm device\n")) + return NULL; + return container_of(dev, struct cxl_nvdimm, dev); +} +EXPORT_SYMBOL_GPL(to_cxl_nvdimm); + +static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd) +{ + struct cxl_nvdimm *cxl_nvd; + struct device *dev; + + cxl_nvd = kzalloc(sizeof(*cxl_nvd), GFP_KERNEL); + if (!cxl_nvd) + return ERR_PTR(-ENOMEM); + + dev = &cxl_nvd->dev; + cxl_nvd->cxlmd = cxlmd; + device_initialize(dev); + device_set_pm_not_required(dev); + dev->parent = &cxlmd->dev; + dev->bus = &cxl_bus_type; + dev->type = &cxl_nvdimm_type; + + return cxl_nvd; +} + +/** + * devm_cxl_add_nvdimm() - add a bridge between a cxl_memdev and an nvdimm + * @host: same host as @cxlmd + * @cxlmd: cxl_memdev instance that will perform LIBNVDIMM operations + * + * Return: 0 on success negative error code on failure. + */ +int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd) +{ + struct cxl_nvdimm *cxl_nvd; + struct device *dev; + int rc; + + cxl_nvd = cxl_nvdimm_alloc(cxlmd); + if (IS_ERR(cxl_nvd)) + return PTR_ERR(cxl_nvd); + + dev = &cxl_nvd->dev; + rc = dev_set_name(dev, "pmem%d", cxlmd->id); + if (rc) + goto err; + + rc = device_add(dev); + if (rc) + goto err; + + dev_dbg(host, "%s: register %s\n", dev_name(dev->parent), + dev_name(dev)); + + return devm_add_action_or_reset(host, unregister_cxl_dev, dev); + +err: + put_device(dev); + return rc; +} +EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm); diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c new file mode 100644 index 000000000000..41de4a136ecd --- /dev/null +++ b/drivers/cxl/core/regs.c @@ -0,0 +1,249 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2020 Intel Corporation. 
*/ +#include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/device.h> +#include <linux/slab.h> +#include <linux/pci.h> +#include <cxlmem.h> + +/** + * DOC: cxl registers + * + * CXL device capabilities are enumerated by PCI DVSEC (Designated + * Vendor-specific) and / or descriptors provided by platform firmware. + * They can be defined as a set like the device and component registers + * mandated by CXL Section 8.1.12.2 Memory Device PCIe Capabilities and + * Extended Capabilities, or they can be individual capabilities + * appended to bridged and endpoint devices. + * + * Provide common infrastructure for enumerating and mapping these + * discrete capabilities. + */ + +/** + * cxl_probe_component_regs() - Detect CXL Component register blocks + * @dev: Host device of the @base mapping + * @base: Mapping containing the HDM Decoder Capability Header + * @map: Map object describing the register block information found + * + * See CXL 2.0 8.2.4 Component Register Layout and Definition + * See CXL 2.0 8.2.5.5 CXL Device Register Interface + * + * Probe for component register information and return it in map object. + */ +void cxl_probe_component_regs(struct device *dev, void __iomem *base, + struct cxl_component_reg_map *map) +{ + int cap, cap_count; + u64 cap_array; + + *map = (struct cxl_component_reg_map) { 0 }; + + /* + * CXL.cache and CXL.mem registers are at offset 0x1000 as defined in + * CXL 2.0 8.2.4 Table 141. + */ + base += CXL_CM_OFFSET; + + cap_array = readq(base + CXL_CM_CAP_HDR_OFFSET); + + if (FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, cap_array) != CM_CAP_HDR_CAP_ID) { + dev_err(dev, + "Couldn't locate the CXL.cache and CXL.mem capability array header./n"); + return; + } + + /* It's assumed that future versions will be backward compatible */ + cap_count = FIELD_GET(CXL_CM_CAP_HDR_ARRAY_SIZE_MASK, cap_array); + + for (cap = 1; cap <= cap_count; cap++) { + void __iomem *register_block; + u32 hdr; + int decoder_cnt; + u16 cap_id, offset; + u32 length; + + hdr = readl(base + cap * 0x4); + + cap_id = FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, hdr); + offset = FIELD_GET(CXL_CM_CAP_PTR_MASK, hdr); + register_block = base + offset; + + switch (cap_id) { + case CXL_CM_CAP_CAP_ID_HDM: + dev_dbg(dev, "found HDM decoder capability (0x%x)\n", + offset); + + hdr = readl(register_block); + + decoder_cnt = cxl_hdm_decoder_count(hdr); + length = 0x20 * decoder_cnt + 0x10; + + map->hdm_decoder.valid = true; + map->hdm_decoder.offset = CXL_CM_OFFSET + offset; + map->hdm_decoder.size = length; + break; + default: + dev_dbg(dev, "Unknown CM cap ID: %d (0x%x)\n", cap_id, + offset); + break; + } + } +} +EXPORT_SYMBOL_GPL(cxl_probe_component_regs); + +/** + * cxl_probe_device_regs() - Detect CXL Device register blocks + * @dev: Host device of the @base mapping + * @base: Mapping of CXL 2.0 8.2.8 CXL Device Register Interface + * @map: Map object describing the register block information found + * + * Probe for device register information and return it in map object. 
+ */ +void cxl_probe_device_regs(struct device *dev, void __iomem *base, + struct cxl_device_reg_map *map) +{ + int cap, cap_count; + u64 cap_array; + + *map = (struct cxl_device_reg_map){ 0 }; + + cap_array = readq(base + CXLDEV_CAP_ARRAY_OFFSET); + if (FIELD_GET(CXLDEV_CAP_ARRAY_ID_MASK, cap_array) != + CXLDEV_CAP_ARRAY_CAP_ID) + return; + + cap_count = FIELD_GET(CXLDEV_CAP_ARRAY_COUNT_MASK, cap_array); + + for (cap = 1; cap <= cap_count; cap++) { + u32 offset, length; + u16 cap_id; + + cap_id = FIELD_GET(CXLDEV_CAP_HDR_CAP_ID_MASK, + readl(base + cap * 0x10)); + offset = readl(base + cap * 0x10 + 0x4); + length = readl(base + cap * 0x10 + 0x8); + + switch (cap_id) { + case CXLDEV_CAP_CAP_ID_DEVICE_STATUS: + dev_dbg(dev, "found Status capability (0x%x)\n", offset); + + map->status.valid = true; + map->status.offset = offset; + map->status.size = length; + break; + case CXLDEV_CAP_CAP_ID_PRIMARY_MAILBOX: + dev_dbg(dev, "found Mailbox capability (0x%x)\n", offset); + map->mbox.valid = true; + map->mbox.offset = offset; + map->mbox.size = length; + break; + case CXLDEV_CAP_CAP_ID_SECONDARY_MAILBOX: + dev_dbg(dev, "found Secondary Mailbox capability (0x%x)\n", offset); + break; + case CXLDEV_CAP_CAP_ID_MEMDEV: + dev_dbg(dev, "found Memory Device capability (0x%x)\n", offset); + map->memdev.valid = true; + map->memdev.offset = offset; + map->memdev.size = length; + break; + default: + if (cap_id >= 0x8000) + dev_dbg(dev, "Vendor cap ID: %#x offset: %#x\n", cap_id, offset); + else + dev_dbg(dev, "Unknown cap ID: %#x offset: %#x\n", cap_id, offset); + break; + } + } +} +EXPORT_SYMBOL_GPL(cxl_probe_device_regs); + +static void __iomem *devm_cxl_iomap_block(struct device *dev, + resource_size_t addr, + resource_size_t length) +{ + void __iomem *ret_val; + struct resource *res; + + res = devm_request_mem_region(dev, addr, length, dev_name(dev)); + if (!res) { + resource_size_t end = addr + length - 1; + + dev_err(dev, "Failed to request region %pa-%pa\n", &addr, &end); + return NULL; + } + + ret_val = devm_ioremap(dev, addr, length); + if (!ret_val) + dev_err(dev, "Failed to map region %pr\n", res); + + return ret_val; +} + +int cxl_map_component_regs(struct pci_dev *pdev, + struct cxl_component_regs *regs, + struct cxl_register_map *map) +{ + struct device *dev = &pdev->dev; + resource_size_t phys_addr; + resource_size_t length; + + phys_addr = pci_resource_start(pdev, map->barno); + phys_addr += map->block_offset; + + phys_addr += map->component_map.hdm_decoder.offset; + length = map->component_map.hdm_decoder.size; + regs->hdm_decoder = devm_cxl_iomap_block(dev, phys_addr, length); + if (!regs->hdm_decoder) + return -ENOMEM; + + return 0; +} +EXPORT_SYMBOL_GPL(cxl_map_component_regs); + +int cxl_map_device_regs(struct pci_dev *pdev, + struct cxl_device_regs *regs, + struct cxl_register_map *map) +{ + struct device *dev = &pdev->dev; + resource_size_t phys_addr; + + phys_addr = pci_resource_start(pdev, map->barno); + phys_addr += map->block_offset; + + if (map->device_map.status.valid) { + resource_size_t addr; + resource_size_t length; + + addr = phys_addr + map->device_map.status.offset; + length = map->device_map.status.size; + regs->status = devm_cxl_iomap_block(dev, addr, length); + if (!regs->status) + return -ENOMEM; + } + + if (map->device_map.mbox.valid) { + resource_size_t addr; + resource_size_t length; + + addr = phys_addr + map->device_map.mbox.offset; + length = map->device_map.mbox.size; + regs->mbox = devm_cxl_iomap_block(dev, addr, length); + if (!regs->mbox) + return -ENOMEM; + 
} + + if (map->device_map.memdev.valid) { + resource_size_t addr; + resource_size_t length; + + addr = phys_addr + map->device_map.memdev.offset; + length = map->device_map.memdev.size; + regs->memdev = devm_cxl_iomap_block(dev, addr, length); + if (!regs->memdev) + return -ENOMEM; + } + + return 0; +} +EXPORT_SYMBOL_GPL(cxl_map_device_regs); diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index b6bda39a59e3..53927f9fa77e 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -140,7 +140,6 @@ struct cxl_device_reg_map { }; struct cxl_register_map { - struct list_head list; u64 block_offset; u8 reg_type; u8 barno; diff --git a/drivers/cxl/mem.h b/drivers/cxl/cxlmem.h index 8f02d02b26b4..6c0b1e2ea97c 100644 --- a/drivers/cxl/mem.h +++ b/drivers/cxl/cxlmem.h @@ -28,11 +28,20 @@ (FIELD_GET(CXLMDEV_RESET_NEEDED_MASK, status) != \ CXLMDEV_RESET_NEEDED_NOT) -/* - * An entire PCI topology full of devices should be enough for any - * config +/** + * struct cdevm_file_operations - devm coordinated cdev file operations + * @fops: file operations that are synchronized against @shutdown + * @shutdown: disconnect driver data + * + * @shutdown is invoked in the devres release path to disconnect any + * driver instance data from @dev. It assumes synchronization with any + * fops operation that requires driver data. After @shutdown an + * operation may only reference @device data. */ -#define CXL_MEM_MAX_DEVS 65536 +struct cdevm_file_operations { + struct file_operations fops; + void (*shutdown)(struct device *dev); +}; /** * struct cxl_memdev - CXL bus object representing a Type-3 Memory Device @@ -48,6 +57,15 @@ struct cxl_memdev { int id; }; +static inline struct cxl_memdev *to_cxl_memdev(struct device *dev) +{ + return container_of(dev, struct cxl_memdev, dev); +} + +struct cxl_memdev * +devm_cxl_add_memdev(struct device *host, struct cxl_mem *cxlm, + const struct cdevm_file_operations *cdevm_fops); + /** * struct cxl_mem - A CXL memory device * @pdev: The PCI device associated with this CXL device. @@ -77,5 +95,14 @@ struct cxl_mem { struct range pmem_range; struct range ram_range; + u64 total_bytes; + u64 volatile_only_bytes; + u64 persistent_only_bytes; + u64 partition_align_bytes; + + u64 active_volatile_bytes; + u64 active_persistent_bytes; + u64 next_volatile_bytes; + u64 next_persistent_bytes; }; #endif /* __CXL_MEM_H__ */ diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index 4cf351a3cf99..8e45aa07d662 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -12,9 +12,9 @@ #include <linux/pci.h> #include <linux/io.h> #include <linux/io-64-nonatomic-lo-hi.h> +#include "cxlmem.h" #include "pci.h" #include "cxl.h" -#include "mem.h" /** * DOC: cxl pci @@ -64,6 +64,15 @@ enum opcode { CXL_MBOX_OP_MAX = 0x10000 }; +/* + * CXL 2.0 - Memory capacity multiplier + * See Section 8.2.9.5 + * + * Volatile, Persistent, and Partition capacities are specified to be in + * multiples of 256MB - define a multiplier to convert to/from bytes. + */ +#define CXL_CAPACITY_MULTIPLIER SZ_256M + /** * struct mbox_cmd - A command to be submitted to hardware. * @opcode: (input) The command set and command submitted to hardware. 
@@ -94,8 +103,6 @@ struct mbox_cmd { #define CXL_MBOX_SUCCESS 0 }; -static int cxl_mem_major; -static DEFINE_IDA(cxl_memdev_ida); static DECLARE_RWSEM(cxl_memdev_rwsem); static struct dentry *cxl_debugfs; static bool cxl_raw_allow_all; @@ -568,7 +575,7 @@ static bool cxl_mem_raw_command_allowed(u16 opcode) if (!IS_ENABLED(CONFIG_CXL_MEM_RAW_COMMANDS)) return false; - if (security_locked_down(LOCKDOWN_NONE)) + if (security_locked_down(LOCKDOWN_PCI_ACCESS)) return false; if (cxl_raw_allow_all) @@ -806,13 +813,25 @@ static int cxl_memdev_release_file(struct inode *inode, struct file *file) return 0; } -static const struct file_operations cxl_memdev_fops = { - .owner = THIS_MODULE, - .unlocked_ioctl = cxl_memdev_ioctl, - .open = cxl_memdev_open, - .release = cxl_memdev_release_file, - .compat_ioctl = compat_ptr_ioctl, - .llseek = noop_llseek, +static void cxl_memdev_shutdown(struct device *dev) +{ + struct cxl_memdev *cxlmd = to_cxl_memdev(dev); + + down_write(&cxl_memdev_rwsem); + cxlmd->cxlm = NULL; + up_write(&cxl_memdev_rwsem); +} + +static const struct cdevm_file_operations cxl_memdev_fops = { + .fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = cxl_memdev_ioctl, + .open = cxl_memdev_open, + .release = cxl_memdev_release_file, + .compat_ioctl = compat_ptr_ioctl, + .llseek = noop_llseek, + }, + .shutdown = cxl_memdev_shutdown, }; static inline struct cxl_mem_command *cxl_mem_find_command(u16 opcode) @@ -1022,8 +1041,8 @@ static int cxl_probe_regs(struct cxl_mem *cxlm, void __iomem *base, !dev_map->memdev.valid) { dev_err(dev, "registers not found: %s%s%s\n", !dev_map->status.valid ? "status " : "", - !dev_map->mbox.valid ? "status " : "", - !dev_map->memdev.valid ? "status " : ""); + !dev_map->mbox.valid ? "mbox " : "", + !dev_map->memdev.valid ? 
"memdev " : ""); return -ENXIO; } @@ -1081,9 +1100,8 @@ static int cxl_mem_setup_regs(struct cxl_mem *cxlm) struct device *dev = &pdev->dev; u32 regloc_size, regblocks; void __iomem *base; - int regloc, i; - struct cxl_register_map *map, *n; - LIST_HEAD(register_maps); + int regloc, i, n_maps; + struct cxl_register_map *map, maps[CXL_REGLOC_RBI_TYPES]; int ret = 0; regloc = cxl_mem_dvsec(pdev, PCI_DVSEC_ID_CXL_REGLOC_DVSEC_ID); @@ -1102,20 +1120,12 @@ static int cxl_mem_setup_regs(struct cxl_mem *cxlm) regloc += PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET; regblocks = (regloc_size - PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET) / 8; - for (i = 0; i < regblocks; i++, regloc += 8) { + for (i = 0, n_maps = 0; i < regblocks; i++, regloc += 8) { u32 reg_lo, reg_hi; u8 reg_type; u64 offset; u8 bar; - map = kzalloc(sizeof(*map), GFP_KERNEL); - if (!map) { - ret = -ENOMEM; - goto free_maps; - } - - list_add(&map->list, ®ister_maps); - pci_read_config_dword(pdev, regloc, ®_lo); pci_read_config_dword(pdev, regloc + 4, ®_hi); @@ -1125,12 +1135,15 @@ static int cxl_mem_setup_regs(struct cxl_mem *cxlm) dev_dbg(dev, "Found register block in bar %u @ 0x%llx of type %u\n", bar, offset, reg_type); + /* Ignore unknown register block types */ + if (reg_type > CXL_REGLOC_RBI_MEMDEV) + continue; + base = cxl_mem_map_regblock(cxlm, bar, offset); - if (!base) { - ret = -ENOMEM; - goto free_maps; - } + if (!base) + return -ENOMEM; + map = &maps[n_maps]; map->barno = bar; map->block_offset = offset; map->reg_type = reg_type; @@ -1141,240 +1154,22 @@ static int cxl_mem_setup_regs(struct cxl_mem *cxlm) cxl_mem_unmap_regblock(cxlm, base); if (ret) - goto free_maps; + return ret; + + n_maps++; } pci_release_mem_regions(pdev); - list_for_each_entry(map, ®ister_maps, list) { - ret = cxl_map_regs(cxlm, map); + for (i = 0; i < n_maps; i++) { + ret = cxl_map_regs(cxlm, &maps[i]); if (ret) - goto free_maps; - } - -free_maps: - list_for_each_entry_safe(map, n, ®ister_maps, list) { - list_del(&map->list); - kfree(map); + break; } return ret; } -static struct cxl_memdev *to_cxl_memdev(struct device *dev) -{ - return container_of(dev, struct cxl_memdev, dev); -} - -static void cxl_memdev_release(struct device *dev) -{ - struct cxl_memdev *cxlmd = to_cxl_memdev(dev); - - ida_free(&cxl_memdev_ida, cxlmd->id); - kfree(cxlmd); -} - -static char *cxl_memdev_devnode(struct device *dev, umode_t *mode, kuid_t *uid, - kgid_t *gid) -{ - return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev)); -} - -static ssize_t firmware_version_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct cxl_memdev *cxlmd = to_cxl_memdev(dev); - struct cxl_mem *cxlm = cxlmd->cxlm; - - return sysfs_emit(buf, "%.16s\n", cxlm->firmware_version); -} -static DEVICE_ATTR_RO(firmware_version); - -static ssize_t payload_max_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct cxl_memdev *cxlmd = to_cxl_memdev(dev); - struct cxl_mem *cxlm = cxlmd->cxlm; - - return sysfs_emit(buf, "%zu\n", cxlm->payload_size); -} -static DEVICE_ATTR_RO(payload_max); - -static ssize_t label_storage_size_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct cxl_memdev *cxlmd = to_cxl_memdev(dev); - struct cxl_mem *cxlm = cxlmd->cxlm; - - return sysfs_emit(buf, "%zu\n", cxlm->lsa_size); -} -static DEVICE_ATTR_RO(label_storage_size); - -static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct cxl_memdev *cxlmd = to_cxl_memdev(dev); - struct cxl_mem *cxlm = cxlmd->cxlm; - 
unsigned long long len = range_len(&cxlm->ram_range); - - return sysfs_emit(buf, "%#llx\n", len); -} - -static struct device_attribute dev_attr_ram_size = - __ATTR(size, 0444, ram_size_show, NULL); - -static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct cxl_memdev *cxlmd = to_cxl_memdev(dev); - struct cxl_mem *cxlm = cxlmd->cxlm; - unsigned long long len = range_len(&cxlm->pmem_range); - - return sysfs_emit(buf, "%#llx\n", len); -} - -static struct device_attribute dev_attr_pmem_size = - __ATTR(size, 0444, pmem_size_show, NULL); - -static struct attribute *cxl_memdev_attributes[] = { - &dev_attr_firmware_version.attr, - &dev_attr_payload_max.attr, - &dev_attr_label_storage_size.attr, - NULL, -}; - -static struct attribute *cxl_memdev_pmem_attributes[] = { - &dev_attr_pmem_size.attr, - NULL, -}; - -static struct attribute *cxl_memdev_ram_attributes[] = { - &dev_attr_ram_size.attr, - NULL, -}; - -static struct attribute_group cxl_memdev_attribute_group = { - .attrs = cxl_memdev_attributes, -}; - -static struct attribute_group cxl_memdev_ram_attribute_group = { - .name = "ram", - .attrs = cxl_memdev_ram_attributes, -}; - -static struct attribute_group cxl_memdev_pmem_attribute_group = { - .name = "pmem", - .attrs = cxl_memdev_pmem_attributes, -}; - -static const struct attribute_group *cxl_memdev_attribute_groups[] = { - &cxl_memdev_attribute_group, - &cxl_memdev_ram_attribute_group, - &cxl_memdev_pmem_attribute_group, - NULL, -}; - -static const struct device_type cxl_memdev_type = { - .name = "cxl_memdev", - .release = cxl_memdev_release, - .devnode = cxl_memdev_devnode, - .groups = cxl_memdev_attribute_groups, -}; - -static void cxl_memdev_shutdown(struct cxl_memdev *cxlmd) -{ - down_write(&cxl_memdev_rwsem); - cxlmd->cxlm = NULL; - up_write(&cxl_memdev_rwsem); -} - -static void cxl_memdev_unregister(void *_cxlmd) -{ - struct cxl_memdev *cxlmd = _cxlmd; - struct device *dev = &cxlmd->dev; - - cdev_device_del(&cxlmd->cdev, dev); - cxl_memdev_shutdown(cxlmd); - put_device(dev); -} - -static struct cxl_memdev *cxl_memdev_alloc(struct cxl_mem *cxlm) -{ - struct pci_dev *pdev = cxlm->pdev; - struct cxl_memdev *cxlmd; - struct device *dev; - struct cdev *cdev; - int rc; - - cxlmd = kzalloc(sizeof(*cxlmd), GFP_KERNEL); - if (!cxlmd) - return ERR_PTR(-ENOMEM); - - rc = ida_alloc_range(&cxl_memdev_ida, 0, CXL_MEM_MAX_DEVS, GFP_KERNEL); - if (rc < 0) - goto err; - cxlmd->id = rc; - - dev = &cxlmd->dev; - device_initialize(dev); - dev->parent = &pdev->dev; - dev->bus = &cxl_bus_type; - dev->devt = MKDEV(cxl_mem_major, cxlmd->id); - dev->type = &cxl_memdev_type; - device_set_pm_not_required(dev); - - cdev = &cxlmd->cdev; - cdev_init(cdev, &cxl_memdev_fops); - return cxlmd; - -err: - kfree(cxlmd); - return ERR_PTR(rc); -} - -static struct cxl_memdev *devm_cxl_add_memdev(struct device *host, - struct cxl_mem *cxlm) -{ - struct cxl_memdev *cxlmd; - struct device *dev; - struct cdev *cdev; - int rc; - - cxlmd = cxl_memdev_alloc(cxlm); - if (IS_ERR(cxlmd)) - return cxlmd; - - dev = &cxlmd->dev; - rc = dev_set_name(dev, "mem%d", cxlmd->id); - if (rc) - goto err; - - /* - * Activate ioctl operations, no cxl_memdev_rwsem manipulation - * needed as this is ordered with cdev_add() publishing the device. 
- */ - cxlmd->cxlm = cxlm; - - cdev = &cxlmd->cdev; - rc = cdev_device_add(cdev, dev); - if (rc) - goto err; - - rc = devm_add_action_or_reset(host, cxl_memdev_unregister, cxlmd); - if (rc) - return ERR_PTR(rc); - return cxlmd; - -err: - /* - * The cdev was briefly live, shutdown any ioctl operations that - * saw that state. - */ - cxl_memdev_shutdown(cxlmd); - put_device(dev); - return ERR_PTR(rc); -} - static int cxl_xfer_log(struct cxl_mem *cxlm, uuid_t *uuid, u32 size, u8 *out) { u32 remaining = size; @@ -1469,6 +1264,53 @@ static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_mem *cxlm) } /** + * cxl_mem_get_partition_info - Get partition info + * @cxlm: The device to act on + * @active_volatile_bytes: returned active volatile capacity + * @active_persistent_bytes: returned active persistent capacity + * @next_volatile_bytes: return next volatile capacity + * @next_persistent_bytes: return next persistent capacity + * + * Retrieve the current partition info for the device specified. If not 0, the + * 'next' values are pending and take affect on next cold reset. + * + * Return: 0 if no error: or the result of the mailbox command. + * + * See CXL @8.2.9.5.2.1 Get Partition Info + */ +static int cxl_mem_get_partition_info(struct cxl_mem *cxlm, + u64 *active_volatile_bytes, + u64 *active_persistent_bytes, + u64 *next_volatile_bytes, + u64 *next_persistent_bytes) +{ + struct cxl_mbox_get_partition_info { + __le64 active_volatile_cap; + __le64 active_persistent_cap; + __le64 next_volatile_cap; + __le64 next_persistent_cap; + } __packed pi; + int rc; + + rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_PARTITION_INFO, + NULL, 0, &pi, sizeof(pi)); + if (rc) + return rc; + + *active_volatile_bytes = le64_to_cpu(pi.active_volatile_cap); + *active_persistent_bytes = le64_to_cpu(pi.active_persistent_cap); + *next_volatile_bytes = le64_to_cpu(pi.next_volatile_cap); + *next_persistent_bytes = le64_to_cpu(pi.next_volatile_cap); + + *active_volatile_bytes *= CXL_CAPACITY_MULTIPLIER; + *active_persistent_bytes *= CXL_CAPACITY_MULTIPLIER; + *next_volatile_bytes *= CXL_CAPACITY_MULTIPLIER; + *next_persistent_bytes *= CXL_CAPACITY_MULTIPLIER; + + return 0; +} + +/** * cxl_mem_enumerate_cmds() - Enumerate commands for a device. * @cxlm: The device. * @@ -1564,16 +1406,27 @@ static int cxl_mem_identify(struct cxl_mem *cxlm) if (rc < 0) return rc; - /* - * TODO: enumerate DPA map, as 'ram' and 'pmem' do not alias. 
- * For now, only the capacity is exported in sysfs - */ - cxlm->ram_range.start = 0; - cxlm->ram_range.end = le64_to_cpu(id.volatile_capacity) * SZ_256M - 1; + cxlm->total_bytes = le64_to_cpu(id.total_capacity); + cxlm->total_bytes *= CXL_CAPACITY_MULTIPLIER; + + cxlm->volatile_only_bytes = le64_to_cpu(id.volatile_capacity); + cxlm->volatile_only_bytes *= CXL_CAPACITY_MULTIPLIER; - cxlm->pmem_range.start = 0; - cxlm->pmem_range.end = - le64_to_cpu(id.persistent_capacity) * SZ_256M - 1; + cxlm->persistent_only_bytes = le64_to_cpu(id.persistent_capacity); + cxlm->persistent_only_bytes *= CXL_CAPACITY_MULTIPLIER; + + cxlm->partition_align_bytes = le64_to_cpu(id.partition_align); + cxlm->partition_align_bytes *= CXL_CAPACITY_MULTIPLIER; + + dev_dbg(&cxlm->pdev->dev, "Identify Memory Device\n" + " total_bytes = %#llx\n" + " volatile_only_bytes = %#llx\n" + " persistent_only_bytes = %#llx\n" + " partition_align_bytes = %#llx\n", + cxlm->total_bytes, + cxlm->volatile_only_bytes, + cxlm->persistent_only_bytes, + cxlm->partition_align_bytes); cxlm->lsa_size = le32_to_cpu(id.lsa_size); memcpy(cxlm->firmware_version, id.fw_revision, sizeof(id.fw_revision)); @@ -1581,6 +1434,49 @@ static int cxl_mem_identify(struct cxl_mem *cxlm) return 0; } +static int cxl_mem_create_range_info(struct cxl_mem *cxlm) +{ + int rc; + + if (cxlm->partition_align_bytes == 0) { + cxlm->ram_range.start = 0; + cxlm->ram_range.end = cxlm->volatile_only_bytes - 1; + cxlm->pmem_range.start = cxlm->volatile_only_bytes; + cxlm->pmem_range.end = cxlm->volatile_only_bytes + + cxlm->persistent_only_bytes - 1; + return 0; + } + + rc = cxl_mem_get_partition_info(cxlm, + &cxlm->active_volatile_bytes, + &cxlm->active_persistent_bytes, + &cxlm->next_volatile_bytes, + &cxlm->next_persistent_bytes); + if (rc < 0) { + dev_err(&cxlm->pdev->dev, "Failed to query partition information\n"); + return rc; + } + + dev_dbg(&cxlm->pdev->dev, "Get Partition Info\n" + " active_volatile_bytes = %#llx\n" + " active_persistent_bytes = %#llx\n" + " next_volatile_bytes = %#llx\n" + " next_persistent_bytes = %#llx\n", + cxlm->active_volatile_bytes, + cxlm->active_persistent_bytes, + cxlm->next_volatile_bytes, + cxlm->next_persistent_bytes); + + cxlm->ram_range.start = 0; + cxlm->ram_range.end = cxlm->active_volatile_bytes - 1; + + cxlm->pmem_range.start = cxlm->active_volatile_bytes; + cxlm->pmem_range.end = cxlm->active_volatile_bytes + + cxlm->active_persistent_bytes - 1; + + return 0; +} + static int cxl_mem_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct cxl_memdev *cxlmd; @@ -1611,7 +1507,11 @@ static int cxl_mem_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (rc) return rc; - cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlm); + rc = cxl_mem_create_range_info(cxlm); + if (rc) + return rc; + + cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlm, &cxl_memdev_fops); if (IS_ERR(cxlmd)) return PTR_ERR(cxlmd); @@ -1640,25 +1540,15 @@ static struct pci_driver cxl_mem_driver = { static __init int cxl_mem_init(void) { struct dentry *mbox_debugfs; - dev_t devt; int rc; /* Double check the anonymous union trickery in struct cxl_regs */ BUILD_BUG_ON(offsetof(struct cxl_regs, memdev) != offsetof(struct cxl_regs, device_regs.memdev)); - rc = alloc_chrdev_region(&devt, 0, CXL_MEM_MAX_DEVS, "cxl"); - if (rc) - return rc; - - cxl_mem_major = MAJOR(devt); - rc = pci_register_driver(&cxl_mem_driver); - if (rc) { - unregister_chrdev_region(MKDEV(cxl_mem_major, 0), - CXL_MEM_MAX_DEVS); + if (rc) return rc; - } cxl_debugfs = 
debugfs_create_dir("cxl", NULL); mbox_debugfs = debugfs_create_dir("mbox", cxl_debugfs); @@ -1672,7 +1562,6 @@ static __exit void cxl_mem_exit(void) { debugfs_remove_recursive(cxl_debugfs); pci_unregister_driver(&cxl_mem_driver); - unregister_chrdev_region(MKDEV(cxl_mem_major, 0), CXL_MEM_MAX_DEVS); } MODULE_LICENSE("GPL v2"); diff --git a/drivers/cxl/pci.h b/drivers/cxl/pci.h index dad7a831f65f..8c1a58813816 100644 --- a/drivers/cxl/pci.h +++ b/drivers/cxl/pci.h @@ -25,6 +25,7 @@ #define CXL_REGLOC_RBI_COMPONENT 1 #define CXL_REGLOC_RBI_VIRT 2 #define CXL_REGLOC_RBI_MEMDEV 3 +#define CXL_REGLOC_RBI_TYPES CXL_REGLOC_RBI_MEMDEV + 1 #define CXL_REGLOC_ADDR_MASK GENMASK(31, 16) diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c index 0088e41dd2f3..9652c3ee41e7 100644 --- a/drivers/cxl/pmem.c +++ b/drivers/cxl/pmem.c @@ -6,7 +6,7 @@ #include <linux/ndctl.h> #include <linux/async.h> #include <linux/slab.h> -#include "mem.h" +#include "cxlmem.h" #include "cxl.h" /* diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c index 9251441fd8a3..7f473f9db300 100644 --- a/drivers/nvdimm/label.c +++ b/drivers/nvdimm/label.c @@ -346,29 +346,45 @@ static bool preamble_next(struct nvdimm_drvdata *ndd, free, nslot); } +static bool nsl_validate_checksum(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label) +{ + u64 sum, sum_save; + + if (!namespace_label_has(ndd, checksum)) + return true; + + sum_save = nsl_get_checksum(ndd, nd_label); + nsl_set_checksum(ndd, nd_label, 0); + sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1); + nsl_set_checksum(ndd, nd_label, sum_save); + return sum == sum_save; +} + +static void nsl_calculate_checksum(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label) +{ + u64 sum; + + if (!namespace_label_has(ndd, checksum)) + return; + nsl_set_checksum(ndd, nd_label, 0); + sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1); + nsl_set_checksum(ndd, nd_label, sum); +} + static bool slot_valid(struct nvdimm_drvdata *ndd, struct nd_namespace_label *nd_label, u32 slot) { + bool valid; + /* check that we are written where we expect to be written */ - if (slot != __le32_to_cpu(nd_label->slot)) + if (slot != nsl_get_slot(ndd, nd_label)) return false; - - /* check checksum */ - if (namespace_label_has(ndd, checksum)) { - u64 sum, sum_save; - - sum_save = __le64_to_cpu(nd_label->checksum); - nd_label->checksum = __cpu_to_le64(0); - sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1); - nd_label->checksum = __cpu_to_le64(sum_save); - if (sum != sum_save) { - dev_dbg(ndd->dev, "fail checksum. slot: %d expect: %#llx\n", - slot, sum); - return false; - } - } - - return true; + valid = nsl_validate_checksum(ndd, nd_label); + if (!valid) + dev_dbg(ndd->dev, "fail checksum. 
slot: %d\n", slot); + return valid; } int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd) @@ -395,13 +411,13 @@ int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd) continue; memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN); - flags = __le32_to_cpu(nd_label->flags); + flags = nsl_get_flags(ndd, nd_label); if (test_bit(NDD_NOBLK, &nvdimm->flags)) flags &= ~NSLABEL_FLAG_LOCAL; nd_label_gen_id(&label_id, label_uuid, flags); res = nvdimm_allocate_dpa(ndd, &label_id, - __le64_to_cpu(nd_label->dpa), - __le64_to_cpu(nd_label->rawsize)); + nsl_get_dpa(ndd, nd_label), + nsl_get_rawsize(ndd, nd_label)); nd_dbg_dpa(nd_region, ndd, res, "reserve\n"); if (!res) return -EBUSY; @@ -548,9 +564,9 @@ int nd_label_active_count(struct nvdimm_drvdata *ndd) nd_label = to_label(ndd, slot); if (!slot_valid(ndd, nd_label, slot)) { - u32 label_slot = __le32_to_cpu(nd_label->slot); - u64 size = __le64_to_cpu(nd_label->rawsize); - u64 dpa = __le64_to_cpu(nd_label->dpa); + u32 label_slot = nsl_get_slot(ndd, nd_label); + u64 size = nsl_get_rawsize(ndd, nd_label); + u64 dpa = nsl_get_dpa(ndd, nd_label); dev_dbg(ndd->dev, "slot%d invalid slot: %d dpa: %llx size: %llx\n", @@ -708,7 +724,7 @@ static unsigned long nd_label_offset(struct nvdimm_drvdata *ndd, - (unsigned long) to_namespace_index(ndd, 0); } -enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid) +static enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid) { if (guid_equal(guid, &nvdimm_btt_guid)) return NVDIMM_CCLASS_BTT; @@ -756,6 +772,45 @@ static void reap_victim(struct nd_mapping *nd_mapping, victim->label = NULL; } +static void nsl_set_type_guid(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label, guid_t *guid) +{ + if (namespace_label_has(ndd, type_guid)) + guid_copy(&nd_label->type_guid, guid); +} + +bool nsl_validate_type_guid(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label, guid_t *guid) +{ + if (!namespace_label_has(ndd, type_guid)) + return true; + if (!guid_equal(&nd_label->type_guid, guid)) { + dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n", guid, + &nd_label->type_guid); + return false; + } + return true; +} + +static void nsl_set_claim_class(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label, + enum nvdimm_claim_class claim_class) +{ + if (!namespace_label_has(ndd, abstraction_guid)) + return; + guid_copy(&nd_label->abstraction_guid, + to_abstraction_guid(claim_class, + &nd_label->abstraction_guid)); +} + +enum nvdimm_claim_class nsl_get_claim_class(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label) +{ + if (!namespace_label_has(ndd, abstraction_guid)) + return NVDIMM_CCLASS_NONE; + return to_nvdimm_cclass(&nd_label->abstraction_guid); +} + static int __pmem_label_update(struct nd_region *nd_region, struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm, int pos, unsigned long flags) @@ -797,29 +852,18 @@ static int __pmem_label_update(struct nd_region *nd_region, nd_label = to_label(ndd, slot); memset(nd_label, 0, sizeof_namespace_label(ndd)); memcpy(nd_label->uuid, nspm->uuid, NSLABEL_UUID_LEN); - if (nspm->alt_name) - memcpy(nd_label->name, nspm->alt_name, NSLABEL_NAME_LEN); - nd_label->flags = __cpu_to_le32(flags); - nd_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings); - nd_label->position = __cpu_to_le16(pos); - nd_label->isetcookie = __cpu_to_le64(cookie); - nd_label->rawsize = __cpu_to_le64(resource_size(res)); - nd_label->lbasize = __cpu_to_le64(nspm->lbasize); - nd_label->dpa = __cpu_to_le64(res->start); - nd_label->slot = __cpu_to_le32(slot); - if 
(namespace_label_has(ndd, type_guid)) - guid_copy(&nd_label->type_guid, &nd_set->type_guid); - if (namespace_label_has(ndd, abstraction_guid)) - guid_copy(&nd_label->abstraction_guid, - to_abstraction_guid(ndns->claim_class, - &nd_label->abstraction_guid)); - if (namespace_label_has(ndd, checksum)) { - u64 sum; - - nd_label->checksum = __cpu_to_le64(0); - sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1); - nd_label->checksum = __cpu_to_le64(sum); - } + nsl_set_name(ndd, nd_label, nspm->alt_name); + nsl_set_flags(ndd, nd_label, flags); + nsl_set_nlabel(ndd, nd_label, nd_region->ndr_mappings); + nsl_set_position(ndd, nd_label, pos); + nsl_set_isetcookie(ndd, nd_label, cookie); + nsl_set_rawsize(ndd, nd_label, resource_size(res)); + nsl_set_lbasize(ndd, nd_label, nspm->lbasize); + nsl_set_dpa(ndd, nd_label, res->start); + nsl_set_slot(ndd, nd_label, slot); + nsl_set_type_guid(ndd, nd_label, &nd_set->type_guid); + nsl_set_claim_class(ndd, nd_label, ndns->claim_class); + nsl_calculate_checksum(ndd, nd_label); nd_dbg_dpa(nd_region, ndd, res, "\n"); /* update label */ @@ -879,9 +923,9 @@ static struct resource *to_resource(struct nvdimm_drvdata *ndd, struct resource *res; for_each_dpa_resource(ndd, res) { - if (res->start != __le64_to_cpu(nd_label->dpa)) + if (res->start != nsl_get_dpa(ndd, nd_label)) continue; - if (resource_size(res) != __le64_to_cpu(nd_label->rawsize)) + if (resource_size(res) != nsl_get_rawsize(ndd, nd_label)) continue; return res; } @@ -890,6 +934,59 @@ static struct resource *to_resource(struct nvdimm_drvdata *ndd, } /* + * Use the presence of the type_guid as a flag to determine isetcookie + * usage and nlabel + position policy for blk-aperture namespaces. + */ +static void nsl_set_blk_isetcookie(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label, + u64 isetcookie) +{ + if (namespace_label_has(ndd, type_guid)) { + nsl_set_isetcookie(ndd, nd_label, isetcookie); + return; + } + nsl_set_isetcookie(ndd, nd_label, 0); /* N/A */ +} + +bool nsl_validate_blk_isetcookie(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label, + u64 isetcookie) +{ + if (!namespace_label_has(ndd, type_guid)) + return true; + + if (nsl_get_isetcookie(ndd, nd_label) != isetcookie) { + dev_dbg(ndd->dev, "expect cookie %#llx got %#llx\n", isetcookie, + nsl_get_isetcookie(ndd, nd_label)); + return false; + } + + return true; +} + +static void nsl_set_blk_nlabel(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label, int nlabel, + bool first) +{ + if (!namespace_label_has(ndd, type_guid)) { + nsl_set_nlabel(ndd, nd_label, 0); /* N/A */ + return; + } + nsl_set_nlabel(ndd, nd_label, first ? nlabel : 0xffff); +} + +static void nsl_set_blk_position(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label, + bool first) +{ + if (!namespace_label_has(ndd, type_guid)) { + nsl_set_position(ndd, nd_label, 0); + return; + } + nsl_set_position(ndd, nd_label, first ? 
0 : 0xffff); +} + +/* * 1/ Account all the labels that can be freed after this update * 2/ Allocate and write the label to the staging (next) index * 3/ Record the resources in the namespace device @@ -1017,50 +1114,21 @@ static int __blk_label_update(struct nd_region *nd_region, nd_label = to_label(ndd, slot); memset(nd_label, 0, sizeof_namespace_label(ndd)); memcpy(nd_label->uuid, nsblk->uuid, NSLABEL_UUID_LEN); - if (nsblk->alt_name) - memcpy(nd_label->name, nsblk->alt_name, - NSLABEL_NAME_LEN); - nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_LOCAL); - - /* - * Use the presence of the type_guid as a flag to - * determine isetcookie usage and nlabel + position - * policy for blk-aperture namespaces. - */ - if (namespace_label_has(ndd, type_guid)) { - if (i == min_dpa_idx) { - nd_label->nlabel = __cpu_to_le16(nsblk->num_resources); - nd_label->position = __cpu_to_le16(0); - } else { - nd_label->nlabel = __cpu_to_le16(0xffff); - nd_label->position = __cpu_to_le16(0xffff); - } - nd_label->isetcookie = __cpu_to_le64(nd_set->cookie2); - } else { - nd_label->nlabel = __cpu_to_le16(0); /* N/A */ - nd_label->position = __cpu_to_le16(0); /* N/A */ - nd_label->isetcookie = __cpu_to_le64(0); /* N/A */ - } - - nd_label->dpa = __cpu_to_le64(res->start); - nd_label->rawsize = __cpu_to_le64(resource_size(res)); - nd_label->lbasize = __cpu_to_le64(nsblk->lbasize); - nd_label->slot = __cpu_to_le32(slot); - if (namespace_label_has(ndd, type_guid)) - guid_copy(&nd_label->type_guid, &nd_set->type_guid); - if (namespace_label_has(ndd, abstraction_guid)) - guid_copy(&nd_label->abstraction_guid, - to_abstraction_guid(ndns->claim_class, - &nd_label->abstraction_guid)); - - if (namespace_label_has(ndd, checksum)) { - u64 sum; - - nd_label->checksum = __cpu_to_le64(0); - sum = nd_fletcher64(nd_label, - sizeof_namespace_label(ndd), 1); - nd_label->checksum = __cpu_to_le64(sum); - } + nsl_set_name(ndd, nd_label, nsblk->alt_name); + nsl_set_flags(ndd, nd_label, NSLABEL_FLAG_LOCAL); + + nsl_set_blk_nlabel(ndd, nd_label, nsblk->num_resources, + i == min_dpa_idx); + nsl_set_blk_position(ndd, nd_label, i == min_dpa_idx); + nsl_set_blk_isetcookie(ndd, nd_label, nd_set->cookie2); + + nsl_set_dpa(ndd, nd_label, res->start); + nsl_set_rawsize(ndd, nd_label, resource_size(res)); + nsl_set_lbasize(ndd, nd_label, nsblk->lbasize); + nsl_set_slot(ndd, nd_label, slot); + nsl_set_type_guid(ndd, nd_label, &nd_set->type_guid); + nsl_set_claim_class(ndd, nd_label, ndns->claim_class); + nsl_calculate_checksum(ndd, nd_label); /* update label */ offset = nd_label_offset(ndd, nd_label); diff --git a/drivers/nvdimm/label.h b/drivers/nvdimm/label.h index 956b6d1bd8cc..31f94fad7b92 100644 --- a/drivers/nvdimm/label.h +++ b/drivers/nvdimm/label.h @@ -135,7 +135,6 @@ struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n); u32 nd_label_alloc_slot(struct nvdimm_drvdata *ndd); bool nd_label_free_slot(struct nvdimm_drvdata *ndd, u32 slot); u32 nd_label_nfree(struct nvdimm_drvdata *ndd); -enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid); struct nd_region; struct nd_namespace_pmem; struct nd_namespace_blk; diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index 745478213ff2..4cec171c934d 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -1235,7 +1235,7 @@ static int namespace_update_uuid(struct nd_region *nd_region, if (!nd_label) continue; nd_label_gen_id(&label_id, nd_label->uuid, - __le32_to_cpu(nd_label->flags)); + nsl_get_flags(ndd, nd_label)); if 
(strcmp(old_label_id.id, label_id.id) == 0) set_bit(ND_LABEL_REAP, &label_ent->flags); } @@ -1847,28 +1847,21 @@ static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid, list_for_each_entry(label_ent, &nd_mapping->labels, list) { struct nd_namespace_label *nd_label = label_ent->label; u16 position, nlabel; - u64 isetcookie; if (!nd_label) continue; - isetcookie = __le64_to_cpu(nd_label->isetcookie); - position = __le16_to_cpu(nd_label->position); - nlabel = __le16_to_cpu(nd_label->nlabel); + position = nsl_get_position(ndd, nd_label); + nlabel = nsl_get_nlabel(ndd, nd_label); - if (isetcookie != cookie) + if (!nsl_validate_isetcookie(ndd, nd_label, cookie)) continue; if (memcmp(nd_label->uuid, uuid, NSLABEL_UUID_LEN) != 0) continue; - if (namespace_label_has(ndd, type_guid) - && !guid_equal(&nd_set->type_guid, - &nd_label->type_guid)) { - dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n", - &nd_set->type_guid, - &nd_label->type_guid); + if (!nsl_validate_type_guid(ndd, nd_label, + &nd_set->type_guid)) continue; - } if (found_uuid) { dev_dbg(ndd->dev, "duplicate entry for uuid\n"); @@ -1923,8 +1916,8 @@ static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id) */ hw_start = nd_mapping->start; hw_end = hw_start + nd_mapping->size; - pmem_start = __le64_to_cpu(nd_label->dpa); - pmem_end = pmem_start + __le64_to_cpu(nd_label->rawsize); + pmem_start = nsl_get_dpa(ndd, nd_label); + pmem_end = pmem_start + nsl_get_rawsize(ndd, nd_label); if (pmem_start >= hw_start && pmem_start < hw_end && pmem_end <= hw_end && pmem_end > hw_start) /* pass */; @@ -1947,14 +1940,16 @@ static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id) * @nd_label: target pmem namespace label to evaluate */ static struct device *create_namespace_pmem(struct nd_region *nd_region, - struct nd_namespace_index *nsindex, - struct nd_namespace_label *nd_label) + struct nd_mapping *nd_mapping, + struct nd_namespace_label *nd_label) { + struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); + struct nd_namespace_index *nsindex = + to_namespace_index(ndd, ndd->ns_current); u64 cookie = nd_region_interleave_set_cookie(nd_region, nsindex); u64 altcookie = nd_region_interleave_set_altcookie(nd_region); struct nd_label_ent *label_ent; struct nd_namespace_pmem *nspm; - struct nd_mapping *nd_mapping; resource_size_t size = 0; struct resource *res; struct device *dev; @@ -1966,10 +1961,10 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region, return ERR_PTR(-ENXIO); } - if (__le64_to_cpu(nd_label->isetcookie) != cookie) { + if (!nsl_validate_isetcookie(ndd, nd_label, cookie)) { dev_dbg(&nd_region->dev, "invalid cookie in label: %pUb\n", nd_label->uuid); - if (__le64_to_cpu(nd_label->isetcookie) != altcookie) + if (!nsl_validate_isetcookie(ndd, nd_label, altcookie)) return ERR_PTR(-EAGAIN); dev_dbg(&nd_region->dev, "valid altcookie in label: %pUb\n", @@ -2037,20 +2032,18 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region, continue; } - size += __le64_to_cpu(label0->rawsize); - if (__le16_to_cpu(label0->position) != 0) + ndd = to_ndd(nd_mapping); + size += nsl_get_rawsize(ndd, label0); + if (nsl_get_position(ndd, label0) != 0) continue; WARN_ON(nspm->alt_name || nspm->uuid); - nspm->alt_name = kmemdup((void __force *) label0->name, - NSLABEL_NAME_LEN, GFP_KERNEL); + nspm->alt_name = kmemdup(nsl_ref_name(ndd, label0), + NSLABEL_NAME_LEN, GFP_KERNEL); nspm->uuid = kmemdup((void __force *) label0->uuid, NSLABEL_UUID_LEN, GFP_KERNEL); - nspm->lbasize = 
__le64_to_cpu(label0->lbasize); - ndd = to_ndd(nd_mapping); - if (namespace_label_has(ndd, abstraction_guid)) - nspm->nsio.common.claim_class - = to_nvdimm_cclass(&label0->abstraction_guid); - + nspm->lbasize = nsl_get_lbasize(ndd, label0); + nspm->nsio.common.claim_class = + nsl_get_claim_class(ndd, label0); } if (!nspm->alt_name || !nspm->uuid) { @@ -2237,7 +2230,7 @@ static int add_namespace_resource(struct nd_region *nd_region, if (is_namespace_blk(devs[i])) { res = nsblk_add_resource(nd_region, ndd, to_nd_namespace_blk(devs[i]), - __le64_to_cpu(nd_label->dpa)); + nsl_get_dpa(ndd, nd_label)); if (!res) return -ENXIO; nd_dbg_dpa(nd_region, ndd, res, "%d assign\n", count); @@ -2265,21 +2258,10 @@ static struct device *create_namespace_blk(struct nd_region *nd_region, struct device *dev = NULL; struct resource *res; - if (namespace_label_has(ndd, type_guid)) { - if (!guid_equal(&nd_set->type_guid, &nd_label->type_guid)) { - dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n", - &nd_set->type_guid, - &nd_label->type_guid); - return ERR_PTR(-EAGAIN); - } - - if (nd_label->isetcookie != __cpu_to_le64(nd_set->cookie2)) { - dev_dbg(ndd->dev, "expect cookie %#llx got %#llx\n", - nd_set->cookie2, - __le64_to_cpu(nd_label->isetcookie)); - return ERR_PTR(-EAGAIN); - } - } + if (!nsl_validate_type_guid(ndd, nd_label, &nd_set->type_guid)) + return ERR_PTR(-EAGAIN); + if (!nsl_validate_blk_isetcookie(ndd, nd_label, nd_set->cookie2)) + return ERR_PTR(-EAGAIN); nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL); if (!nsblk) @@ -2288,23 +2270,19 @@ static struct device *create_namespace_blk(struct nd_region *nd_region, dev->type = &namespace_blk_device_type; dev->parent = &nd_region->dev; nsblk->id = -1; - nsblk->lbasize = __le64_to_cpu(nd_label->lbasize); - nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN, - GFP_KERNEL); - if (namespace_label_has(ndd, abstraction_guid)) - nsblk->common.claim_class - = to_nvdimm_cclass(&nd_label->abstraction_guid); + nsblk->lbasize = nsl_get_lbasize(ndd, nd_label); + nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN, GFP_KERNEL); + nsblk->common.claim_class = nsl_get_claim_class(ndd, nd_label); if (!nsblk->uuid) goto blk_err; - memcpy(name, nd_label->name, NSLABEL_NAME_LEN); + nsl_get_name(ndd, nd_label, name); if (name[0]) { - nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN, - GFP_KERNEL); + nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN, GFP_KERNEL); if (!nsblk->alt_name) goto blk_err; } res = nsblk_add_resource(nd_region, ndd, nsblk, - __le64_to_cpu(nd_label->dpa)); + nsl_get_dpa(ndd, nd_label)); if (!res) goto blk_err; nd_dbg_dpa(nd_region, ndd, res, "%d: assign\n", count); @@ -2345,6 +2323,7 @@ static struct device **scan_labels(struct nd_region *nd_region) struct device *dev, **devs = NULL; struct nd_label_ent *label_ent, *e; struct nd_mapping *nd_mapping = &nd_region->mapping[0]; + struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); resource_size_t map_end = nd_mapping->start + nd_mapping->size - 1; /* "safe" because create_namespace_pmem() might list_move() label_ent */ @@ -2355,7 +2334,7 @@ static struct device **scan_labels(struct nd_region *nd_region) if (!nd_label) continue; - flags = __le32_to_cpu(nd_label->flags); + flags = nsl_get_flags(ndd, nd_label); if (is_nd_blk(&nd_region->dev) == !!(flags & NSLABEL_FLAG_LOCAL)) /* pass, region matches label type */; @@ -2363,9 +2342,9 @@ static struct device **scan_labels(struct nd_region *nd_region) continue; /* skip labels that describe extents outside of the region */ - if (__le64_to_cpu(nd_label->dpa) < 
nd_mapping->start || - __le64_to_cpu(nd_label->dpa) > map_end) - continue; + if (nsl_get_dpa(ndd, nd_label) < nd_mapping->start || + nsl_get_dpa(ndd, nd_label) > map_end) + continue; i = add_namespace_resource(nd_region, nd_label, devs, count); if (i < 0) @@ -2381,13 +2360,9 @@ static struct device **scan_labels(struct nd_region *nd_region) if (is_nd_blk(&nd_region->dev)) dev = create_namespace_blk(nd_region, nd_label, count); - else { - struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); - struct nd_namespace_index *nsindex; - - nsindex = to_namespace_index(ndd, ndd->ns_current); - dev = create_namespace_pmem(nd_region, nsindex, nd_label); - } + else + dev = create_namespace_pmem(nd_region, nd_mapping, + nd_label); if (IS_ERR(dev)) { switch (PTR_ERR(dev)) { @@ -2571,10 +2546,10 @@ static int init_active_labels(struct nd_region *nd_region) break; label = nd_label_active(ndd, j); if (test_bit(NDD_NOBLK, &nvdimm->flags)) { - u32 flags = __le32_to_cpu(label->flags); + u32 flags = nsl_get_flags(ndd, label); flags &= ~NSLABEL_FLAG_LOCAL; - label->flags = __cpu_to_le32(flags); + nsl_set_flags(ndd, label, flags); } label_ent->label = label; diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h index 696b55556d4d..5467ebbb4a6b 100644 --- a/drivers/nvdimm/nd.h +++ b/drivers/nvdimm/nd.h @@ -35,6 +35,156 @@ struct nvdimm_drvdata { struct kref kref; }; +static inline const u8 *nsl_ref_name(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label) +{ + return nd_label->name; +} + +static inline u8 *nsl_get_name(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label, u8 *name) +{ + return memcpy(name, nd_label->name, NSLABEL_NAME_LEN); +} + +static inline u8 *nsl_set_name(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label, u8 *name) +{ + if (!name) + return NULL; + return memcpy(nd_label->name, name, NSLABEL_NAME_LEN); +} + +static inline u32 nsl_get_slot(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label) +{ + return __le32_to_cpu(nd_label->slot); +} + +static inline void nsl_set_slot(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label, u32 slot) +{ + nd_label->slot = __cpu_to_le32(slot); +} + +static inline u64 nsl_get_checksum(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label) +{ + return __le64_to_cpu(nd_label->checksum); +} + +static inline void nsl_set_checksum(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label, + u64 checksum) +{ + nd_label->checksum = __cpu_to_le64(checksum); +} + +static inline u32 nsl_get_flags(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label) +{ + return __le32_to_cpu(nd_label->flags); +} + +static inline void nsl_set_flags(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label, u32 flags) +{ + nd_label->flags = __cpu_to_le32(flags); +} + +static inline u64 nsl_get_dpa(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label) +{ + return __le64_to_cpu(nd_label->dpa); +} + +static inline void nsl_set_dpa(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label, u64 dpa) +{ + nd_label->dpa = __cpu_to_le64(dpa); +} + +static inline u64 nsl_get_rawsize(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label) +{ + return __le64_to_cpu(nd_label->rawsize); +} + +static inline void nsl_set_rawsize(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label, + u64 rawsize) +{ + nd_label->rawsize = __cpu_to_le64(rawsize); +} + +static inline u64 nsl_get_isetcookie(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label) +{ 
+	return __le64_to_cpu(nd_label->isetcookie);
+}
+
+static inline void nsl_set_isetcookie(struct nvdimm_drvdata *ndd,
+				      struct nd_namespace_label *nd_label,
+				      u64 isetcookie)
+{
+	nd_label->isetcookie = __cpu_to_le64(isetcookie);
+}
+
+static inline bool nsl_validate_isetcookie(struct nvdimm_drvdata *ndd,
+					   struct nd_namespace_label *nd_label,
+					   u64 cookie)
+{
+	return cookie == __le64_to_cpu(nd_label->isetcookie);
+}
+
+static inline u16 nsl_get_position(struct nvdimm_drvdata *ndd,
+				   struct nd_namespace_label *nd_label)
+{
+	return __le16_to_cpu(nd_label->position);
+}
+
+static inline void nsl_set_position(struct nvdimm_drvdata *ndd,
+				    struct nd_namespace_label *nd_label,
+				    u16 position)
+{
+	nd_label->position = __cpu_to_le16(position);
+}
+
+
+static inline u16 nsl_get_nlabel(struct nvdimm_drvdata *ndd,
+				 struct nd_namespace_label *nd_label)
+{
+	return __le16_to_cpu(nd_label->nlabel);
+}
+
+static inline void nsl_set_nlabel(struct nvdimm_drvdata *ndd,
+				  struct nd_namespace_label *nd_label,
+				  u16 nlabel)
+{
+	nd_label->nlabel = __cpu_to_le16(nlabel);
+}
+
+static inline u64 nsl_get_lbasize(struct nvdimm_drvdata *ndd,
+				  struct nd_namespace_label *nd_label)
+{
+	return __le64_to_cpu(nd_label->lbasize);
+}
+
+static inline void nsl_set_lbasize(struct nvdimm_drvdata *ndd,
+				   struct nd_namespace_label *nd_label,
+				   u64 lbasize)
+{
+	nd_label->lbasize = __cpu_to_le64(lbasize);
+}
+
+bool nsl_validate_blk_isetcookie(struct nvdimm_drvdata *ndd,
+				 struct nd_namespace_label *nd_label,
+				 u64 isetcookie);
+bool nsl_validate_type_guid(struct nvdimm_drvdata *ndd,
+			    struct nd_namespace_label *nd_label, guid_t *guid);
+enum nvdimm_claim_class nsl_get_claim_class(struct nvdimm_drvdata *ndd,
+					    struct nd_namespace_label *nd_label);
+
 struct nd_region_data {
 	int ns_count;
 	int ns_active;
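
The pci.c changes above stop publishing ram/pmem ranges straight from Identify Memory Device and instead store the raw capacities, then derive the DPA ranges in cxl_mem_create_range_info(): a device with no partition alignment keeps the static volatile/persistent split, while a partitionable device is queried with Get Partition Info and the active values win. A minimal userspace sketch of that arithmetic, with hypothetical names standing in for the cxl_mem fields in the diff:

#include <stdint.h>
#include <stdio.h>

struct range { uint64_t start, end; };

/*
 * Pick the static Identify capacities when the device cannot be
 * repartitioned, otherwise use the active values reported by
 * Get Partition Info.
 */
static void derive_ranges(uint64_t partition_align,
			  uint64_t volatile_only, uint64_t persistent_only,
			  uint64_t active_volatile, uint64_t active_persistent,
			  struct range *ram, struct range *pmem)
{
	uint64_t vol = partition_align ? active_volatile : volatile_only;
	uint64_t pers = partition_align ? active_persistent : persistent_only;

	ram->start = 0;			/* volatile capacity occupies DPA 0.. */
	ram->end = vol - 1;
	pmem->start = vol;		/* persistent capacity follows it */
	pmem->end = vol + pers - 1;
}

int main(void)
{
	struct range ram, pmem;

	/* 256 MiB volatile + 256 MiB persistent, no partitioning support. */
	derive_ranges(0, 1ULL << 28, 1ULL << 28, 0, 0, &ram, &pmem);
	printf("ram  %#llx-%#llx\n", (unsigned long long)ram.start,
	       (unsigned long long)ram.end);
	printf("pmem %#llx-%#llx\n", (unsigned long long)pmem.start,
	       (unsigned long long)pmem.end);
	return 0;
}

In both the partitionable and non-partitionable cases the ram range starts at DPA 0 and the pmem range begins immediately after it, which is the layout the diff encodes.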
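
The nd.h helpers added above route every namespace-label field access through nsl_get_*()/nsl_set_*() so the on-media little-endian encoding is handled in exactly one place; the otherwise-unused ndd parameter is presumably what will later allow a different label layout to be selected per DIMM without touching callers. A rough userspace analogue of the pattern, using glibc's endian.h where the kernel uses __cpu_to_le64()/__le64_to_cpu(); the ndd argument is dropped here because this sketch supports only one format:

#define _DEFAULT_SOURCE
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* A tiny on-media structure; fields are stored little-endian. */
struct demo_label {
	uint64_t dpa;
	uint64_t rawsize;
};

static uint64_t lbl_get_dpa(const struct demo_label *l)
{
	return le64toh(l->dpa);
}

static void lbl_set_dpa(struct demo_label *l, uint64_t dpa)
{
	l->dpa = htole64(dpa);
}

static uint64_t lbl_get_rawsize(const struct demo_label *l)
{
	return le64toh(l->rawsize);
}

static void lbl_set_rawsize(struct demo_label *l, uint64_t rawsize)
{
	l->rawsize = htole64(rawsize);
}

int main(void)
{
	struct demo_label l;

	/* Callers never touch the raw fields, so byte order stays internal. */
	lbl_set_dpa(&l, 0x10000000);
	lbl_set_rawsize(&l, 0x8000000);
	printf("dpa=%#llx rawsize=%#llx\n",
	       (unsigned long long)lbl_get_dpa(&l),
	       (unsigned long long)lbl_get_rawsize(&l));
	return 0;
}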
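
nsl_validate_checksum() and nsl_calculate_checksum() above fold the previously open-coded Fletcher-64 handling into two helpers: zero the checksum field, sum the whole label, then either compare against the saved value (restoring it afterwards) or store the result. A self-contained sketch of that zero/compute/restore pattern; fletcher64() here is a simplified stand-in, not the kernel's nd_fletcher64():

#include <stdint.h>
#include <stdio.h>

struct label {
	uint64_t dpa;
	uint64_t rawsize;
	uint64_t checksum;	/* computed over the struct with itself zeroed */
};

/* Simplified 64-bit Fletcher-style sum over 32-bit words. */
static uint64_t fletcher64(const void *buf, size_t len)
{
	const uint32_t *w = buf;
	uint64_t lo = 0, hi = 0;

	for (size_t i = 0; i < len / sizeof(uint32_t); i++) {
		lo = (lo + w[i]) & 0xffffffff;
		hi = (hi + lo) & 0xffffffff;
	}
	return (hi << 32) | lo;
}

static void label_set_checksum(struct label *l)
{
	l->checksum = 0;			/* field must be zero while summing */
	l->checksum = fletcher64(l, sizeof(*l));
}

static int label_validate_checksum(struct label *l)
{
	uint64_t save = l->checksum;
	uint64_t sum;

	l->checksum = 0;
	sum = fletcher64(l, sizeof(*l));
	l->checksum = save;			/* restore before comparing */
	return sum == save;
}

int main(void)
{
	struct label l = { .dpa = 0x1000, .rawsize = 0x10000 };

	label_set_checksum(&l);
	printf("valid=%d\n", label_validate_checksum(&l));
	l.rawsize ^= 1;				/* corrupt a field */
	printf("valid after corruption=%d\n", label_validate_checksum(&l));
	return 0;
}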
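
The blk path keeps its historical policy, now expressed through nsl_set_blk_nlabel(), nsl_set_blk_position() and nsl_set_blk_isetcookie(): when the label format predates type_guid the three fields are recorded as not applicable, otherwise only the first (lowest-DPA) label carries the real nlabel and position and the remaining labels get 0xffff. A compact sketch of that decision, with hypothetical names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct blk_label_fields {
	uint16_t nlabel;
	uint16_t position;
	uint64_t isetcookie;
};

static void fill_blk_fields(struct blk_label_fields *f, bool has_type_guid,
			    bool first, uint16_t nlabel, uint64_t cookie2)
{
	if (!has_type_guid) {
		/* Older label format: these fields are N/A for blk labels. */
		f->nlabel = 0;
		f->position = 0;
		f->isetcookie = 0;
		return;
	}
	/* Newer format: only the first (lowest-DPA) label carries real values. */
	f->nlabel = first ? nlabel : 0xffff;
	f->position = first ? 0 : 0xffff;
	f->isetcookie = cookie2;
}

int main(void)
{
	struct blk_label_fields f;

	fill_blk_fields(&f, true, false, 4, 0xabcd);
	printf("nlabel=%#x position=%#x cookie=%#llx\n",
	       f.nlabel, f.position, (unsigned long long)f.isetcookie);
	return 0;
}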
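
pci.h also grows CXL_REGLOC_RBI_TYPES, one past the highest register block indicator the driver knows about. A bound like this is typically used to size per-type bookkeeping and to let probe skip register blocks it does not recognize rather than fail; how the driver consumes it is not shown in this hunk, so the loop below is only an illustration. The indicator values are copied from the hunk; the parentheses around the macro are added here purely for macro hygiene in a standalone sketch:

#include <stdio.h>

#define CXL_REGLOC_RBI_COMPONENT 1
#define CXL_REGLOC_RBI_VIRT      2
#define CXL_REGLOC_RBI_MEMDEV    3
#define CXL_REGLOC_RBI_TYPES     (CXL_REGLOC_RBI_MEMDEV + 1)

int main(void)
{
	/* Anything at or beyond the bound is unknown and can simply be skipped. */
	for (int type = 1; type < 8; type++)
		printf("register block type %d: %s\n", type,
		       type < CXL_REGLOC_RBI_TYPES ? "known" : "ignored");
	return 0;
}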