From 99e222a5f1b67dd17c2c780f4eb9a694707d3bf7 Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Wed, 8 Sep 2021 22:12:09 -0700
Subject: cxl/pci: Make 'struct cxl_mem' device type generic

In preparation for adding a unit test provider of a cxl_memdev, convert
the 'struct cxl_mem' driver context to carry a generic device rather
than a pci device. Note, some dev_dbg() lines needed extra reformatting
per clang-format.

This conversion also allows the cxl_mem_create() and
devm_cxl_add_memdev() calling conventions to be simplified. The "host"
for a cxl_memdev must be the same device that the driver used to
allocate @cxlm.

Acked-by: Ben Widawsky
Reviewed-by: Jonathan Cameron
Reviewed-by: Ben Widawsky
Link: https://lore.kernel.org/r/163116432973.2460985.7553504957932024222.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Dan Williams
---
 drivers/cxl/core/memdev.c |  7 ++---
 drivers/cxl/cxlmem.h      |  6 ++--
 drivers/cxl/pci.c         | 75 ++++++++++++++++++++++-------------------------
 3 files changed, 41 insertions(+), 47 deletions(-)
(limited to 'drivers/cxl')

diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index a9c317e32010..331ef7d6c598 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -149,7 +149,6 @@ static void cxl_memdev_unregister(void *_cxlmd)
 static struct cxl_memdev *cxl_memdev_alloc(struct cxl_mem *cxlm,
 					   const struct file_operations *fops)
 {
-	struct pci_dev *pdev = cxlm->pdev;
 	struct cxl_memdev *cxlmd;
 	struct device *dev;
 	struct cdev *cdev;
@@ -166,7 +165,7 @@ static struct cxl_memdev *cxl_memdev_alloc(struct cxl_mem *cxlm,
 
 	dev = &cxlmd->dev;
 	device_initialize(dev);
-	dev->parent = &pdev->dev;
+	dev->parent = cxlm->dev;
 	dev->bus = &cxl_bus_type;
 	dev->devt = MKDEV(cxl_mem_major, cxlmd->id);
 	dev->type = &cxl_memdev_type;
@@ -182,7 +181,7 @@ err:
 }
 
 struct cxl_memdev *
-devm_cxl_add_memdev(struct device *host, struct cxl_mem *cxlm,
+devm_cxl_add_memdev(struct cxl_mem *cxlm,
 		    const struct cdevm_file_operations *cdevm_fops)
 {
 	struct cxl_memdev *cxlmd;
@@ -210,7 +209,7 @@ devm_cxl_add_memdev(struct device *host, struct cxl_mem *cxlm,
 	if (rc)
 		goto err;
 
-	rc = devm_add_action_or_reset(host, cxl_memdev_unregister, cxlmd);
+	rc = devm_add_action_or_reset(cxlm->dev, cxl_memdev_unregister, cxlmd);
 	if (rc)
 		return ERR_PTR(rc);
 	return cxlmd;
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index 6c0b1e2ea97c..d5334df83fb2 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -63,12 +63,12 @@ static inline struct cxl_memdev *to_cxl_memdev(struct device *dev)
 }
 
 struct cxl_memdev *
-devm_cxl_add_memdev(struct device *host, struct cxl_mem *cxlm,
+devm_cxl_add_memdev(struct cxl_mem *cxlm,
 		    const struct cdevm_file_operations *cdevm_fops);
 
 /**
  * struct cxl_mem - A CXL memory device
- * @pdev: The PCI device associated with this CXL device.
+ * @dev: The device associated with this CXL device.
  * @cxlmd: Logical memory device chardev / interface
  * @regs: Parsed register blocks
  * @payload_size: Size of space for payload
@@ -82,7 +82,7 @@ devm_cxl_add_memdev(struct cxl_mem *cxlm,
  * @ram_range: Volatile memory capacity information.
*/ struct cxl_mem { - struct pci_dev *pdev; + struct device *dev; struct cxl_memdev *cxlmd; struct cxl_regs regs; diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index 8e45aa07d662..c1e1d12e24b6 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -250,7 +250,7 @@ static int cxl_mem_wait_for_doorbell(struct cxl_mem *cxlm) cpu_relax(); } - dev_dbg(&cxlm->pdev->dev, "Doorbell wait took %dms", + dev_dbg(cxlm->dev, "Doorbell wait took %dms", jiffies_to_msecs(end) - jiffies_to_msecs(start)); return 0; } @@ -268,7 +268,7 @@ static bool cxl_is_security_command(u16 opcode) static void cxl_mem_mbox_timeout(struct cxl_mem *cxlm, struct mbox_cmd *mbox_cmd) { - struct device *dev = &cxlm->pdev->dev; + struct device *dev = cxlm->dev; dev_dbg(dev, "Mailbox command (opcode: %#x size: %zub) timed out\n", mbox_cmd->opcode, mbox_cmd->size_in); @@ -300,6 +300,7 @@ static int __cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, struct mbox_cmd *mbox_cmd) { void __iomem *payload = cxlm->regs.mbox + CXLDEV_MBOX_PAYLOAD_OFFSET; + struct device *dev = cxlm->dev; u64 cmd_reg, status_reg; size_t out_len; int rc; @@ -325,8 +326,7 @@ static int __cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, /* #1 */ if (cxl_doorbell_busy(cxlm)) { - dev_err_ratelimited(&cxlm->pdev->dev, - "Mailbox re-busy after acquiring\n"); + dev_err_ratelimited(dev, "Mailbox re-busy after acquiring\n"); return -EBUSY; } @@ -345,7 +345,7 @@ static int __cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, writeq(cmd_reg, cxlm->regs.mbox + CXLDEV_MBOX_CMD_OFFSET); /* #4 */ - dev_dbg(&cxlm->pdev->dev, "Sending command\n"); + dev_dbg(dev, "Sending command\n"); writel(CXLDEV_MBOX_CTRL_DOORBELL, cxlm->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET); @@ -362,7 +362,7 @@ static int __cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, FIELD_GET(CXLDEV_MBOX_STATUS_RET_CODE_MASK, status_reg); if (mbox_cmd->return_code != 0) { - dev_dbg(&cxlm->pdev->dev, "Mailbox operation had an error\n"); + dev_dbg(dev, "Mailbox operation had an error\n"); return 0; } @@ -399,7 +399,7 @@ static int __cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, */ static int cxl_mem_mbox_get(struct cxl_mem *cxlm) { - struct device *dev = &cxlm->pdev->dev; + struct device *dev = cxlm->dev; u64 md_status; int rc; @@ -502,7 +502,7 @@ static int handle_mailbox_cmd_from_user(struct cxl_mem *cxlm, u64 in_payload, u64 out_payload, s32 *size_out, u32 *retval) { - struct device *dev = &cxlm->pdev->dev; + struct device *dev = cxlm->dev; struct mbox_cmd mbox_cmd = { .opcode = cmd->opcode, .size_in = cmd->info.size_in, @@ -925,20 +925,19 @@ static int cxl_mem_setup_mailbox(struct cxl_mem *cxlm) */ cxlm->payload_size = min_t(size_t, cxlm->payload_size, SZ_1M); if (cxlm->payload_size < 256) { - dev_err(&cxlm->pdev->dev, "Mailbox is too small (%zub)", + dev_err(cxlm->dev, "Mailbox is too small (%zub)", cxlm->payload_size); return -ENXIO; } - dev_dbg(&cxlm->pdev->dev, "Mailbox payload sized %zu", + dev_dbg(cxlm->dev, "Mailbox payload sized %zu", cxlm->payload_size); return 0; } -static struct cxl_mem *cxl_mem_create(struct pci_dev *pdev) +static struct cxl_mem *cxl_mem_create(struct device *dev) { - struct device *dev = &pdev->dev; struct cxl_mem *cxlm; cxlm = devm_kzalloc(dev, sizeof(*cxlm), GFP_KERNEL); @@ -948,7 +947,7 @@ static struct cxl_mem *cxl_mem_create(struct pci_dev *pdev) } mutex_init(&cxlm->mbox_mutex); - cxlm->pdev = pdev; + cxlm->dev = dev; cxlm->enabled_cmds = devm_kmalloc_array(dev, BITS_TO_LONGS(cxl_cmd_count), sizeof(unsigned long), @@ -964,9 +963,9 @@ static struct cxl_mem *cxl_mem_create(struct pci_dev *pdev) 
static void __iomem *cxl_mem_map_regblock(struct cxl_mem *cxlm, u8 bar, u64 offset) { - struct pci_dev *pdev = cxlm->pdev; - struct device *dev = &pdev->dev; void __iomem *addr; + struct device *dev = cxlm->dev; + struct pci_dev *pdev = to_pci_dev(dev); /* Basic sanity check that BAR is big enough */ if (pci_resource_len(pdev, bar) < offset) { @@ -989,7 +988,7 @@ static void __iomem *cxl_mem_map_regblock(struct cxl_mem *cxlm, static void cxl_mem_unmap_regblock(struct cxl_mem *cxlm, void __iomem *base) { - pci_iounmap(cxlm->pdev, base); + pci_iounmap(to_pci_dev(cxlm->dev), base); } static int cxl_mem_dvsec(struct pci_dev *pdev, int dvsec) @@ -1018,10 +1017,9 @@ static int cxl_mem_dvsec(struct pci_dev *pdev, int dvsec) static int cxl_probe_regs(struct cxl_mem *cxlm, void __iomem *base, struct cxl_register_map *map) { - struct pci_dev *pdev = cxlm->pdev; - struct device *dev = &pdev->dev; struct cxl_component_reg_map *comp_map; struct cxl_device_reg_map *dev_map; + struct device *dev = cxlm->dev; switch (map->reg_type) { case CXL_REGLOC_RBI_COMPONENT: @@ -1057,8 +1055,8 @@ static int cxl_probe_regs(struct cxl_mem *cxlm, void __iomem *base, static int cxl_map_regs(struct cxl_mem *cxlm, struct cxl_register_map *map) { - struct pci_dev *pdev = cxlm->pdev; - struct device *dev = &pdev->dev; + struct device *dev = cxlm->dev; + struct pci_dev *pdev = to_pci_dev(dev); switch (map->reg_type) { case CXL_REGLOC_RBI_COMPONENT: @@ -1096,13 +1094,12 @@ static void cxl_decode_register_block(u32 reg_lo, u32 reg_hi, */ static int cxl_mem_setup_regs(struct cxl_mem *cxlm) { - struct pci_dev *pdev = cxlm->pdev; - struct device *dev = &pdev->dev; - u32 regloc_size, regblocks; void __iomem *base; - int regloc, i, n_maps; + u32 regloc_size, regblocks; + int regloc, i, n_maps, ret = 0; + struct device *dev = cxlm->dev; + struct pci_dev *pdev = to_pci_dev(dev); struct cxl_register_map *map, maps[CXL_REGLOC_RBI_TYPES]; - int ret = 0; regloc = cxl_mem_dvsec(pdev, PCI_DVSEC_ID_CXL_REGLOC_DVSEC_ID); if (!regloc) { @@ -1226,7 +1223,7 @@ static void cxl_walk_cel(struct cxl_mem *cxlm, size_t size, u8 *cel) struct cxl_mem_command *cmd = cxl_mem_find_command(opcode); if (!cmd) { - dev_dbg(&cxlm->pdev->dev, + dev_dbg(cxlm->dev, "Opcode 0x%04x unsupported by driver", opcode); continue; } @@ -1323,7 +1320,7 @@ static int cxl_mem_get_partition_info(struct cxl_mem *cxlm, static int cxl_mem_enumerate_cmds(struct cxl_mem *cxlm) { struct cxl_mbox_get_supported_logs *gsl; - struct device *dev = &cxlm->pdev->dev; + struct device *dev = cxlm->dev; struct cxl_mem_command *cmd; int i, rc; @@ -1418,15 +1415,14 @@ static int cxl_mem_identify(struct cxl_mem *cxlm) cxlm->partition_align_bytes = le64_to_cpu(id.partition_align); cxlm->partition_align_bytes *= CXL_CAPACITY_MULTIPLIER; - dev_dbg(&cxlm->pdev->dev, "Identify Memory Device\n" + dev_dbg(cxlm->dev, + "Identify Memory Device\n" " total_bytes = %#llx\n" " volatile_only_bytes = %#llx\n" " persistent_only_bytes = %#llx\n" " partition_align_bytes = %#llx\n", - cxlm->total_bytes, - cxlm->volatile_only_bytes, - cxlm->persistent_only_bytes, - cxlm->partition_align_bytes); + cxlm->total_bytes, cxlm->volatile_only_bytes, + cxlm->persistent_only_bytes, cxlm->partition_align_bytes); cxlm->lsa_size = le32_to_cpu(id.lsa_size); memcpy(cxlm->firmware_version, id.fw_revision, sizeof(id.fw_revision)); @@ -1453,19 +1449,18 @@ static int cxl_mem_create_range_info(struct cxl_mem *cxlm) &cxlm->next_volatile_bytes, &cxlm->next_persistent_bytes); if (rc < 0) { - dev_err(&cxlm->pdev->dev, "Failed to query 
partition information\n"); + dev_err(cxlm->dev, "Failed to query partition information\n"); return rc; } - dev_dbg(&cxlm->pdev->dev, "Get Partition Info\n" + dev_dbg(cxlm->dev, + "Get Partition Info\n" " active_volatile_bytes = %#llx\n" " active_persistent_bytes = %#llx\n" " next_volatile_bytes = %#llx\n" " next_persistent_bytes = %#llx\n", - cxlm->active_volatile_bytes, - cxlm->active_persistent_bytes, - cxlm->next_volatile_bytes, - cxlm->next_persistent_bytes); + cxlm->active_volatile_bytes, cxlm->active_persistent_bytes, + cxlm->next_volatile_bytes, cxlm->next_persistent_bytes); cxlm->ram_range.start = 0; cxlm->ram_range.end = cxlm->active_volatile_bytes - 1; @@ -1487,7 +1482,7 @@ static int cxl_mem_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (rc) return rc; - cxlm = cxl_mem_create(pdev); + cxlm = cxl_mem_create(&pdev->dev); if (IS_ERR(cxlm)) return PTR_ERR(cxlm); @@ -1511,7 +1506,7 @@ static int cxl_mem_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (rc) return rc; - cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlm, &cxl_memdev_fops); + cxlmd = devm_cxl_add_memdev(cxlm, &cxl_memdev_fops); if (IS_ERR(cxlmd)) return PTR_ERR(cxlmd); -- cgit v1.2.3 From 13e7749d06b335774bbb341c65a0232484beb457 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 13 Sep 2021 15:24:32 -0700 Subject: cxl/pci: Clean up cxl_mem_get_partition_info() Commit 0b9159d0ff21 ("cxl/pci: Store memory capacity values") missed updating the kernel-doc for 'struct cxl_mem' leading to the following warnings: ./scripts/kernel-doc -v drivers/cxl/cxlmem.h 2>&1 | grep warn drivers/cxl/cxlmem.h:107: warning: Function parameter or member 'total_bytes' not described in 'cxl_mem' drivers/cxl/cxlmem.h:107: warning: Function parameter or member 'volatile_only_bytes' not described in 'cxl_mem' drivers/cxl/cxlmem.h:107: warning: Function parameter or member 'persistent_only_bytes' not described in 'cxl_mem' drivers/cxl/cxlmem.h:107: warning: Function parameter or member 'partition_align_bytes' not described in 'cxl_mem' drivers/cxl/cxlmem.h:107: warning: Function parameter or member 'active_volatile_bytes' not described in 'cxl_mem' drivers/cxl/cxlmem.h:107: warning: Function parameter or member 'active_persistent_bytes' not described in 'cxl_mem' drivers/cxl/cxlmem.h:107: warning: Function parameter or member 'next_volatile_bytes' not described in 'cxl_mem' drivers/cxl/cxlmem.h:107: warning: Function parameter or member 'next_persistent_bytes' not described in 'cxl_mem' Also, it is redundant to describe those same parameters in the kernel-doc for cxl_mem_get_partition_info(). Given the only user of that routine updates the values in @cxlm, just do that implicitly internal to the helper. Cc: Ira Weiny Reported-by: Ben Widawsky Reviewed-by: Jonathan Cameron Link: https://lore.kernel.org/r/163157174216.2653013.1277706528753990974.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams --- drivers/cxl/cxlmem.h | 15 +++++++++++++-- drivers/cxl/pci.c | 35 +++++++++++------------------------ 2 files changed, 24 insertions(+), 26 deletions(-) (limited to 'drivers/cxl') diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h index d5334df83fb2..e14bcd7a1ba1 100644 --- a/drivers/cxl/cxlmem.h +++ b/drivers/cxl/cxlmem.h @@ -78,8 +78,19 @@ devm_cxl_add_memdev(struct cxl_mem *cxlm, * @mbox_mutex: Mutex to synchronize mailbox access. * @firmware_version: Firmware version for the memory device. * @enabled_cmds: Hardware commands found enabled in CEL. - * @pmem_range: Persistent memory capacity information. 
- * @ram_range: Volatile memory capacity information. + * @pmem_range: Active Persistent memory capacity configuration + * @ram_range: Active Volatile memory capacity configuration + * @total_bytes: sum of all possible capacities + * @volatile_only_bytes: hard volatile capacity + * @persistent_only_bytes: hard persistent capacity + * @partition_align_bytes: alignment size for partition-able capacity + * @active_volatile_bytes: sum of hard + soft volatile + * @active_persistent_bytes: sum of hard + soft persistent + * @next_volatile_bytes: volatile capacity change pending device reset + * @next_persistent_bytes: persistent capacity change pending device reset + * + * See section 8.2.9.5.2 Capacity Configuration and Label Storage for + * details on capacity parameters. */ struct cxl_mem { struct device *dev; diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index c1e1d12e24b6..8077d907e7d3 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -1262,11 +1262,7 @@ static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_mem *cxlm) /** * cxl_mem_get_partition_info - Get partition info - * @cxlm: The device to act on - * @active_volatile_bytes: returned active volatile capacity - * @active_persistent_bytes: returned active persistent capacity - * @next_volatile_bytes: return next volatile capacity - * @next_persistent_bytes: return next persistent capacity + * @cxlm: cxl_mem instance to update partition info * * Retrieve the current partition info for the device specified. If not 0, the * 'next' values are pending and take affect on next cold reset. @@ -1275,11 +1271,7 @@ static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_mem *cxlm) * * See CXL @8.2.9.5.2.1 Get Partition Info */ -static int cxl_mem_get_partition_info(struct cxl_mem *cxlm, - u64 *active_volatile_bytes, - u64 *active_persistent_bytes, - u64 *next_volatile_bytes, - u64 *next_persistent_bytes) +static int cxl_mem_get_partition_info(struct cxl_mem *cxlm) { struct cxl_mbox_get_partition_info { __le64 active_volatile_cap; @@ -1294,15 +1286,14 @@ static int cxl_mem_get_partition_info(struct cxl_mem *cxlm, if (rc) return rc; - *active_volatile_bytes = le64_to_cpu(pi.active_volatile_cap); - *active_persistent_bytes = le64_to_cpu(pi.active_persistent_cap); - *next_volatile_bytes = le64_to_cpu(pi.next_volatile_cap); - *next_persistent_bytes = le64_to_cpu(pi.next_volatile_cap); - - *active_volatile_bytes *= CXL_CAPACITY_MULTIPLIER; - *active_persistent_bytes *= CXL_CAPACITY_MULTIPLIER; - *next_volatile_bytes *= CXL_CAPACITY_MULTIPLIER; - *next_persistent_bytes *= CXL_CAPACITY_MULTIPLIER; + cxlm->active_volatile_bytes = + le64_to_cpu(pi.active_volatile_cap) * CXL_CAPACITY_MULTIPLIER; + cxlm->active_persistent_bytes = + le64_to_cpu(pi.active_persistent_cap) * CXL_CAPACITY_MULTIPLIER; + cxlm->next_volatile_bytes = + le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER; + cxlm->next_persistent_bytes = + le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER; return 0; } @@ -1443,11 +1434,7 @@ static int cxl_mem_create_range_info(struct cxl_mem *cxlm) return 0; } - rc = cxl_mem_get_partition_info(cxlm, - &cxlm->active_volatile_bytes, - &cxlm->active_persistent_bytes, - &cxlm->next_volatile_bytes, - &cxlm->next_persistent_bytes); + rc = cxl_mem_get_partition_info(cxlm); if (rc < 0) { dev_err(cxlm->dev, "Failed to query partition information\n"); return rc; -- cgit v1.2.3 From b64955a92929346f16df058ad2bb53630eb80466 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 8 Sep 2021 22:12:21 -0700 Subject: 
cxl/mbox: Introduce the mbox_send operation In preparation for implementing a unit test backend transport for ioctl operations, and making the mailbox available to the cxl/pmem infrastructure, move the existing PCI specific portion of mailbox handling to an "mbox_send" operation. With this split all the PCI-specific transport details are comprehended by a single operation and the rest of the mailbox infrastructure is 'struct cxl_mem' and 'struct cxl_memdev' generic. Acked-by: Ben Widawsky Reviewed-by: Jonathan Cameron Reviewed-by: Ben Widawsky Link: https://lore.kernel.org/r/163116434098.2460985.9004760022659400540.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams --- drivers/cxl/cxlmem.h | 42 +++++++++++++++++++++++++++++ drivers/cxl/pci.c | 76 +++++++++++++++------------------------------------- 2 files changed, 63 insertions(+), 55 deletions(-) (limited to 'drivers/cxl') diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h index e14bcd7a1ba1..c142532db8cb 100644 --- a/drivers/cxl/cxlmem.h +++ b/drivers/cxl/cxlmem.h @@ -66,6 +66,45 @@ struct cxl_memdev * devm_cxl_add_memdev(struct cxl_mem *cxlm, const struct cdevm_file_operations *cdevm_fops); +/** + * struct cxl_mbox_cmd - A command to be submitted to hardware. + * @opcode: (input) The command set and command submitted to hardware. + * @payload_in: (input) Pointer to the input payload. + * @payload_out: (output) Pointer to the output payload. Must be allocated by + * the caller. + * @size_in: (input) Number of bytes to load from @payload_in. + * @size_out: (input) Max number of bytes loaded into @payload_out. + * (output) Number of bytes generated by the device. For fixed size + * outputs commands this is always expected to be deterministic. For + * variable sized output commands, it tells the exact number of bytes + * written. + * @return_code: (output) Error code returned from hardware. + * + * This is the primary mechanism used to send commands to the hardware. + * All the fields except @payload_* correspond exactly to the fields described in + * Command Register section of the CXL 2.0 8.2.8.4.5. @payload_in and + * @payload_out are written to, and read from the Command Payload Registers + * defined in CXL 2.0 8.2.8.4.8. + */ +struct cxl_mbox_cmd { + u16 opcode; + void *payload_in; + void *payload_out; + size_t size_in; + size_t size_out; + u16 return_code; +#define CXL_MBOX_SUCCESS 0 +}; + +/* + * CXL 2.0 - Memory capacity multiplier + * See Section 8.2.9.5 + * + * Volatile, Persistent, and Partition capacities are specified to be in + * multiples of 256MB - define a multiplier to convert to/from bytes. + */ +#define CXL_CAPACITY_MULTIPLIER SZ_256M + /** * struct cxl_mem - A CXL memory device * @dev: The device associated with this CXL device. @@ -88,6 +127,7 @@ devm_cxl_add_memdev(struct cxl_mem *cxlm, * @active_persistent_bytes: sum of hard + soft persistent * @next_volatile_bytes: volatile capacity change pending device reset * @next_persistent_bytes: persistent capacity change pending device reset + * @mbox_send: @dev specific transport for transmitting mailbox commands * * See section 8.2.9.5.2 Capacity Configuration and Label Storage for * details on capacity parameters. 
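Taken together, 'struct cxl_mbox_cmd' and the mbox_send() operation added by
the next hunk form the entire transport contract. As a rough sketch, not part
of this patch (the helper name is invented for illustration; the opcode,
fields, and error conventions are the ones documented above), a
transport-agnostic caller would drive a command like this:

/*
 * Illustrative sketch only -- not from this series. Fills out a
 * struct cxl_mbox_cmd and dispatches it through the transport's
 * mbox_send() operation, mapping the two failure classes the
 * kernel-doc above distinguishes.
 */
static int example_get_health_info(struct cxl_mem *cxlm, void *buf, size_t len)
{
	struct cxl_mbox_cmd mbox_cmd = {
		.opcode = CXL_MBOX_OP_GET_HEALTH_INFO,
		.payload_out = buf,
		.size_out = len,	/* in: max bytes, out: bytes the device wrote */
	};
	int rc;

	rc = cxlm->mbox_send(cxlm, &mbox_cmd);
	if (rc)
		return rc;	/* transport-level failure */

	if (mbox_cmd.return_code != CXL_MBOX_SUCCESS)
		return -ENXIO;	/* mailbox completed, device reported an error */

	return 0;
}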
@@ -115,5 +155,7 @@ struct cxl_mem { u64 active_persistent_bytes; u64 next_volatile_bytes; u64 next_persistent_bytes; + + int (*mbox_send)(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd); }; #endif /* __CXL_MEM_H__ */ diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index 8077d907e7d3..e2f27671c6b2 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -64,45 +64,6 @@ enum opcode { CXL_MBOX_OP_MAX = 0x10000 }; -/* - * CXL 2.0 - Memory capacity multiplier - * See Section 8.2.9.5 - * - * Volatile, Persistent, and Partition capacities are specified to be in - * multiples of 256MB - define a multiplier to convert to/from bytes. - */ -#define CXL_CAPACITY_MULTIPLIER SZ_256M - -/** - * struct mbox_cmd - A command to be submitted to hardware. - * @opcode: (input) The command set and command submitted to hardware. - * @payload_in: (input) Pointer to the input payload. - * @payload_out: (output) Pointer to the output payload. Must be allocated by - * the caller. - * @size_in: (input) Number of bytes to load from @payload_in. - * @size_out: (input) Max number of bytes loaded into @payload_out. - * (output) Number of bytes generated by the device. For fixed size - * outputs commands this is always expected to be deterministic. For - * variable sized output commands, it tells the exact number of bytes - * written. - * @return_code: (output) Error code returned from hardware. - * - * This is the primary mechanism used to send commands to the hardware. - * All the fields except @payload_* correspond exactly to the fields described in - * Command Register section of the CXL 2.0 8.2.8.4.5. @payload_in and - * @payload_out are written to, and read from the Command Payload Registers - * defined in CXL 2.0 8.2.8.4.8. - */ -struct mbox_cmd { - u16 opcode; - void *payload_in; - void *payload_out; - size_t size_in; - size_t size_out; - u16 return_code; -#define CXL_MBOX_SUCCESS 0 -}; - static DECLARE_RWSEM(cxl_memdev_rwsem); static struct dentry *cxl_debugfs; static bool cxl_raw_allow_all; @@ -266,7 +227,7 @@ static bool cxl_is_security_command(u16 opcode) } static void cxl_mem_mbox_timeout(struct cxl_mem *cxlm, - struct mbox_cmd *mbox_cmd) + struct cxl_mbox_cmd *mbox_cmd) { struct device *dev = cxlm->dev; @@ -297,7 +258,7 @@ static void cxl_mem_mbox_timeout(struct cxl_mem *cxlm, * mailbox. */ static int __cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, - struct mbox_cmd *mbox_cmd) + struct cxl_mbox_cmd *mbox_cmd) { void __iomem *payload = cxlm->regs.mbox + CXLDEV_MBOX_PAYLOAD_OFFSET; struct device *dev = cxlm->dev; @@ -472,6 +433,20 @@ static void cxl_mem_mbox_put(struct cxl_mem *cxlm) mutex_unlock(&cxlm->mbox_mutex); } +static int cxl_pci_mbox_send(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd) +{ + int rc; + + rc = cxl_mem_mbox_get(cxlm); + if (rc) + return rc; + + rc = __cxl_mem_mbox_send_cmd(cxlm, cmd); + cxl_mem_mbox_put(cxlm); + + return rc; +} + /** * handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace. * @cxlm: The CXL memory device to communicate with. 
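The cxl_pci_mbox_send() wrapper above is now the only PCI-specific piece of
the dispatch path. For illustration only (none of this is in the patch), a
non-PCI backend such as the unit test transport the changelog anticipates
would need little more than its own callback:

/*
 * Hypothetical mock transport, illustration only: completes every
 * command "successfully" with an empty payload. A real test backend
 * would switch on cmd->opcode and synthesize cmd->payload_out.
 */
static int cxl_mock_mbox_send(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd)
{
	cmd->return_code = CXL_MBOX_SUCCESS;
	cmd->size_out = 0;
	return 0;
}

Wiring it up is a single assignment, cxlm->mbox_send = cxl_mock_mbox_send,
in place of the cxl_pci_mbox_send() default that the PCI driver installs.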
@@ -503,7 +478,7 @@ static int handle_mailbox_cmd_from_user(struct cxl_mem *cxlm, s32 *size_out, u32 *retval) { struct device *dev = cxlm->dev; - struct mbox_cmd mbox_cmd = { + struct cxl_mbox_cmd mbox_cmd = { .opcode = cmd->opcode, .size_in = cmd->info.size_in, .size_out = cmd->info.size_out, @@ -525,10 +500,6 @@ static int handle_mailbox_cmd_from_user(struct cxl_mem *cxlm, } } - rc = cxl_mem_mbox_get(cxlm); - if (rc) - goto out; - dev_dbg(dev, "Submitting %s command for user\n" "\topcode: %x\n" @@ -539,8 +510,7 @@ static int handle_mailbox_cmd_from_user(struct cxl_mem *cxlm, dev_WARN_ONCE(dev, cmd->info.id == CXL_MEM_COMMAND_ID_RAW, "raw command path used\n"); - rc = __cxl_mem_mbox_send_cmd(cxlm, &mbox_cmd); - cxl_mem_mbox_put(cxlm); + rc = cxlm->mbox_send(cxlm, &mbox_cmd); if (rc) goto out; @@ -874,7 +844,7 @@ static int cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, u16 opcode, void *out, size_t out_size) { const struct cxl_mem_command *cmd = cxl_mem_find_command(opcode); - struct mbox_cmd mbox_cmd = { + struct cxl_mbox_cmd mbox_cmd = { .opcode = opcode, .payload_in = in, .size_in = in_size, @@ -886,12 +856,7 @@ static int cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, u16 opcode, if (out_size > cxlm->payload_size) return -E2BIG; - rc = cxl_mem_mbox_get(cxlm); - if (rc) - return rc; - - rc = __cxl_mem_mbox_send_cmd(cxlm, &mbox_cmd); - cxl_mem_mbox_put(cxlm); + rc = cxlm->mbox_send(cxlm, &mbox_cmd); if (rc) return rc; @@ -913,6 +878,7 @@ static int cxl_mem_setup_mailbox(struct cxl_mem *cxlm) { const int cap = readl(cxlm->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET); + cxlm->mbox_send = cxl_pci_mbox_send; cxlm->payload_size = 1 << FIELD_GET(CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK, cap); -- cgit v1.2.3 From 4cb35f1ca05a42acbc4a3c8cf7de1029a06558d0 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 8 Sep 2021 22:12:26 -0700 Subject: cxl/pci: Drop idr.h Commit 3d135db51024 ("cxl/core: Move memdev management to core") left this straggling include for cxl_memdev setup. Clean it up. Cc: Ben Widawsky Reported-by: Jonathan Cameron Reviewed-by: Jonathan Cameron Reviewed-by: Ben Widawsky Link: https://lore.kernel.org/r/163116434668.2460985.12264757586266849616.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams --- drivers/cxl/pci.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/cxl') diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index e2f27671c6b2..9d8050fdd69c 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -8,7 +8,6 @@ #include #include #include -#include #include #include #include -- cgit v1.2.3 From 4faf31b43468c58e2c8c91cc5fa26f08a6b733be Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 8 Sep 2021 22:12:32 -0700 Subject: cxl/mbox: Move mailbox and other non-PCI specific infrastructure to the core Now that the internals of mailbox operations are abstracted from the PCI specifics a bulk of infrastructure can move to the core. The CXL_PMEM driver intends to proxy LIBNVDIMM UAPI and driver requests to the equivalent functionality provided by the CXL hardware mailbox interface. In support of that intent move the mailbox implementation to a shared location for the CXL_PCI driver native IOCTL path and CXL_PMEM nvdimm command proxy path to share. A unit test framework seeks to implement a unit test backend transport for mailbox commands to communicate mocked up payloads. It can reuse all of the mailbox infrastructure minus the PCI specifics, so that also gets moved to the core. 
Finally, with the mailbox infrastructure and ioctl handling being
transport generic, there is no longer any need to pass file_operations
to devm_cxl_add_memdev(). That allows all the ioctl boilerplate to move
into the core for unit test reuse.

No functional change intended, just code movement.

Acked-by: Ben Widawsky
Reported-by: kernel test robot
Reviewed-by: Jonathan Cameron
Link: https://lore.kernel.org/r/163116435233.2460985.16197340449713287180.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Dan Williams
---
 Documentation/driver-api/cxl/memory-devices.rst |   3 +
 drivers/cxl/core/Makefile                       |   1 +
 drivers/cxl/core/bus.c                          |   4 +
 drivers/cxl/core/core.h                         |   8 +
 drivers/cxl/core/mbox.c                         | 825 +++++++++++++++++++++
 drivers/cxl/core/memdev.c                       |  81 ++-
 drivers/cxl/cxlmem.h                            |  78 +-
 drivers/cxl/pci.c                               | 924 +-----------------------
 8 files changed, 975 insertions(+), 949 deletions(-)
 create mode 100644 drivers/cxl/core/mbox.c
(limited to 'drivers/cxl')

diff --git a/Documentation/driver-api/cxl/memory-devices.rst b/Documentation/driver-api/cxl/memory-devices.rst
index 50ebcda17ad0..4624951b5c38 100644
--- a/Documentation/driver-api/cxl/memory-devices.rst
+++ b/Documentation/driver-api/cxl/memory-devices.rst
@@ -45,6 +45,9 @@ CXL Core
 .. kernel-doc:: drivers/cxl/core/regs.c
    :doc: cxl registers
 
+.. kernel-doc:: drivers/cxl/core/mbox.c
+   :doc: cxl mbox
+
 External Interfaces
 ===================
 
diff --git a/drivers/cxl/core/Makefile b/drivers/cxl/core/Makefile
index 0fdbf3c6ac1a..07eb8e1fb8a6 100644
--- a/drivers/cxl/core/Makefile
+++ b/drivers/cxl/core/Makefile
@@ -6,3 +6,4 @@ cxl_core-y := bus.o
 cxl_core-y += pmem.o
 cxl_core-y += regs.o
 cxl_core-y += memdev.o
+cxl_core-y += mbox.o
diff --git a/drivers/cxl/core/bus.c b/drivers/cxl/core/bus.c
index 267d8042bec2..9a8fa88b634c 100644
--- a/drivers/cxl/core/bus.c
+++ b/drivers/cxl/core/bus.c
@@ -635,6 +635,8 @@ static __init int cxl_core_init(void)
 {
 	int rc;
 
+	cxl_mbox_init();
+
 	rc = cxl_memdev_init();
 	if (rc)
 		return rc;
@@ -646,6 +648,7 @@ static __init int cxl_core_init(void)
 
 err:
 	cxl_memdev_exit();
+	cxl_mbox_exit();
 	return rc;
 }
 
@@ -653,6 +656,7 @@ static void cxl_core_exit(void)
 {
 	bus_unregister(&cxl_bus_type);
 	cxl_memdev_exit();
+	cxl_mbox_exit();
 }
 
 module_init(cxl_core_init);
diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
index 036a3c8106b4..c85b7fbad02d 100644
--- a/drivers/cxl/core/core.h
+++ b/drivers/cxl/core/core.h
@@ -14,7 +14,15 @@ static inline void unregister_cxl_dev(void *dev)
 	device_unregister(dev);
 }
 
+struct cxl_send_command;
+struct cxl_mem_query_commands;
+int cxl_query_cmd(struct cxl_memdev *cxlmd,
+		  struct cxl_mem_query_commands __user *q);
+int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s);
+
 int cxl_memdev_init(void);
 void cxl_memdev_exit(void);
+void cxl_mbox_init(void);
+void cxl_mbox_exit(void);
 
 #endif /* __CXL_CORE_H__ */
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
new file mode 100644
index 000000000000..31e183991726
--- /dev/null
+++ b/drivers/cxl/core/mbox.c
@@ -0,0 +1,825 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "core.h"
+
+static bool cxl_raw_allow_all;
+
+/**
+ * DOC: cxl mbox
+ *
+ * Core implementation of the CXL 2.0 Type-3 Memory Device Mailbox. The
+ * implementation is used by the cxl_pci driver to initialize the device
+ * and implement the cxl_mem.h IOCTL UAPI.
It also implements the + * backend of the cxl_pmem_ctl() transport for LIBNVDIMM. + */ + +#define cxl_for_each_cmd(cmd) \ + for ((cmd) = &cxl_mem_commands[0]; \ + ((cmd) - cxl_mem_commands) < ARRAY_SIZE(cxl_mem_commands); (cmd)++) + +#define CXL_CMD(_id, sin, sout, _flags) \ + [CXL_MEM_COMMAND_ID_##_id] = { \ + .info = { \ + .id = CXL_MEM_COMMAND_ID_##_id, \ + .size_in = sin, \ + .size_out = sout, \ + }, \ + .opcode = CXL_MBOX_OP_##_id, \ + .flags = _flags, \ + } + +/* + * This table defines the supported mailbox commands for the driver. This table + * is made up of a UAPI structure. Non-negative values as parameters in the + * table will be validated against the user's input. For example, if size_in is + * 0, and the user passed in 1, it is an error. + */ +static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = { + CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE), +#ifdef CONFIG_CXL_MEM_RAW_COMMANDS + CXL_CMD(RAW, ~0, ~0, 0), +#endif + CXL_CMD(GET_SUPPORTED_LOGS, 0, ~0, CXL_CMD_FLAG_FORCE_ENABLE), + CXL_CMD(GET_FW_INFO, 0, 0x50, 0), + CXL_CMD(GET_PARTITION_INFO, 0, 0x20, 0), + CXL_CMD(GET_LSA, 0x8, ~0, 0), + CXL_CMD(GET_HEALTH_INFO, 0, 0x12, 0), + CXL_CMD(GET_LOG, 0x18, ~0, CXL_CMD_FLAG_FORCE_ENABLE), + CXL_CMD(SET_PARTITION_INFO, 0x0a, 0, 0), + CXL_CMD(SET_LSA, ~0, 0, 0), + CXL_CMD(GET_ALERT_CONFIG, 0, 0x10, 0), + CXL_CMD(SET_ALERT_CONFIG, 0xc, 0, 0), + CXL_CMD(GET_SHUTDOWN_STATE, 0, 0x1, 0), + CXL_CMD(SET_SHUTDOWN_STATE, 0x1, 0, 0), + CXL_CMD(GET_POISON, 0x10, ~0, 0), + CXL_CMD(INJECT_POISON, 0x8, 0, 0), + CXL_CMD(CLEAR_POISON, 0x48, 0, 0), + CXL_CMD(GET_SCAN_MEDIA_CAPS, 0x10, 0x4, 0), + CXL_CMD(SCAN_MEDIA, 0x11, 0, 0), + CXL_CMD(GET_SCAN_MEDIA, 0, ~0, 0), +}; + +/* + * Commands that RAW doesn't permit. The rationale for each: + * + * CXL_MBOX_OP_ACTIVATE_FW: Firmware activation requires adjustment / + * coordination of transaction timeout values at the root bridge level. + * + * CXL_MBOX_OP_SET_PARTITION_INFO: The device memory map may change live + * and needs to be coordinated with HDM updates. + * + * CXL_MBOX_OP_SET_LSA: The label storage area may be cached by the + * driver and any writes from userspace invalidates those contents. + * + * CXL_MBOX_OP_SET_SHUTDOWN_STATE: Set shutdown state assumes no writes + * to the device after it is marked clean, userspace can not make that + * assertion. + * + * CXL_MBOX_OP_[GET_]SCAN_MEDIA: The kernel provides a native error list that + * is kept up to date with patrol notifications and error management. + */ +static u16 cxl_disabled_raw_commands[] = { + CXL_MBOX_OP_ACTIVATE_FW, + CXL_MBOX_OP_SET_PARTITION_INFO, + CXL_MBOX_OP_SET_LSA, + CXL_MBOX_OP_SET_SHUTDOWN_STATE, + CXL_MBOX_OP_SCAN_MEDIA, + CXL_MBOX_OP_GET_SCAN_MEDIA, +}; + +/* + * Command sets that RAW doesn't permit. All opcodes in this set are + * disabled because they pass plain text security payloads over the + * user/kernel boundary. 
This functionality is intended to be wrapped + * behind the keys ABI which allows for encrypted payloads in the UAPI + */ +static u8 security_command_sets[] = { + 0x44, /* Sanitize */ + 0x45, /* Persistent Memory Data-at-rest Security */ + 0x46, /* Security Passthrough */ +}; + +static bool cxl_is_security_command(u16 opcode) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(security_command_sets); i++) + if (security_command_sets[i] == (opcode >> 8)) + return true; + return false; +} + +static struct cxl_mem_command *cxl_mem_find_command(u16 opcode) +{ + struct cxl_mem_command *c; + + cxl_for_each_cmd(c) + if (c->opcode == opcode) + return c; + + return NULL; +} + +/** + * cxl_mem_mbox_send_cmd() - Send a mailbox command to a memory device. + * @cxlm: The CXL memory device to communicate with. + * @opcode: Opcode for the mailbox command. + * @in: The input payload for the mailbox command. + * @in_size: The length of the input payload + * @out: Caller allocated buffer for the output. + * @out_size: Expected size of output. + * + * Context: Any context. Will acquire and release mbox_mutex. + * Return: + * * %>=0 - Number of bytes returned in @out. + * * %-E2BIG - Payload is too large for hardware. + * * %-EBUSY - Couldn't acquire exclusive mailbox access. + * * %-EFAULT - Hardware error occurred. + * * %-ENXIO - Command completed, but device reported an error. + * * %-EIO - Unexpected output size. + * + * Mailbox commands may execute successfully yet the device itself reported an + * error. While this distinction can be useful for commands from userspace, the + * kernel will only be able to use results when both are successful. + * + * See __cxl_mem_mbox_send_cmd() + */ +int cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, u16 opcode, void *in, + size_t in_size, void *out, size_t out_size) +{ + const struct cxl_mem_command *cmd = cxl_mem_find_command(opcode); + struct cxl_mbox_cmd mbox_cmd = { + .opcode = opcode, + .payload_in = in, + .size_in = in_size, + .size_out = out_size, + .payload_out = out, + }; + int rc; + + if (out_size > cxlm->payload_size) + return -E2BIG; + + rc = cxlm->mbox_send(cxlm, &mbox_cmd); + if (rc) + return rc; + + /* TODO: Map return code to proper kernel style errno */ + if (mbox_cmd.return_code != CXL_MBOX_SUCCESS) + return -ENXIO; + + /* + * Variable sized commands can't be validated and so it's up to the + * caller to do that if they wish. + */ + if (cmd->info.size_out >= 0 && mbox_cmd.size_out != out_size) + return -EIO; + + return 0; +} +EXPORT_SYMBOL_GPL(cxl_mem_mbox_send_cmd); + +static bool cxl_mem_raw_command_allowed(u16 opcode) +{ + int i; + + if (!IS_ENABLED(CONFIG_CXL_MEM_RAW_COMMANDS)) + return false; + + if (security_locked_down(LOCKDOWN_PCI_ACCESS)) + return false; + + if (cxl_raw_allow_all) + return true; + + if (cxl_is_security_command(opcode)) + return false; + + for (i = 0; i < ARRAY_SIZE(cxl_disabled_raw_commands); i++) + if (cxl_disabled_raw_commands[i] == opcode) + return false; + + return true; +} + +/** + * cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND. + * @cxlm: &struct cxl_mem device whose mailbox will be used. + * @send_cmd: &struct cxl_send_command copied in from userspace. + * @out_cmd: Sanitized and populated &struct cxl_mem_command. + * + * Return: + * * %0 - @out_cmd is ready to send. + * * %-ENOTTY - Invalid command specified. + * * %-EINVAL - Reserved fields or invalid values were used. + * * %-ENOMEM - Input or output buffer wasn't sized properly. + * * %-EPERM - Attempted to use a protected command. 
+ * + * The result of this command is a fully validated command in @out_cmd that is + * safe to send to the hardware. + * + * See handle_mailbox_cmd_from_user() + */ +static int cxl_validate_cmd_from_user(struct cxl_mem *cxlm, + const struct cxl_send_command *send_cmd, + struct cxl_mem_command *out_cmd) +{ + const struct cxl_command_info *info; + struct cxl_mem_command *c; + + if (send_cmd->id == 0 || send_cmd->id >= CXL_MEM_COMMAND_ID_MAX) + return -ENOTTY; + + /* + * The user can never specify an input payload larger than what hardware + * supports, but output can be arbitrarily large (simply write out as + * much data as the hardware provides). + */ + if (send_cmd->in.size > cxlm->payload_size) + return -EINVAL; + + /* + * Checks are bypassed for raw commands but a WARN/taint will occur + * later in the callchain + */ + if (send_cmd->id == CXL_MEM_COMMAND_ID_RAW) { + const struct cxl_mem_command temp = { + .info = { + .id = CXL_MEM_COMMAND_ID_RAW, + .flags = 0, + .size_in = send_cmd->in.size, + .size_out = send_cmd->out.size, + }, + .opcode = send_cmd->raw.opcode + }; + + if (send_cmd->raw.rsvd) + return -EINVAL; + + /* + * Unlike supported commands, the output size of RAW commands + * gets passed along without further checking, so it must be + * validated here. + */ + if (send_cmd->out.size > cxlm->payload_size) + return -EINVAL; + + if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode)) + return -EPERM; + + memcpy(out_cmd, &temp, sizeof(temp)); + + return 0; + } + + if (send_cmd->flags & ~CXL_MEM_COMMAND_FLAG_MASK) + return -EINVAL; + + if (send_cmd->rsvd) + return -EINVAL; + + if (send_cmd->in.rsvd || send_cmd->out.rsvd) + return -EINVAL; + + /* Convert user's command into the internal representation */ + c = &cxl_mem_commands[send_cmd->id]; + info = &c->info; + + /* Check that the command is enabled for hardware */ + if (!test_bit(info->id, cxlm->enabled_cmds)) + return -ENOTTY; + + /* Check the input buffer is the expected size */ + if (info->size_in >= 0 && info->size_in != send_cmd->in.size) + return -ENOMEM; + + /* Check the output buffer is at least large enough */ + if (info->size_out >= 0 && send_cmd->out.size < info->size_out) + return -ENOMEM; + + memcpy(out_cmd, c, sizeof(*c)); + out_cmd->info.size_in = send_cmd->in.size; + /* + * XXX: out_cmd->info.size_out will be controlled by the driver, and the + * specified number of bytes @send_cmd->out.size will be copied back out + * to userspace. + */ + + return 0; +} + +#define cxl_cmd_count ARRAY_SIZE(cxl_mem_commands) + +int cxl_query_cmd(struct cxl_memdev *cxlmd, + struct cxl_mem_query_commands __user *q) +{ + struct device *dev = &cxlmd->dev; + struct cxl_mem_command *cmd; + u32 n_commands; + int j = 0; + + dev_dbg(dev, "Query IOCTL\n"); + + if (get_user(n_commands, &q->n_commands)) + return -EFAULT; + + /* returns the total number if 0 elements are requested. */ + if (n_commands == 0) + return put_user(cxl_cmd_count, &q->n_commands); + + /* + * otherwise, return max(n_commands, total commands) cxl_command_info + * structures. + */ + cxl_for_each_cmd(cmd) { + const struct cxl_command_info *info = &cmd->info; + + if (copy_to_user(&q->commands[j++], info, sizeof(*info))) + return -EFAULT; + + if (j == n_commands) + break; + } + + return 0; +} + +/** + * handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace. + * @cxlm: The CXL memory device to communicate with. + * @cmd: The validated command. + * @in_payload: Pointer to userspace's input payload. + * @out_payload: Pointer to userspace's output payload. 
+ * @size_out: (Input) Max payload size to copy out. + * (Output) Payload size hardware generated. + * @retval: Hardware generated return code from the operation. + * + * Return: + * * %0 - Mailbox transaction succeeded. This implies the mailbox + * protocol completed successfully not that the operation itself + * was successful. + * * %-ENOMEM - Couldn't allocate a bounce buffer. + * * %-EFAULT - Something happened with copy_to/from_user. + * * %-EINTR - Mailbox acquisition interrupted. + * * %-EXXX - Transaction level failures. + * + * Creates the appropriate mailbox command and dispatches it on behalf of a + * userspace request. The input and output payloads are copied between + * userspace. + * + * See cxl_send_cmd(). + */ +static int handle_mailbox_cmd_from_user(struct cxl_mem *cxlm, + const struct cxl_mem_command *cmd, + u64 in_payload, u64 out_payload, + s32 *size_out, u32 *retval) +{ + struct device *dev = cxlm->dev; + struct cxl_mbox_cmd mbox_cmd = { + .opcode = cmd->opcode, + .size_in = cmd->info.size_in, + .size_out = cmd->info.size_out, + }; + int rc; + + if (cmd->info.size_out) { + mbox_cmd.payload_out = kvzalloc(cmd->info.size_out, GFP_KERNEL); + if (!mbox_cmd.payload_out) + return -ENOMEM; + } + + if (cmd->info.size_in) { + mbox_cmd.payload_in = vmemdup_user(u64_to_user_ptr(in_payload), + cmd->info.size_in); + if (IS_ERR(mbox_cmd.payload_in)) { + kvfree(mbox_cmd.payload_out); + return PTR_ERR(mbox_cmd.payload_in); + } + } + + dev_dbg(dev, + "Submitting %s command for user\n" + "\topcode: %x\n" + "\tsize: %ub\n", + cxl_command_names[cmd->info.id].name, mbox_cmd.opcode, + cmd->info.size_in); + + dev_WARN_ONCE(dev, cmd->info.id == CXL_MEM_COMMAND_ID_RAW, + "raw command path used\n"); + + rc = cxlm->mbox_send(cxlm, &mbox_cmd); + if (rc) + goto out; + + /* + * @size_out contains the max size that's allowed to be written back out + * to userspace. While the payload may have written more output than + * this it will have to be ignored. 
+ */ + if (mbox_cmd.size_out) { + dev_WARN_ONCE(dev, mbox_cmd.size_out > *size_out, + "Invalid return size\n"); + if (copy_to_user(u64_to_user_ptr(out_payload), + mbox_cmd.payload_out, mbox_cmd.size_out)) { + rc = -EFAULT; + goto out; + } + } + + *size_out = mbox_cmd.size_out; + *retval = mbox_cmd.return_code; + +out: + kvfree(mbox_cmd.payload_in); + kvfree(mbox_cmd.payload_out); + return rc; +} + +int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s) +{ + struct cxl_mem *cxlm = cxlmd->cxlm; + struct device *dev = &cxlmd->dev; + struct cxl_send_command send; + struct cxl_mem_command c; + int rc; + + dev_dbg(dev, "Send IOCTL\n"); + + if (copy_from_user(&send, s, sizeof(send))) + return -EFAULT; + + rc = cxl_validate_cmd_from_user(cxlmd->cxlm, &send, &c); + if (rc) + return rc; + + /* Prepare to handle a full payload for variable sized output */ + if (c.info.size_out < 0) + c.info.size_out = cxlm->payload_size; + + rc = handle_mailbox_cmd_from_user(cxlm, &c, send.in.payload, + send.out.payload, &send.out.size, + &send.retval); + if (rc) + return rc; + + if (copy_to_user(s, &send, sizeof(send))) + return -EFAULT; + + return 0; +} + +static int cxl_xfer_log(struct cxl_mem *cxlm, uuid_t *uuid, u32 size, u8 *out) +{ + u32 remaining = size; + u32 offset = 0; + + while (remaining) { + u32 xfer_size = min_t(u32, remaining, cxlm->payload_size); + struct cxl_mbox_get_log { + uuid_t uuid; + __le32 offset; + __le32 length; + } __packed log = { + .uuid = *uuid, + .offset = cpu_to_le32(offset), + .length = cpu_to_le32(xfer_size) + }; + int rc; + + rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_LOG, &log, + sizeof(log), out, xfer_size); + if (rc < 0) + return rc; + + out += xfer_size; + remaining -= xfer_size; + offset += xfer_size; + } + + return 0; +} + +/** + * cxl_walk_cel() - Walk through the Command Effects Log. + * @cxlm: Device. + * @size: Length of the Command Effects Log. + * @cel: CEL + * + * Iterate over each entry in the CEL and determine if the driver supports the + * command. If so, the command is enabled for the device and can be used later. + */ +static void cxl_walk_cel(struct cxl_mem *cxlm, size_t size, u8 *cel) +{ + struct cel_entry { + __le16 opcode; + __le16 effect; + } __packed * cel_entry; + const int cel_entries = size / sizeof(*cel_entry); + int i; + + cel_entry = (struct cel_entry *)cel; + + for (i = 0; i < cel_entries; i++) { + u16 opcode = le16_to_cpu(cel_entry[i].opcode); + struct cxl_mem_command *cmd = cxl_mem_find_command(opcode); + + if (!cmd) { + dev_dbg(cxlm->dev, + "Opcode 0x%04x unsupported by driver", opcode); + continue; + } + + set_bit(cmd->info.id, cxlm->enabled_cmds); + } +} + +struct cxl_mbox_get_supported_logs { + __le16 entries; + u8 rsvd[6]; + struct gsl_entry { + uuid_t uuid; + __le32 size; + } __packed entry[]; +} __packed; + +static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_mem *cxlm) +{ + struct cxl_mbox_get_supported_logs *ret; + int rc; + + ret = kvmalloc(cxlm->payload_size, GFP_KERNEL); + if (!ret) + return ERR_PTR(-ENOMEM); + + rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_SUPPORTED_LOGS, NULL, + 0, ret, cxlm->payload_size); + if (rc < 0) { + kvfree(ret); + return ERR_PTR(rc); + } + + return ret; +} + +enum { + CEL_UUID, + VENDOR_DEBUG_UUID, +}; + +/* See CXL 2.0 Table 170. 
Get Log Input Payload */ +static const uuid_t log_uuid[] = { + [CEL_UUID] = UUID_INIT(0xda9c0b5, 0xbf41, 0x4b78, 0x8f, 0x79, 0x96, + 0xb1, 0x62, 0x3b, 0x3f, 0x17), + [VENDOR_DEBUG_UUID] = UUID_INIT(0xe1819d9, 0x11a9, 0x400c, 0x81, 0x1f, + 0xd6, 0x07, 0x19, 0x40, 0x3d, 0x86), +}; + +/** + * cxl_mem_enumerate_cmds() - Enumerate commands for a device. + * @cxlm: The device. + * + * Returns 0 if enumerate completed successfully. + * + * CXL devices have optional support for certain commands. This function will + * determine the set of supported commands for the hardware and update the + * enabled_cmds bitmap in the @cxlm. + */ +int cxl_mem_enumerate_cmds(struct cxl_mem *cxlm) +{ + struct cxl_mbox_get_supported_logs *gsl; + struct device *dev = cxlm->dev; + struct cxl_mem_command *cmd; + int i, rc; + + gsl = cxl_get_gsl(cxlm); + if (IS_ERR(gsl)) + return PTR_ERR(gsl); + + rc = -ENOENT; + for (i = 0; i < le16_to_cpu(gsl->entries); i++) { + u32 size = le32_to_cpu(gsl->entry[i].size); + uuid_t uuid = gsl->entry[i].uuid; + u8 *log; + + dev_dbg(dev, "Found LOG type %pU of size %d", &uuid, size); + + if (!uuid_equal(&uuid, &log_uuid[CEL_UUID])) + continue; + + log = kvmalloc(size, GFP_KERNEL); + if (!log) { + rc = -ENOMEM; + goto out; + } + + rc = cxl_xfer_log(cxlm, &uuid, size, log); + if (rc) { + kvfree(log); + goto out; + } + + cxl_walk_cel(cxlm, size, log); + kvfree(log); + + /* In case CEL was bogus, enable some default commands. */ + cxl_for_each_cmd(cmd) + if (cmd->flags & CXL_CMD_FLAG_FORCE_ENABLE) + set_bit(cmd->info.id, cxlm->enabled_cmds); + + /* Found the required CEL */ + rc = 0; + } + +out: + kvfree(gsl); + return rc; +} +EXPORT_SYMBOL_GPL(cxl_mem_enumerate_cmds); + +/** + * cxl_mem_get_partition_info - Get partition info + * @cxlm: cxl_mem instance to update partition info + * + * Retrieve the current partition info for the device specified. The active + * values are the current capacity in bytes. If not 0, the 'next' values are + * the pending values, in bytes, which take affect on next cold reset. + * + * Return: 0 if no error: or the result of the mailbox command. + * + * See CXL @8.2.9.5.2.1 Get Partition Info + */ +static int cxl_mem_get_partition_info(struct cxl_mem *cxlm) +{ + struct cxl_mbox_get_partition_info { + __le64 active_volatile_cap; + __le64 active_persistent_cap; + __le64 next_volatile_cap; + __le64 next_persistent_cap; + } __packed pi; + int rc; + + rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_PARTITION_INFO, + NULL, 0, &pi, sizeof(pi)); + + if (rc) + return rc; + + cxlm->active_volatile_bytes = + le64_to_cpu(pi.active_volatile_cap) * CXL_CAPACITY_MULTIPLIER; + cxlm->active_persistent_bytes = + le64_to_cpu(pi.active_persistent_cap) * CXL_CAPACITY_MULTIPLIER; + cxlm->next_volatile_bytes = + le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER; + cxlm->next_persistent_bytes = + le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER; + + return 0; +} + +/** + * cxl_mem_identify() - Send the IDENTIFY command to the device. + * @cxlm: The device to identify. + * + * Return: 0 if identify was executed successfully. + * + * This will dispatch the identify command to the device and on success populate + * structures to be exported to sysfs. 
+ */ +int cxl_mem_identify(struct cxl_mem *cxlm) +{ + /* See CXL 2.0 Table 175 Identify Memory Device Output Payload */ + struct cxl_mbox_identify { + char fw_revision[0x10]; + __le64 total_capacity; + __le64 volatile_capacity; + __le64 persistent_capacity; + __le64 partition_align; + __le16 info_event_log_size; + __le16 warning_event_log_size; + __le16 failure_event_log_size; + __le16 fatal_event_log_size; + __le32 lsa_size; + u8 poison_list_max_mer[3]; + __le16 inject_poison_limit; + u8 poison_caps; + u8 qos_telemetry_caps; + } __packed id; + int rc; + + rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_IDENTIFY, NULL, 0, &id, + sizeof(id)); + if (rc < 0) + return rc; + + cxlm->total_bytes = + le64_to_cpu(id.total_capacity) * CXL_CAPACITY_MULTIPLIER; + cxlm->volatile_only_bytes = + le64_to_cpu(id.volatile_capacity) * CXL_CAPACITY_MULTIPLIER; + cxlm->persistent_only_bytes = + le64_to_cpu(id.persistent_capacity) * CXL_CAPACITY_MULTIPLIER; + cxlm->partition_align_bytes = + le64_to_cpu(id.partition_align) * CXL_CAPACITY_MULTIPLIER; + + dev_dbg(cxlm->dev, + "Identify Memory Device\n" + " total_bytes = %#llx\n" + " volatile_only_bytes = %#llx\n" + " persistent_only_bytes = %#llx\n" + " partition_align_bytes = %#llx\n", + cxlm->total_bytes, cxlm->volatile_only_bytes, + cxlm->persistent_only_bytes, cxlm->partition_align_bytes); + + cxlm->lsa_size = le32_to_cpu(id.lsa_size); + memcpy(cxlm->firmware_version, id.fw_revision, sizeof(id.fw_revision)); + + return 0; +} +EXPORT_SYMBOL_GPL(cxl_mem_identify); + +int cxl_mem_create_range_info(struct cxl_mem *cxlm) +{ + int rc; + + if (cxlm->partition_align_bytes == 0) { + cxlm->ram_range.start = 0; + cxlm->ram_range.end = cxlm->volatile_only_bytes - 1; + cxlm->pmem_range.start = cxlm->volatile_only_bytes; + cxlm->pmem_range.end = cxlm->volatile_only_bytes + + cxlm->persistent_only_bytes - 1; + return 0; + } + + rc = cxl_mem_get_partition_info(cxlm); + if (rc) { + dev_err(cxlm->dev, "Failed to query partition information\n"); + return rc; + } + + dev_dbg(cxlm->dev, + "Get Partition Info\n" + " active_volatile_bytes = %#llx\n" + " active_persistent_bytes = %#llx\n" + " next_volatile_bytes = %#llx\n" + " next_persistent_bytes = %#llx\n", + cxlm->active_volatile_bytes, cxlm->active_persistent_bytes, + cxlm->next_volatile_bytes, cxlm->next_persistent_bytes); + + cxlm->ram_range.start = 0; + cxlm->ram_range.end = cxlm->active_volatile_bytes - 1; + + cxlm->pmem_range.start = cxlm->active_volatile_bytes; + cxlm->pmem_range.end = + cxlm->active_volatile_bytes + cxlm->active_persistent_bytes - 1; + + return 0; +} +EXPORT_SYMBOL_GPL(cxl_mem_create_range_info); + +struct cxl_mem *cxl_mem_create(struct device *dev) +{ + struct cxl_mem *cxlm; + + cxlm = devm_kzalloc(dev, sizeof(*cxlm), GFP_KERNEL); + if (!cxlm) { + dev_err(dev, "No memory available\n"); + return ERR_PTR(-ENOMEM); + } + + mutex_init(&cxlm->mbox_mutex); + cxlm->dev = dev; + cxlm->enabled_cmds = + devm_kmalloc_array(dev, BITS_TO_LONGS(cxl_cmd_count), + sizeof(unsigned long), + GFP_KERNEL | __GFP_ZERO); + if (!cxlm->enabled_cmds) { + dev_err(dev, "No memory available for bitmap\n"); + return ERR_PTR(-ENOMEM); + } + + return cxlm; +} +EXPORT_SYMBOL_GPL(cxl_mem_create); + +static struct dentry *cxl_debugfs; + +void __init cxl_mbox_init(void) +{ + struct dentry *mbox_debugfs; + + cxl_debugfs = debugfs_create_dir("cxl", NULL); + mbox_debugfs = debugfs_create_dir("mbox", cxl_debugfs); + debugfs_create_bool("raw_allow_all", 0600, mbox_debugfs, + &cxl_raw_allow_all); +} + +void cxl_mbox_exit(void) +{ + 
debugfs_remove_recursive(cxl_debugfs); +} diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c index 331ef7d6c598..df2ba87238c2 100644 --- a/drivers/cxl/core/memdev.c +++ b/drivers/cxl/core/memdev.c @@ -8,6 +8,8 @@ #include #include "core.h" +static DECLARE_RWSEM(cxl_memdev_rwsem); + /* * An entire PCI topology full of devices should be enough for any * config @@ -132,16 +134,21 @@ static const struct device_type cxl_memdev_type = { .groups = cxl_memdev_attribute_groups, }; +static void cxl_memdev_shutdown(struct device *dev) +{ + struct cxl_memdev *cxlmd = to_cxl_memdev(dev); + + down_write(&cxl_memdev_rwsem); + cxlmd->cxlm = NULL; + up_write(&cxl_memdev_rwsem); +} + static void cxl_memdev_unregister(void *_cxlmd) { struct cxl_memdev *cxlmd = _cxlmd; struct device *dev = &cxlmd->dev; - struct cdev *cdev = &cxlmd->cdev; - const struct cdevm_file_operations *cdevm_fops; - - cdevm_fops = container_of(cdev->ops, typeof(*cdevm_fops), fops); - cdevm_fops->shutdown(dev); + cxl_memdev_shutdown(dev); cdev_device_del(&cxlmd->cdev, dev); put_device(dev); } @@ -180,16 +187,72 @@ err: return ERR_PTR(rc); } +static long __cxl_memdev_ioctl(struct cxl_memdev *cxlmd, unsigned int cmd, + unsigned long arg) +{ + switch (cmd) { + case CXL_MEM_QUERY_COMMANDS: + return cxl_query_cmd(cxlmd, (void __user *)arg); + case CXL_MEM_SEND_COMMAND: + return cxl_send_cmd(cxlmd, (void __user *)arg); + default: + return -ENOTTY; + } +} + +static long cxl_memdev_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct cxl_memdev *cxlmd = file->private_data; + int rc = -ENXIO; + + down_read(&cxl_memdev_rwsem); + if (cxlmd->cxlm) + rc = __cxl_memdev_ioctl(cxlmd, cmd, arg); + up_read(&cxl_memdev_rwsem); + + return rc; +} + +static int cxl_memdev_open(struct inode *inode, struct file *file) +{ + struct cxl_memdev *cxlmd = + container_of(inode->i_cdev, typeof(*cxlmd), cdev); + + get_device(&cxlmd->dev); + file->private_data = cxlmd; + + return 0; +} + +static int cxl_memdev_release_file(struct inode *inode, struct file *file) +{ + struct cxl_memdev *cxlmd = + container_of(inode->i_cdev, typeof(*cxlmd), cdev); + + put_device(&cxlmd->dev); + + return 0; +} + +static const struct file_operations cxl_memdev_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = cxl_memdev_ioctl, + .open = cxl_memdev_open, + .release = cxl_memdev_release_file, + .compat_ioctl = compat_ptr_ioctl, + .llseek = noop_llseek, +}; + struct cxl_memdev * -devm_cxl_add_memdev(struct cxl_mem *cxlm, - const struct cdevm_file_operations *cdevm_fops) +devm_cxl_add_memdev(struct cxl_mem *cxlm) { struct cxl_memdev *cxlmd; struct device *dev; struct cdev *cdev; int rc; - cxlmd = cxl_memdev_alloc(cxlm, &cdevm_fops->fops); + cxlmd = cxl_memdev_alloc(cxlm, &cxl_memdev_fops); if (IS_ERR(cxlmd)) return cxlmd; @@ -219,7 +282,7 @@ err: * The cdev was briefly live, shutdown any ioctl operations that * saw that state. */ - cdevm_fops->shutdown(dev); + cxl_memdev_shutdown(dev); put_device(dev); return ERR_PTR(rc); } diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h index c142532db8cb..f5d3326a1d0b 100644 --- a/drivers/cxl/cxlmem.h +++ b/drivers/cxl/cxlmem.h @@ -2,6 +2,7 @@ /* Copyright(c) 2020-2021 Intel Corporation. 
*/ #ifndef __CXL_MEM_H__ #define __CXL_MEM_H__ +#include #include #include "cxl.h" @@ -28,21 +29,6 @@ (FIELD_GET(CXLMDEV_RESET_NEEDED_MASK, status) != \ CXLMDEV_RESET_NEEDED_NOT) -/** - * struct cdevm_file_operations - devm coordinated cdev file operations - * @fops: file operations that are synchronized against @shutdown - * @shutdown: disconnect driver data - * - * @shutdown is invoked in the devres release path to disconnect any - * driver instance data from @dev. It assumes synchronization with any - * fops operation that requires driver data. After @shutdown an - * operation may only reference @device data. - */ -struct cdevm_file_operations { - struct file_operations fops; - void (*shutdown)(struct device *dev); -}; - /** * struct cxl_memdev - CXL bus object representing a Type-3 Memory Device * @dev: driver core device object @@ -62,9 +48,7 @@ static inline struct cxl_memdev *to_cxl_memdev(struct device *dev) return container_of(dev, struct cxl_memdev, dev); } -struct cxl_memdev * -devm_cxl_add_memdev(struct cxl_mem *cxlm, - const struct cdevm_file_operations *cdevm_fops); +struct cxl_memdev *devm_cxl_add_memdev(struct cxl_mem *cxlm); /** * struct cxl_mbox_cmd - A command to be submitted to hardware. @@ -158,4 +142,62 @@ struct cxl_mem { int (*mbox_send)(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd); }; + +enum cxl_opcode { + CXL_MBOX_OP_INVALID = 0x0000, + CXL_MBOX_OP_RAW = CXL_MBOX_OP_INVALID, + CXL_MBOX_OP_GET_FW_INFO = 0x0200, + CXL_MBOX_OP_ACTIVATE_FW = 0x0202, + CXL_MBOX_OP_GET_SUPPORTED_LOGS = 0x0400, + CXL_MBOX_OP_GET_LOG = 0x0401, + CXL_MBOX_OP_IDENTIFY = 0x4000, + CXL_MBOX_OP_GET_PARTITION_INFO = 0x4100, + CXL_MBOX_OP_SET_PARTITION_INFO = 0x4101, + CXL_MBOX_OP_GET_LSA = 0x4102, + CXL_MBOX_OP_SET_LSA = 0x4103, + CXL_MBOX_OP_GET_HEALTH_INFO = 0x4200, + CXL_MBOX_OP_GET_ALERT_CONFIG = 0x4201, + CXL_MBOX_OP_SET_ALERT_CONFIG = 0x4202, + CXL_MBOX_OP_GET_SHUTDOWN_STATE = 0x4203, + CXL_MBOX_OP_SET_SHUTDOWN_STATE = 0x4204, + CXL_MBOX_OP_GET_POISON = 0x4300, + CXL_MBOX_OP_INJECT_POISON = 0x4301, + CXL_MBOX_OP_CLEAR_POISON = 0x4302, + CXL_MBOX_OP_GET_SCAN_MEDIA_CAPS = 0x4303, + CXL_MBOX_OP_SCAN_MEDIA = 0x4304, + CXL_MBOX_OP_GET_SCAN_MEDIA = 0x4305, + CXL_MBOX_OP_MAX = 0x10000 +}; + +/** + * struct cxl_mem_command - Driver representation of a memory device command + * @info: Command information as it exists for the UAPI + * @opcode: The actual bits used for the mailbox protocol + * @flags: Set of flags effecting driver behavior. + * + * * %CXL_CMD_FLAG_FORCE_ENABLE: In cases of error, commands with this flag + * will be enabled by the driver regardless of what hardware may have + * advertised. + * + * The cxl_mem_command is the driver's internal representation of commands that + * are supported by the driver. Some of these commands may not be supported by + * the hardware. The driver will use @info to validate the fields passed in by + * the user then submit the @opcode to the hardware. + * + * See struct cxl_command_info. 
+ */ +struct cxl_mem_command { + struct cxl_command_info info; + enum cxl_opcode opcode; + u32 flags; +#define CXL_CMD_FLAG_NONE 0 +#define CXL_CMD_FLAG_FORCE_ENABLE BIT(0) +}; + +int cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, u16 opcode, void *in, + size_t in_size, void *out, size_t out_size); +int cxl_mem_identify(struct cxl_mem *cxlm); +int cxl_mem_enumerate_cmds(struct cxl_mem *cxlm); +int cxl_mem_create_range_info(struct cxl_mem *cxlm); +struct cxl_mem *cxl_mem_create(struct device *dev); #endif /* __CXL_MEM_H__ */ diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index 9d8050fdd69c..c9f2ac134f4d 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -1,16 +1,12 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright(c) 2020 Intel Corporation. All rights reserved. */ -#include -#include -#include +#include #include #include #include #include -#include #include #include -#include #include "cxlmem.h" #include "pci.h" #include "cxl.h" @@ -37,162 +33,6 @@ /* CXL 2.0 - 8.2.8.4 */ #define CXL_MAILBOX_TIMEOUT_MS (2 * HZ) -enum opcode { - CXL_MBOX_OP_INVALID = 0x0000, - CXL_MBOX_OP_RAW = CXL_MBOX_OP_INVALID, - CXL_MBOX_OP_GET_FW_INFO = 0x0200, - CXL_MBOX_OP_ACTIVATE_FW = 0x0202, - CXL_MBOX_OP_GET_SUPPORTED_LOGS = 0x0400, - CXL_MBOX_OP_GET_LOG = 0x0401, - CXL_MBOX_OP_IDENTIFY = 0x4000, - CXL_MBOX_OP_GET_PARTITION_INFO = 0x4100, - CXL_MBOX_OP_SET_PARTITION_INFO = 0x4101, - CXL_MBOX_OP_GET_LSA = 0x4102, - CXL_MBOX_OP_SET_LSA = 0x4103, - CXL_MBOX_OP_GET_HEALTH_INFO = 0x4200, - CXL_MBOX_OP_GET_ALERT_CONFIG = 0x4201, - CXL_MBOX_OP_SET_ALERT_CONFIG = 0x4202, - CXL_MBOX_OP_GET_SHUTDOWN_STATE = 0x4203, - CXL_MBOX_OP_SET_SHUTDOWN_STATE = 0x4204, - CXL_MBOX_OP_GET_POISON = 0x4300, - CXL_MBOX_OP_INJECT_POISON = 0x4301, - CXL_MBOX_OP_CLEAR_POISON = 0x4302, - CXL_MBOX_OP_GET_SCAN_MEDIA_CAPS = 0x4303, - CXL_MBOX_OP_SCAN_MEDIA = 0x4304, - CXL_MBOX_OP_GET_SCAN_MEDIA = 0x4305, - CXL_MBOX_OP_MAX = 0x10000 -}; - -static DECLARE_RWSEM(cxl_memdev_rwsem); -static struct dentry *cxl_debugfs; -static bool cxl_raw_allow_all; - -enum { - CEL_UUID, - VENDOR_DEBUG_UUID, -}; - -/* See CXL 2.0 Table 170. Get Log Input Payload */ -static const uuid_t log_uuid[] = { - [CEL_UUID] = UUID_INIT(0xda9c0b5, 0xbf41, 0x4b78, 0x8f, 0x79, 0x96, - 0xb1, 0x62, 0x3b, 0x3f, 0x17), - [VENDOR_DEBUG_UUID] = UUID_INIT(0xe1819d9, 0x11a9, 0x400c, 0x81, 0x1f, - 0xd6, 0x07, 0x19, 0x40, 0x3d, 0x86), -}; - -/** - * struct cxl_mem_command - Driver representation of a memory device command - * @info: Command information as it exists for the UAPI - * @opcode: The actual bits used for the mailbox protocol - * @flags: Set of flags effecting driver behavior. - * - * * %CXL_CMD_FLAG_FORCE_ENABLE: In cases of error, commands with this flag - * will be enabled by the driver regardless of what hardware may have - * advertised. - * - * The cxl_mem_command is the driver's internal representation of commands that - * are supported by the driver. Some of these commands may not be supported by - * the hardware. The driver will use @info to validate the fields passed in by - * the user then submit the @opcode to the hardware. - * - * See struct cxl_command_info. 
- */ -struct cxl_mem_command { - struct cxl_command_info info; - enum opcode opcode; - u32 flags; -#define CXL_CMD_FLAG_NONE 0 -#define CXL_CMD_FLAG_FORCE_ENABLE BIT(0) -}; - -#define CXL_CMD(_id, sin, sout, _flags) \ - [CXL_MEM_COMMAND_ID_##_id] = { \ - .info = { \ - .id = CXL_MEM_COMMAND_ID_##_id, \ - .size_in = sin, \ - .size_out = sout, \ - }, \ - .opcode = CXL_MBOX_OP_##_id, \ - .flags = _flags, \ - } - -/* - * This table defines the supported mailbox commands for the driver. This table - * is made up of a UAPI structure. Non-negative values as parameters in the - * table will be validated against the user's input. For example, if size_in is - * 0, and the user passed in 1, it is an error. - */ -static struct cxl_mem_command mem_commands[CXL_MEM_COMMAND_ID_MAX] = { - CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE), -#ifdef CONFIG_CXL_MEM_RAW_COMMANDS - CXL_CMD(RAW, ~0, ~0, 0), -#endif - CXL_CMD(GET_SUPPORTED_LOGS, 0, ~0, CXL_CMD_FLAG_FORCE_ENABLE), - CXL_CMD(GET_FW_INFO, 0, 0x50, 0), - CXL_CMD(GET_PARTITION_INFO, 0, 0x20, 0), - CXL_CMD(GET_LSA, 0x8, ~0, 0), - CXL_CMD(GET_HEALTH_INFO, 0, 0x12, 0), - CXL_CMD(GET_LOG, 0x18, ~0, CXL_CMD_FLAG_FORCE_ENABLE), - CXL_CMD(SET_PARTITION_INFO, 0x0a, 0, 0), - CXL_CMD(SET_LSA, ~0, 0, 0), - CXL_CMD(GET_ALERT_CONFIG, 0, 0x10, 0), - CXL_CMD(SET_ALERT_CONFIG, 0xc, 0, 0), - CXL_CMD(GET_SHUTDOWN_STATE, 0, 0x1, 0), - CXL_CMD(SET_SHUTDOWN_STATE, 0x1, 0, 0), - CXL_CMD(GET_POISON, 0x10, ~0, 0), - CXL_CMD(INJECT_POISON, 0x8, 0, 0), - CXL_CMD(CLEAR_POISON, 0x48, 0, 0), - CXL_CMD(GET_SCAN_MEDIA_CAPS, 0x10, 0x4, 0), - CXL_CMD(SCAN_MEDIA, 0x11, 0, 0), - CXL_CMD(GET_SCAN_MEDIA, 0, ~0, 0), -}; - -/* - * Commands that RAW doesn't permit. The rationale for each: - * - * CXL_MBOX_OP_ACTIVATE_FW: Firmware activation requires adjustment / - * coordination of transaction timeout values at the root bridge level. - * - * CXL_MBOX_OP_SET_PARTITION_INFO: The device memory map may change live - * and needs to be coordinated with HDM updates. - * - * CXL_MBOX_OP_SET_LSA: The label storage area may be cached by the - * driver and any writes from userspace invalidates those contents. - * - * CXL_MBOX_OP_SET_SHUTDOWN_STATE: Set shutdown state assumes no writes - * to the device after it is marked clean, userspace can not make that - * assertion. - * - * CXL_MBOX_OP_[GET_]SCAN_MEDIA: The kernel provides a native error list that - * is kept up to date with patrol notifications and error management. - */ -static u16 cxl_disabled_raw_commands[] = { - CXL_MBOX_OP_ACTIVATE_FW, - CXL_MBOX_OP_SET_PARTITION_INFO, - CXL_MBOX_OP_SET_LSA, - CXL_MBOX_OP_SET_SHUTDOWN_STATE, - CXL_MBOX_OP_SCAN_MEDIA, - CXL_MBOX_OP_GET_SCAN_MEDIA, -}; - -/* - * Command sets that RAW doesn't permit. All opcodes in this set are - * disabled because they pass plain text security payloads over the - * user/kernel boundary. 
This functionality is intended to be wrapped - * behind the keys ABI which allows for encrypted payloads in the UAPI - */ -static u8 security_command_sets[] = { - 0x44, /* Sanitize */ - 0x45, /* Persistent Memory Data-at-rest Security */ - 0x46, /* Security Passthrough */ -}; - -#define cxl_for_each_cmd(cmd) \ - for ((cmd) = &mem_commands[0]; \ - ((cmd) - mem_commands) < ARRAY_SIZE(mem_commands); (cmd)++) - -#define cxl_cmd_count ARRAY_SIZE(mem_commands) - static int cxl_mem_wait_for_doorbell(struct cxl_mem *cxlm) { const unsigned long start = jiffies; @@ -215,16 +55,6 @@ static int cxl_mem_wait_for_doorbell(struct cxl_mem *cxlm) return 0; } -static bool cxl_is_security_command(u16 opcode) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(security_command_sets); i++) - if (security_command_sets[i] == (opcode >> 8)) - return true; - return false; -} - static void cxl_mem_mbox_timeout(struct cxl_mem *cxlm, struct cxl_mbox_cmd *mbox_cmd) { @@ -446,433 +276,6 @@ static int cxl_pci_mbox_send(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd) return rc; } -/** - * handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace. - * @cxlm: The CXL memory device to communicate with. - * @cmd: The validated command. - * @in_payload: Pointer to userspace's input payload. - * @out_payload: Pointer to userspace's output payload. - * @size_out: (Input) Max payload size to copy out. - * (Output) Payload size hardware generated. - * @retval: Hardware generated return code from the operation. - * - * Return: - * * %0 - Mailbox transaction succeeded. This implies the mailbox - * protocol completed successfully not that the operation itself - * was successful. - * * %-ENOMEM - Couldn't allocate a bounce buffer. - * * %-EFAULT - Something happened with copy_to/from_user. - * * %-EINTR - Mailbox acquisition interrupted. - * * %-EXXX - Transaction level failures. - * - * Creates the appropriate mailbox command and dispatches it on behalf of a - * userspace request. The input and output payloads are copied between - * userspace. - * - * See cxl_send_cmd(). - */ -static int handle_mailbox_cmd_from_user(struct cxl_mem *cxlm, - const struct cxl_mem_command *cmd, - u64 in_payload, u64 out_payload, - s32 *size_out, u32 *retval) -{ - struct device *dev = cxlm->dev; - struct cxl_mbox_cmd mbox_cmd = { - .opcode = cmd->opcode, - .size_in = cmd->info.size_in, - .size_out = cmd->info.size_out, - }; - int rc; - - if (cmd->info.size_out) { - mbox_cmd.payload_out = kvzalloc(cmd->info.size_out, GFP_KERNEL); - if (!mbox_cmd.payload_out) - return -ENOMEM; - } - - if (cmd->info.size_in) { - mbox_cmd.payload_in = vmemdup_user(u64_to_user_ptr(in_payload), - cmd->info.size_in); - if (IS_ERR(mbox_cmd.payload_in)) { - kvfree(mbox_cmd.payload_out); - return PTR_ERR(mbox_cmd.payload_in); - } - } - - dev_dbg(dev, - "Submitting %s command for user\n" - "\topcode: %x\n" - "\tsize: %ub\n", - cxl_command_names[cmd->info.id].name, mbox_cmd.opcode, - cmd->info.size_in); - - dev_WARN_ONCE(dev, cmd->info.id == CXL_MEM_COMMAND_ID_RAW, - "raw command path used\n"); - - rc = cxlm->mbox_send(cxlm, &mbox_cmd); - if (rc) - goto out; - - /* - * @size_out contains the max size that's allowed to be written back out - * to userspace. While the payload may have written more output than - * this it will have to be ignored. 
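A note on handle_mailbox_cmd_from_user(), removed from pci.c here as the mailbox plumbing is relocated: it bounces both payloads through kernel memory rather than mapping user pages, output via kvzalloc() and input via vmemdup_user(). A reduced sketch of the same copy discipline (the hardware submission step is elided; names are hypothetical):

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

static int bounce_user_payload(void __user *in, size_t in_size,
			       void __user *out, size_t out_size)
{
	void *kin = NULL, *kout = NULL;
	int rc = 0;

	if (out_size) {
		kout = kvzalloc(out_size, GFP_KERNEL);
		if (!kout)
			return -ENOMEM;
	}
	if (in_size) {
		kin = vmemdup_user(in, in_size);
		if (IS_ERR(kin)) {
			kvfree(kout);
			return PTR_ERR(kin);
		}
	}

	/* ... submit kin / kout to the device here ... */

	if (kout && copy_to_user(out, kout, out_size))
		rc = -EFAULT;

	kvfree(kin);
	kvfree(kout);
	return rc;
}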
- */ - if (mbox_cmd.size_out) { - dev_WARN_ONCE(dev, mbox_cmd.size_out > *size_out, - "Invalid return size\n"); - if (copy_to_user(u64_to_user_ptr(out_payload), - mbox_cmd.payload_out, mbox_cmd.size_out)) { - rc = -EFAULT; - goto out; - } - } - - *size_out = mbox_cmd.size_out; - *retval = mbox_cmd.return_code; - -out: - kvfree(mbox_cmd.payload_in); - kvfree(mbox_cmd.payload_out); - return rc; -} - -static bool cxl_mem_raw_command_allowed(u16 opcode) -{ - int i; - - if (!IS_ENABLED(CONFIG_CXL_MEM_RAW_COMMANDS)) - return false; - - if (security_locked_down(LOCKDOWN_PCI_ACCESS)) - return false; - - if (cxl_raw_allow_all) - return true; - - if (cxl_is_security_command(opcode)) - return false; - - for (i = 0; i < ARRAY_SIZE(cxl_disabled_raw_commands); i++) - if (cxl_disabled_raw_commands[i] == opcode) - return false; - - return true; -} - -/** - * cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND. - * @cxlm: &struct cxl_mem device whose mailbox will be used. - * @send_cmd: &struct cxl_send_command copied in from userspace. - * @out_cmd: Sanitized and populated &struct cxl_mem_command. - * - * Return: - * * %0 - @out_cmd is ready to send. - * * %-ENOTTY - Invalid command specified. - * * %-EINVAL - Reserved fields or invalid values were used. - * * %-ENOMEM - Input or output buffer wasn't sized properly. - * * %-EPERM - Attempted to use a protected command. - * - * The result of this command is a fully validated command in @out_cmd that is - * safe to send to the hardware. - * - * See handle_mailbox_cmd_from_user() - */ -static int cxl_validate_cmd_from_user(struct cxl_mem *cxlm, - const struct cxl_send_command *send_cmd, - struct cxl_mem_command *out_cmd) -{ - const struct cxl_command_info *info; - struct cxl_mem_command *c; - - if (send_cmd->id == 0 || send_cmd->id >= CXL_MEM_COMMAND_ID_MAX) - return -ENOTTY; - - /* - * The user can never specify an input payload larger than what hardware - * supports, but output can be arbitrarily large (simply write out as - * much data as the hardware provides). - */ - if (send_cmd->in.size > cxlm->payload_size) - return -EINVAL; - - /* - * Checks are bypassed for raw commands but a WARN/taint will occur - * later in the callchain - */ - if (send_cmd->id == CXL_MEM_COMMAND_ID_RAW) { - const struct cxl_mem_command temp = { - .info = { - .id = CXL_MEM_COMMAND_ID_RAW, - .flags = 0, - .size_in = send_cmd->in.size, - .size_out = send_cmd->out.size, - }, - .opcode = send_cmd->raw.opcode - }; - - if (send_cmd->raw.rsvd) - return -EINVAL; - - /* - * Unlike supported commands, the output size of RAW commands - * gets passed along without further checking, so it must be - * validated here. 
- */ - if (send_cmd->out.size > cxlm->payload_size) - return -EINVAL; - - if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode)) - return -EPERM; - - memcpy(out_cmd, &temp, sizeof(temp)); - - return 0; - } - - if (send_cmd->flags & ~CXL_MEM_COMMAND_FLAG_MASK) - return -EINVAL; - - if (send_cmd->rsvd) - return -EINVAL; - - if (send_cmd->in.rsvd || send_cmd->out.rsvd) - return -EINVAL; - - /* Convert user's command into the internal representation */ - c = &mem_commands[send_cmd->id]; - info = &c->info; - - /* Check that the command is enabled for hardware */ - if (!test_bit(info->id, cxlm->enabled_cmds)) - return -ENOTTY; - - /* Check the input buffer is the expected size */ - if (info->size_in >= 0 && info->size_in != send_cmd->in.size) - return -ENOMEM; - - /* Check the output buffer is at least large enough */ - if (info->size_out >= 0 && send_cmd->out.size < info->size_out) - return -ENOMEM; - - memcpy(out_cmd, c, sizeof(*c)); - out_cmd->info.size_in = send_cmd->in.size; - /* - * XXX: out_cmd->info.size_out will be controlled by the driver, and the - * specified number of bytes @send_cmd->out.size will be copied back out - * to userspace. - */ - - return 0; -} - -static int cxl_query_cmd(struct cxl_memdev *cxlmd, - struct cxl_mem_query_commands __user *q) -{ - struct device *dev = &cxlmd->dev; - struct cxl_mem_command *cmd; - u32 n_commands; - int j = 0; - - dev_dbg(dev, "Query IOCTL\n"); - - if (get_user(n_commands, &q->n_commands)) - return -EFAULT; - - /* returns the total number if 0 elements are requested. */ - if (n_commands == 0) - return put_user(cxl_cmd_count, &q->n_commands); - - /* - * otherwise, return max(n_commands, total commands) cxl_command_info - * structures. - */ - cxl_for_each_cmd(cmd) { - const struct cxl_command_info *info = &cmd->info; - - if (copy_to_user(&q->commands[j++], info, sizeof(*info))) - return -EFAULT; - - if (j == n_commands) - break; - } - - return 0; -} - -static int cxl_send_cmd(struct cxl_memdev *cxlmd, - struct cxl_send_command __user *s) -{ - struct cxl_mem *cxlm = cxlmd->cxlm; - struct device *dev = &cxlmd->dev; - struct cxl_send_command send; - struct cxl_mem_command c; - int rc; - - dev_dbg(dev, "Send IOCTL\n"); - - if (copy_from_user(&send, s, sizeof(send))) - return -EFAULT; - - rc = cxl_validate_cmd_from_user(cxlmd->cxlm, &send, &c); - if (rc) - return rc; - - /* Prepare to handle a full payload for variable sized output */ - if (c.info.size_out < 0) - c.info.size_out = cxlm->payload_size; - - rc = handle_mailbox_cmd_from_user(cxlm, &c, send.in.payload, - send.out.payload, &send.out.size, - &send.retval); - if (rc) - return rc; - - if (copy_to_user(s, &send, sizeof(send))) - return -EFAULT; - - return 0; -} - -static long __cxl_memdev_ioctl(struct cxl_memdev *cxlmd, unsigned int cmd, - unsigned long arg) -{ - switch (cmd) { - case CXL_MEM_QUERY_COMMANDS: - return cxl_query_cmd(cxlmd, (void __user *)arg); - case CXL_MEM_SEND_COMMAND: - return cxl_send_cmd(cxlmd, (void __user *)arg); - default: - return -ENOTTY; - } -} - -static long cxl_memdev_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) -{ - struct cxl_memdev *cxlmd = file->private_data; - int rc = -ENXIO; - - down_read(&cxl_memdev_rwsem); - if (cxlmd->cxlm) - rc = __cxl_memdev_ioctl(cxlmd, cmd, arg); - up_read(&cxl_memdev_rwsem); - - return rc; -} - -static int cxl_memdev_open(struct inode *inode, struct file *file) -{ - struct cxl_memdev *cxlmd = - container_of(inode->i_cdev, typeof(*cxlmd), cdev); - - get_device(&cxlmd->dev); - file->private_data = cxlmd; - - return 
0; -} - -static int cxl_memdev_release_file(struct inode *inode, struct file *file) -{ - struct cxl_memdev *cxlmd = - container_of(inode->i_cdev, typeof(*cxlmd), cdev); - - put_device(&cxlmd->dev); - - return 0; -} - -static void cxl_memdev_shutdown(struct device *dev) -{ - struct cxl_memdev *cxlmd = to_cxl_memdev(dev); - - down_write(&cxl_memdev_rwsem); - cxlmd->cxlm = NULL; - up_write(&cxl_memdev_rwsem); -} - -static const struct cdevm_file_operations cxl_memdev_fops = { - .fops = { - .owner = THIS_MODULE, - .unlocked_ioctl = cxl_memdev_ioctl, - .open = cxl_memdev_open, - .release = cxl_memdev_release_file, - .compat_ioctl = compat_ptr_ioctl, - .llseek = noop_llseek, - }, - .shutdown = cxl_memdev_shutdown, -}; - -static inline struct cxl_mem_command *cxl_mem_find_command(u16 opcode) -{ - struct cxl_mem_command *c; - - cxl_for_each_cmd(c) - if (c->opcode == opcode) - return c; - - return NULL; -} - -/** - * cxl_mem_mbox_send_cmd() - Send a mailbox command to a memory device. - * @cxlm: The CXL memory device to communicate with. - * @opcode: Opcode for the mailbox command. - * @in: The input payload for the mailbox command. - * @in_size: The length of the input payload - * @out: Caller allocated buffer for the output. - * @out_size: Expected size of output. - * - * Context: Any context. Will acquire and release mbox_mutex. - * Return: - * * %>=0 - Number of bytes returned in @out. - * * %-E2BIG - Payload is too large for hardware. - * * %-EBUSY - Couldn't acquire exclusive mailbox access. - * * %-EFAULT - Hardware error occurred. - * * %-ENXIO - Command completed, but device reported an error. - * * %-EIO - Unexpected output size. - * - * Mailbox commands may execute successfully yet the device itself reported an - * error. While this distinction can be useful for commands from userspace, the - * kernel will only be able to use results when both are successful. - * - * See __cxl_mem_mbox_send_cmd() - */ -static int cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, u16 opcode, - void *in, size_t in_size, - void *out, size_t out_size) -{ - const struct cxl_mem_command *cmd = cxl_mem_find_command(opcode); - struct cxl_mbox_cmd mbox_cmd = { - .opcode = opcode, - .payload_in = in, - .size_in = in_size, - .size_out = out_size, - .payload_out = out, - }; - int rc; - - if (out_size > cxlm->payload_size) - return -E2BIG; - - rc = cxlm->mbox_send(cxlm, &mbox_cmd); - if (rc) - return rc; - - /* TODO: Map return code to proper kernel style errno */ - if (mbox_cmd.return_code != CXL_MBOX_SUCCESS) - return -ENXIO; - - /* - * Variable sized commands can't be validated and so it's up to the - * caller to do that if they wish. 
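cxl_mem_mbox_send_cmd() survives this move as the kernel-internal entry point (its declaration lands in cxlmem.h above). A hedged caller-side sketch, sized to the 0x12-byte GET_HEALTH_INFO output declared in the command table, with payload parsing elided:

static int example_get_health(struct cxl_mem *cxlm)
{
	u8 out[0x12];	/* matches CXL_CMD(GET_HEALTH_INFO, 0, 0x12, 0) */
	int rc;

	rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_HEALTH_INFO,
				   NULL, 0, out, sizeof(out));
	if (rc)
		return rc;

	/* a mismatched reply length already returned -EIO above */
	return 0;
}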
- */ - if (cmd->info.size_out >= 0 && mbox_cmd.size_out != out_size) - return -EIO; - - return 0; -} - static int cxl_mem_setup_mailbox(struct cxl_mem *cxlm) { const int cap = readl(cxlm->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET); @@ -901,30 +304,6 @@ static int cxl_mem_setup_mailbox(struct cxl_mem *cxlm) return 0; } -static struct cxl_mem *cxl_mem_create(struct device *dev) -{ - struct cxl_mem *cxlm; - - cxlm = devm_kzalloc(dev, sizeof(*cxlm), GFP_KERNEL); - if (!cxlm) { - dev_err(dev, "No memory available\n"); - return ERR_PTR(-ENOMEM); - } - - mutex_init(&cxlm->mbox_mutex); - cxlm->dev = dev; - cxlm->enabled_cmds = - devm_kmalloc_array(dev, BITS_TO_LONGS(cxl_cmd_count), - sizeof(unsigned long), - GFP_KERNEL | __GFP_ZERO); - if (!cxlm->enabled_cmds) { - dev_err(dev, "No memory available for bitmap\n"); - return ERR_PTR(-ENOMEM); - } - - return cxlm; -} - static void __iomem *cxl_mem_map_regblock(struct cxl_mem *cxlm, u8 bar, u64 offset) { @@ -1132,298 +511,6 @@ static int cxl_mem_setup_regs(struct cxl_mem *cxlm) return ret; } -static int cxl_xfer_log(struct cxl_mem *cxlm, uuid_t *uuid, u32 size, u8 *out) -{ - u32 remaining = size; - u32 offset = 0; - - while (remaining) { - u32 xfer_size = min_t(u32, remaining, cxlm->payload_size); - struct cxl_mbox_get_log { - uuid_t uuid; - __le32 offset; - __le32 length; - } __packed log = { - .uuid = *uuid, - .offset = cpu_to_le32(offset), - .length = cpu_to_le32(xfer_size) - }; - int rc; - - rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_LOG, &log, - sizeof(log), out, xfer_size); - if (rc < 0) - return rc; - - out += xfer_size; - remaining -= xfer_size; - offset += xfer_size; - } - - return 0; -} - -/** - * cxl_walk_cel() - Walk through the Command Effects Log. - * @cxlm: Device. - * @size: Length of the Command Effects Log. - * @cel: CEL - * - * Iterate over each entry in the CEL and determine if the driver supports the - * command. If so, the command is enabled for the device and can be used later. - */ -static void cxl_walk_cel(struct cxl_mem *cxlm, size_t size, u8 *cel) -{ - struct cel_entry { - __le16 opcode; - __le16 effect; - } __packed * cel_entry; - const int cel_entries = size / sizeof(*cel_entry); - int i; - - cel_entry = (struct cel_entry *)cel; - - for (i = 0; i < cel_entries; i++) { - u16 opcode = le16_to_cpu(cel_entry[i].opcode); - struct cxl_mem_command *cmd = cxl_mem_find_command(opcode); - - if (!cmd) { - dev_dbg(cxlm->dev, - "Opcode 0x%04x unsupported by driver", opcode); - continue; - } - - set_bit(cmd->info.id, cxlm->enabled_cmds); - } -} - -struct cxl_mbox_get_supported_logs { - __le16 entries; - u8 rsvd[6]; - struct gsl_entry { - uuid_t uuid; - __le32 size; - } __packed entry[]; -} __packed; - -static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_mem *cxlm) -{ - struct cxl_mbox_get_supported_logs *ret; - int rc; - - ret = kvmalloc(cxlm->payload_size, GFP_KERNEL); - if (!ret) - return ERR_PTR(-ENOMEM); - - rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_SUPPORTED_LOGS, NULL, - 0, ret, cxlm->payload_size); - if (rc < 0) { - kvfree(ret); - return ERR_PTR(rc); - } - - return ret; -} - -/** - * cxl_mem_get_partition_info - Get partition info - * @cxlm: cxl_mem instance to update partition info - * - * Retrieve the current partition info for the device specified. If not 0, the - * 'next' values are pending and take affect on next cold reset. - * - * Return: 0 if no error: or the result of the mailbox command. 
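Capacity fields in this payload (as in Identify) are expressed in units of CXL_CAPACITY_MULTIPLIER, which this driver defines as SZ_256M, so the decode is a single multiply. A worked fragment:

	/* e.g. a raw active_volatile_cap of 4 decodes to 1GiB */
	u64 raw = le64_to_cpu(pi.active_volatile_cap);
	u64 bytes = raw * CXL_CAPACITY_MULTIPLIER;	/* 4 * SZ_256M == SZ_1G */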
- * - * See CXL @8.2.9.5.2.1 Get Partition Info - */ -static int cxl_mem_get_partition_info(struct cxl_mem *cxlm) -{ - struct cxl_mbox_get_partition_info { - __le64 active_volatile_cap; - __le64 active_persistent_cap; - __le64 next_volatile_cap; - __le64 next_persistent_cap; - } __packed pi; - int rc; - - rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_PARTITION_INFO, - NULL, 0, &pi, sizeof(pi)); - if (rc) - return rc; - - cxlm->active_volatile_bytes = - le64_to_cpu(pi.active_volatile_cap) * CXL_CAPACITY_MULTIPLIER; - cxlm->active_persistent_bytes = - le64_to_cpu(pi.active_persistent_cap) * CXL_CAPACITY_MULTIPLIER; - cxlm->next_volatile_bytes = - le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER; - cxlm->next_persistent_bytes = - le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER; - - return 0; -} - -/** - * cxl_mem_enumerate_cmds() - Enumerate commands for a device. - * @cxlm: The device. - * - * Returns 0 if enumerate completed successfully. - * - * CXL devices have optional support for certain commands. This function will - * determine the set of supported commands for the hardware and update the - * enabled_cmds bitmap in the @cxlm. - */ -static int cxl_mem_enumerate_cmds(struct cxl_mem *cxlm) -{ - struct cxl_mbox_get_supported_logs *gsl; - struct device *dev = cxlm->dev; - struct cxl_mem_command *cmd; - int i, rc; - - gsl = cxl_get_gsl(cxlm); - if (IS_ERR(gsl)) - return PTR_ERR(gsl); - - rc = -ENOENT; - for (i = 0; i < le16_to_cpu(gsl->entries); i++) { - u32 size = le32_to_cpu(gsl->entry[i].size); - uuid_t uuid = gsl->entry[i].uuid; - u8 *log; - - dev_dbg(dev, "Found LOG type %pU of size %d", &uuid, size); - - if (!uuid_equal(&uuid, &log_uuid[CEL_UUID])) - continue; - - log = kvmalloc(size, GFP_KERNEL); - if (!log) { - rc = -ENOMEM; - goto out; - } - - rc = cxl_xfer_log(cxlm, &uuid, size, log); - if (rc) { - kvfree(log); - goto out; - } - - cxl_walk_cel(cxlm, size, log); - kvfree(log); - - /* In case CEL was bogus, enable some default commands. */ - cxl_for_each_cmd(cmd) - if (cmd->flags & CXL_CMD_FLAG_FORCE_ENABLE) - set_bit(cmd->info.id, cxlm->enabled_cmds); - - /* Found the required CEL */ - rc = 0; - } - -out: - kvfree(gsl); - return rc; -} - -/** - * cxl_mem_identify() - Send the IDENTIFY command to the device. - * @cxlm: The device to identify. - * - * Return: 0 if identify was executed successfully. - * - * This will dispatch the identify command to the device and on success populate - * structures to be exported to sysfs. 
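For a statically partitioned device (partition_align_bytes == 0), the range setup that follows lays volatile and persistent capacity back to back. A worked example, assuming 1GiB of each:

	/* volatile_only_bytes == SZ_1G, persistent_only_bytes == SZ_1G */
	cxlm->ram_range.start  = 0;			/* 0x00000000 */
	cxlm->ram_range.end    = SZ_1G - 1;		/* 0x3fffffff */
	cxlm->pmem_range.start = SZ_1G;			/* 0x40000000 */
	cxlm->pmem_range.end   = 2ULL * SZ_1G - 1;	/* 0x7fffffff */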
- */ -static int cxl_mem_identify(struct cxl_mem *cxlm) -{ - /* See CXL 2.0 Table 175 Identify Memory Device Output Payload */ - struct cxl_mbox_identify { - char fw_revision[0x10]; - __le64 total_capacity; - __le64 volatile_capacity; - __le64 persistent_capacity; - __le64 partition_align; - __le16 info_event_log_size; - __le16 warning_event_log_size; - __le16 failure_event_log_size; - __le16 fatal_event_log_size; - __le32 lsa_size; - u8 poison_list_max_mer[3]; - __le16 inject_poison_limit; - u8 poison_caps; - u8 qos_telemetry_caps; - } __packed id; - int rc; - - rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_IDENTIFY, NULL, 0, &id, - sizeof(id)); - if (rc < 0) - return rc; - - cxlm->total_bytes = le64_to_cpu(id.total_capacity); - cxlm->total_bytes *= CXL_CAPACITY_MULTIPLIER; - - cxlm->volatile_only_bytes = le64_to_cpu(id.volatile_capacity); - cxlm->volatile_only_bytes *= CXL_CAPACITY_MULTIPLIER; - - cxlm->persistent_only_bytes = le64_to_cpu(id.persistent_capacity); - cxlm->persistent_only_bytes *= CXL_CAPACITY_MULTIPLIER; - - cxlm->partition_align_bytes = le64_to_cpu(id.partition_align); - cxlm->partition_align_bytes *= CXL_CAPACITY_MULTIPLIER; - - dev_dbg(cxlm->dev, - "Identify Memory Device\n" - " total_bytes = %#llx\n" - " volatile_only_bytes = %#llx\n" - " persistent_only_bytes = %#llx\n" - " partition_align_bytes = %#llx\n", - cxlm->total_bytes, cxlm->volatile_only_bytes, - cxlm->persistent_only_bytes, cxlm->partition_align_bytes); - - cxlm->lsa_size = le32_to_cpu(id.lsa_size); - memcpy(cxlm->firmware_version, id.fw_revision, sizeof(id.fw_revision)); - - return 0; -} - -static int cxl_mem_create_range_info(struct cxl_mem *cxlm) -{ - int rc; - - if (cxlm->partition_align_bytes == 0) { - cxlm->ram_range.start = 0; - cxlm->ram_range.end = cxlm->volatile_only_bytes - 1; - cxlm->pmem_range.start = cxlm->volatile_only_bytes; - cxlm->pmem_range.end = cxlm->volatile_only_bytes + - cxlm->persistent_only_bytes - 1; - return 0; - } - - rc = cxl_mem_get_partition_info(cxlm); - if (rc < 0) { - dev_err(cxlm->dev, "Failed to query partition information\n"); - return rc; - } - - dev_dbg(cxlm->dev, - "Get Partition Info\n" - " active_volatile_bytes = %#llx\n" - " active_persistent_bytes = %#llx\n" - " next_volatile_bytes = %#llx\n" - " next_persistent_bytes = %#llx\n", - cxlm->active_volatile_bytes, cxlm->active_persistent_bytes, - cxlm->next_volatile_bytes, cxlm->next_persistent_bytes); - - cxlm->ram_range.start = 0; - cxlm->ram_range.end = cxlm->active_volatile_bytes - 1; - - cxlm->pmem_range.start = cxlm->active_volatile_bytes; - cxlm->pmem_range.end = cxlm->active_volatile_bytes + - cxlm->active_persistent_bytes - 1; - - return 0; -} - static int cxl_mem_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct cxl_memdev *cxlmd; @@ -1458,7 +545,7 @@ static int cxl_mem_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (rc) return rc; - cxlmd = devm_cxl_add_memdev(cxlm, &cxl_memdev_fops); + cxlmd = devm_cxl_add_memdev(cxlm); if (IS_ERR(cxlmd)) return PTR_ERR(cxlmd); @@ -1486,7 +573,6 @@ static struct pci_driver cxl_mem_driver = { static __init int cxl_mem_init(void) { - struct dentry *mbox_debugfs; int rc; /* Double check the anonymous union trickery in struct cxl_regs */ @@ -1497,17 +583,11 @@ static __init int cxl_mem_init(void) if (rc) return rc; - cxl_debugfs = debugfs_create_dir("cxl", NULL); - mbox_debugfs = debugfs_create_dir("mbox", cxl_debugfs); - debugfs_create_bool("raw_allow_all", 0600, mbox_debugfs, - &cxl_raw_allow_all); - return 0; } static __exit void 
cxl_mem_exit(void) { - debugfs_remove_recursive(cxl_debugfs); pci_unregister_driver(&cxl_mem_driver); } -- cgit v1.2.3 From 5a2328f4e872a5bcbb2ff790497f000e8f79b152 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 8 Sep 2021 22:12:38 -0700 Subject: cxl/pci: Use module_pci_driver Now that cxl_mem_{init,exit} no longer need to manage debugfs, switch back to the smaller form of the boiler plate. Acked-by: Ben Widawsky Reviewed-by: Jonathan Cameron Link: https://lore.kernel.org/r/163116435825.2460985.7201322215431441130.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams --- drivers/cxl/pci.c | 30 ++++++++---------------------- 1 file changed, 8 insertions(+), 22 deletions(-) (limited to 'drivers/cxl') diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index c9f2ac134f4d..27f75b5a2ee2 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -517,6 +517,13 @@ static int cxl_mem_probe(struct pci_dev *pdev, const struct pci_device_id *id) struct cxl_mem *cxlm; int rc; + /* + * Double check the anonymous union trickery in struct cxl_regs + * FIXME switch to struct_group() + */ + BUILD_BUG_ON(offsetof(struct cxl_regs, memdev) != + offsetof(struct cxl_regs, device_regs.memdev)); + rc = pcim_enable_device(pdev); if (rc) return rc; @@ -571,27 +578,6 @@ static struct pci_driver cxl_mem_driver = { }, }; -static __init int cxl_mem_init(void) -{ - int rc; - - /* Double check the anonymous union trickery in struct cxl_regs */ - BUILD_BUG_ON(offsetof(struct cxl_regs, memdev) != - offsetof(struct cxl_regs, device_regs.memdev)); - - rc = pci_register_driver(&cxl_mem_driver); - if (rc) - return rc; - - return 0; -} - -static __exit void cxl_mem_exit(void) -{ - pci_unregister_driver(&cxl_mem_driver); -} - MODULE_LICENSE("GPL v2"); -module_init(cxl_mem_init); -module_exit(cxl_mem_exit); +module_pci_driver(cxl_mem_driver); MODULE_IMPORT_NS(CXL); -- cgit v1.2.3 From ff56ab9e164d71c4a6ae33fc61ae856faec265a1 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 8 Sep 2021 22:12:44 -0700 Subject: cxl/mbox: Convert 'enabled_cmds' to DECLARE_BITMAP Define enabled_cmds as an embedded member of 'struct cxl_mem' rather than a pointer to another dynamic allocation. As this leaves only one user of cxl_cmd_count, just open code it and delete the helper. Acked-by: Ben Widawsky Reviewed-by: Jonathan Cameron Link: https://lore.kernel.org/r/163116436415.2460985.10101824045493194813.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams --- drivers/cxl/core/mbox.c | 12 +----------- drivers/cxl/cxlmem.h | 2 +- 2 files changed, 2 insertions(+), 12 deletions(-) (limited to 'drivers/cxl') diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c index 31e183991726..422999740649 100644 --- a/drivers/cxl/core/mbox.c +++ b/drivers/cxl/core/mbox.c @@ -315,8 +315,6 @@ static int cxl_validate_cmd_from_user(struct cxl_mem *cxlm, return 0; } -#define cxl_cmd_count ARRAY_SIZE(cxl_mem_commands) - int cxl_query_cmd(struct cxl_memdev *cxlmd, struct cxl_mem_query_commands __user *q) { @@ -332,7 +330,7 @@ int cxl_query_cmd(struct cxl_memdev *cxlmd, /* returns the total number if 0 elements are requested. 
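The n_commands == 0 convention in cxl_query_cmd() supports the usual two-call pattern from userspace. A sketch against the include/uapi/linux/cxl_mem.h layout (error handling trimmed; this is illustrative, not a published libcxl API):

#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/cxl_mem.h>

static struct cxl_mem_query_commands *query_commands(int fd)
{
	struct cxl_mem_query_commands probe = { .n_commands = 0 }, *q;

	/* first call: n_commands == 0 asks for the total count only */
	if (ioctl(fd, CXL_MEM_QUERY_COMMANDS, &probe) < 0)
		return NULL;

	q = calloc(1, sizeof(*q) +
		      probe.n_commands * sizeof(struct cxl_command_info));
	if (!q)
		return NULL;
	q->n_commands = probe.n_commands;

	/* second call: the kernel fills in up to n_commands entries */
	if (ioctl(fd, CXL_MEM_QUERY_COMMANDS, q) < 0) {
		free(q);
		return NULL;
	}
	return q;
}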
*/ if (n_commands == 0) - return put_user(cxl_cmd_count, &q->n_commands); + return put_user(ARRAY_SIZE(cxl_mem_commands), &q->n_commands); /* * otherwise, return max(n_commands, total commands) cxl_command_info @@ -794,14 +792,6 @@ struct cxl_mem *cxl_mem_create(struct device *dev) mutex_init(&cxlm->mbox_mutex); cxlm->dev = dev; - cxlm->enabled_cmds = - devm_kmalloc_array(dev, BITS_TO_LONGS(cxl_cmd_count), - sizeof(unsigned long), - GFP_KERNEL | __GFP_ZERO); - if (!cxlm->enabled_cmds) { - dev_err(dev, "No memory available for bitmap\n"); - return ERR_PTR(-ENOMEM); - } return cxlm; } diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h index f5d3326a1d0b..8f59a89a0aab 100644 --- a/drivers/cxl/cxlmem.h +++ b/drivers/cxl/cxlmem.h @@ -126,7 +126,7 @@ struct cxl_mem { size_t lsa_size; struct mutex mbox_mutex; /* Protects device mailbox and firmware */ char firmware_version[0x10]; - unsigned long *enabled_cmds; + DECLARE_BITMAP(enabled_cmds, CXL_MEM_COMMAND_ID_MAX); struct range pmem_range; struct range ram_range; -- cgit v1.2.3 From 12f3856ad42d6ce0dbd4266e105c04ae999f908c Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 14 Sep 2021 12:03:04 -0700 Subject: cxl/mbox: Add exclusive kernel command support The CXL_PMEM driver expects exclusive control of the label storage area space. Similar to the LIBNVDIMM expectation that the label storage area is only writable from userspace when the corresponding memory device is not active in any region, the expectation is the native CXL_PCI UAPI path is disabled while the cxl_nvdimm for a given cxl_memdev device is active in LIBNVDIMM. Add the ability to toggle the availability of a given command for the UAPI path. Use that new capability to shutdown changes to partitions and the label storage area while the cxl_nvdimm device is actively proxying commands for LIBNVDIMM. Reviewed-by: Ben Widawsky Link: https://lore.kernel.org/r/163164579468.2830966.6980053377428474263.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams --- drivers/cxl/core/mbox.c | 5 +++++ drivers/cxl/core/memdev.c | 32 ++++++++++++++++++++++++++++++++ drivers/cxl/cxlmem.h | 4 ++++ drivers/cxl/pmem.c | 29 ++++++++++++++++++++++++++--- 4 files changed, 67 insertions(+), 3 deletions(-) (limited to 'drivers/cxl') diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c index 422999740649..82e79da195fa 100644 --- a/drivers/cxl/core/mbox.c +++ b/drivers/cxl/core/mbox.c @@ -221,6 +221,7 @@ static bool cxl_mem_raw_command_allowed(u16 opcode) * * %-EINVAL - Reserved fields or invalid values were used. * * %-ENOMEM - Input or output buffer wasn't sized properly. * * %-EPERM - Attempted to use a protected command. + * * %-EBUSY - Kernel has claimed exclusive access to this opcode * * The result of this command is a fully validated command in @out_cmd that is * safe to send to the hardware. 
@@ -296,6 +297,10 @@ static int cxl_validate_cmd_from_user(struct cxl_mem *cxlm, if (!test_bit(info->id, cxlm->enabled_cmds)) return -ENOTTY; + /* Check that the command is not claimed for exclusive kernel use */ + if (test_bit(info->id, cxlm->exclusive_cmds)) + return -EBUSY; + /* Check the input buffer is the expected size */ if (info->size_in >= 0 && info->size_in != send_cmd->in.size) return -ENOMEM; diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c index df2ba87238c2..bf1b04d00ff4 100644 --- a/drivers/cxl/core/memdev.c +++ b/drivers/cxl/core/memdev.c @@ -134,6 +134,38 @@ static const struct device_type cxl_memdev_type = { .groups = cxl_memdev_attribute_groups, }; +/** + * set_exclusive_cxl_commands() - atomically disable user cxl commands + * @cxlm: cxl_mem instance to modify + * @cmds: bitmap of commands to mark exclusive + * + * Grab the cxl_memdev_rwsem in write mode to flush in-flight + * invocations of the ioctl path and then disable future execution of + * commands with the command ids set in @cmds. + */ +void set_exclusive_cxl_commands(struct cxl_mem *cxlm, unsigned long *cmds) +{ + down_write(&cxl_memdev_rwsem); + bitmap_or(cxlm->exclusive_cmds, cxlm->exclusive_cmds, cmds, + CXL_MEM_COMMAND_ID_MAX); + up_write(&cxl_memdev_rwsem); +} +EXPORT_SYMBOL_GPL(set_exclusive_cxl_commands); + +/** + * clear_exclusive_cxl_commands() - atomically enable user cxl commands + * @cxlm: cxl_mem instance to modify + * @cmds: bitmap of commands to mark available for userspace + */ +void clear_exclusive_cxl_commands(struct cxl_mem *cxlm, unsigned long *cmds) +{ + down_write(&cxl_memdev_rwsem); + bitmap_andnot(cxlm->exclusive_cmds, cxlm->exclusive_cmds, cmds, + CXL_MEM_COMMAND_ID_MAX); + up_write(&cxl_memdev_rwsem); +} +EXPORT_SYMBOL_GPL(clear_exclusive_cxl_commands); + static void cxl_memdev_shutdown(struct device *dev) { struct cxl_memdev *cxlmd = to_cxl_memdev(dev); diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h index 8f59a89a0aab..373add570ef6 100644 --- a/drivers/cxl/cxlmem.h +++ b/drivers/cxl/cxlmem.h @@ -101,6 +101,7 @@ struct cxl_mbox_cmd { * @mbox_mutex: Mutex to synchronize mailbox access. * @firmware_version: Firmware version for the memory device. * @enabled_cmds: Hardware commands found enabled in CEL. 
+ * @exclusive_cmds: Commands that are kernel-internal only * @pmem_range: Active Persistent memory capacity configuration * @ram_range: Active Volatile memory capacity configuration * @total_bytes: sum of all possible capacities @@ -127,6 +128,7 @@ struct cxl_mem { struct mutex mbox_mutex; /* Protects device mailbox and firmware */ char firmware_version[0x10]; DECLARE_BITMAP(enabled_cmds, CXL_MEM_COMMAND_ID_MAX); + DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX); struct range pmem_range; struct range ram_range; @@ -200,4 +202,6 @@ int cxl_mem_identify(struct cxl_mem *cxlm); int cxl_mem_enumerate_cmds(struct cxl_mem *cxlm); int cxl_mem_create_range_info(struct cxl_mem *cxlm); struct cxl_mem *cxl_mem_create(struct device *dev); +void set_exclusive_cxl_commands(struct cxl_mem *cxlm, unsigned long *cmds); +void clear_exclusive_cxl_commands(struct cxl_mem *cxlm, unsigned long *cmds); #endif /* __CXL_MEM_H__ */ diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c index 9652c3ee41e7..5629289939af 100644 --- a/drivers/cxl/pmem.c +++ b/drivers/cxl/pmem.c @@ -16,6 +16,13 @@ */ static struct workqueue_struct *cxl_pmem_wq; +static __read_mostly DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX); + +static void clear_exclusive(void *cxlm) +{ + clear_exclusive_cxl_commands(cxlm, exclusive_cmds); +} + static void unregister_nvdimm(void *nvdimm) { nvdimm_delete(nvdimm); @@ -39,25 +46,37 @@ static struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(void) static int cxl_nvdimm_probe(struct device *dev) { struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev); + struct cxl_memdev *cxlmd = cxl_nvd->cxlmd; + struct cxl_mem *cxlm = cxlmd->cxlm; struct cxl_nvdimm_bridge *cxl_nvb; unsigned long flags = 0; struct nvdimm *nvdimm; - int rc = -ENXIO; + int rc; cxl_nvb = cxl_find_nvdimm_bridge(); if (!cxl_nvb) return -ENXIO; device_lock(&cxl_nvb->dev); - if (!cxl_nvb->nvdimm_bus) + if (!cxl_nvb->nvdimm_bus) { + rc = -ENXIO; + goto out; + } + + set_exclusive_cxl_commands(cxlm, exclusive_cmds); + rc = devm_add_action_or_reset(dev, clear_exclusive, cxlm); + if (rc) goto out; set_bit(NDD_LABELING, &flags); nvdimm = nvdimm_create(cxl_nvb->nvdimm_bus, cxl_nvd, NULL, flags, 0, 0, NULL); - if (!nvdimm) + if (!nvdimm) { + rc = -ENOMEM; goto out; + } + dev_set_drvdata(dev, nvdimm); rc = devm_add_action_or_reset(dev, unregister_nvdimm, nvdimm); out: device_unlock(&cxl_nvb->dev); @@ -194,6 +213,10 @@ static __init int cxl_pmem_init(void) { int rc; + set_bit(CXL_MEM_COMMAND_ID_SET_PARTITION_INFO, exclusive_cmds); + set_bit(CXL_MEM_COMMAND_ID_SET_SHUTDOWN_STATE, exclusive_cmds); + set_bit(CXL_MEM_COMMAND_ID_SET_LSA, exclusive_cmds); + cxl_pmem_wq = alloc_ordered_workqueue("cxl_pmem", 0); if (!cxl_pmem_wq) return -ENXIO; -- cgit v1.2.3 From 60b8f17215de1e6551fec4e942494c3832c3e98b Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 9 Sep 2021 15:08:15 -0700 Subject: cxl/pmem: Translate NVDIMM label commands to CXL label commands The LIBNVDIMM IOCTL UAPI calls back to the nvdimm-bus-provider to translate the Linux command payload to the device native command format. The LIBNVDIMM commands get-config-size, get-config-data, and set-config-data, map to the CXL memory device commands device-identify, get-lsa, and set-lsa. Recall that the label-storage-area (LSA) on an NVDIMM device arranges for the provisioning of namespaces. Additionally for CXL the LSA is used for provisioning regions as well. 
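Summarized, the translation wired up below is a three-entry mapping; a reference sketch, where the CXL_MBOX_OP_INVALID sentinel is this note's own convention for "answered from the cached Identify data, no mailbox command issued":

/* LIBNVDIMM command -> CXL mailbox opcode, per the dispatch below */
static const struct {
	unsigned int nd_cmd;		/* from <linux/ndctl.h> */
	u16 cxl_op;
} nd_to_cxl[] = {
	{ ND_CMD_GET_CONFIG_SIZE, CXL_MBOX_OP_INVALID }, /* Identify cache */
	{ ND_CMD_GET_CONFIG_DATA, CXL_MBOX_OP_GET_LSA },
	{ ND_CMD_SET_CONFIG_DATA, CXL_MBOX_OP_SET_LSA },
};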
The data from device-identify is already cached in the 'struct cxl_mem' instance associated with @cxl_nvd, so that payload return is simply crafted and no CXL command is issued. The conversion for get-lsa is straightforward, but the conversion for set-lsa requires an allocation to append the set-lsa header in front of the payload. Reviewed-by: Ben Widawsky Reviewed-by: Jonathan Cameron Link: https://lore.kernel.org/r/163122524923.2534512.9431316965424264864.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams --- drivers/cxl/pmem.c | 128 +++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 124 insertions(+), 4 deletions(-) (limited to 'drivers/cxl') diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c index 5629289939af..bc1466b0e999 100644 --- a/drivers/cxl/pmem.c +++ b/drivers/cxl/pmem.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright(c) 2021 Intel Corporation. All rights reserved. */ #include +#include #include #include #include @@ -47,9 +48,9 @@ static int cxl_nvdimm_probe(struct device *dev) { struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev); struct cxl_memdev *cxlmd = cxl_nvd->cxlmd; + unsigned long flags = 0, cmd_mask = 0; struct cxl_mem *cxlm = cxlmd->cxlm; struct cxl_nvdimm_bridge *cxl_nvb; - unsigned long flags = 0; struct nvdimm *nvdimm; int rc; @@ -69,8 +70,11 @@ static int cxl_nvdimm_probe(struct device *dev) goto out; set_bit(NDD_LABELING, &flags); - nvdimm = nvdimm_create(cxl_nvb->nvdimm_bus, cxl_nvd, NULL, flags, 0, 0, - NULL); + set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask); + set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask); + set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask); + nvdimm = nvdimm_create(cxl_nvb->nvdimm_bus, cxl_nvd, NULL, flags, + cmd_mask, 0, NULL); if (!nvdimm) { rc = -ENOMEM; goto out; @@ -91,11 +95,127 @@ static struct cxl_driver cxl_nvdimm_driver = { .id = CXL_DEVICE_NVDIMM, }; +static int cxl_pmem_get_config_size(struct cxl_mem *cxlm, + struct nd_cmd_get_config_size *cmd, + unsigned int buf_len) +{ + if (sizeof(*cmd) > buf_len) + return -EINVAL; + + *cmd = (struct nd_cmd_get_config_size) { + .config_size = cxlm->lsa_size, + .max_xfer = cxlm->payload_size, + }; + + return 0; +} + +static int cxl_pmem_get_config_data(struct cxl_mem *cxlm, + struct nd_cmd_get_config_data_hdr *cmd, + unsigned int buf_len) +{ + struct cxl_mbox_get_lsa { + u32 offset; + u32 length; + } get_lsa; + int rc; + + if (sizeof(*cmd) > buf_len) + return -EINVAL; + if (struct_size(cmd, out_buf, cmd->in_length) > buf_len) + return -EINVAL; + + get_lsa = (struct cxl_mbox_get_lsa) { + .offset = cmd->in_offset, + .length = cmd->in_length, + }; + + rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_LSA, &get_lsa, + sizeof(get_lsa), cmd->out_buf, + cmd->in_length); + cmd->status = 0; + + return rc; +} + +static int cxl_pmem_set_config_data(struct cxl_mem *cxlm, + struct nd_cmd_set_config_hdr *cmd, + unsigned int buf_len) +{ + struct cxl_mbox_set_lsa { + u32 offset; + u32 reserved; + u8 data[]; + } *set_lsa; + int rc; + + if (sizeof(*cmd) > buf_len) + return -EINVAL; + + /* 4-byte status follows the input data in the payload */ + if (struct_size(cmd, in_buf, cmd->in_length) + 4 > buf_len) + return -EINVAL; + + set_lsa = + kvzalloc(struct_size(set_lsa, data, cmd->in_length), GFP_KERNEL); + if (!set_lsa) + return -ENOMEM; + + *set_lsa = (struct cxl_mbox_set_lsa) { + .offset = cmd->in_offset, + }; + memcpy(set_lsa->data, cmd->in_buf, cmd->in_length); + + rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_SET_LSA, set_lsa, + struct_size(set_lsa, data, 
cmd->in_length), + NULL, 0); + + /* + * Set "firmware" status (4-packed bytes at the end of the input + * payload. + */ + put_unaligned(0, (u32 *) &cmd->in_buf[cmd->in_length]); + kvfree(set_lsa); + + return rc; +} + +static int cxl_pmem_nvdimm_ctl(struct nvdimm *nvdimm, unsigned int cmd, + void *buf, unsigned int buf_len) +{ + struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm); + unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm); + struct cxl_memdev *cxlmd = cxl_nvd->cxlmd; + struct cxl_mem *cxlm = cxlmd->cxlm; + + if (!test_bit(cmd, &cmd_mask)) + return -ENOTTY; + + switch (cmd) { + case ND_CMD_GET_CONFIG_SIZE: + return cxl_pmem_get_config_size(cxlm, buf, buf_len); + case ND_CMD_GET_CONFIG_DATA: + return cxl_pmem_get_config_data(cxlm, buf, buf_len); + case ND_CMD_SET_CONFIG_DATA: + return cxl_pmem_set_config_data(cxlm, buf, buf_len); + default: + return -ENOTTY; + } +} + static int cxl_pmem_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc) { - return -ENOTTY; + /* + * No firmware response to translate, let the transport error + * code take precedence. + */ + *cmd_rc = 0; + + if (!nvdimm) + return -ENOTTY; + return cxl_pmem_nvdimm_ctl(nvdimm, cmd, buf, buf_len); } static bool online_nvdimm_bus(struct cxl_nvdimm_bridge *cxl_nvb) -- cgit v1.2.3 From 2e52b6256b9af23c5a881f56b5b5e7f5cb9b8b4b Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 14 Sep 2021 12:08:40 -0700 Subject: cxl/pmem: Add support for multiple nvdimm-bridge objects In preparation for a mocked unit test environment for CXL objects, allow for multiple unique nvdimm-bridge objects. For now, just allow multiple bridges to be registered. Later, when there are multiple present, further updates are needed to cxl_find_nvdimm_bridge() to identify which bridge is associated with which CXL hierarchy for nvdimm registration. Note that this does change the kernel device-name for the bridge object. User space should not have any attachment to the device name at this point as it is still early days in the CXL driver development. Acked-by: Ben Widawsky Reviewed-by: Jonathan Cameron Link: https://lore.kernel.org/r/163164647007.2831228.2150246954620721526.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams --- drivers/cxl/core/pmem.c | 32 +++++++++++++++++++++++++++++++- drivers/cxl/cxl.h | 2 ++ drivers/cxl/pmem.c | 15 --------------- 3 files changed, 33 insertions(+), 16 deletions(-) (limited to 'drivers/cxl') diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c index d24570f5b8ba..9e56be3994f1 100644 --- a/drivers/cxl/core/pmem.c +++ b/drivers/cxl/core/pmem.c @@ -2,6 +2,7 @@ /* Copyright(c) 2020 Intel Corporation. */ #include #include +#include #include #include #include "core.h" @@ -20,10 +21,13 @@ * operations, for example, namespace label access commands. 
*/ +static DEFINE_IDA(cxl_nvdimm_bridge_ida); + static void cxl_nvdimm_bridge_release(struct device *dev) { struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev); + ida_free(&cxl_nvdimm_bridge_ida, cxl_nvb->id); kfree(cxl_nvb); } @@ -47,16 +51,38 @@ struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev) } EXPORT_SYMBOL_GPL(to_cxl_nvdimm_bridge); +static int match_nvdimm_bridge(struct device *dev, const void *data) +{ + return dev->type == &cxl_nvdimm_bridge_type; +} + +struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(void) +{ + struct device *dev; + + dev = bus_find_device(&cxl_bus_type, NULL, NULL, match_nvdimm_bridge); + if (!dev) + return NULL; + return to_cxl_nvdimm_bridge(dev); +} +EXPORT_SYMBOL_GPL(cxl_find_nvdimm_bridge); + static struct cxl_nvdimm_bridge * cxl_nvdimm_bridge_alloc(struct cxl_port *port) { struct cxl_nvdimm_bridge *cxl_nvb; struct device *dev; + int rc; cxl_nvb = kzalloc(sizeof(*cxl_nvb), GFP_KERNEL); if (!cxl_nvb) return ERR_PTR(-ENOMEM); + rc = ida_alloc(&cxl_nvdimm_bridge_ida, GFP_KERNEL); + if (rc < 0) + goto err; + cxl_nvb->id = rc; + dev = &cxl_nvb->dev; cxl_nvb->port = port; cxl_nvb->state = CXL_NVB_NEW; @@ -67,6 +93,10 @@ cxl_nvdimm_bridge_alloc(struct cxl_port *port) dev->type = &cxl_nvdimm_bridge_type; return cxl_nvb; + +err: + kfree(cxl_nvb); + return ERR_PTR(rc); } static void unregister_nvb(void *_cxl_nvb) @@ -119,7 +149,7 @@ struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host, return cxl_nvb; dev = &cxl_nvb->dev; - rc = dev_set_name(dev, "nvdimm-bridge"); + rc = dev_set_name(dev, "nvdimm-bridge%d", cxl_nvb->id); if (rc) goto err; diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index 53927f9fa77e..1b2e816e061e 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -211,6 +211,7 @@ enum cxl_nvdimm_brige_state { }; struct cxl_nvdimm_bridge { + int id; struct device dev; struct cxl_port *port; struct nvdimm_bus *nvdimm_bus; @@ -323,4 +324,5 @@ struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host, struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev); bool is_cxl_nvdimm(struct device *dev); int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd); +struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(void); #endif /* __CXL_H__ */ diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c index bc1466b0e999..5ccf9aa6b3ae 100644 --- a/drivers/cxl/pmem.c +++ b/drivers/cxl/pmem.c @@ -29,21 +29,6 @@ static void unregister_nvdimm(void *nvdimm) nvdimm_delete(nvdimm); } -static int match_nvdimm_bridge(struct device *dev, const void *data) -{ - return strcmp(dev_name(dev), "nvdimm-bridge") == 0; -} - -static struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(void) -{ - struct device *dev; - - dev = bus_find_device(&cxl_bus_type, NULL, NULL, match_nvdimm_bridge); - if (!dev) - return NULL; - return to_cxl_nvdimm_bridge(dev); -} - static int cxl_nvdimm_probe(struct device *dev) { struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev); -- cgit v1.2.3 From 67dcdd4d3b832ace448f454c47426f657d648fc5 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 14 Sep 2021 12:14:22 -0700 Subject: tools/testing/cxl: Introduce a mocked-up CXL port hierarchy Create an environment for CXL plumbing unit tests. Especially when it comes to an algorithm for HDM Decoder (Host-managed Device Memory Decoder) programming, the availability of an in-kernel-tree emulation environment for CXL configuration complexity and corner cases speeds development and deters regressions. 
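The mocking relies on the linker's --wrap option (see the ldflags entries in the Kbuild below): every call to a listed symbol in the linked objects is diverted to a __wrap_ variant, which can consult the registered mock ops and fall back to the __real_ symbol. A generic sketch of the mechanism; want_mock_table() and get_emulated_table() are hypothetical stand-ins:

/* linked with: ldflags-y += --wrap=acpi_get_table */
static bool want_mock_table(char *signature, u32 instance);
static struct acpi_table_header *get_emulated_table(char *signature);

acpi_status __real_acpi_get_table(char *signature, u32 instance,
				  struct acpi_table_header **out_table);

acpi_status __wrap_acpi_get_table(char *signature, u32 instance,
				  struct acpi_table_header **out_table)
{
	/* cxl_test keys off the non-canonical U32_MAX CEDT instance */
	if (want_mock_table(signature, instance)) {
		*out_table = get_emulated_table(signature);
		return AE_OK;
	}
	return __real_acpi_get_table(signature, instance, out_table);
}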
The approach taken mirrors what was done for tools/testing/nvdimm/. I.e. an external module, cxl_test.ko built out of the tools/testing/cxl/ directory, provides mock implementations of kernel APIs and kernel objects to simulate a real world device hierarchy. One feedback for the tools/testing/nvdimm/ proposal was "why not do this in QEMU?". In fact, the CXL development community has developed a QEMU model for CXL [1]. However, there are a few blocking issues that keep QEMU from being a tight fit for topology + provisioning unit tests: 1/ The QEMU community has yet to show interest in merging any of this support that has had patches on the list since November 2020. So, testing CXL to date involves building custom QEMU with out-of-tree patches. 2/ CXL mechanisms like cross-host-bridge interleave do not have a clear path to be emulated by QEMU without major infrastructure work. This is easier to achieve with the alloc_mock_res() approach taken in this patch to shortcut-define emulated system physical address ranges with interleave behavior. The QEMU enabling has been critical to get the driver off the ground, and may still move forward, but it does not address the ongoing needs of a regression testing environment and test driven development. This patch adds an ACPI CXL Platform definition with emulated CXL multi-ported host-bridges. A follow on patch adds emulated memory expander devices. Acked-by: Ben Widawsky Reported-by: Vishal Verma Link: https://lore.kernel.org/r/20210202005948.241655-1-ben.widawsky@intel.com [1] Link: https://lore.kernel.org/r/163164680798.2831381.838684634806668012.stgit@dwillia2-desk3.amr.corp.intel.com Reviewed-by: Jonathan Cameron Signed-off-by: Dan Williams --- drivers/cxl/acpi.c | 36 +-- drivers/cxl/cxl.h | 16 ++ tools/testing/cxl/Kbuild | 36 +++ tools/testing/cxl/config_check.c | 13 + tools/testing/cxl/mock_acpi.c | 109 +++++++++ tools/testing/cxl/test/Kbuild | 6 + tools/testing/cxl/test/cxl.c | 509 +++++++++++++++++++++++++++++++++++++++ tools/testing/cxl/test/mock.c | 171 +++++++++++++ tools/testing/cxl/test/mock.h | 27 +++ 9 files changed, 908 insertions(+), 15 deletions(-) create mode 100644 tools/testing/cxl/Kbuild create mode 100644 tools/testing/cxl/config_check.c create mode 100644 tools/testing/cxl/mock_acpi.c create mode 100644 tools/testing/cxl/test/Kbuild create mode 100644 tools/testing/cxl/test/cxl.c create mode 100644 tools/testing/cxl/test/mock.c create mode 100644 tools/testing/cxl/test/mock.h (limited to 'drivers/cxl') diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c index 54e9d4d2cf5f..32775d1ac4b3 100644 --- a/drivers/cxl/acpi.c +++ b/drivers/cxl/acpi.c @@ -182,15 +182,7 @@ static resource_size_t get_chbcr(struct acpi_cedt_chbs *chbs) return IS_ERR(chbs) ? 
CXL_RESOURCE_NONE : chbs->base; } -struct cxl_walk_context { - struct device *dev; - struct pci_bus *root; - struct cxl_port *port; - int error; - int count; -}; - -static int match_add_root_ports(struct pci_dev *pdev, void *data) +__mock int match_add_root_ports(struct pci_dev *pdev, void *data) { struct cxl_walk_context *ctx = data; struct pci_bus *root_bus = ctx->root; @@ -239,7 +231,8 @@ static struct cxl_dport *find_dport_by_dev(struct cxl_port *port, struct device return NULL; } -static struct acpi_device *to_cxl_host_bridge(struct device *dev) +__mock struct acpi_device *to_cxl_host_bridge(struct device *host, + struct device *dev) { struct acpi_device *adev = to_acpi_device(dev); @@ -257,9 +250,9 @@ static struct acpi_device *to_cxl_host_bridge(struct device *dev) */ static int add_host_bridge_uport(struct device *match, void *arg) { - struct acpi_device *bridge = to_cxl_host_bridge(match); struct cxl_port *root_port = arg; struct device *host = root_port->dev.parent; + struct acpi_device *bridge = to_cxl_host_bridge(host, match); struct acpi_pci_root *pci_root; struct cxl_walk_context ctx; struct cxl_decoder *cxld; @@ -323,7 +316,7 @@ static int add_host_bridge_dport(struct device *match, void *arg) struct acpi_cedt_chbs *chbs; struct cxl_port *root_port = arg; struct device *host = root_port->dev.parent; - struct acpi_device *bridge = to_cxl_host_bridge(match); + struct acpi_device *bridge = to_cxl_host_bridge(host, match); if (!bridge) return 0; @@ -375,6 +368,17 @@ static int add_root_nvdimm_bridge(struct device *match, void *data) return 1; } +static u32 cedt_instance(struct platform_device *pdev) +{ + const bool *native_acpi0017 = acpi_device_get_match_data(&pdev->dev); + + if (native_acpi0017 && *native_acpi0017) + return 0; + + /* for cxl_test request a non-canonical instance */ + return U32_MAX; +} + static int cxl_acpi_probe(struct platform_device *pdev) { int rc; @@ -388,7 +392,7 @@ static int cxl_acpi_probe(struct platform_device *pdev) return PTR_ERR(root_port); dev_dbg(host, "add: %s\n", dev_name(&root_port->dev)); - status = acpi_get_table(ACPI_SIG_CEDT, 0, &acpi_cedt); + status = acpi_get_table(ACPI_SIG_CEDT, cedt_instance(pdev), &acpi_cedt); if (ACPI_FAILURE(status)) return -ENXIO; @@ -419,9 +423,11 @@ out: return 0; } +static bool native_acpi0017 = true; + static const struct acpi_device_id cxl_acpi_ids[] = { - { "ACPI0017", 0 }, - { "", 0 }, + { "ACPI0017", (unsigned long) &native_acpi0017 }, + { }, }; MODULE_DEVICE_TABLE(acpi, cxl_acpi_ids); diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index 1b2e816e061e..c5152718267e 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -226,6 +226,14 @@ struct cxl_nvdimm { struct nvdimm *nvdimm; }; +struct cxl_walk_context { + struct device *dev; + struct pci_bus *root; + struct cxl_port *port; + int error; + int count; +}; + /** * struct cxl_port - logical collection of upstream port devices and * downstream port devices to construct a CXL memory @@ -325,4 +333,12 @@ struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev); bool is_cxl_nvdimm(struct device *dev); int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd); struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(void); + +/* + * Unit test builds overrides this to __weak, find the 'strong' version + * of these symbols in tools/testing/cxl/. 
+ */ +#ifndef __mock +#define __mock static +#endif #endif /* __CXL_H__ */ diff --git a/tools/testing/cxl/Kbuild b/tools/testing/cxl/Kbuild new file mode 100644 index 000000000000..63a4a07e71c4 --- /dev/null +++ b/tools/testing/cxl/Kbuild @@ -0,0 +1,36 @@ +# SPDX-License-Identifier: GPL-2.0 +ldflags-y += --wrap=is_acpi_device_node +ldflags-y += --wrap=acpi_get_table +ldflags-y += --wrap=acpi_put_table +ldflags-y += --wrap=acpi_evaluate_integer +ldflags-y += --wrap=acpi_pci_find_root +ldflags-y += --wrap=pci_walk_bus +ldflags-y += --wrap=nvdimm_bus_register + +DRIVERS := ../../../drivers +CXL_SRC := $(DRIVERS)/cxl +CXL_CORE_SRC := $(DRIVERS)/cxl/core +ccflags-y := -I$(srctree)/drivers/cxl/ +ccflags-y += -D__mock=__weak + +obj-m += cxl_acpi.o + +cxl_acpi-y := $(CXL_SRC)/acpi.o +cxl_acpi-y += mock_acpi.o +cxl_acpi-y += config_check.o + +obj-m += cxl_pmem.o + +cxl_pmem-y := $(CXL_SRC)/pmem.o +cxl_pmem-y += config_check.o + +obj-m += cxl_core.o + +cxl_core-y := $(CXL_CORE_SRC)/bus.o +cxl_core-y += $(CXL_CORE_SRC)/pmem.o +cxl_core-y += $(CXL_CORE_SRC)/regs.o +cxl_core-y += $(CXL_CORE_SRC)/memdev.o +cxl_core-y += $(CXL_CORE_SRC)/mbox.o +cxl_core-y += config_check.o + +obj-m += test/ diff --git a/tools/testing/cxl/config_check.c b/tools/testing/cxl/config_check.c new file mode 100644 index 000000000000..de5e5b3652fd --- /dev/null +++ b/tools/testing/cxl/config_check.c @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0 +#include + +void check(void) +{ + /* + * These kconfig symbols must be set to "m" for cxl_test to load + * and operate. + */ + BUILD_BUG_ON(!IS_MODULE(CONFIG_CXL_BUS)); + BUILD_BUG_ON(!IS_MODULE(CONFIG_CXL_ACPI)); + BUILD_BUG_ON(!IS_MODULE(CONFIG_CXL_PMEM)); +} diff --git a/tools/testing/cxl/mock_acpi.c b/tools/testing/cxl/mock_acpi.c new file mode 100644 index 000000000000..4c8a493ace56 --- /dev/null +++ b/tools/testing/cxl/mock_acpi.c @@ -0,0 +1,109 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2021 Intel Corporation. All rights reserved. 
*/ + +#include +#include +#include +#include +#include +#include "test/mock.h" + +struct acpi_device *to_cxl_host_bridge(struct device *host, struct device *dev) +{ + int index; + struct acpi_device *adev, *found = NULL; + struct cxl_mock_ops *ops = get_cxl_mock_ops(&index); + + if (ops && ops->is_mock_bridge(dev)) { + found = ACPI_COMPANION(dev); + goto out; + } + + if (dev->bus == &platform_bus_type) + goto out; + + adev = to_acpi_device(dev); + if (!acpi_pci_find_root(adev->handle)) + goto out; + + if (strcmp(acpi_device_hid(adev), "ACPI0016") == 0) { + found = adev; + dev_dbg(host, "found host bridge %s\n", dev_name(&adev->dev)); + } +out: + put_cxl_mock_ops(index); + return found; +} + +static int match_add_root_port(struct pci_dev *pdev, void *data) +{ + struct cxl_walk_context *ctx = data; + struct pci_bus *root_bus = ctx->root; + struct cxl_port *port = ctx->port; + int type = pci_pcie_type(pdev); + struct device *dev = ctx->dev; + u32 lnkcap, port_num; + int rc; + + if (pdev->bus != root_bus) + return 0; + if (!pci_is_pcie(pdev)) + return 0; + if (type != PCI_EXP_TYPE_ROOT_PORT) + return 0; + if (pci_read_config_dword(pdev, pci_pcie_cap(pdev) + PCI_EXP_LNKCAP, + &lnkcap) != PCIBIOS_SUCCESSFUL) + return 0; + + /* TODO walk DVSEC to find component register base */ + port_num = FIELD_GET(PCI_EXP_LNKCAP_PN, lnkcap); + rc = cxl_add_dport(port, &pdev->dev, port_num, CXL_RESOURCE_NONE); + if (rc) { + dev_err(dev, "failed to add dport: %s (%d)\n", + dev_name(&pdev->dev), rc); + ctx->error = rc; + return rc; + } + ctx->count++; + + dev_dbg(dev, "add dport%d: %s\n", port_num, dev_name(&pdev->dev)); + + return 0; +} + +static int mock_add_root_port(struct platform_device *pdev, void *data) +{ + struct cxl_walk_context *ctx = data; + struct cxl_port *port = ctx->port; + struct device *dev = ctx->dev; + int rc; + + rc = cxl_add_dport(port, &pdev->dev, pdev->id, CXL_RESOURCE_NONE); + if (rc) { + dev_err(dev, "failed to add dport: %s (%d)\n", + dev_name(&pdev->dev), rc); + ctx->error = rc; + return rc; + } + ctx->count++; + + dev_dbg(dev, "add dport%d: %s\n", pdev->id, dev_name(&pdev->dev)); + + return 0; +} + +int match_add_root_ports(struct pci_dev *dev, void *data) +{ + int index, rc; + struct cxl_mock_ops *ops = get_cxl_mock_ops(&index); + struct platform_device *pdev = (struct platform_device *) dev; + + if (ops && ops->is_mock_port(pdev)) + rc = mock_add_root_port(pdev, data); + else + rc = match_add_root_port(dev, data); + + put_cxl_mock_ops(index); + + return rc; +} diff --git a/tools/testing/cxl/test/Kbuild b/tools/testing/cxl/test/Kbuild new file mode 100644 index 000000000000..7de4ddecfd21 --- /dev/null +++ b/tools/testing/cxl/test/Kbuild @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-m += cxl_test.o +obj-m += cxl_mock.o + +cxl_test-y := cxl.o +cxl_mock-y := mock.o diff --git a/tools/testing/cxl/test/cxl.c b/tools/testing/cxl/test/cxl.c new file mode 100644 index 000000000000..1c47b34244a4 --- /dev/null +++ b/tools/testing/cxl/test/cxl.c @@ -0,0 +1,509 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright(c) 2021 Intel Corporation. All rights reserved. 
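The mock overrides above lean on two link-time mechanisms wired up in the Kbuild file: ld's --wrap option for exported symbols (handled in test/mock.c further below), and the __mock annotation, which compiles to 'static' in production builds but to '__weak' under -D__mock=__weak so that a strong definition in mock_acpi.c wins at module link time. A minimal sketch of the __weak-override half, with a hypothetical helper foo() standing in for functions like to_cxl_host_bridge():

/* drivers/cxl/ side: static in production, __weak in the test build */
__mock int foo(void)
{
        return 0;       /* default behavior used by the shipping driver */
}

/* tools/testing/cxl/ side: only linked into the unit test module */
int foo(void)
{
        return 1;       /* strong definition overrides the __weak default */
}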
+ +#include +#include +#include +#include +#include +#include +#include +#include "mock.h" + +#define NR_CXL_HOST_BRIDGES 4 +#define NR_CXL_ROOT_PORTS 2 + +static struct platform_device *cxl_acpi; +static struct platform_device *cxl_host_bridge[NR_CXL_HOST_BRIDGES]; +static struct platform_device + *cxl_root_port[NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS]; + +static struct acpi_device acpi0017_mock; +static struct acpi_device host_bridge[NR_CXL_HOST_BRIDGES] = { + [0] = { + .handle = &host_bridge[0], + }, + [1] = { + .handle = &host_bridge[1], + }, + [2] = { + .handle = &host_bridge[2], + }, + [3] = { + .handle = &host_bridge[3], + }, +}; + +static bool is_mock_dev(struct device *dev) +{ + if (dev == &cxl_acpi->dev) + return true; + return false; +} + +static bool is_mock_adev(struct acpi_device *adev) +{ + int i; + + if (adev == &acpi0017_mock) + return true; + + for (i = 0; i < ARRAY_SIZE(host_bridge); i++) + if (adev == &host_bridge[i]) + return true; + + return false; +} + +static struct { + struct acpi_table_cedt cedt; + struct acpi_cedt_chbs chbs[NR_CXL_HOST_BRIDGES]; + struct { + struct acpi_cedt_cfmws cfmws; + u32 target[1]; + } cfmws0; + struct { + struct acpi_cedt_cfmws cfmws; + u32 target[4]; + } cfmws1; + struct { + struct acpi_cedt_cfmws cfmws; + u32 target[1]; + } cfmws2; + struct { + struct acpi_cedt_cfmws cfmws; + u32 target[4]; + } cfmws3; +} __packed mock_cedt = { + .cedt = { + .header = { + .signature = "CEDT", + .length = sizeof(mock_cedt), + .revision = 1, + }, + }, + .chbs[0] = { + .header = { + .type = ACPI_CEDT_TYPE_CHBS, + .length = sizeof(mock_cedt.chbs[0]), + }, + .uid = 0, + .cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20, + }, + .chbs[1] = { + .header = { + .type = ACPI_CEDT_TYPE_CHBS, + .length = sizeof(mock_cedt.chbs[0]), + }, + .uid = 1, + .cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20, + }, + .chbs[2] = { + .header = { + .type = ACPI_CEDT_TYPE_CHBS, + .length = sizeof(mock_cedt.chbs[0]), + }, + .uid = 2, + .cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20, + }, + .chbs[3] = { + .header = { + .type = ACPI_CEDT_TYPE_CHBS, + .length = sizeof(mock_cedt.chbs[0]), + }, + .uid = 3, + .cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20, + }, + .cfmws0 = { + .cfmws = { + .header = { + .type = ACPI_CEDT_TYPE_CFMWS, + .length = sizeof(mock_cedt.cfmws0), + }, + .interleave_ways = 0, + .granularity = 4, + .restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 | + ACPI_CEDT_CFMWS_RESTRICT_VOLATILE, + .qtg_id = 0, + .window_size = SZ_256M, + }, + .target = { 0 }, + }, + .cfmws1 = { + .cfmws = { + .header = { + .type = ACPI_CEDT_TYPE_CFMWS, + .length = sizeof(mock_cedt.cfmws1), + }, + .interleave_ways = 2, + .granularity = 4, + .restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 | + ACPI_CEDT_CFMWS_RESTRICT_VOLATILE, + .qtg_id = 1, + .window_size = SZ_256M * 4, + }, + .target = { 0, 1, 2, 3 }, + }, + .cfmws2 = { + .cfmws = { + .header = { + .type = ACPI_CEDT_TYPE_CFMWS, + .length = sizeof(mock_cedt.cfmws2), + }, + .interleave_ways = 0, + .granularity = 4, + .restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 | + ACPI_CEDT_CFMWS_RESTRICT_PMEM, + .qtg_id = 2, + .window_size = SZ_256M, + }, + .target = { 0 }, + }, + .cfmws3 = { + .cfmws = { + .header = { + .type = ACPI_CEDT_TYPE_CFMWS, + .length = sizeof(mock_cedt.cfmws3), + }, + .interleave_ways = 2, + .granularity = 4, + .restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 | + ACPI_CEDT_CFMWS_RESTRICT_PMEM, + .qtg_id = 3, + .window_size = SZ_256M * 4, + }, + .target = { 0, 1, 2, 3 }, + }, +}; + +struct cxl_mock_res { + struct list_head list; + struct range range; +}; + 
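Note that the CFMWS entries above carry their fields in the CXL spec's encoded form: interleave_ways is a power-of-two exponent (0 decodes to one way, 2 to four ways, hence the four-entry target arrays of cfmws1 and cfmws3), and granularity is an exponent biased by 256 bytes (4 decodes to 4KiB). A decode sketch, assuming the standard CXL 2.0 encodings that the CFMWS_INTERLEAVE_* macros in drivers/cxl/acpi.c are understood to implement:

/* illustrative helpers, assuming CXL 2.0 CFMWS field encodings */
static unsigned int cfmws_ways(const struct acpi_cedt_cfmws *cfmws)
{
        return 1U << cfmws->interleave_ways;    /* 0 -> x1, 2 -> x4 */
}

static unsigned long long cfmws_granularity(const struct acpi_cedt_cfmws *cfmws)
{
        return 256ULL << cfmws->granularity;    /* 4 -> 4096 bytes */
}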
+static LIST_HEAD(mock_res); +static DEFINE_MUTEX(mock_res_lock); +static struct gen_pool *cxl_mock_pool; + +static void depopulate_all_mock_resources(void) +{ + struct cxl_mock_res *res, *_res; + + mutex_lock(&mock_res_lock); + list_for_each_entry_safe(res, _res, &mock_res, list) { + gen_pool_free(cxl_mock_pool, res->range.start, + range_len(&res->range)); + list_del(&res->list); + kfree(res); + } + mutex_unlock(&mock_res_lock); +} + +static struct cxl_mock_res *alloc_mock_res(resource_size_t size) +{ + struct cxl_mock_res *res = kzalloc(sizeof(*res), GFP_KERNEL); + struct genpool_data_align data = { + .align = SZ_256M, + }; + unsigned long phys; + + INIT_LIST_HEAD(&res->list); + phys = gen_pool_alloc_algo(cxl_mock_pool, size, + gen_pool_first_fit_align, &data); + if (!phys) + return NULL; + + res->range = (struct range) { + .start = phys, + .end = phys + size - 1, + }; + mutex_lock(&mock_res_lock); + list_add(&res->list, &mock_res); + mutex_unlock(&mock_res_lock); + + return res; +} + +static int populate_cedt(void) +{ + struct acpi_cedt_cfmws *cfmws[4] = { + [0] = &mock_cedt.cfmws0.cfmws, + [1] = &mock_cedt.cfmws1.cfmws, + [2] = &mock_cedt.cfmws2.cfmws, + [3] = &mock_cedt.cfmws3.cfmws, + }; + struct cxl_mock_res *res; + int i; + + for (i = 0; i < ARRAY_SIZE(mock_cedt.chbs); i++) { + struct acpi_cedt_chbs *chbs = &mock_cedt.chbs[i]; + resource_size_t size; + + if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL20) + size = ACPI_CEDT_CHBS_LENGTH_CXL20; + else + size = ACPI_CEDT_CHBS_LENGTH_CXL11; + + res = alloc_mock_res(size); + if (!res) + return -ENOMEM; + chbs->base = res->range.start; + chbs->length = size; + } + + for (i = 0; i < ARRAY_SIZE(cfmws); i++) { + struct acpi_cedt_cfmws *window = cfmws[i]; + + res = alloc_mock_res(window->window_size); + if (!res) + return -ENOMEM; + window->base_hpa = res->range.start; + } + + return 0; +} + +static acpi_status mock_acpi_get_table(char *signature, u32 instance, + struct acpi_table_header **out_table) +{ + if (instance < U32_MAX || strcmp(signature, ACPI_SIG_CEDT) != 0) + return acpi_get_table(signature, instance, out_table); + + *out_table = (struct acpi_table_header *) &mock_cedt; + return AE_OK; +} + +static void mock_acpi_put_table(struct acpi_table_header *table) +{ + if (table == (struct acpi_table_header *) &mock_cedt) + return; + acpi_put_table(table); +} + +static bool is_mock_bridge(struct device *dev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++) + if (dev == &cxl_host_bridge[i]->dev) + return true; + + return false; +} + +static int host_bridge_index(struct acpi_device *adev) +{ + return adev - host_bridge; +} + +static struct acpi_device *find_host_bridge(acpi_handle handle) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(host_bridge); i++) + if (handle == host_bridge[i].handle) + return &host_bridge[i]; + return NULL; +} + +static acpi_status +mock_acpi_evaluate_integer(acpi_handle handle, acpi_string pathname, + struct acpi_object_list *arguments, + unsigned long long *data) +{ + struct acpi_device *adev = find_host_bridge(handle); + + if (!adev || strcmp(pathname, METHOD_NAME__UID) != 0) + return acpi_evaluate_integer(handle, pathname, arguments, data); + + *data = host_bridge_index(adev); + return AE_OK; +} + +static struct pci_bus mock_pci_bus[NR_CXL_HOST_BRIDGES]; +static struct acpi_pci_root mock_pci_root[NR_CXL_HOST_BRIDGES] = { + [0] = { + .bus = &mock_pci_bus[0], + }, + [1] = { + .bus = &mock_pci_bus[1], + }, + [2] = { + .bus = &mock_pci_bus[2], + }, + [3] = { + .bus = &mock_pci_bus[3], + }, +}; + 
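alloc_mock_res() above carves fake host physical address ranges out of a genalloc pool that cxl_test_init() (later in this file) seeds with a 64GB span starting at 512GB, keeping the emulated CHBS and CFMWS windows clear of any real memory. A stand-alone sketch of the same genalloc pattern; the names and sizes here are illustrative only:

#include <linux/genalloc.h>
#include <linux/log2.h>
#include <linux/numa.h>
#include <linux/sizes.h>

static struct gen_pool *example_pool;

static int example_pool_init(void)
{
        /* minimum allocation granule of 2MB, no NUMA affinity */
        example_pool = gen_pool_create(ilog2(SZ_2M), NUMA_NO_NODE);
        if (!example_pool)
                return -ENOMEM;

        /* manage a fake physical range: [512GB, 512GB + 64GB) */
        return gen_pool_add(example_pool, SZ_1G * 512ULL, SZ_1G * 64ULL,
                            NUMA_NO_NODE);
}

static unsigned long example_alloc(size_t size)
{
        struct genpool_data_align data = {
                .align = SZ_256M,       /* match the mock CXL window alignment */
        };

        return gen_pool_alloc_algo(example_pool, size,
                                   gen_pool_first_fit_align, &data);
}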
+static struct platform_device *mock_cxl_root_port(struct pci_bus *bus, int index) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(mock_pci_bus); i++) + if (bus == &mock_pci_bus[i]) + return cxl_root_port[index + i * NR_CXL_ROOT_PORTS]; + return NULL; +} + +static bool is_mock_port(struct platform_device *pdev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(cxl_root_port); i++) + if (pdev == cxl_root_port[i]) + return true; + return false; +} + +static bool is_mock_bus(struct pci_bus *bus) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(mock_pci_bus); i++) + if (bus == &mock_pci_bus[i]) + return true; + return false; +} + +static struct acpi_pci_root *mock_acpi_pci_find_root(acpi_handle handle) +{ + struct acpi_device *adev = find_host_bridge(handle); + + if (!adev) + return acpi_pci_find_root(handle); + return &mock_pci_root[host_bridge_index(adev)]; +} + +static struct cxl_mock_ops cxl_mock_ops = { + .is_mock_adev = is_mock_adev, + .is_mock_bridge = is_mock_bridge, + .is_mock_bus = is_mock_bus, + .is_mock_port = is_mock_port, + .is_mock_dev = is_mock_dev, + .mock_port = mock_cxl_root_port, + .acpi_get_table = mock_acpi_get_table, + .acpi_put_table = mock_acpi_put_table, + .acpi_evaluate_integer = mock_acpi_evaluate_integer, + .acpi_pci_find_root = mock_acpi_pci_find_root, + .list = LIST_HEAD_INIT(cxl_mock_ops.list), +}; + +static void mock_companion(struct acpi_device *adev, struct device *dev) +{ + device_initialize(&adev->dev); + fwnode_init(&adev->fwnode, NULL); + dev->fwnode = &adev->fwnode; + adev->fwnode.dev = dev; +} + +#ifndef SZ_64G +#define SZ_64G (SZ_32G * 2) +#endif + +#ifndef SZ_512G +#define SZ_512G (SZ_64G * 8) +#endif + +static __init int cxl_test_init(void) +{ + int rc, i; + + register_cxl_mock_ops(&cxl_mock_ops); + + cxl_mock_pool = gen_pool_create(ilog2(SZ_2M), NUMA_NO_NODE); + if (!cxl_mock_pool) { + rc = -ENOMEM; + goto err_gen_pool_create; + } + + rc = gen_pool_add(cxl_mock_pool, SZ_512G, SZ_64G, NUMA_NO_NODE); + if (rc) + goto err_gen_pool_add; + + rc = populate_cedt(); + if (rc) + goto err_populate; + + for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++) { + struct acpi_device *adev = &host_bridge[i]; + struct platform_device *pdev; + + pdev = platform_device_alloc("cxl_host_bridge", i); + if (!pdev) + goto err_bridge; + + mock_companion(adev, &pdev->dev); + rc = platform_device_add(pdev); + if (rc) { + platform_device_put(pdev); + goto err_bridge; + } + cxl_host_bridge[i] = pdev; + } + + for (i = 0; i < ARRAY_SIZE(cxl_root_port); i++) { + struct platform_device *bridge = + cxl_host_bridge[i / NR_CXL_ROOT_PORTS]; + struct platform_device *pdev; + + pdev = platform_device_alloc("cxl_root_port", i); + if (!pdev) + goto err_port; + pdev->dev.parent = &bridge->dev; + + rc = platform_device_add(pdev); + if (rc) { + platform_device_put(pdev); + goto err_port; + } + cxl_root_port[i] = pdev; + } + + cxl_acpi = platform_device_alloc("cxl_acpi", 0); + if (!cxl_acpi) + goto err_port; + + mock_companion(&acpi0017_mock, &cxl_acpi->dev); + acpi0017_mock.dev.bus = &platform_bus_type; + + rc = platform_device_add(cxl_acpi); + if (rc) + goto err_add; + + return 0; + +err_add: + platform_device_put(cxl_acpi); +err_port: + for (i = ARRAY_SIZE(cxl_root_port) - 1; i >= 0; i--) + platform_device_unregister(cxl_root_port[i]); +err_bridge: + for (i = ARRAY_SIZE(cxl_host_bridge) - 1; i >= 0; i--) + platform_device_unregister(cxl_host_bridge[i]); +err_populate: + depopulate_all_mock_resources(); +err_gen_pool_add: + gen_pool_destroy(cxl_mock_pool); +err_gen_pool_create: + 
unregister_cxl_mock_ops(&cxl_mock_ops); + return rc; +} + +static __exit void cxl_test_exit(void) +{ + int i; + + platform_device_unregister(cxl_acpi); + for (i = ARRAY_SIZE(cxl_root_port) - 1; i >= 0; i--) + platform_device_unregister(cxl_root_port[i]); + for (i = ARRAY_SIZE(cxl_host_bridge) - 1; i >= 0; i--) + platform_device_unregister(cxl_host_bridge[i]); + depopulate_all_mock_resources(); + gen_pool_destroy(cxl_mock_pool); + unregister_cxl_mock_ops(&cxl_mock_ops); +} + +module_init(cxl_test_init); +module_exit(cxl_test_exit); +MODULE_LICENSE("GPL v2"); diff --git a/tools/testing/cxl/test/mock.c b/tools/testing/cxl/test/mock.c new file mode 100644 index 000000000000..b8c108abcf07 --- /dev/null +++ b/tools/testing/cxl/test/mock.c @@ -0,0 +1,171 @@ +// SPDX-License-Identifier: GPL-2.0-only +//Copyright(c) 2021 Intel Corporation. All rights reserved. + +#include +#include +#include +#include +#include +#include +#include "mock.h" + +static LIST_HEAD(mock); + +void register_cxl_mock_ops(struct cxl_mock_ops *ops) +{ + list_add_rcu(&ops->list, &mock); +} +EXPORT_SYMBOL_GPL(register_cxl_mock_ops); + +static DEFINE_SRCU(cxl_mock_srcu); + +void unregister_cxl_mock_ops(struct cxl_mock_ops *ops) +{ + list_del_rcu(&ops->list); + synchronize_srcu(&cxl_mock_srcu); +} +EXPORT_SYMBOL_GPL(unregister_cxl_mock_ops); + +struct cxl_mock_ops *get_cxl_mock_ops(int *index) +{ + *index = srcu_read_lock(&cxl_mock_srcu); + return list_first_or_null_rcu(&mock, struct cxl_mock_ops, list); +} +EXPORT_SYMBOL_GPL(get_cxl_mock_ops); + +void put_cxl_mock_ops(int index) +{ + srcu_read_unlock(&cxl_mock_srcu, index); +} +EXPORT_SYMBOL_GPL(put_cxl_mock_ops); + +bool __wrap_is_acpi_device_node(const struct fwnode_handle *fwnode) +{ + struct acpi_device *adev = + container_of(fwnode, struct acpi_device, fwnode); + int index; + struct cxl_mock_ops *ops = get_cxl_mock_ops(&index); + bool retval = false; + + if (ops) + retval = ops->is_mock_adev(adev); + + if (!retval) + retval = is_acpi_device_node(fwnode); + + put_cxl_mock_ops(index); + return retval; +} +EXPORT_SYMBOL(__wrap_is_acpi_device_node); + +acpi_status __wrap_acpi_get_table(char *signature, u32 instance, + struct acpi_table_header **out_table) +{ + int index; + struct cxl_mock_ops *ops = get_cxl_mock_ops(&index); + acpi_status status; + + if (ops) + status = ops->acpi_get_table(signature, instance, out_table); + else + status = acpi_get_table(signature, instance, out_table); + + put_cxl_mock_ops(index); + + return status; +} +EXPORT_SYMBOL(__wrap_acpi_get_table); + +void __wrap_acpi_put_table(struct acpi_table_header *table) +{ + int index; + struct cxl_mock_ops *ops = get_cxl_mock_ops(&index); + + if (ops) + ops->acpi_put_table(table); + else + acpi_put_table(table); + put_cxl_mock_ops(index); +} +EXPORT_SYMBOL(__wrap_acpi_put_table); + +acpi_status __wrap_acpi_evaluate_integer(acpi_handle handle, + acpi_string pathname, + struct acpi_object_list *arguments, + unsigned long long *data) +{ + int index; + struct cxl_mock_ops *ops = get_cxl_mock_ops(&index); + acpi_status status; + + if (ops) + status = ops->acpi_evaluate_integer(handle, pathname, arguments, + data); + else + status = acpi_evaluate_integer(handle, pathname, arguments, + data); + put_cxl_mock_ops(index); + + return status; +} +EXPORT_SYMBOL(__wrap_acpi_evaluate_integer); + +struct acpi_pci_root *__wrap_acpi_pci_find_root(acpi_handle handle) +{ + int index; + struct acpi_pci_root *root; + struct cxl_mock_ops *ops = get_cxl_mock_ops(&index); + + if (ops) + root = ops->acpi_pci_find_root(handle); + else + 
root = acpi_pci_find_root(handle); + + put_cxl_mock_ops(index); + + return root; +} +EXPORT_SYMBOL_GPL(__wrap_acpi_pci_find_root); + +void __wrap_pci_walk_bus(struct pci_bus *bus, + int (*cb)(struct pci_dev *, void *), void *userdata) +{ + int index; + struct cxl_mock_ops *ops = get_cxl_mock_ops(&index); + + if (ops && ops->is_mock_bus(bus)) { + int rc, i; + + /* + * Simulate 2 root ports per host-bridge and no + * depth recursion. + */ + for (i = 0; i < 2; i++) { + rc = cb((struct pci_dev *) ops->mock_port(bus, i), + userdata); + if (rc) + break; + } + } else + pci_walk_bus(bus, cb, userdata); + + put_cxl_mock_ops(index); +} +EXPORT_SYMBOL_GPL(__wrap_pci_walk_bus); + +struct nvdimm_bus * +__wrap_nvdimm_bus_register(struct device *dev, + struct nvdimm_bus_descriptor *nd_desc) +{ + int index; + struct cxl_mock_ops *ops = get_cxl_mock_ops(&index); + + if (ops && ops->is_mock_dev(dev->parent->parent)) + nd_desc->provider_name = "cxl_test"; + put_cxl_mock_ops(index); + + return nvdimm_bus_register(dev, nd_desc); +} +EXPORT_SYMBOL_GPL(__wrap_nvdimm_bus_register); + +MODULE_LICENSE("GPL v2"); diff --git a/tools/testing/cxl/test/mock.h b/tools/testing/cxl/test/mock.h new file mode 100644 index 000000000000..805a94cb3fbe --- /dev/null +++ b/tools/testing/cxl/test/mock.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include <linux/list.h> +#include <linux/acpi.h> + +struct cxl_mock_ops { + struct list_head list; + bool (*is_mock_adev)(struct acpi_device *dev); + acpi_status (*acpi_get_table)(char *signature, u32 instance, + struct acpi_table_header **out_table); + void (*acpi_put_table)(struct acpi_table_header *table); + bool (*is_mock_bridge)(struct device *dev); + acpi_status (*acpi_evaluate_integer)(acpi_handle handle, + acpi_string pathname, + struct acpi_object_list *arguments, + unsigned long long *data); + struct acpi_pci_root *(*acpi_pci_find_root)(acpi_handle handle); + struct platform_device *(*mock_port)(struct pci_bus *bus, int index); + bool (*is_mock_bus)(struct pci_bus *bus); + bool (*is_mock_port)(struct platform_device *pdev); + bool (*is_mock_dev)(struct device *dev); +}; + +void register_cxl_mock_ops(struct cxl_mock_ops *ops); +void unregister_cxl_mock_ops(struct cxl_mock_ops *ops); +struct cxl_mock_ops *get_cxl_mock_ops(int *index); +void put_cxl_mock_ops(int index); -- cgit v1.2.3 From a5c25802168993c67a03a6e04142761dfb4a3bf5 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 8 Sep 2021 22:13:10 -0700 Subject: cxl/bus: Populate the target list at decoder create As found by cxl_test, the implementation populated the target_list for the single-dport exceptional case, but it missed populating the target_list for the typical multi-dport case. Root decoders always know their target list at the beginning of time, and even switch-level decoders should have a target list of one or more zeros by default, depending on the interleave-ways setting. Walk the hosting port's dport list and populate based on the passed-in map. Move devm_cxl_add_passthrough_decoder() out of line now that it does the work of generating a target_map. Before: $ cat /sys/bus/cxl/devices/root2/decoder*/target_list 0 0 After: $ cat /sys/bus/cxl/devices/root2/decoder*/target_list 0 0,1,2,3 0 0,1,2,3 Where root2 is a CXL topology root object generated by 'cxl_test'.
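The core-side hunk that follows implements this by resolving each entry of the caller-provided map to a registered dport under the port's device lock. A condensed sketch of that resolve loop (locking and cleanup trimmed):

static int example_populate(struct cxl_decoder *cxld, struct cxl_port *port,
                            int *target_map, int nr_targets)
{
        int i;

        for (i = 0; i < nr_targets; i++) {
                /* find_dport() looks up a dport by its port_id */
                struct cxl_dport *dport = find_dport(port, target_map[i]);

                if (!dport)
                        return -ENXIO;  /* map names an unknown port_id */
                cxld->target[i] = dport;
        }

        return 0;
}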
Acked-by: Ben Widawsky Reviewed-by: Jonathan Cameron Link: https://lore.kernel.org/r/163116439000.2460985.11713777051267946018.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams --- drivers/cxl/acpi.c | 13 +++++++- drivers/cxl/core/bus.c | 80 +++++++++++++++++++++++++++++++++++++++++++------- drivers/cxl/cxl.h | 25 +++++++--------- 3 files changed, 91 insertions(+), 27 deletions(-) (limited to 'drivers/cxl') diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c index 32775d1ac4b3..d39cc797a64e 100644 --- a/drivers/cxl/acpi.c +++ b/drivers/cxl/acpi.c @@ -52,6 +52,12 @@ static int cxl_acpi_cfmws_verify(struct device *dev, return -EINVAL; } + if (CFMWS_INTERLEAVE_WAYS(cfmws) > CXL_DECODER_MAX_INTERLEAVE) { + dev_err(dev, "CFMWS Interleave Ways (%d) too large\n", + CFMWS_INTERLEAVE_WAYS(cfmws)); + return -EINVAL; + } + expected_len = struct_size((cfmws), interleave_targets, CFMWS_INTERLEAVE_WAYS(cfmws)); @@ -71,6 +77,7 @@ static int cxl_acpi_cfmws_verify(struct device *dev, static void cxl_add_cfmws_decoders(struct device *dev, struct cxl_port *root_port) { + int target_map[CXL_DECODER_MAX_INTERLEAVE]; struct acpi_cedt_cfmws *cfmws; struct cxl_decoder *cxld; acpi_size len, cur = 0; @@ -83,6 +90,7 @@ static void cxl_add_cfmws_decoders(struct device *dev, while (cur < len) { struct acpi_cedt_header *c = cedt_subtable + cur; + int i; if (c->type != ACPI_CEDT_TYPE_CFMWS) { cur += c->length; @@ -108,6 +116,9 @@ static void cxl_add_cfmws_decoders(struct device *dev, continue; } + for (i = 0; i < CFMWS_INTERLEAVE_WAYS(cfmws); i++) + target_map[i] = cfmws->interleave_targets[i]; + flags = cfmws_to_decoder_flags(cfmws->restrictions); cxld = devm_cxl_add_decoder(dev, root_port, CFMWS_INTERLEAVE_WAYS(cfmws), @@ -115,7 +126,7 @@ static void cxl_add_cfmws_decoders(struct device *dev, CFMWS_INTERLEAVE_WAYS(cfmws), CFMWS_INTERLEAVE_GRANULARITY(cfmws), CXL_DECODER_EXPANDER, - flags); + flags, target_map); if (IS_ERR(cxld)) { dev_err(dev, "Failed to add decoder for %#llx-%#llx\n", diff --git a/drivers/cxl/core/bus.c b/drivers/cxl/core/bus.c index 9a8fa88b634c..6dfdeaf999f0 100644 --- a/drivers/cxl/core/bus.c +++ b/drivers/cxl/core/bus.c @@ -453,11 +453,38 @@ err: } EXPORT_SYMBOL_GPL(cxl_add_dport); +static int decoder_populate_targets(struct device *host, + struct cxl_decoder *cxld, + struct cxl_port *port, int *target_map, + int nr_targets) +{ + int rc = 0, i; + + if (!target_map) + return 0; + + device_lock(&port->dev); + for (i = 0; i < nr_targets; i++) { + struct cxl_dport *dport = find_dport(port, target_map[i]); + + if (!dport) { + rc = -ENXIO; + break; + } + dev_dbg(host, "%s: target: %d\n", dev_name(dport->dport), i); + cxld->target[i] = dport; + } + device_unlock(&port->dev); + + return rc; +} + static struct cxl_decoder * -cxl_decoder_alloc(struct cxl_port *port, int nr_targets, resource_size_t base, - resource_size_t len, int interleave_ways, - int interleave_granularity, enum cxl_decoder_type type, - unsigned long flags) +cxl_decoder_alloc(struct device *host, struct cxl_port *port, int nr_targets, + resource_size_t base, resource_size_t len, + int interleave_ways, int interleave_granularity, + enum cxl_decoder_type type, unsigned long flags, + int *target_map) { struct cxl_decoder *cxld; struct device *dev; @@ -493,10 +520,10 @@ cxl_decoder_alloc(struct cxl_port *port, int nr_targets, resource_size_t base, .target_type = type, }; - /* handle implied target_list */ - if (interleave_ways == 1) - cxld->target[0] = - list_first_entry(&port->dports, struct cxl_dport, list); + rc = 
decoder_populate_targets(host, cxld, port, target_map, nr_targets); + if (rc) + goto err; + dev = &cxld->dev; device_initialize(dev); device_set_pm_not_required(dev); @@ -519,14 +546,19 @@ struct cxl_decoder * devm_cxl_add_decoder(struct device *host, struct cxl_port *port, int nr_targets, resource_size_t base, resource_size_t len, int interleave_ways, int interleave_granularity, - enum cxl_decoder_type type, unsigned long flags) + enum cxl_decoder_type type, unsigned long flags, + int *target_map) { struct cxl_decoder *cxld; struct device *dev; int rc; - cxld = cxl_decoder_alloc(port, nr_targets, base, len, interleave_ways, - interleave_granularity, type, flags); + if (nr_targets > CXL_DECODER_MAX_INTERLEAVE) + return ERR_PTR(-EINVAL); + + cxld = cxl_decoder_alloc(host, port, nr_targets, base, len, + interleave_ways, interleave_granularity, type, + flags, target_map); if (IS_ERR(cxld)) return cxld; @@ -550,6 +582,32 @@ err: } EXPORT_SYMBOL_GPL(devm_cxl_add_decoder); +/* + * Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability Structure) + * single ported host-bridges need not publish a decoder capability when a + * passthrough decode can be assumed, i.e. all transactions that the uport sees + * are claimed and passed to the single dport. Default the range a 0-base + * 0-length until the first CXL region is activated. + */ +struct cxl_decoder *devm_cxl_add_passthrough_decoder(struct device *host, + struct cxl_port *port) +{ + struct cxl_dport *dport; + int target_map[1]; + + device_lock(&port->dev); + dport = list_first_entry_or_null(&port->dports, typeof(*dport), list); + device_unlock(&port->dev); + + if (!dport) + return ERR_PTR(-ENXIO); + + target_map[0] = dport->port_id; + return devm_cxl_add_decoder(host, port, 1, 0, 0, 1, PAGE_SIZE, + CXL_DECODER_EXPANDER, 0, target_map); +} +EXPORT_SYMBOL_GPL(devm_cxl_add_passthrough_decoder); + /** * __cxl_driver_register - register a driver for the cxl bus * @cxl_drv: cxl driver structure to attach diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index c5152718267e..84b8836c1f91 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -180,6 +180,12 @@ enum cxl_decoder_type { CXL_DECODER_EXPANDER = 3, }; +/* + * Current specification goes up to 8, double that seems a reasonable + * software max for the foreseeable future + */ +#define CXL_DECODER_MAX_INTERLEAVE 16 + /** * struct cxl_decoder - CXL address range decode configuration * @dev: this decoder's device @@ -284,22 +290,11 @@ struct cxl_decoder * devm_cxl_add_decoder(struct device *host, struct cxl_port *port, int nr_targets, resource_size_t base, resource_size_t len, int interleave_ways, int interleave_granularity, - enum cxl_decoder_type type, unsigned long flags); - -/* - * Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability Structure) - * single ported host-bridges need not publish a decoder capability when a - * passthrough decode can be assumed, i.e. all transactions that the uport sees - * are claimed and passed to the single dport. Default the range a 0-base - * 0-length until the first CXL region is activated. 
- */ -static inline struct cxl_decoder * -devm_cxl_add_passthrough_decoder(struct device *host, struct cxl_port *port) -{ - return devm_cxl_add_decoder(host, port, 1, 0, 0, 1, PAGE_SIZE, - CXL_DECODER_EXPANDER, 0); -} + enum cxl_decoder_type type, unsigned long flags, + int *target_map); +struct cxl_decoder *devm_cxl_add_passthrough_decoder(struct device *host, + struct cxl_port *port); extern struct bus_type cxl_bus_type; struct cxl_driver { -- cgit v1.2.3 From 49be6dd807511db31809000a7b9d430b18d5e780 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 8 Sep 2021 22:13:15 -0700 Subject: cxl/mbox: Move command definitions to common location In preparation for cxl_test to mock responses to mailbox command requests, move some definitions from core/mbox.c to cxlmem.h. No functional changes intended. Acked-by: Ben Widawsky Acked-by: Jonathan Cameron Link: https://lore.kernel.org/r/163116439547.2460985.10457111177103589574.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams --- drivers/cxl/core/mbox.c | 45 ++++++-------------------------------- drivers/cxl/cxlmem.h | 57 +++++++++++++++++++++++++++++++++++++++++++++++++ drivers/cxl/pmem.c | 11 ++-------- 3 files changed, 65 insertions(+), 48 deletions(-) (limited to 'drivers/cxl') diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c index 82e79da195fa..576796a5d9f3 100644 --- a/drivers/cxl/core/mbox.c +++ b/drivers/cxl/core/mbox.c @@ -485,11 +485,7 @@ static int cxl_xfer_log(struct cxl_mem *cxlm, uuid_t *uuid, u32 size, u8 *out) while (remaining) { u32 xfer_size = min_t(u32, remaining, cxlm->payload_size); - struct cxl_mbox_get_log { - uuid_t uuid; - __le32 offset; - __le32 length; - } __packed log = { + struct cxl_mbox_get_log log = { .uuid = *uuid, .offset = cpu_to_le32(offset), .length = cpu_to_le32(xfer_size) @@ -520,14 +516,11 @@ static int cxl_xfer_log(struct cxl_mem *cxlm, uuid_t *uuid, u32 size, u8 *out) */ static void cxl_walk_cel(struct cxl_mem *cxlm, size_t size, u8 *cel) { - struct cel_entry { - __le16 opcode; - __le16 effect; - } __packed * cel_entry; + struct cxl_cel_entry *cel_entry; const int cel_entries = size / sizeof(*cel_entry); int i; - cel_entry = (struct cel_entry *)cel; + cel_entry = (struct cxl_cel_entry *) cel; for (i = 0; i < cel_entries; i++) { u16 opcode = le16_to_cpu(cel_entry[i].opcode); @@ -543,15 +536,6 @@ static void cxl_walk_cel(struct cxl_mem *cxlm, size_t size, u8 *cel) } } -struct cxl_mbox_get_supported_logs { - __le16 entries; - u8 rsvd[6]; - struct gsl_entry { - uuid_t uuid; - __le32 size; - } __packed entry[]; -} __packed; - static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_mem *cxlm) { struct cxl_mbox_get_supported_logs *ret; @@ -578,10 +562,8 @@ enum { /* See CXL 2.0 Table 170. 
Get Log Input Payload */ static const uuid_t log_uuid[] = { - [CEL_UUID] = UUID_INIT(0xda9c0b5, 0xbf41, 0x4b78, 0x8f, 0x79, 0x96, - 0xb1, 0x62, 0x3b, 0x3f, 0x17), - [VENDOR_DEBUG_UUID] = UUID_INIT(0xe1819d9, 0x11a9, 0x400c, 0x81, 0x1f, - 0xd6, 0x07, 0x19, 0x40, 0x3d, 0x86), + [CEL_UUID] = DEFINE_CXL_CEL_UUID, + [VENDOR_DEBUG_UUID] = DEFINE_CXL_VENDOR_DEBUG_UUID, }; /** @@ -698,22 +680,7 @@ static int cxl_mem_get_partition_info(struct cxl_mem *cxlm) int cxl_mem_identify(struct cxl_mem *cxlm) { /* See CXL 2.0 Table 175 Identify Memory Device Output Payload */ - struct cxl_mbox_identify { - char fw_revision[0x10]; - __le64 total_capacity; - __le64 volatile_capacity; - __le64 persistent_capacity; - __le64 partition_align; - __le16 info_event_log_size; - __le16 warning_event_log_size; - __le16 failure_event_log_size; - __le16 fatal_event_log_size; - __le32 lsa_size; - u8 poison_list_max_mer[3]; - __le16 inject_poison_limit; - u8 poison_caps; - u8 qos_telemetry_caps; - } __packed id; + struct cxl_mbox_identify id; int rc; rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_IDENTIFY, NULL, 0, &id, diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h index 373add570ef6..c4f450ad434d 100644 --- a/drivers/cxl/cxlmem.h +++ b/drivers/cxl/cxlmem.h @@ -171,6 +171,63 @@ enum cxl_opcode { CXL_MBOX_OP_MAX = 0x10000 }; +#define DEFINE_CXL_CEL_UUID \ + UUID_INIT(0xda9c0b5, 0xbf41, 0x4b78, 0x8f, 0x79, 0x96, 0xb1, 0x62, \ + 0x3b, 0x3f, 0x17) + +#define DEFINE_CXL_VENDOR_DEBUG_UUID \ + UUID_INIT(0xe1819d9, 0x11a9, 0x400c, 0x81, 0x1f, 0xd6, 0x07, 0x19, \ + 0x40, 0x3d, 0x86) + +struct cxl_mbox_get_supported_logs { + __le16 entries; + u8 rsvd[6]; + struct cxl_gsl_entry { + uuid_t uuid; + __le32 size; + } __packed entry[]; +} __packed; + +struct cxl_cel_entry { + __le16 opcode; + __le16 effect; +} __packed; + +struct cxl_mbox_get_log { + uuid_t uuid; + __le32 offset; + __le32 length; +} __packed; + +/* See CXL 2.0 Table 175 Identify Memory Device Output Payload */ +struct cxl_mbox_identify { + char fw_revision[0x10]; + __le64 total_capacity; + __le64 volatile_capacity; + __le64 persistent_capacity; + __le64 partition_align; + __le16 info_event_log_size; + __le16 warning_event_log_size; + __le16 failure_event_log_size; + __le16 fatal_event_log_size; + __le32 lsa_size; + u8 poison_list_max_mer[3]; + __le16 inject_poison_limit; + u8 poison_caps; + u8 qos_telemetry_caps; +} __packed; + +struct cxl_mbox_get_lsa { + u32 offset; + u32 length; +} __packed; + +struct cxl_mbox_set_lsa { + u32 offset; + u32 reserved; + u8 data[]; +} __packed; + /** * struct cxl_mem_command - Driver representation of a memory device command * @info: Command information as it exists for the UAPI diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c index 5ccf9aa6b3ae..d7b5548551e9 100644 --- a/drivers/cxl/pmem.c +++ b/drivers/cxl/pmem.c @@ -99,10 +99,7 @@ static int cxl_pmem_get_config_data(struct cxl_mem *cxlm, struct nd_cmd_get_config_data_hdr *cmd, unsigned int buf_len) { - struct cxl_mbox_get_lsa { - u32 offset; - u32 length; - } get_lsa; + struct cxl_mbox_get_lsa get_lsa; int rc; if (sizeof(*cmd) > buf_len) @@ -127,11 +124,7 @@ static int cxl_pmem_set_config_data(struct cxl_mem *cxlm, struct nd_cmd_set_config_hdr *cmd, unsigned int buf_len) { - struct cxl_mbox_set_lsa { - u32 offset; - u32 reserved; - u8 data[]; - } *set_lsa; + struct cxl_mbox_set_lsa *set_lsa; int rc; if (sizeof(*cmd) > buf_len) -- cgit v1.2.3 From 7d3eb23c4ccf457b52cafdca1a7b20cddf29e021 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 8 Sep 2021 22:13:21 -0700 
Subject: tools/testing/cxl: Introduce a mock memory device + driver Introduce an emulated device-set plus driver to register CXL memory devices, 'struct cxl_memdev' instances, in the mock cxl_test topology. This enables the development of HDM Decoder (Host-managed Device Memory Decoder) programming flow (region provisioning) in an environment that can be updated alongside the kernel as it gains more functionality. Whereas the cxl_pci module looks for CXL memory expanders on the 'pci' bus, the cxl_mock_mem module attaches to CXL expanders on the platform bus emitted by cxl_test. Acked-by: Ben Widawsky Reported-by: kernel test robot Reviewed-by: Jonathan Cameron Link: https://lore.kernel.org/r/163116440099.2460985.10692549614409346604.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams --- drivers/cxl/core/pmem.c | 6 +- drivers/cxl/cxl.h | 2 +- drivers/cxl/pmem.c | 2 +- tools/testing/cxl/Kbuild | 2 + tools/testing/cxl/mock_pmem.c | 24 ++++ tools/testing/cxl/test/Kbuild | 4 + tools/testing/cxl/test/cxl.c | 69 +++++++++++- tools/testing/cxl/test/mem.c | 256 ++++++++++++++++++++++++++++++++++++++++++ 8 files changed, 359 insertions(+), 6 deletions(-) create mode 100644 tools/testing/cxl/mock_pmem.c create mode 100644 tools/testing/cxl/test/mem.c (limited to 'drivers/cxl') diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c index 9e56be3994f1..74be5132df1c 100644 --- a/drivers/cxl/core/pmem.c +++ b/drivers/cxl/core/pmem.c @@ -51,16 +51,16 @@ struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev) } EXPORT_SYMBOL_GPL(to_cxl_nvdimm_bridge); -static int match_nvdimm_bridge(struct device *dev, const void *data) +__mock int match_nvdimm_bridge(struct device *dev, const void *data) { return dev->type == &cxl_nvdimm_bridge_type; } -struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(void) +struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_nvdimm *cxl_nvd) { struct device *dev; - dev = bus_find_device(&cxl_bus_type, NULL, NULL, match_nvdimm_bridge); + dev = bus_find_device(&cxl_bus_type, NULL, cxl_nvd, match_nvdimm_bridge); if (!dev) return NULL; return to_cxl_nvdimm_bridge(dev); diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index 84b8836c1f91..9af5745ba2c0 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -327,7 +327,7 @@ struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host, struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev); bool is_cxl_nvdimm(struct device *dev); int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd); -struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(void); +struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_nvdimm *cxl_nvd); /* * Unit test builds overrides this to __weak, find the 'strong' version diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c index d7b5548551e9..ceb2115981e5 100644 --- a/drivers/cxl/pmem.c +++ b/drivers/cxl/pmem.c @@ -39,7 +39,7 @@ static int cxl_nvdimm_probe(struct device *dev) struct nvdimm *nvdimm; int rc; - cxl_nvb = cxl_find_nvdimm_bridge(); + cxl_nvb = cxl_find_nvdimm_bridge(cxl_nvd); if (!cxl_nvb) return -ENXIO; diff --git a/tools/testing/cxl/Kbuild b/tools/testing/cxl/Kbuild index 63a4a07e71c4..86deba8308a1 100644 --- a/tools/testing/cxl/Kbuild +++ b/tools/testing/cxl/Kbuild @@ -33,4 +33,6 @@ cxl_core-y += $(CXL_CORE_SRC)/memdev.o cxl_core-y += $(CXL_CORE_SRC)/mbox.o cxl_core-y += config_check.o +cxl_core-y += mock_pmem.o + obj-m += test/ diff --git a/tools/testing/cxl/mock_pmem.c b/tools/testing/cxl/mock_pmem.c new file mode 100644 index 
000000000000..f7315e6f52c0 --- /dev/null +++ b/tools/testing/cxl/mock_pmem.c @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2021 Intel Corporation. All rights reserved. */ +#include +#include "test/mock.h" +#include + +int match_nvdimm_bridge(struct device *dev, const void *data) +{ + int index, rc = 0; + struct cxl_mock_ops *ops = get_cxl_mock_ops(&index); + const struct cxl_nvdimm *cxl_nvd = data; + + if (ops) { + if (dev->type == &cxl_nvdimm_bridge_type && + (ops->is_mock_dev(dev->parent->parent) == + ops->is_mock_dev(cxl_nvd->dev.parent->parent))) + rc = 1; + } else + rc = dev->type == &cxl_nvdimm_bridge_type; + + put_cxl_mock_ops(index); + + return rc; +} diff --git a/tools/testing/cxl/test/Kbuild b/tools/testing/cxl/test/Kbuild index 7de4ddecfd21..4e59e2c911f6 100644 --- a/tools/testing/cxl/test/Kbuild +++ b/tools/testing/cxl/test/Kbuild @@ -1,6 +1,10 @@ # SPDX-License-Identifier: GPL-2.0 +ccflags-y := -I$(srctree)/drivers/cxl/ + obj-m += cxl_test.o obj-m += cxl_mock.o +obj-m += cxl_mock_mem.o cxl_test-y := cxl.o cxl_mock-y := mock.o +cxl_mock_mem-y := mem.o diff --git a/tools/testing/cxl/test/cxl.c b/tools/testing/cxl/test/cxl.c index 1c47b34244a4..cb32f9e27d5d 100644 --- a/tools/testing/cxl/test/cxl.c +++ b/tools/testing/cxl/test/cxl.c @@ -17,6 +17,7 @@ static struct platform_device *cxl_acpi; static struct platform_device *cxl_host_bridge[NR_CXL_HOST_BRIDGES]; static struct platform_device *cxl_root_port[NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS]; +struct platform_device *cxl_mem[NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS]; static struct acpi_device acpi0017_mock; static struct acpi_device host_bridge[NR_CXL_HOST_BRIDGES] = { @@ -36,6 +37,11 @@ static struct acpi_device host_bridge[NR_CXL_HOST_BRIDGES] = { static bool is_mock_dev(struct device *dev) { + int i; + + for (i = 0; i < ARRAY_SIZE(cxl_mem); i++) + if (dev == &cxl_mem[i]->dev) + return true; if (dev == &cxl_acpi->dev) return true; return false; @@ -405,6 +411,44 @@ static void mock_companion(struct acpi_device *adev, struct device *dev) #define SZ_512G (SZ_64G * 8) #endif +static struct platform_device *alloc_memdev(int id) +{ + struct resource res[] = { + [0] = { + .flags = IORESOURCE_MEM, + }, + [1] = { + .flags = IORESOURCE_MEM, + .desc = IORES_DESC_PERSISTENT_MEMORY, + }, + }; + struct platform_device *pdev; + int i, rc; + + for (i = 0; i < ARRAY_SIZE(res); i++) { + struct cxl_mock_res *r = alloc_mock_res(SZ_256M); + + if (!r) + return NULL; + res[i].start = r->range.start; + res[i].end = r->range.end; + } + + pdev = platform_device_alloc("cxl_mem", id); + if (!pdev) + return NULL; + + rc = platform_device_add_resources(pdev, res, ARRAY_SIZE(res)); + if (rc) + goto err; + + return pdev; + +err: + platform_device_put(pdev); + return NULL; +} + static __init int cxl_test_init(void) { int rc, i; @@ -460,9 +504,27 @@ static __init int cxl_test_init(void) cxl_root_port[i] = pdev; } + BUILD_BUG_ON(ARRAY_SIZE(cxl_mem) != ARRAY_SIZE(cxl_root_port)); + for (i = 0; i < ARRAY_SIZE(cxl_mem); i++) { + struct platform_device *port = cxl_root_port[i]; + struct platform_device *pdev; + + pdev = alloc_memdev(i); + if (!pdev) + goto err_mem; + pdev->dev.parent = &port->dev; + + rc = platform_device_add(pdev); + if (rc) { + platform_device_put(pdev); + goto err_mem; + } + cxl_mem[i] = pdev; + } + cxl_acpi = platform_device_alloc("cxl_acpi", 0); if (!cxl_acpi) - goto err_port; + goto err_mem; mock_companion(&acpi0017_mock, &cxl_acpi->dev); acpi0017_mock.dev.bus = &platform_bus_type; @@ -475,6 +537,9 @@ static 
__init int cxl_test_init(void) err_add: platform_device_put(cxl_acpi); +err_mem: + for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--) + platform_device_unregister(cxl_mem[i]); err_port: for (i = ARRAY_SIZE(cxl_root_port) - 1; i >= 0; i--) platform_device_unregister(cxl_root_port[i]); @@ -495,6 +560,8 @@ static __exit void cxl_test_exit(void) int i; platform_device_unregister(cxl_acpi); + for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--) + platform_device_unregister(cxl_mem[i]); for (i = ARRAY_SIZE(cxl_root_port) - 1; i >= 0; i--) platform_device_unregister(cxl_root_port[i]); for (i = ARRAY_SIZE(cxl_host_bridge) - 1; i >= 0; i--) diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c new file mode 100644 index 000000000000..12a8437a9ca0 --- /dev/null +++ b/tools/testing/cxl/test/mem.c @@ -0,0 +1,256 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright(c) 2021 Intel Corporation. All rights reserved. + +#include +#include +#include +#include +#include +#include + +#define LSA_SIZE SZ_128K +#define EFFECT(x) (1U << x) + +static struct cxl_cel_entry mock_cel[] = { + { + .opcode = cpu_to_le16(CXL_MBOX_OP_GET_SUPPORTED_LOGS), + .effect = cpu_to_le16(0), + }, + { + .opcode = cpu_to_le16(CXL_MBOX_OP_IDENTIFY), + .effect = cpu_to_le16(0), + }, + { + .opcode = cpu_to_le16(CXL_MBOX_OP_GET_LSA), + .effect = cpu_to_le16(0), + }, + { + .opcode = cpu_to_le16(CXL_MBOX_OP_SET_LSA), + .effect = cpu_to_le16(EFFECT(1) | EFFECT(2)), + }, +}; + +static struct { + struct cxl_mbox_get_supported_logs gsl; + struct cxl_gsl_entry entry; +} mock_gsl_payload = { + .gsl = { + .entries = cpu_to_le16(1), + }, + .entry = { + .uuid = DEFINE_CXL_CEL_UUID, + .size = cpu_to_le32(sizeof(mock_cel)), + }, +}; + +static int mock_gsl(struct cxl_mbox_cmd *cmd) +{ + if (cmd->size_out < sizeof(mock_gsl_payload)) + return -EINVAL; + + memcpy(cmd->payload_out, &mock_gsl_payload, sizeof(mock_gsl_payload)); + cmd->size_out = sizeof(mock_gsl_payload); + + return 0; +} + +static int mock_get_log(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd) +{ + struct cxl_mbox_get_log *gl = cmd->payload_in; + u32 offset = le32_to_cpu(gl->offset); + u32 length = le32_to_cpu(gl->length); + uuid_t uuid = DEFINE_CXL_CEL_UUID; + void *data = &mock_cel; + + if (cmd->size_in < sizeof(*gl)) + return -EINVAL; + if (length > cxlm->payload_size) + return -EINVAL; + if (offset + length > sizeof(mock_cel)) + return -EINVAL; + if (!uuid_equal(&gl->uuid, &uuid)) + return -EINVAL; + if (length > cmd->size_out) + return -EINVAL; + + memcpy(cmd->payload_out, data + offset, length); + + return 0; +} + +static int mock_id(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd) +{ + struct platform_device *pdev = to_platform_device(cxlm->dev); + struct cxl_mbox_identify id = { + .fw_revision = { "mock fw v1 " }, + .lsa_size = cpu_to_le32(LSA_SIZE), + /* FIXME: Add partition support */ + .partition_align = cpu_to_le64(0), + }; + u64 capacity = 0; + int i; + + if (cmd->size_out < sizeof(id)) + return -EINVAL; + + for (i = 0; i < 2; i++) { + struct resource *res; + + res = platform_get_resource(pdev, IORESOURCE_MEM, i); + if (!res) + break; + + capacity += resource_size(res) / CXL_CAPACITY_MULTIPLIER; + + if (le64_to_cpu(id.partition_align)) + continue; + + if (res->desc == IORES_DESC_PERSISTENT_MEMORY) + id.persistent_capacity = cpu_to_le64( + resource_size(res) / CXL_CAPACITY_MULTIPLIER); + else + id.volatile_capacity = cpu_to_le64( + resource_size(res) / CXL_CAPACITY_MULTIPLIER); + } + + id.total_capacity = cpu_to_le64(capacity); + + memcpy(cmd->payload_out, &id, 
sizeof(id)); + + return 0; +} + +static int mock_get_lsa(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd) +{ + struct cxl_mbox_get_lsa *get_lsa = cmd->payload_in; + void *lsa = dev_get_drvdata(cxlm->dev); + u32 offset, length; + + if (sizeof(*get_lsa) > cmd->size_in) + return -EINVAL; + offset = le32_to_cpu(get_lsa->offset); + length = le32_to_cpu(get_lsa->length); + if (offset + length > LSA_SIZE) + return -EINVAL; + if (length > cmd->size_out) + return -EINVAL; + + memcpy(cmd->payload_out, lsa + offset, length); + return 0; +} + +static int mock_set_lsa(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd) +{ + struct cxl_mbox_set_lsa *set_lsa = cmd->payload_in; + void *lsa = dev_get_drvdata(cxlm->dev); + u32 offset, length; + + if (sizeof(*set_lsa) > cmd->size_in) + return -EINVAL; + offset = le32_to_cpu(set_lsa->offset); + length = cmd->size_in - sizeof(*set_lsa); + if (offset + length > LSA_SIZE) + return -EINVAL; + + memcpy(lsa + offset, &set_lsa->data[0], length); + return 0; +} + +static int cxl_mock_mbox_send(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd) +{ + struct device *dev = cxlm->dev; + int rc = -EIO; + + switch (cmd->opcode) { + case CXL_MBOX_OP_GET_SUPPORTED_LOGS: + rc = mock_gsl(cmd); + break; + case CXL_MBOX_OP_GET_LOG: + rc = mock_get_log(cxlm, cmd); + break; + case CXL_MBOX_OP_IDENTIFY: + rc = mock_id(cxlm, cmd); + break; + case CXL_MBOX_OP_GET_LSA: + rc = mock_get_lsa(cxlm, cmd); + break; + case CXL_MBOX_OP_SET_LSA: + rc = mock_set_lsa(cxlm, cmd); + break; + default: + break; + } + + dev_dbg(dev, "opcode: %#x sz_in: %zd sz_out: %zd rc: %d\n", cmd->opcode, + cmd->size_in, cmd->size_out, rc); + + return rc; +} + +static void label_area_release(void *lsa) +{ + vfree(lsa); +} + +static int cxl_mock_mem_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct cxl_memdev *cxlmd; + struct cxl_mem *cxlm; + void *lsa; + int rc; + + lsa = vmalloc(LSA_SIZE); + if (!lsa) + return -ENOMEM; + rc = devm_add_action_or_reset(dev, label_area_release, lsa); + if (rc) + return rc; + dev_set_drvdata(dev, lsa); + + cxlm = cxl_mem_create(dev); + if (IS_ERR(cxlm)) + return PTR_ERR(cxlm); + + cxlm->mbox_send = cxl_mock_mbox_send; + cxlm->payload_size = SZ_4K; + + rc = cxl_mem_enumerate_cmds(cxlm); + if (rc) + return rc; + + rc = cxl_mem_identify(cxlm); + if (rc) + return rc; + + rc = cxl_mem_create_range_info(cxlm); + if (rc) + return rc; + + cxlmd = devm_cxl_add_memdev(cxlm); + if (IS_ERR(cxlmd)) + return PTR_ERR(cxlmd); + + if (range_len(&cxlm->pmem_range) && IS_ENABLED(CONFIG_CXL_PMEM)) + rc = devm_cxl_add_nvdimm(dev, cxlmd); + + return 0; +} + +static const struct platform_device_id cxl_mock_mem_ids[] = { + { .name = "cxl_mem", }, + { }, +}; +MODULE_DEVICE_TABLE(platform, cxl_mock_mem_ids); + +static struct platform_driver cxl_mock_mem_driver = { + .probe = cxl_mock_mem_probe, + .id_table = cxl_mock_mem_ids, + .driver = { + .name = KBUILD_MODNAME, + }, +}; + +module_platform_driver(cxl_mock_mem_driver); +MODULE_LICENSE("GPL v2"); +MODULE_IMPORT_NS(CXL); -- cgit v1.2.3 From 48667f676189eccfe9b7ac3a31772d55d6da40e5 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 21 Sep 2021 12:22:16 -0700 Subject: cxl/core: Split decoder setup into alloc + add The kbuild robot reports: drivers/cxl/core/bus.c:516:1: warning: stack frame size (1032) exceeds limit (1024) in function 'devm_cxl_add_decoder' It is also the case that devm_cxl_add_decoder() is unwieldy to use for all the different decoder types. Fix the stack usage by splitting the creation into alloc and add steps.
This also allows for context specific construction before adding. With the split the caller is responsible for registering a devm callback to trigger device_unregister() for the decoder rather than it being implicit in the decoder registration. I.e. the routine that calls alloc is responsible for calling put_device() if the "add" operation fails. Reported-by: kernel test robot Reported-by: Nathan Chancellor Reported-by: Dan Carpenter Reviewed-by: Ben Widawsky Link: https://lore.kernel.org/r/163225205828.3038145.6831131648369404859.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams --- drivers/cxl/acpi.c | 84 ++++++++++++++++++++++--------- drivers/cxl/core/bus.c | 129 ++++++++++++++++-------------------------------- drivers/cxl/core/core.h | 5 -- drivers/cxl/core/pmem.c | 7 ++- drivers/cxl/cxl.h | 15 +++--- 5 files changed, 114 insertions(+), 126 deletions(-) (limited to 'drivers/cxl') diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c index d39cc797a64e..af1c6c1875ac 100644 --- a/drivers/cxl/acpi.c +++ b/drivers/cxl/acpi.c @@ -82,7 +82,6 @@ static void cxl_add_cfmws_decoders(struct device *dev, struct cxl_decoder *cxld; acpi_size len, cur = 0; void *cedt_subtable; - unsigned long flags; int rc; len = acpi_cedt->length - sizeof(*acpi_cedt); @@ -119,24 +118,36 @@ static void cxl_add_cfmws_decoders(struct device *dev, for (i = 0; i < CFMWS_INTERLEAVE_WAYS(cfmws); i++) target_map[i] = cfmws->interleave_targets[i]; - flags = cfmws_to_decoder_flags(cfmws->restrictions); - cxld = devm_cxl_add_decoder(dev, root_port, - CFMWS_INTERLEAVE_WAYS(cfmws), - cfmws->base_hpa, cfmws->window_size, - CFMWS_INTERLEAVE_WAYS(cfmws), - CFMWS_INTERLEAVE_GRANULARITY(cfmws), - CXL_DECODER_EXPANDER, - flags, target_map); - - if (IS_ERR(cxld)) { + cxld = cxl_decoder_alloc(root_port, + CFMWS_INTERLEAVE_WAYS(cfmws)); + if (IS_ERR(cxld)) + goto next; + + cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions); + cxld->target_type = CXL_DECODER_EXPANDER; + cxld->range = (struct range) { + .start = cfmws->base_hpa, + .end = cfmws->base_hpa + cfmws->window_size - 1, + }; + cxld->interleave_ways = CFMWS_INTERLEAVE_WAYS(cfmws); + cxld->interleave_granularity = + CFMWS_INTERLEAVE_GRANULARITY(cfmws); + + rc = cxl_decoder_add(cxld, target_map); + if (rc) + put_device(&cxld->dev); + else + rc = cxl_decoder_autoremove(dev, cxld); + if (rc) { dev_err(dev, "Failed to add decoder for %#llx-%#llx\n", cfmws->base_hpa, cfmws->base_hpa + cfmws->window_size - 1); - } else { - dev_dbg(dev, "add: %s range %#llx-%#llx\n", - dev_name(&cxld->dev), cfmws->base_hpa, - cfmws->base_hpa + cfmws->window_size - 1); + goto next; } + dev_dbg(dev, "add: %s range %#llx-%#llx\n", + dev_name(&cxld->dev), cfmws->base_hpa, + cfmws->base_hpa + cfmws->window_size - 1); +next: cur += c->length; } } @@ -266,6 +277,7 @@ static int add_host_bridge_uport(struct device *match, void *arg) struct acpi_device *bridge = to_cxl_host_bridge(host, match); struct acpi_pci_root *pci_root; struct cxl_walk_context ctx; + int single_port_map[1], rc; struct cxl_decoder *cxld; struct cxl_dport *dport; struct cxl_port *port; @@ -301,22 +313,46 @@ static int add_host_bridge_uport(struct device *match, void *arg) return -ENODEV; if (ctx.error) return ctx.error; + if (ctx.count > 1) + return 0; /* TODO: Scan CHBCR for HDM Decoder resources */ /* - * In the single-port host-bridge case there are no HDM decoders - * in the CHBCR and a 1:1 passthrough decode is implied. 
+ * Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability + * Structure) single ported host-bridges need not publish a decoder + * capability when a passthrough decode can be assumed, i.e. all + * transactions that the uport sees are claimed and passed to the single + * dport. Disable the range until the first CXL region is enumerated / + * activated. */ - if (ctx.count == 1) { - cxld = devm_cxl_add_passthrough_decoder(host, port); - if (IS_ERR(cxld)) - return PTR_ERR(cxld); + cxld = cxl_decoder_alloc(port, 1); + if (IS_ERR(cxld)) + return PTR_ERR(cxld); + + cxld->interleave_ways = 1; + cxld->interleave_granularity = PAGE_SIZE; + cxld->target_type = CXL_DECODER_EXPANDER; + cxld->range = (struct range) { + .start = 0, + .end = -1, + }; - dev_dbg(host, "add: %s\n", dev_name(&cxld->dev)); - } + device_lock(&port->dev); + dport = list_first_entry(&port->dports, typeof(*dport), list); + device_unlock(&port->dev); - return 0; + single_port_map[0] = dport->port_id; + + rc = cxl_decoder_add(cxld, single_port_map); + if (rc) + put_device(&cxld->dev); + else + rc = cxl_decoder_autoremove(host, cxld); + + if (rc == 0) + dev_dbg(host, "add: %s\n", dev_name(&cxld->dev)); + return rc; } static int add_host_bridge_dport(struct device *match, void *arg) diff --git a/drivers/cxl/core/bus.c b/drivers/cxl/core/bus.c index 6dfdeaf999f0..ebd061d03950 100644 --- a/drivers/cxl/core/bus.c +++ b/drivers/cxl/core/bus.c @@ -453,10 +453,8 @@ err: } EXPORT_SYMBOL_GPL(cxl_add_dport); -static int decoder_populate_targets(struct device *host, - struct cxl_decoder *cxld, - struct cxl_port *port, int *target_map, - int nr_targets) +static int decoder_populate_targets(struct cxl_decoder *cxld, + struct cxl_port *port, int *target_map) { int rc = 0, i; @@ -464,66 +462,48 @@ static int decoder_populate_targets(struct device *host, return 0; device_lock(&port->dev); - for (i = 0; i < nr_targets; i++) { + if (list_empty(&port->dports)) { + rc = -EINVAL; + goto out_unlock; + } + + for (i = 0; i < cxld->nr_targets; i++) { struct cxl_dport *dport = find_dport(port, target_map[i]); if (!dport) { rc = -ENXIO; - break; + goto out_unlock; } - dev_dbg(host, "%s: target: %d\n", dev_name(dport->dport), i); cxld->target[i] = dport; } + +out_unlock: device_unlock(&port->dev); return rc; } -static struct cxl_decoder * -cxl_decoder_alloc(struct device *host, struct cxl_port *port, int nr_targets, - resource_size_t base, resource_size_t len, - int interleave_ways, int interleave_granularity, - enum cxl_decoder_type type, unsigned long flags, - int *target_map) +struct cxl_decoder *cxl_decoder_alloc(struct cxl_port *port, int nr_targets) { - struct cxl_decoder *cxld; + struct cxl_decoder *cxld, cxld_const_init = { + .nr_targets = nr_targets, + }; struct device *dev; int rc = 0; - if (interleave_ways < 1) + if (nr_targets > CXL_DECODER_MAX_INTERLEAVE || nr_targets < 1) return ERR_PTR(-EINVAL); - device_lock(&port->dev); - if (list_empty(&port->dports)) - rc = -EINVAL; - device_unlock(&port->dev); - if (rc) - return ERR_PTR(rc); - cxld = kzalloc(struct_size(cxld, target, nr_targets), GFP_KERNEL); if (!cxld) return ERR_PTR(-ENOMEM); + memcpy(cxld, &cxld_const_init, sizeof(cxld_const_init)); rc = ida_alloc(&port->decoder_ida, GFP_KERNEL); if (rc < 0) goto err; - *cxld = (struct cxl_decoder) { - .id = rc, - .range = { - .start = base, - .end = base + len - 1, - }, - .flags = flags, - .interleave_ways = interleave_ways, - .interleave_granularity = interleave_granularity, - .target_type = type, - }; - - rc = decoder_populate_targets(host, 
cxld, port, target_map, nr_targets); - if (rc) - goto err; - + cxld->id = rc; dev = &cxld->dev; device_initialize(dev); device_set_pm_not_required(dev); @@ -541,72 +521,47 @@ err: kfree(cxld); return ERR_PTR(rc); } +EXPORT_SYMBOL_GPL(cxl_decoder_alloc); -struct cxl_decoder * -devm_cxl_add_decoder(struct device *host, struct cxl_port *port, int nr_targets, - resource_size_t base, resource_size_t len, - int interleave_ways, int interleave_granularity, - enum cxl_decoder_type type, unsigned long flags, - int *target_map) +int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map) { - struct cxl_decoder *cxld; + struct cxl_port *port; struct device *dev; int rc; - if (nr_targets > CXL_DECODER_MAX_INTERLEAVE) - return ERR_PTR(-EINVAL); + if (WARN_ON_ONCE(!cxld)) + return -EINVAL; - cxld = cxl_decoder_alloc(host, port, nr_targets, base, len, - interleave_ways, interleave_granularity, type, - flags, target_map); - if (IS_ERR(cxld)) - return cxld; + if (WARN_ON_ONCE(IS_ERR(cxld))) + return PTR_ERR(cxld); - dev = &cxld->dev; - rc = dev_set_name(dev, "decoder%d.%d", port->id, cxld->id); - if (rc) - goto err; + if (cxld->interleave_ways < 1) + return -EINVAL; - rc = device_add(dev); + port = to_cxl_port(cxld->dev.parent); + rc = decoder_populate_targets(cxld, port, target_map); if (rc) - goto err; + return rc; - rc = devm_add_action_or_reset(host, unregister_cxl_dev, dev); + dev = &cxld->dev; + rc = dev_set_name(dev, "decoder%d.%d", port->id, cxld->id); if (rc) - return ERR_PTR(rc); - return cxld; + return rc; -err: - put_device(dev); - return ERR_PTR(rc); + return device_add(dev); } -EXPORT_SYMBOL_GPL(devm_cxl_add_decoder); +EXPORT_SYMBOL_GPL(cxl_decoder_add); -/* - * Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability Structure) - * single ported host-bridges need not publish a decoder capability when a - * passthrough decode can be assumed, i.e. all transactions that the uport sees - * are claimed and passed to the single dport. Default the range a 0-base - * 0-length until the first CXL region is activated. 
- */ -struct cxl_decoder *devm_cxl_add_passthrough_decoder(struct device *host, - struct cxl_port *port) +static void cxld_unregister(void *dev) { - struct cxl_dport *dport; - int target_map[1]; - - device_lock(&port->dev); - dport = list_first_entry_or_null(&port->dports, typeof(*dport), list); - device_unlock(&port->dev); - - if (!dport) - return ERR_PTR(-ENXIO); + device_unregister(dev); +} - target_map[0] = dport->port_id; - return devm_cxl_add_decoder(host, port, 1, 0, 0, 1, PAGE_SIZE, - CXL_DECODER_EXPANDER, 0, target_map); +int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld) +{ + return devm_add_action_or_reset(host, cxld_unregister, &cxld->dev); } -EXPORT_SYMBOL_GPL(devm_cxl_add_passthrough_decoder); +EXPORT_SYMBOL_GPL(cxl_decoder_autoremove); /** * __cxl_driver_register - register a driver for the cxl bus diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h index c85b7fbad02d..e0c9aacc4e9c 100644 --- a/drivers/cxl/core/core.h +++ b/drivers/cxl/core/core.h @@ -9,11 +9,6 @@ extern const struct device_type cxl_nvdimm_type; extern struct attribute_group cxl_base_attribute_group; -static inline void unregister_cxl_dev(void *dev) -{ - device_unregister(dev); -} - struct cxl_send_command; struct cxl_mem_query_commands; int cxl_query_cmd(struct cxl_memdev *cxlmd, diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c index 74be5132df1c..5032f4c1c69d 100644 --- a/drivers/cxl/core/pmem.c +++ b/drivers/cxl/core/pmem.c @@ -222,6 +222,11 @@ static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd) return cxl_nvd; } +static void cxl_nvd_unregister(void *dev) +{ + device_unregister(dev); +} + /** * devm_cxl_add_nvdimm() - add a bridge between a cxl_memdev and an nvdimm * @host: same host as @cxlmd @@ -251,7 +256,7 @@ int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd) dev_dbg(host, "%s: register %s\n", dev_name(dev->parent), dev_name(dev)); - return devm_add_action_or_reset(host, unregister_cxl_dev, dev); + return devm_add_action_or_reset(host, cxl_nvd_unregister, dev); err: put_device(dev); diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index 9af5745ba2c0..a6687e7fd598 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -195,6 +195,7 @@ enum cxl_decoder_type { * @interleave_granularity: data stride per dport * @target_type: accelerator vs expander (type2 vs type3) selector * @flags: memory type capabilities and locking + * @nr_targets: number of elements in @target * @target: active ordered target list in current decoder configuration */ struct cxl_decoder { @@ -205,6 +206,7 @@ struct cxl_decoder { int interleave_granularity; enum cxl_decoder_type target_type; unsigned long flags; + const int nr_targets; struct cxl_dport *target[]; }; @@ -286,15 +288,10 @@ int cxl_add_dport(struct cxl_port *port, struct device *dport, int port_id, struct cxl_decoder *to_cxl_decoder(struct device *dev); bool is_root_decoder(struct device *dev); -struct cxl_decoder * -devm_cxl_add_decoder(struct device *host, struct cxl_port *port, int nr_targets, - resource_size_t base, resource_size_t len, - int interleave_ways, int interleave_granularity, - enum cxl_decoder_type type, unsigned long flags, - int *target_map); - -struct cxl_decoder *devm_cxl_add_passthrough_decoder(struct device *host, - struct cxl_port *port); +struct cxl_decoder *cxl_decoder_alloc(struct cxl_port *port, int nr_targets); +int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map); +int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld); + 
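Taken together, the declarations above replace the monolithic devm_cxl_add_decoder() entry point with an explicit alloc / configure / add sequence in which the caller owns put_device() on failure and opts into devm-triggered unregistration via cxl_decoder_autoremove(). A condensed usage sketch, assuming a port with a single dport whose port_id is 0 (range and flags setup omitted for brevity):

static int example_add_decoder(struct device *host, struct cxl_port *port)
{
        int target_map[1] = { 0 };      /* assumed single dport, port_id 0 */
        struct cxl_decoder *cxld;
        int rc;

        cxld = cxl_decoder_alloc(port, 1);
        if (IS_ERR(cxld))
                return PTR_ERR(cxld);

        /* context-specific construction happens before the add */
        cxld->interleave_ways = 1;
        cxld->interleave_granularity = PAGE_SIZE;
        cxld->target_type = CXL_DECODER_EXPANDER;

        rc = cxl_decoder_add(cxld, target_map);
        if (rc) {
                put_device(&cxld->dev); /* caller drops the reference */
                return rc;
        }

        /* tie device_unregister() of the decoder to @host's devm lifetime */
        return cxl_decoder_autoremove(host, cxld);
}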
extern struct bus_type cxl_bus_type; struct cxl_driver { -- cgit v1.2.3 From ed97afb53365cd03dde266c9644334a558fe5a16 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Mon, 13 Sep 2021 09:33:24 -0700 Subject: cxl/pci: Disambiguate cxl_pci further from cxl_mem Commit 21e9f76733a8 ("cxl: Rename mem to pci") introduced the cxl_pci driver which had formerly been named cxl_mem. At the time, the goal was to be as light touch as possible because there were other patches in flight. Since things have settled now, and a new cxl_mem driver will be introduced shortly, spend the LOC now to clean up the existing names. While here, fix the kernel docs to explain the situation better after the core rework that has already landed. Reviewed-by: Jonathan Cameron Signed-off-by: Ben Widawsky Link: https://lore.kernel.org/r/20210913163324.1008564-4-ben.widawsky@intel.com Signed-off-by: Dan Williams --- drivers/cxl/pci.c | 68 ++++++++++++++++++++++++++++--------------------------- 1 file changed, 35 insertions(+), 33 deletions(-) (limited to 'drivers/cxl') diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index 27f75b5a2ee2..64180f46c895 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -16,14 +16,16 @@ * * This implements the PCI exclusive functionality for a CXL device as it is * defined by the Compute Express Link specification. CXL devices may surface - * certain functionality even if it isn't CXL enabled. + * certain functionality even if it isn't CXL enabled. While this driver is + * focused around the PCI specific aspects of a CXL device, it binds to the + * specific CXL memory device class code, and therefore the implementation of + * cxl_pci is focused around CXL memory devices. * * The driver has several responsibilities, mainly: * - Create the memX device and register on the CXL bus. * - Enumerate device's register interface and map them. - * - Probe the device attributes to establish sysfs interface. - * - Provide an IOCTL interface to userspace to communicate with the device for - * things like firmware update. + * - Registers nvdimm bridge device with cxl_core. + * - Registers a CXL mailbox with cxl_core. */ #define cxl_doorbell_busy(cxlm) \ @@ -33,7 +35,7 @@ /* CXL 2.0 - 8.2.8.4 */ #define CXL_MAILBOX_TIMEOUT_MS (2 * HZ) -static int cxl_mem_wait_for_doorbell(struct cxl_mem *cxlm) +static int cxl_pci_mbox_wait_for_doorbell(struct cxl_mem *cxlm) { const unsigned long start = jiffies; unsigned long end = start; @@ -55,7 +57,7 @@ static int cxl_mem_wait_for_doorbell(struct cxl_mem *cxlm) return 0; } -static void cxl_mem_mbox_timeout(struct cxl_mem *cxlm, +static void cxl_pci_mbox_timeout(struct cxl_mem *cxlm, struct cxl_mbox_cmd *mbox_cmd) { struct device *dev = cxlm->dev; @@ -65,7 +67,7 @@ static void cxl_mem_mbox_timeout(struct cxl_mem *cxlm, } /** - * __cxl_mem_mbox_send_cmd() - Execute a mailbox command + * __cxl_pci_mbox_send_cmd() - Execute a mailbox command * @cxlm: The CXL memory device to communicate with. * @mbox_cmd: Command to send to the memory device. * @@ -86,7 +88,7 @@ static void cxl_mem_mbox_timeout(struct cxl_mem *cxlm, * not need to coordinate with each other. The driver only uses the primary * mailbox. 
*/ -static int __cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, +static int __cxl_pci_mbox_send_cmd(struct cxl_mem *cxlm, struct cxl_mbox_cmd *mbox_cmd) { void __iomem *payload = cxlm->regs.mbox + CXLDEV_MBOX_PAYLOAD_OFFSET; @@ -140,9 +142,9 @@ static int __cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, cxlm->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET); /* #5 */ - rc = cxl_mem_wait_for_doorbell(cxlm); + rc = cxl_pci_mbox_wait_for_doorbell(cxlm); if (rc == -ETIMEDOUT) { - cxl_mem_mbox_timeout(cxlm, mbox_cmd); + cxl_pci_mbox_timeout(cxlm, mbox_cmd); return rc; } @@ -181,13 +183,13 @@ static int __cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, } /** - * cxl_mem_mbox_get() - Acquire exclusive access to the mailbox. + * cxl_pci_mbox_get() - Acquire exclusive access to the mailbox. * @cxlm: The memory device to gain access to. * * Context: Any context. Takes the mbox_mutex. * Return: 0 if exclusive access was acquired. */ -static int cxl_mem_mbox_get(struct cxl_mem *cxlm) +static int cxl_pci_mbox_get(struct cxl_mem *cxlm) { struct device *dev = cxlm->dev; u64 md_status; @@ -212,7 +214,7 @@ static int cxl_mem_mbox_get(struct cxl_mem *cxlm) * Mailbox Interface Ready bit. Therefore, waiting for the doorbell * to be ready is sufficient. */ - rc = cxl_mem_wait_for_doorbell(cxlm); + rc = cxl_pci_mbox_wait_for_doorbell(cxlm); if (rc) { dev_warn(dev, "Mailbox interface not ready\n"); goto out; @@ -252,12 +254,12 @@ out: } /** - * cxl_mem_mbox_put() - Release exclusive access to the mailbox. + * cxl_pci_mbox_put() - Release exclusive access to the mailbox. * @cxlm: The CXL memory device to communicate with. * * Context: Any context. Expects mbox_mutex to be held. */ -static void cxl_mem_mbox_put(struct cxl_mem *cxlm) +static void cxl_pci_mbox_put(struct cxl_mem *cxlm) { mutex_unlock(&cxlm->mbox_mutex); } @@ -266,17 +268,17 @@ static int cxl_pci_mbox_send(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd) { int rc; - rc = cxl_mem_mbox_get(cxlm); + rc = cxl_pci_mbox_get(cxlm); if (rc) return rc; - rc = __cxl_mem_mbox_send_cmd(cxlm, cmd); - cxl_mem_mbox_put(cxlm); + rc = __cxl_pci_mbox_send_cmd(cxlm, cmd); + cxl_pci_mbox_put(cxlm); return rc; } -static int cxl_mem_setup_mailbox(struct cxl_mem *cxlm) +static int cxl_pci_setup_mailbox(struct cxl_mem *cxlm) { const int cap = readl(cxlm->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET); @@ -304,7 +306,7 @@ static int cxl_mem_setup_mailbox(struct cxl_mem *cxlm) return 0; } -static void __iomem *cxl_mem_map_regblock(struct cxl_mem *cxlm, +static void __iomem *cxl_pci_map_regblock(struct cxl_mem *cxlm, u8 bar, u64 offset) { void __iomem *addr; @@ -330,12 +332,12 @@ static void __iomem *cxl_mem_map_regblock(struct cxl_mem *cxlm, return addr; } -static void cxl_mem_unmap_regblock(struct cxl_mem *cxlm, void __iomem *base) +static void cxl_pci_unmap_regblock(struct cxl_mem *cxlm, void __iomem *base) { pci_iounmap(to_pci_dev(cxlm->dev), base); } -static int cxl_mem_dvsec(struct pci_dev *pdev, int dvsec) +static int cxl_pci_dvsec(struct pci_dev *pdev, int dvsec) { int pos; @@ -427,7 +429,7 @@ static void cxl_decode_register_block(u32 reg_lo, u32 reg_hi, } /** - * cxl_mem_setup_regs() - Setup necessary MMIO. + * cxl_pci_setup_regs() - Setup necessary MMIO. * @cxlm: The CXL memory device to communicate with. * * Return: 0 if all necessary registers mapped. @@ -436,7 +438,7 @@ static void cxl_decode_register_block(u32 reg_lo, u32 reg_hi, * regions. The purpose of this function is to enumerate and map those * registers. 
*/ -static int cxl_mem_setup_regs(struct cxl_mem *cxlm) +static int cxl_pci_setup_regs(struct cxl_mem *cxlm) { void __iomem *base; u32 regloc_size, regblocks; @@ -445,7 +447,7 @@ static int cxl_mem_setup_regs(struct cxl_mem *cxlm) struct pci_dev *pdev = to_pci_dev(dev); struct cxl_register_map *map, maps[CXL_REGLOC_RBI_TYPES]; - regloc = cxl_mem_dvsec(pdev, PCI_DVSEC_ID_CXL_REGLOC_DVSEC_ID); + regloc = cxl_pci_dvsec(pdev, PCI_DVSEC_ID_CXL_REGLOC_DVSEC_ID); if (!regloc) { dev_err(dev, "register location dvsec not found\n"); return -ENXIO; @@ -480,7 +482,7 @@ static int cxl_mem_setup_regs(struct cxl_mem *cxlm) if (reg_type > CXL_REGLOC_RBI_MEMDEV) continue; - base = cxl_mem_map_regblock(cxlm, bar, offset); + base = cxl_pci_map_regblock(cxlm, bar, offset); if (!base) return -ENOMEM; @@ -492,7 +494,7 @@ static int cxl_mem_setup_regs(struct cxl_mem *cxlm) ret = cxl_probe_regs(cxlm, base + offset, map); /* Always unmap the regblock regardless of probe success */ - cxl_mem_unmap_regblock(cxlm, base); + cxl_pci_unmap_regblock(cxlm, base); if (ret) return ret; @@ -511,7 +513,7 @@ static int cxl_mem_setup_regs(struct cxl_mem *cxlm) return ret; } -static int cxl_mem_probe(struct pci_dev *pdev, const struct pci_device_id *id) +static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct cxl_memdev *cxlmd; struct cxl_mem *cxlm; @@ -532,11 +534,11 @@ static int cxl_mem_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (IS_ERR(cxlm)) return PTR_ERR(cxlm); - rc = cxl_mem_setup_regs(cxlm); + rc = cxl_pci_setup_regs(cxlm); if (rc) return rc; - rc = cxl_mem_setup_mailbox(cxlm); + rc = cxl_pci_setup_mailbox(cxlm); if (rc) return rc; @@ -569,15 +571,15 @@ static const struct pci_device_id cxl_mem_pci_tbl[] = { }; MODULE_DEVICE_TABLE(pci, cxl_mem_pci_tbl); -static struct pci_driver cxl_mem_driver = { +static struct pci_driver cxl_pci_driver = { .name = KBUILD_MODNAME, .id_table = cxl_mem_pci_tbl, - .probe = cxl_mem_probe, + .probe = cxl_pci_probe, .driver = { .probe_type = PROBE_PREFER_ASYNCHRONOUS, }, }; MODULE_LICENSE("GPL v2"); -module_pci_driver(cxl_mem_driver); +module_pci_driver(cxl_pci_driver); MODULE_IMPORT_NS(CXL); -- cgit v1.2.3 From 91a45b12d49e2b43d86caba25ed59fae43344ab8 Mon Sep 17 00:00:00 2001 From: Alison Schofield Date: Thu, 7 Oct 2021 14:34:26 -0700 Subject: cxl/acpi: Do not fail cxl_acpi_probe() based on a missing CHBS When an ACPI0016 Host Bridge device is present yet no corresponding CEDT Host Bridge Structure (CHBS) exists, the ACPI probe method fails. Rather than fail, emit this warning and continue: cxl_acpi ACPI0017:00: No CHBS found for Host Bridge: ACPI0016:02 This error may occur on systems that are not compliant with the ACPI specification. Compliant systems include a CHBS entry for every CXL host bridge that is present at boot. 
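The warn-and-continue behavior leans on a driver-core iteration contract: the host bridge helpers run as bus_for_each_dev() callbacks, and bus_for_each_dev() stops walking at the first non-zero return. A minimal sketch of that contract (scan_one() and do_setup() are hypothetical names, not code from this patch):

    /*
     * bus_for_each_dev() aborts the walk on the first non-zero return,
     * so a callback that only wants to skip one device must warn and
     * return 0, as the missing-CHBS case below does.
     */
    static int scan_one(struct device *dev, void *data)
    {
            if (!dev_fwnode(dev)) {
                    dev_warn(dev, "skipping: no firmware description\n");
                    return 0;       /* keep iterating */
            }
            return do_setup(dev);   /* non-zero ends the bus walk */
    }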
Suggested-by: Ira Weiny Signed-off-by: Alison Schofield Tested-by: Vishal Verma Reviewed-by: Ira Weiny Link: https://lore.kernel.org/r/20211007213426.392644-1-alison.schofield@intel.com Signed-off-by: Dan Williams --- drivers/cxl/acpi.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'drivers/cxl') diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c index af1c6c1875ac..dadc7f64b9ff 100644 --- a/drivers/cxl/acpi.c +++ b/drivers/cxl/acpi.c @@ -288,7 +288,7 @@ static int add_host_bridge_uport(struct device *match, void *arg) dport = find_dport_by_dev(root_port, match); if (!dport) { dev_dbg(host, "host bridge expected and not found\n"); - return -ENODEV; + return 0; } port = devm_cxl_add_port(host, match, dport->component_reg_phys, @@ -377,9 +377,11 @@ static int add_host_bridge_dport(struct device *match, void *arg) } chbs = cxl_acpi_match_chbs(host, uid); - if (IS_ERR(chbs)) - dev_dbg(host, "No CHBS found for Host Bridge: %s\n", - dev_name(match)); + if (IS_ERR(chbs)) { + dev_warn(host, "No CHBS found for Host Bridge: %s\n", + dev_name(match)); + return 0; + } rc = cxl_add_dport(root_port, match, uid, get_chbcr(chbs)); if (rc) { -- cgit v1.2.3 From cdcce47cb33a6a2ecb1bc113d0ba42ec300a33fe Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Sat, 9 Oct 2021 09:44:02 -0700 Subject: cxl/pci: Convert register block identifiers to an enum In preparation for passing around the Register Block Indicator (RBI) as a parameter, it is desirable to convert the type to an enum so that the interface can use a well-defined parameter. Signed-off-by: Ben Widawsky [djbw: changelog fixups] Link: https://lore.kernel.org/r/163379784199.692348.4366131432595112771.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams --- drivers/cxl/pci.h | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) (limited to 'drivers/cxl') diff --git a/drivers/cxl/pci.h b/drivers/cxl/pci.h index 8c1a58813816..7d3e4bf06b45 100644 --- a/drivers/cxl/pci.h +++ b/drivers/cxl/pci.h @@ -20,13 +20,15 @@ #define CXL_REGLOC_BIR_MASK GENMASK(2, 0) /* Register Block Identifier (RBI) */ -#define CXL_REGLOC_RBI_MASK GENMASK(15, 8) -#define CXL_REGLOC_RBI_EMPTY 0 -#define CXL_REGLOC_RBI_COMPONENT 1 -#define CXL_REGLOC_RBI_VIRT 2 -#define CXL_REGLOC_RBI_MEMDEV 3 -#define CXL_REGLOC_RBI_TYPES CXL_REGLOC_RBI_MEMDEV + 1 +enum cxl_regloc_type { + CXL_REGLOC_RBI_EMPTY = 0, + CXL_REGLOC_RBI_COMPONENT, + CXL_REGLOC_RBI_VIRT, + CXL_REGLOC_RBI_MEMDEV, + CXL_REGLOC_RBI_TYPES +}; +#define CXL_REGLOC_RBI_MASK GENMASK(15, 8) #define CXL_REGLOC_ADDR_MASK GENMASK(31, 16) #endif /* __CXL_PCI_H__ */ -- cgit v1.2.3 From d22fed9c2b70d8ccc91c9a56ed2df2c1a0c2ebab Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Sat, 9 Oct 2021 09:44:07 -0700 Subject: cxl/pci: Remove dev_dbg for unknown register blocks While interesting to driver developers, the dev_dbg message doesn't do much except clutter up logs. This information should be attainable through sysfs, and someday lspci-like utilities. This change additionally helps reduce the LOC in a subsequent patch that refactors some of the cxl_pci register mapping. 
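As an aside, the enum conversion in the preceding patch is part of what makes filters over register block types easier to audit than this dev_dbg ever was; a sketch of the compile-time benefit, assuming a -Wswitch build (rbi_name() is a hypothetical helper, not part of this series):

    /*
     * A switch over enum cxl_regloc_type that omits an enumerator now
     * draws a compiler warning, which raw #define constants could not.
     */
    static const char *rbi_name(enum cxl_regloc_type type)
    {
            switch (type) {
            case CXL_REGLOC_RBI_EMPTY:      return "empty";
            case CXL_REGLOC_RBI_COMPONENT:  return "component";
            case CXL_REGLOC_RBI_VIRT:       return "virtualization";
            case CXL_REGLOC_RBI_MEMDEV:     return "memdev";
            case CXL_REGLOC_RBI_TYPES:      break; /* array-size sentinel */
            }
            return "unknown";
    }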
Signed-off-by: Ben Widawsky Link: https://lore.kernel.org/r/163379784717.692348.3478221381958300790.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams --- drivers/cxl/pci.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers/cxl') diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index 64180f46c895..ccc7c2573ddc 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -475,9 +475,6 @@ static int cxl_pci_setup_regs(struct cxl_mem *cxlm) cxl_decode_register_block(reg_lo, reg_hi, &bar, &offset, ®_type); - dev_dbg(dev, "Found register block in bar %u @ 0x%llx of type %u\n", - bar, offset, reg_type); - /* Ignore unknown register block types */ if (reg_type > CXL_REGLOC_RBI_MEMDEV) continue; -- cgit v1.2.3 From ca76a3a8052b71c0334d5c094859cfa340c290a8 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Fri, 15 Oct 2021 14:29:58 -0700 Subject: cxl/pci: Fix NULL vs ERR_PTR confusion cxl_pci_map_regblock() may return an ERR_PTR(), but cxl_pci_setup_regs() is only prepared for NULL as the error case. Pick the minimal fix for -stable backport purposes and just have cxl_pci_map_regblock() return NULL for errors. Fixes: f8a7e8c29be8 ("cxl/pci: Reserve all device regions at once") Cc: Reviewed-by: Ira Weiny Reviewed-by: Jonathan Cameron Link: https://lore.kernel.org/r/163433325724.834522.17809774578178224149.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams --- drivers/cxl/pci.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/cxl') diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index ccc7c2573ddc..9c178002d49e 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -317,7 +317,7 @@ static void __iomem *cxl_pci_map_regblock(struct cxl_mem *cxlm, if (pci_resource_len(pdev, bar) < offset) { dev_err(dev, "BAR%d: %pr: too small (offset: %#llx)\n", bar, &pdev->resource[bar], (unsigned long long)offset); - return IOMEM_ERR_PTR(-ENXIO); + return NULL; } addr = pci_iomap(pdev, bar, 0); -- cgit v1.2.3 From 84e36a9d1bbd2c41481e7160e0553480781b008b Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Sat, 9 Oct 2021 09:44:18 -0700 Subject: cxl/pci: Remove pci request/release regions Quoting Dan, "... the request + release regions should probably just be dropped. It's not like any of the register enumeration would collide with someone else who already has the registers mapped. The collision only comes when the registers are mapped for their final usage, and that will have more precision in the request." 
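The "more precision" is sketched below as an illustration of where this is headed, not code from this series: request only the range actually being claimed, at the point it is mapped for its final use (map_for_use() is a hypothetical name):

    /*
     * A final-usage mapping can request exactly the range it claims,
     * so enumeration-time probing never needs to hold the whole BAR.
     */
    static void __iomem *map_for_use(struct device *dev,
                                     resource_size_t addr,
                                     resource_size_t len)
    {
            if (!devm_request_mem_region(dev, addr, len, dev_name(dev)))
                    return NULL;    /* a genuine collision with another owner */
            return devm_ioremap(dev, addr, len);
    }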
Suggested-by: Dan Williams Signed-off-by: Ben Widawsky Link: https://lore.kernel.org/r/163379785872.692348.8981679111988251260.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams --- drivers/cxl/pci.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'drivers/cxl') diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index 9c178002d49e..21dd10a77eb3 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -453,9 +453,6 @@ static int cxl_pci_setup_regs(struct cxl_mem *cxlm) return -ENXIO; } - if (pci_request_mem_regions(pdev, pci_name(pdev))) - return -ENODEV; - /* Get the size of the Register Locator DVSEC */ pci_read_config_dword(pdev, regloc + PCI_DVSEC_HEADER1, &regloc_size); regloc_size = FIELD_GET(PCI_DVSEC_HEADER1_LENGTH_MASK, regloc_size); @@ -499,8 +496,6 @@ static int cxl_pci_setup_regs(struct cxl_mem *cxlm) n_maps++; } - pci_release_mem_regions(pdev); - for (i = 0; i < n_maps; i++) { ret = cxl_map_regs(cxlm, &maps[i]); if (ret) -- cgit v1.2.3 From 7dc7a64de2bb3daf613f4c2e809e49678c579148 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Wed, 13 Oct 2021 16:53:29 -0700 Subject: cxl/pci: Make more use of cxl_register_map The structure exists to pass around information about register mapping. Use it for passing @barno and @block_offset, and eliminate duplicate local variables. The helpers that use @map do not care about @cxlm, so just pass them a pdev instead. [djbw: reorder before cxl_pci_setup_regs() refactor to improve readability] Signed-off-by: Ben Widawsky Reported-by: kernel test robot Reviewed-by: Ira Weiny [djbw: separate @base conversion] Link: https://lore.kernel.org/r/163416901172.806743.10056306321247850914.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams --- drivers/cxl/pci.c | 59 +++++++++++++++++++++++-------------------------------- 1 file changed, 25 insertions(+), 34 deletions(-) (limited to 'drivers/cxl') diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index 21dd10a77eb3..eb0c2f1b9e65 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -306,17 +306,18 @@ static int cxl_pci_setup_mailbox(struct cxl_mem *cxlm) return 0; } -static void __iomem *cxl_pci_map_regblock(struct cxl_mem *cxlm, - u8 bar, u64 offset) +static void __iomem *cxl_pci_map_regblock(struct pci_dev *pdev, + struct cxl_register_map *map) { void __iomem *addr; - struct device *dev = cxlm->dev; - struct pci_dev *pdev = to_pci_dev(dev); + int bar = map->barno; + struct device *dev = &pdev->dev; + resource_size_t offset = map->block_offset; /* Basic sanity check that BAR is big enough */ if (pci_resource_len(pdev, bar) < offset) { - dev_err(dev, "BAR%d: %pr: too small (offset: %#llx)\n", bar, - &pdev->resource[bar], (unsigned long long)offset); + dev_err(dev, "BAR%d: %pr: too small (offset: %pa)\n", bar, + &pdev->resource[bar], &offset); return NULL; } @@ -326,15 +327,15 @@ static void __iomem *cxl_pci_map_regblock(struct cxl_mem *cxlm, return addr; } - dev_dbg(dev, "Mapped CXL Memory Device resource bar %u @ %#llx\n", - bar, offset); + dev_dbg(dev, "Mapped CXL Memory Device resource bar %u @ %pa\n", + bar, &offset); return addr; } -static void cxl_pci_unmap_regblock(struct cxl_mem *cxlm, void __iomem *base) +static void cxl_pci_unmap_regblock(struct pci_dev *pdev, void __iomem *base) { - pci_iounmap(to_pci_dev(cxlm->dev), base); + pci_iounmap(pdev, base); } static int cxl_pci_dvsec(struct pci_dev *pdev, int dvsec) @@ -360,12 +361,12 @@ static int cxl_pci_dvsec(struct pci_dev *pdev, int dvsec) return 0; } -static int cxl_probe_regs(struct cxl_mem *cxlm, void 
__iomem *base, +static int cxl_probe_regs(struct pci_dev *pdev, void __iomem *base, struct cxl_register_map *map) { struct cxl_component_reg_map *comp_map; struct cxl_device_reg_map *dev_map; - struct device *dev = cxlm->dev; + struct device *dev = &pdev->dev; switch (map->reg_type) { case CXL_REGLOC_RBI_COMPONENT: @@ -420,12 +421,13 @@ static int cxl_map_regs(struct cxl_mem *cxlm, struct cxl_register_map *map) return 0; } -static void cxl_decode_register_block(u32 reg_lo, u32 reg_hi, - u8 *bar, u64 *offset, u8 *reg_type) +static void cxl_decode_regblock(u32 reg_lo, u32 reg_hi, + struct cxl_register_map *map) { - *offset = ((u64)reg_hi << 32) | (reg_lo & CXL_REGLOC_ADDR_MASK); - *bar = FIELD_GET(CXL_REGLOC_BIR_MASK, reg_lo); - *reg_type = FIELD_GET(CXL_REGLOC_RBI_MASK, reg_lo); + map->block_offset = + ((u64)reg_hi << 32) | (reg_lo & CXL_REGLOC_ADDR_MASK); + map->barno = FIELD_GET(CXL_REGLOC_BIR_MASK, reg_lo); + map->reg_type = FIELD_GET(CXL_REGLOC_RBI_MASK, reg_lo); } /** @@ -462,34 +464,23 @@ static int cxl_pci_setup_regs(struct cxl_mem *cxlm) for (i = 0, n_maps = 0; i < regblocks; i++, regloc += 8) { u32 reg_lo, reg_hi; - u8 reg_type; - u64 offset; - u8 bar; pci_read_config_dword(pdev, regloc, &reg_lo); pci_read_config_dword(pdev, regloc + 4, &reg_hi); - cxl_decode_register_block(reg_lo, reg_hi, &bar, &offset, - &reg_type); + map = &maps[n_maps]; + cxl_decode_regblock(reg_lo, reg_hi, map); /* Ignore unknown register block types */ - if (reg_type > CXL_REGLOC_RBI_MEMDEV) + if (map->reg_type > CXL_REGLOC_RBI_MEMDEV) continue; - base = cxl_pci_map_regblock(cxlm, bar, offset); + base = cxl_pci_map_regblock(pdev, map); if (!base) return -ENOMEM; - map = &maps[n_maps]; - map->barno = bar; - map->block_offset = offset; - map->reg_type = reg_type; - - ret = cxl_probe_regs(cxlm, base + offset, map); - - /* Always unmap the regblock regardless of probe success */ - cxl_pci_unmap_regblock(cxlm, base); - + ret = cxl_probe_regs(pdev, base + map->block_offset, map); + cxl_pci_unmap_regblock(pdev, base); if (ret) return ret; -- cgit v1.2.3 From a261e9a1576ab32966be907e73786282d52afb61 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Fri, 15 Oct 2021 14:57:27 -0700 Subject: cxl/pci: Add @base to cxl_register_map In addition to carrying @barno, @block_offset, and @reg_type, add @base to keep all map/unmap parameters in one object. The helpers cxl_{map,unmap}_regblock() handle adjusting @base to the @block_offset at map and unmap time. Document that @base incorporates @block_offset so that downstream consumers of a mapped cxl_register_map instance do not need to perform any fixups / can use @base directly. 
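In sketch form, the resulting contract for consumers (read_mbox_caps() is a hypothetical consumer, not part of the patch):

    /*
     * @base already incorporates @block_offset, so downstream users
     * index registers off map->base with no further adjustment.
     */
    static u32 read_mbox_caps(struct cxl_register_map *map)
    {
            return readl(map->base + CXLDEV_MBOX_CAPS_OFFSET);
    }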
Cc: Jonathan Cameron Reviewed-by: Ira Weiny Reviewed-by: Jonathan Cameron Link: https://lore.kernel.org/r/163433497228.889435.11271988238496181536.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams --- drivers/cxl/cxl.h | 10 ++++++++++ drivers/cxl/pci.c | 31 ++++++++++++++++--------------- 2 files changed, 26 insertions(+), 15 deletions(-) (limited to 'drivers/cxl') diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index a6687e7fd598..5e2e93451928 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -139,7 +139,17 @@ struct cxl_device_reg_map { struct cxl_reg_map memdev; }; +/** + * struct cxl_register_map - DVSEC harvested register block mapping parameters + * @base: virtual base of the register-block-BAR + @block_offset + * @block_offset: offset to start of register block in @barno + * @reg_type: see enum cxl_regloc_type + * @barno: PCI BAR number containing the register block + * @component_map: cxl_reg_map for component registers + * @device_map: cxl_reg_maps for device registers + */ struct cxl_register_map { + void __iomem *base; u64 block_offset; u8 reg_type; u8 barno; diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index eb0c2f1b9e65..7d5e5548b316 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -306,8 +306,7 @@ static int cxl_pci_setup_mailbox(struct cxl_mem *cxlm) return 0; } -static void __iomem *cxl_pci_map_regblock(struct pci_dev *pdev, - struct cxl_register_map *map) +static int cxl_map_regblock(struct pci_dev *pdev, struct cxl_register_map *map) { void __iomem *addr; int bar = map->barno; @@ -318,24 +317,27 @@ static void __iomem *cxl_pci_map_regblock(struct pci_dev *pdev, if (pci_resource_len(pdev, bar) < offset) { dev_err(dev, "BAR%d: %pr: too small (offset: %pa)\n", bar, &pdev->resource[bar], &offset); - return NULL; + return -ENXIO; } addr = pci_iomap(pdev, bar, 0); if (!addr) { dev_err(dev, "failed to map registers\n"); - return addr; + return -ENOMEM; } dev_dbg(dev, "Mapped CXL Memory Device resource bar %u @ %pa\n", bar, &offset); - return addr; + map->base = addr + map->block_offset; + return 0; } -static void cxl_pci_unmap_regblock(struct pci_dev *pdev, void __iomem *base) +static void cxl_unmap_regblock(struct pci_dev *pdev, + struct cxl_register_map *map) { - pci_iounmap(pdev, base); + pci_iounmap(pdev, map->base - map->block_offset); + map->base = NULL; } static int cxl_pci_dvsec(struct pci_dev *pdev, int dvsec) @@ -361,12 +363,12 @@ static int cxl_pci_dvsec(struct pci_dev *pdev, int dvsec) return 0; } -static int cxl_probe_regs(struct pci_dev *pdev, void __iomem *base, - struct cxl_register_map *map) +static int cxl_probe_regs(struct pci_dev *pdev, struct cxl_register_map *map) { struct cxl_component_reg_map *comp_map; struct cxl_device_reg_map *dev_map; struct device *dev = &pdev->dev; + void __iomem *base = map->base; switch (map->reg_type) { case CXL_REGLOC_RBI_COMPONENT: @@ -442,7 +444,6 @@ static void cxl_decode_regblock(u32 reg_lo, u32 reg_hi, */ static int cxl_pci_setup_regs(struct cxl_mem *cxlm) { - void __iomem *base; u32 regloc_size, regblocks; int regloc, i, n_maps, ret = 0; struct device *dev = cxlm->dev; @@ -475,12 +476,12 @@ static int cxl_pci_setup_regs(struct cxl_mem *cxlm) if (map->reg_type > CXL_REGLOC_RBI_MEMDEV) continue; - base = cxl_pci_map_regblock(pdev, map); - if (!base) - return -ENOMEM; + ret = cxl_map_regblock(pdev, map); + if (ret) + return ret; - ret = cxl_probe_regs(pdev, base + map->block_offset, map); - cxl_pci_unmap_regblock(pdev, base); + ret = cxl_probe_regs(pdev, map); + 
cxl_unmap_regblock(pdev, map); if (ret) return ret; -- cgit v1.2.3 From 85afc3175aeb100d72e59e3d0470ad75a0e26249 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Fri, 15 Oct 2021 16:30:42 -0700 Subject: cxl/pci: Split cxl_pci_setup_regs() In preparation for moving parts of register mapping to cxl_core, split cxl_pci_setup_regs() into a helper that finds register blocks (cxl_find_regblock()), and a generic wrapper that probes the precise register sets within a block (cxl_setup_regs()). Move the actual mapping (cxl_map_regs()) of the only register-set that cxl_pci cares about (memory device registers) up a level from the former cxl_pci_setup_regs() into cxl_pci_probe(). With this change, the unused component registers are no longer mapped, but the helpers are primed to move into the core. [djbw: drop cxl_map_regs() for component registers] Signed-off-by: Ben Widawsky [djbw: rebase on the cxl_register_map refactor] Reviewed-by: Ira Weiny Reviewed-by: Jonathan Cameron Link: https://lore.kernel.org/r/163434053788.914258.18412599112859205220.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams --- drivers/cxl/pci.c | 73 ++++++++++++++++++++++++++++--------------------------- 1 file changed, 37 insertions(+), 36 deletions(-) (limited to 'drivers/cxl') diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index 7d5e5548b316..691a4e59ad8b 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -433,72 +433,69 @@ static void cxl_decode_regblock(u32 reg_lo, u32 reg_hi, } /** - * cxl_pci_setup_regs() - Setup necessary MMIO. - * @cxlm: The CXL memory device to communicate with. + * cxl_find_regblock() - Locate register blocks by type + * @pdev: The CXL PCI device to enumerate. + * @type: Register Block Indicator id + * @map: Enumeration output, clobbered on error * - * Return: 0 if all necessary registers mapped. + * Return: 0 if register block enumerated, negative error code otherwise * - * A memory device is required by spec to implement a certain set of MMIO - * regions. The purpose of this function is to enumerate and map those - * registers. + * A CXL DVSEC may point to one or more register blocks, search for them + * by @type. 
*/ -static int cxl_pci_setup_regs(struct cxl_mem *cxlm) +static int cxl_find_regblock(struct pci_dev *pdev, enum cxl_regloc_type type, + struct cxl_register_map *map) { u32 regloc_size, regblocks; - int regloc, i, n_maps, ret = 0; - struct device *dev = cxlm->dev; - struct pci_dev *pdev = to_pci_dev(dev); - struct cxl_register_map *map, maps[CXL_REGLOC_RBI_TYPES]; + int regloc, i; regloc = cxl_pci_dvsec(pdev, PCI_DVSEC_ID_CXL_REGLOC_DVSEC_ID); - if (!regloc) { - dev_err(dev, "register location dvsec not found\n"); + if (!regloc) return -ENXIO; - } - /* Get the size of the Register Locator DVSEC */ pci_read_config_dword(pdev, regloc + PCI_DVSEC_HEADER1, ®loc_size); regloc_size = FIELD_GET(PCI_DVSEC_HEADER1_LENGTH_MASK, regloc_size); regloc += PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET; regblocks = (regloc_size - PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET) / 8; - for (i = 0, n_maps = 0; i < regblocks; i++, regloc += 8) { + for (i = 0; i < regblocks; i++, regloc += 8) { u32 reg_lo, reg_hi; pci_read_config_dword(pdev, regloc, ®_lo); pci_read_config_dword(pdev, regloc + 4, ®_hi); - map = &maps[n_maps]; cxl_decode_regblock(reg_lo, reg_hi, map); - /* Ignore unknown register block types */ - if (map->reg_type > CXL_REGLOC_RBI_MEMDEV) - continue; + if (map->reg_type == type) + return 0; + } - ret = cxl_map_regblock(pdev, map); - if (ret) - return ret; + return -ENODEV; +} - ret = cxl_probe_regs(pdev, map); - cxl_unmap_regblock(pdev, map); - if (ret) - return ret; +static int cxl_setup_regs(struct pci_dev *pdev, enum cxl_regloc_type type, + struct cxl_register_map *map) +{ + int rc; - n_maps++; - } + rc = cxl_find_regblock(pdev, type, map); + if (rc) + return rc; - for (i = 0; i < n_maps; i++) { - ret = cxl_map_regs(cxlm, &maps[i]); - if (ret) - break; - } + rc = cxl_map_regblock(pdev, map); + if (rc) + return rc; + + rc = cxl_probe_regs(pdev, map); + cxl_unmap_regblock(pdev, map); - return ret; + return rc; } static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { + struct cxl_register_map map; struct cxl_memdev *cxlmd; struct cxl_mem *cxlm; int rc; @@ -518,7 +515,11 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (IS_ERR(cxlm)) return PTR_ERR(cxlm); - rc = cxl_pci_setup_regs(cxlm); + rc = cxl_setup_regs(pdev, CXL_REGLOC_RBI_MEMDEV, &map); + if (rc) + return rc; + + rc = cxl_map_regs(cxlm, &map); if (rc) return rc; -- cgit v1.2.3 From 55006a2c94645b4da85dea48c3752c4eb28f1711 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Sat, 9 Oct 2021 09:44:45 -0700 Subject: cxl/pci: Use pci core's DVSEC functionality Reduce maintenance burden of DVSEC query implementation by using the centralized PCI core implementation. 
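For reference, the core helper's contract as relied on below: pci_find_dvsec_capability() returns the config space offset of the extended capability whose vendor ID and DVSEC ID both match, or 0 when no such capability exists (0 can never be a valid offset, since extended capabilities start at 0x100). A usage sketch mirroring the hunk below:

    u16 regloc;

    /* Walks the PCIe extended capability list; 0 means "not found". */
    regloc = pci_find_dvsec_capability(pdev, PCI_DVSEC_VENDOR_ID_CXL,
                                       PCI_DVSEC_ID_CXL_REGLOC_DVSEC_ID);
    if (!regloc)
            return -ENXIO;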
Signed-off-by: Ben Widawsky [djbw: kill cxl_pci_dvsec()] Reviewed-by: Jonathan Cameron Link: https://lore.kernel.org/r/163379788528.692348.11581080806976608802.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams --- drivers/cxl/pci.c | 26 ++------------------------ 1 file changed, 2 insertions(+), 24 deletions(-) (limited to 'drivers/cxl') diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index 691a4e59ad8b..c734e21fb4e0 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -340,29 +340,6 @@ static void cxl_unmap_regblock(struct pci_dev *pdev, map->base = NULL; } -static int cxl_pci_dvsec(struct pci_dev *pdev, int dvsec) -{ - int pos; - - pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DVSEC); - if (!pos) - return 0; - - while (pos) { - u16 vendor, id; - - pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER1, &vendor); - pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER2, &id); - if (vendor == PCI_DVSEC_VENDOR_ID_CXL && dvsec == id) - return pos; - - pos = pci_find_next_ext_capability(pdev, pos, - PCI_EXT_CAP_ID_DVSEC); - } - - return 0; -} - static int cxl_probe_regs(struct pci_dev *pdev, struct cxl_register_map *map) { struct cxl_component_reg_map *comp_map; @@ -449,7 +426,8 @@ static int cxl_find_regblock(struct pci_dev *pdev, enum cxl_regloc_type type, u32 regloc_size, regblocks; int regloc, i; - regloc = cxl_pci_dvsec(pdev, PCI_DVSEC_ID_CXL_REGLOC_DVSEC_ID); + regloc = pci_find_dvsec_capability(pdev, PCI_DVSEC_VENDOR_ID_CXL, + PCI_DVSEC_ID_CXL_REGLOC_DVSEC_ID); if (!regloc) return -ENXIO; -- cgit v1.2.3
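One pattern recurs throughout the series above (cxld_unregister(), cxl_nvd_unregister(), cxl_decoder_autoremove()) and is worth a generic sketch: devm_add_action_or_reset() ties device_unregister() to the lifetime of the host's driver binding, and if recording the action fails it invokes the action immediately, so no error path leaks a registered device. The obj_* names below are hypothetical:

    static void obj_unregister(void *dev)
    {
            device_unregister(dev);
    }

    static int obj_autoremove(struct device *host, struct device *dev)
    {
            /* On failure to record the action, @dev is unregistered now. */
            return devm_add_action_or_reset(host, obj_unregister, dev);
    }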