author     Linus Torvalds  2020-03-31 16:50:25 -0700
committer  Linus Torvalds  2020-03-31 16:50:25 -0700
commit     56a451b780676bc1cdac011735fe2869fa2e9abf (patch)
tree       7de92d7978a211028632eb0b116aff4f5d0abdb6
parent     dba43fc4ba2fed63e898867fa973c69c37623939 (diff)
parent     b350f0a3eb264962caefeb892af56c1b727ee03f (diff)
Merge tag 'ntb-5.7' of git://github.com/jonmason/ntb
Pull NTB updates from Jon Mason:
"Bug fixes for a few printing issues, link status detection bug on AMD
hardware, and a DMA address issue with ntb_perf.
Also, large series of AMD NTB patches"
* tag 'ntb-5.7' of git://github.com/jonmason/ntb: (21 commits)
NTB: add pci shutdown handler for AMD NTB
NTB: send DB event when driver is loaded or un-loaded
NTB: remove redundant setting of DB valid mask
NTB: return link up status correctly for PRI and SEC
NTB: add helper functions to set and clear sideinfo
NTB: move ntb_ctrl handling to init and deinit
NTB: handle link up, D0 and D3 events correctly
NTB: handle link down event correctly
NTB: remove handling of peer_sta from amd_link_is_up
NTB: set peer_sta within event handler itself
NTB: return the side info status from amd_poll_link
NTB: define a new function to get link status
NTB: Enable link up and down event notification
NTB: clear interrupt status register
NTB: Fix access to link status and control register
MAINTAINERS: update maintainer list for AMD NTB driver
NTB: ntb_transport: Use scnprintf() for avoiding potential buffer overflow
ntb_hw_switchtec: Fix ntb_mw_clear_trans error if size == 0
ntb_tool: Fix printk format
NTB: ntb_perf: Fix address err in perf_copy_chunk
...
-rw-r--r--  MAINTAINERS                              |   1
-rw-r--r--  drivers/ntb/hw/amd/ntb_hw_amd.c          | 290
-rw-r--r--  drivers/ntb/hw/amd/ntb_hw_amd.h          |   8
-rw-r--r--  drivers/ntb/hw/mscc/ntb_hw_switchtec.c   |   2
-rw-r--r--  drivers/ntb/ntb_transport.c              |  58
-rw-r--r--  drivers/ntb/test/ntb_perf.c              |  57
-rw-r--r--  drivers/ntb/test/ntb_tool.c              |  14
7 files changed, 332 insertions(+), 98 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 1b718c05ad3e..a2b9bbf6e8b2 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -11896,6 +11896,7 @@ F:	scripts/nsdeps
 F:	Documentation/core-api/symbol-namespaces.rst
 
 NTB AMD DRIVER
+M:	Sanjay R Mehta <sanju.mehta@amd.com>
 M:	Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
 L:	linux-ntb@googlegroups.com
 S:	Supported
diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c
index e52b300b2f5b..9e310e1ad4d0 100644
--- a/drivers/ntb/hw/amd/ntb_hw_amd.c
+++ b/drivers/ntb/hw/amd/ntb_hw_amd.c
@@ -195,26 +195,117 @@ static int amd_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
 	return 0;
 }
 
-static int amd_link_is_up(struct amd_ntb_dev *ndev)
+static int amd_ntb_get_link_status(struct amd_ntb_dev *ndev)
 {
-	if (!ndev->peer_sta)
-		return NTB_LNK_STA_ACTIVE(ndev->cntl_sta);
+	struct pci_dev *pdev = NULL;
+	struct pci_dev *pci_swds = NULL;
+	struct pci_dev *pci_swus = NULL;
+	u32 stat;
+	int rc;
 
-	if (ndev->peer_sta & AMD_LINK_UP_EVENT) {
-		ndev->peer_sta = 0;
-		return 1;
+	if (ndev->ntb.topo == NTB_TOPO_SEC) {
+		/* Locate the pointer to Downstream Switch for this device */
+		pci_swds = pci_upstream_bridge(ndev->ntb.pdev);
+		if (pci_swds) {
+			/*
+			 * Locate the pointer to Upstream Switch for
+			 * the Downstream Switch.
+			 */
+			pci_swus = pci_upstream_bridge(pci_swds);
+			if (pci_swus) {
+				rc = pcie_capability_read_dword(pci_swus,
+								PCI_EXP_LNKCTL,
+								&stat);
+				if (rc)
+					return 0;
+			} else {
+				return 0;
+			}
+		} else {
+			return 0;
+		}
+	} else if (ndev->ntb.topo == NTB_TOPO_PRI) {
+		/*
+		 * For NTB primary, we simply read the Link Status and control
+		 * register of the NTB device itself.
+		 */
+		pdev = ndev->ntb.pdev;
+		rc = pcie_capability_read_dword(pdev, PCI_EXP_LNKCTL, &stat);
+		if (rc)
+			return 0;
+	} else {
+		/* Catch all for everything else */
+		return 0;
 	}
 
-	/* If peer_sta is reset or D0 event, the ISR has
-	 * started a timer to check link status of hardware.
-	 * So here just clear status bit. And if peer_sta is
-	 * D3 or PME_TO, D0/reset event will be happened when
-	 * system wakeup/poweron, so do nothing here.
+	ndev->lnk_sta = stat;
+
+	return 1;
+}
+
+static int amd_link_is_up(struct amd_ntb_dev *ndev)
+{
+	int ret;
+
+	/*
+	 * We consider the link to be up under two conditions:
+	 *
+	 * - When a link-up event is received. This is indicated by
+	 *   AMD_LINK_UP_EVENT set in peer_sta.
+	 * - When driver on both sides of the link have been loaded.
+	 *   This is indicated by bit 1 being set in the peer
+	 *   SIDEINFO register.
+	 *
+	 * This function should return 1 when the latter of the above
+	 * two conditions is true.
+	 *
+	 * Now consider the sequence of events - Link-Up event occurs,
+	 * then the peer side driver loads. In this case, we would have
+	 * received LINK_UP event and bit 1 of peer SIDEINFO is also
+	 * set. What happens now if the link goes down? Bit 1 of
+	 * peer SIDEINFO remains set, but LINK_DOWN bit is set in
+	 * peer_sta. So we should return 0 from this function. Not only
+	 * that, we clear bit 1 of peer SIDEINFO to 0, since the peer
+	 * side driver did not even get a chance to clear it before
+	 * the link went down. This can be the case of surprise link
+	 * removal.
+	 *
+	 * LINK_UP event will always occur before the peer side driver
+	 * gets loaded the very first time. So there can be a case when
+	 * the LINK_UP event has occurred, but the peer side driver hasn't
+	 * yet loaded. We return 0 in that case.
+	 *
+	 * There is also a special case when the primary side driver is
+	 * unloaded and then loaded again. Since there is no change in
+	 * the status of NTB secondary in this case, there is no Link-Up
+	 * or Link-Down notification received. We recognize this condition
+	 * with peer_sta being set to 0.
+	 *
+	 * If bit 1 of peer SIDEINFO register is not set, then we
+	 * simply return 0 irrespective of the link up or down status
+	 * set in peer_sta.
 	 */
-	if (ndev->peer_sta & AMD_PEER_RESET_EVENT)
-		ndev->peer_sta &= ~AMD_PEER_RESET_EVENT;
-	else if (ndev->peer_sta & (AMD_PEER_D0_EVENT | AMD_LINK_DOWN_EVENT))
-		ndev->peer_sta = 0;
+	ret = amd_poll_link(ndev);
+	if (ret) {
+		/*
+		 * We need to check the below only for NTB primary. For NTB
+		 * secondary, simply checking the result of PSIDE_INFO
+		 * register will suffice.
+		 */
+		if (ndev->ntb.topo == NTB_TOPO_PRI) {
+			if ((ndev->peer_sta & AMD_LINK_UP_EVENT) ||
+			    (ndev->peer_sta == 0))
+				return ret;
+			else if (ndev->peer_sta & AMD_LINK_DOWN_EVENT) {
+				/* Clear peer sideinfo register */
+				amd_clear_side_info_reg(ndev, true);
+
+				return 0;
+			}
+		} else { /* NTB_TOPO_SEC */
+			return ret;
+		}
+	}
 
 	return 0;
 }
@@ -253,7 +344,6 @@ static int amd_ntb_link_enable(struct ntb_dev *ntb,
 {
 	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
 	void __iomem *mmio = ndev->self_mmio;
-	u32 ntb_ctl;
 
 	/* Enable event interrupt */
 	ndev->int_mask &= ~AMD_EVENT_INTMASK;
@@ -263,10 +353,6 @@ static int amd_ntb_link_enable(struct ntb_dev *ntb,
 		return -EINVAL;
 
 	dev_dbg(&ntb->pdev->dev, "Enabling Link.\n");
-	ntb_ctl = readl(mmio + AMD_CNTL_OFFSET);
-	ntb_ctl |= (PMM_REG_CTL | SMM_REG_CTL);
-	writel(ntb_ctl, mmio + AMD_CNTL_OFFSET);
-
 	return 0;
 }
@@ -274,7 +360,6 @@ static int amd_ntb_link_disable(struct ntb_dev *ntb)
 {
 	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
 	void __iomem *mmio = ndev->self_mmio;
-	u32 ntb_ctl;
 
 	/* Disable event interrupt */
 	ndev->int_mask |= AMD_EVENT_INTMASK;
@@ -284,10 +369,6 @@ static int amd_ntb_link_disable(struct ntb_dev *ntb)
 		return -EINVAL;
 
 	dev_dbg(&ntb->pdev->dev, "Enabling Link.\n");
-	ntb_ctl = readl(mmio + AMD_CNTL_OFFSET);
-	ntb_ctl &= ~(PMM_REG_CTL | SMM_REG_CTL);
-	writel(ntb_ctl, mmio + AMD_CNTL_OFFSET);
-
 	return 0;
 }
@@ -493,8 +574,6 @@ static void amd_ack_smu(struct amd_ntb_dev *ndev, u32 bit)
 	reg = readl(mmio + AMD_SMUACK_OFFSET);
 	reg |= bit;
 	writel(reg, mmio + AMD_SMUACK_OFFSET);
-
-	ndev->peer_sta |= bit;
 }
 
 static void amd_handle_event(struct amd_ntb_dev *ndev, int vec)
@@ -512,10 +591,16 @@ static void amd_handle_event(struct amd_ntb_dev *ndev, int vec)
 	status &= AMD_EVENT_INTMASK;
 	switch (status) {
 	case AMD_PEER_FLUSH_EVENT:
+		ndev->peer_sta |= AMD_PEER_FLUSH_EVENT;
 		dev_info(dev, "Flush is done.\n");
 		break;
 	case AMD_PEER_RESET_EVENT:
-		amd_ack_smu(ndev, AMD_PEER_RESET_EVENT);
+	case AMD_LINK_DOWN_EVENT:
+		ndev->peer_sta |= status;
+		if (status == AMD_LINK_DOWN_EVENT)
+			ndev->peer_sta &= ~AMD_LINK_UP_EVENT;
+
+		amd_ack_smu(ndev, status);
 
 		/* link down first */
 		ntb_link_event(&ndev->ntb);
@@ -526,7 +611,12 @@ static void amd_handle_event(struct amd_ntb_dev *ndev, int vec)
 	case AMD_PEER_D3_EVENT:
 	case AMD_PEER_PMETO_EVENT:
 	case AMD_LINK_UP_EVENT:
-	case AMD_LINK_DOWN_EVENT:
+		ndev->peer_sta |= status;
+		if (status == AMD_LINK_UP_EVENT)
+			ndev->peer_sta &= ~AMD_LINK_DOWN_EVENT;
+		else if (status == AMD_PEER_D3_EVENT)
+			ndev->peer_sta &= ~AMD_PEER_D0_EVENT;
+
 		amd_ack_smu(ndev, status);
 
 		/* link down */
@@ -540,6 +630,8 @@ static void amd_handle_event(struct amd_ntb_dev *ndev, int vec)
 		if (status & 0x1)
 			dev_info(dev, "Wakeup is done.\n");
 
+		ndev->peer_sta |= AMD_PEER_D0_EVENT;
+		ndev->peer_sta &= ~AMD_PEER_D3_EVENT;
 		amd_ack_smu(ndev, AMD_PEER_D0_EVENT);
 
 		/* start a timer to poll link status */
@@ -550,6 +642,39 @@ static void amd_handle_event(struct amd_ntb_dev *ndev, int vec)
 		dev_info(dev, "event status = 0x%x.\n", status);
 		break;
 	}
+
+	/* Clear the interrupt status */
+	writel(status, mmio + AMD_INTSTAT_OFFSET);
+}
+
+static void amd_handle_db_event(struct amd_ntb_dev *ndev, int vec)
+{
+	struct device *dev = &ndev->ntb.pdev->dev;
+	u64 status;
+
+	status = amd_ntb_db_read(&ndev->ntb);
+
+	dev_dbg(dev, "status = 0x%llx and vec = %d\n", status, vec);
+
+	/*
+	 * Since we had reserved highest order bit of DB for signaling peer of
+	 * a special event, this is the only status bit we should be concerned
+	 * here now.
+	 */
+	if (status & BIT(ndev->db_last_bit)) {
+		ntb_db_clear(&ndev->ntb, BIT(ndev->db_last_bit));
+		/* send link down event notification */
+		ntb_link_event(&ndev->ntb);
+
+		/*
+		 * If we are here, that means the peer has signalled a special
+		 * event which notifies that the peer driver has been
+		 * un-loaded for some reason. Since there is a chance that the
+		 * peer will load its driver again sometime, we schedule link
+		 * polling routine.
+		 */
+		schedule_delayed_work(&ndev->hb_timer, AMD_LINK_HB_TIMEOUT);
+	}
 }
 
 static irqreturn_t ndev_interrupt(struct amd_ntb_dev *ndev, int vec)
@@ -559,8 +684,10 @@
 	if (vec > (AMD_DB_CNT - 1) || (ndev->msix_vec_count == 1))
 		amd_handle_event(ndev, vec);
 
-	if (vec < AMD_DB_CNT)
+	if (vec < AMD_DB_CNT) {
+		amd_handle_db_event(ndev, vec);
 		ntb_db_event(&ndev->ntb, vec);
+	}
 
 	return IRQ_HANDLED;
 }
@@ -842,26 +969,18 @@ static inline void ndev_init_struct(struct amd_ntb_dev *ndev,
 static int amd_poll_link(struct amd_ntb_dev *ndev)
 {
 	void __iomem *mmio = ndev->peer_mmio;
-	u32 reg, stat;
-	int rc;
+	u32 reg;
 
 	reg = readl(mmio + AMD_SIDEINFO_OFFSET);
-	reg &= NTB_LIN_STA_ACTIVE_BIT;
+	reg &= AMD_SIDE_READY;
 
 	dev_dbg(&ndev->ntb.pdev->dev, "%s: reg_val = 0x%x.\n", __func__, reg);
 
-	if (reg == ndev->cntl_sta)
-		return 0;
-
 	ndev->cntl_sta = reg;
 
-	rc = pci_read_config_dword(ndev->ntb.pdev,
-				   AMD_LINK_STATUS_OFFSET, &stat);
-	if (rc)
-		return 0;
-	ndev->lnk_sta = stat;
+	amd_ntb_get_link_status(ndev);
 
-	return 1;
+	return ndev->cntl_sta;
 }
 
 static void amd_link_hb(struct work_struct *work)
@@ -880,11 +999,16 @@ static int amd_init_isr(struct amd_ntb_dev *ndev)
 	return ndev_init_isr(ndev, AMD_DB_CNT, AMD_MSIX_VECTOR_CNT);
 }
 
-static void amd_init_side_info(struct amd_ntb_dev *ndev)
+static void amd_set_side_info_reg(struct amd_ntb_dev *ndev, bool peer)
 {
-	void __iomem *mmio = ndev->self_mmio;
+	void __iomem *mmio = NULL;
 	unsigned int reg;
 
+	if (peer)
+		mmio = ndev->peer_mmio;
+	else
+		mmio = ndev->self_mmio;
+
 	reg = readl(mmio + AMD_SIDEINFO_OFFSET);
 	if (!(reg & AMD_SIDE_READY)) {
 		reg |= AMD_SIDE_READY;
@@ -892,11 +1016,16 @@
 	}
 }
 
-static void amd_deinit_side_info(struct amd_ntb_dev *ndev)
+static void amd_clear_side_info_reg(struct amd_ntb_dev *ndev, bool peer)
 {
-	void __iomem *mmio = ndev->self_mmio;
+	void __iomem *mmio = NULL;
 	unsigned int reg;
 
+	if (peer)
+		mmio = ndev->peer_mmio;
+	else
+		mmio = ndev->self_mmio;
+
 	reg = readl(mmio + AMD_SIDEINFO_OFFSET);
 	if (reg & AMD_SIDE_READY) {
 		reg &= ~AMD_SIDE_READY;
@@ -905,6 +1034,30 @@
 	}
 }
 
+static void amd_init_side_info(struct amd_ntb_dev *ndev)
+{
+	void __iomem *mmio = ndev->self_mmio;
+	u32 ntb_ctl;
+
+	amd_set_side_info_reg(ndev, false);
+
+	ntb_ctl = readl(mmio + AMD_CNTL_OFFSET);
+	ntb_ctl |= (PMM_REG_CTL | SMM_REG_CTL);
+	writel(ntb_ctl, mmio + AMD_CNTL_OFFSET);
+}
+
+static void amd_deinit_side_info(struct amd_ntb_dev *ndev)
+{
+	void __iomem *mmio = ndev->self_mmio;
+	u32 ntb_ctl;
+
+	amd_clear_side_info_reg(ndev, false);
+
+	ntb_ctl = readl(mmio + AMD_CNTL_OFFSET);
+	ntb_ctl &= ~(PMM_REG_CTL | SMM_REG_CTL);
+	writel(ntb_ctl, mmio + AMD_CNTL_OFFSET);
+}
+
 static int amd_init_ntb(struct amd_ntb_dev *ndev)
 {
 	void __iomem *mmio = ndev->self_mmio;
@@ -935,8 +1088,6 @@ static int amd_init_ntb(struct amd_ntb_dev *ndev)
 		return -EINVAL;
 	}
 
-	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
-
 	/* Mask event interrupts */
 	writel(ndev->int_mask, mmio + AMD_INTMASK_OFFSET);
@@ -957,6 +1108,7 @@ static enum ntb_topo amd_get_topo(struct amd_ntb_dev *ndev)
 
 static int amd_init_dev(struct amd_ntb_dev *ndev)
 {
+	void __iomem *mmio = ndev->self_mmio;
 	struct pci_dev *pdev;
 	int rc = 0;
@@ -977,6 +1129,25 @@ static int amd_init_dev(struct amd_ntb_dev *ndev)
 	}
 
 	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
+	/*
+	 * We reserve the highest order bit of the DB register which will
+	 * be used to notify peer when the driver on this side is being
+	 * un-loaded.
+	 */
+	ndev->db_last_bit =
+			find_last_bit((unsigned long *)&ndev->db_valid_mask,
+				      hweight64(ndev->db_valid_mask));
+	writew((u16)~BIT(ndev->db_last_bit), mmio + AMD_DBMASK_OFFSET);
+	/*
+	 * Since now there is one less bit to account for, the DB count
+	 * and DB mask should be adjusted accordingly.
+	 */
+	ndev->db_count -= 1;
+	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
+
+	/* Enable Link-Up and Link-Down event interrupts */
+	ndev->int_mask &= ~(AMD_LINK_UP_EVENT | AMD_LINK_DOWN_EVENT);
+	writel(ndev->int_mask, mmio + AMD_INTMASK_OFFSET);
 
 	return 0;
 }
@@ -1111,9 +1282,31 @@ static void amd_ntb_pci_remove(struct pci_dev *pdev)
 {
 	struct amd_ntb_dev *ndev = pci_get_drvdata(pdev);
 
+	/*
+	 * Clear the READY bit in SIDEINFO register before sending DB event
+	 * to the peer. This will make sure that when the peer handles the
+	 * DB event, it correctly reads this bit as being 0.
+	 */
+	amd_deinit_side_info(ndev);
+	ntb_peer_db_set(&ndev->ntb, BIT_ULL(ndev->db_last_bit));
 	ntb_unregister_device(&ndev->ntb);
 	ndev_deinit_debugfs(ndev);
+	amd_deinit_dev(ndev);
+	amd_ntb_deinit_pci(ndev);
+	kfree(ndev);
+}
+
+static void amd_ntb_pci_shutdown(struct pci_dev *pdev)
+{
+	struct amd_ntb_dev *ndev = pci_get_drvdata(pdev);
+
+	/* Send link down notification */
+	ntb_link_event(&ndev->ntb);
+	amd_deinit_side_info(ndev);
+	ntb_peer_db_set(&ndev->ntb, BIT_ULL(ndev->db_last_bit));
+	ntb_unregister_device(&ndev->ntb);
+	ndev_deinit_debugfs(ndev);
 	amd_deinit_dev(ndev);
 	amd_ntb_deinit_pci(ndev);
 	kfree(ndev);
 }
@@ -1149,6 +1342,7 @@ static struct pci_driver amd_ntb_pci_driver = {
 	.id_table = amd_ntb_pci_tbl,
 	.probe = amd_ntb_pci_probe,
 	.remove = amd_ntb_pci_remove,
+	.shutdown = amd_ntb_pci_shutdown,
 };
 
 static int __init amd_ntb_pci_driver_init(void)
diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.h b/drivers/ntb/hw/amd/ntb_hw_amd.h
index 139a307147bc..5f337b1572a0 100644
--- a/drivers/ntb/hw/amd/ntb_hw_amd.h
+++ b/drivers/ntb/hw/amd/ntb_hw_amd.h
@@ -53,11 +53,8 @@
 #include <linux/pci.h>
 
 #define AMD_LINK_HB_TIMEOUT	msecs_to_jiffies(1000)
-#define AMD_LINK_STATUS_OFFSET	0x68
-#define NTB_LIN_STA_ACTIVE_BIT	0x00000002
 #define NTB_LNK_STA_SPEED_MASK	0x000F0000
 #define NTB_LNK_STA_WIDTH_MASK	0x03F00000
-#define NTB_LNK_STA_ACTIVE(x)	(!!((x) & NTB_LIN_STA_ACTIVE_BIT))
 #define NTB_LNK_STA_SPEED(x)	(((x) & NTB_LNK_STA_SPEED_MASK) >> 16)
 #define NTB_LNK_STA_WIDTH(x)	(((x) & NTB_LNK_STA_WIDTH_MASK) >> 20)
@@ -196,6 +193,7 @@ struct amd_ntb_dev {
 	u64 db_valid_mask;
 	u64 db_mask;
+	u64 db_last_bit;
 	u32 int_mask;
 
 	struct msix_entry *msix;
@@ -218,4 +216,8 @@ struct amd_ntb_dev {
 #define ntb_ndev(__ntb) container_of(__ntb, struct amd_ntb_dev, ntb)
 #define hb_ndev(__work) container_of(__work, struct amd_ntb_dev, hb_timer.work)
 
+static void amd_set_side_info_reg(struct amd_ntb_dev *ndev, bool peer);
+static void amd_clear_side_info_reg(struct amd_ntb_dev *ndev, bool peer);
+static int amd_poll_link(struct amd_ntb_dev *ndev);
+
 #endif
diff --git a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
index 86ffa716eaf2..4c6eb61a6ac6 100644
--- a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
+++ b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
@@ -285,7 +285,7 @@ static int switchtec_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
 	if (widx >= switchtec_ntb_mw_count(ntb, pidx))
 		return -EINVAL;
 
-	if (xlate_pos < 12)
+	if (size != 0 && xlate_pos < 12)
 		return -EINVAL;
 
 	if (!IS_ALIGNED(addr, BIT_ULL(xlate_pos))) {
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 00a5d5764993..e6d1f5b298f3 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -481,70 +481,70 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
 		return -ENOMEM;
 
 	out_offset = 0;
-	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
 			       "\nNTB QP stats:\n\n");
-	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
 			       "rx_bytes - \t%llu\n", qp->rx_bytes);
-	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
 			       "rx_pkts - \t%llu\n", qp->rx_pkts);
-	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
 			       "rx_memcpy - \t%llu\n", qp->rx_memcpy);
-	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
 			       "rx_async - \t%llu\n", qp->rx_async);
-	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
 			       "rx_ring_empty - %llu\n", qp->rx_ring_empty);
-	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
 			       "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
-	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
 			       "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
-	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
 			       "rx_err_ver - \t%llu\n", qp->rx_err_ver);
-	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
 			       "rx_buff - \t0x%p\n", qp->rx_buff);
-	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
 			       "rx_index - \t%u\n", qp->rx_index);
-	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
 			       "rx_max_entry - \t%u\n", qp->rx_max_entry);
-	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
 			       "rx_alloc_entry - \t%u\n\n", qp->rx_alloc_entry);
 
-	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
 			       "tx_bytes - \t%llu\n", qp->tx_bytes);
-	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
 			       "tx_pkts - \t%llu\n", qp->tx_pkts);
-	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
 			       "tx_memcpy - \t%llu\n", qp->tx_memcpy);
-	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
 			       "tx_async - \t%llu\n", qp->tx_async);
-	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
 			       "tx_ring_full - \t%llu\n", qp->tx_ring_full);
-	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
 			       "tx_err_no_buf - %llu\n", qp->tx_err_no_buf);
-	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
 			       "tx_mw - \t0x%p\n", qp->tx_mw);
-	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
 			       "tx_index (H) - \t%u\n", qp->tx_index);
-	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
 			       "RRI (T) - \t%u\n", qp->remote_rx_info->entry);
-	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
 			       "tx_max_entry - \t%u\n", qp->tx_max_entry);
-	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
 			       "free tx - \t%u\n",
 			       ntb_transport_tx_free_entry(qp));
 
-	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
 			       "\n");
-	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
 			       "Using TX DMA - \t%s\n",
 			       qp->tx_dma_chan ? "Yes" : "No");
-	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
 			       "Using RX DMA - \t%s\n",
 			       qp->rx_dma_chan ? "Yes" : "No");
-	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
 			       "QP Link - \t%s\n",
 			       qp->link_is_up ? "Up" : "Down");
-	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
 			       "\n");
 
 	if (out_offset > out_count)
diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
index e9b7c2dfc730..972f6d984f6d 100644
--- a/drivers/ntb/test/ntb_perf.c
+++ b/drivers/ntb/test/ntb_perf.c
@@ -149,7 +149,8 @@ struct perf_peer {
 	u64 outbuf_xlat;
 	resource_size_t outbuf_size;
 	void __iomem *outbuf;
-
+	phys_addr_t out_phys_addr;
+	dma_addr_t dma_dst_addr;
 	/* Inbound MW params */
 	dma_addr_t inbuf_xlat;
 	resource_size_t inbuf_size;
@@ -782,6 +783,10 @@ static int perf_copy_chunk(struct perf_thread *pthr,
 	struct dmaengine_unmap_data *unmap;
 	struct device *dma_dev;
 	int try = 0, ret = 0;
+	struct perf_peer *peer = pthr->perf->test_peer;
+	void __iomem *vbase;
+	void __iomem *dst_vaddr;
+	dma_addr_t dst_dma_addr;
 
 	if (!use_dma) {
 		memcpy_toio(dst, src, len);
@@ -794,6 +799,10 @@ static int perf_copy_chunk(struct perf_thread *pthr,
 				    offset_in_page(dst), len))
 		return -EIO;
 
+	vbase = peer->outbuf;
+	dst_vaddr = dst;
+	dst_dma_addr = peer->dma_dst_addr + (dst_vaddr - vbase);
+
 	unmap = dmaengine_get_unmap_data(dma_dev, 2, GFP_NOWAIT);
 	if (!unmap)
 		return -ENOMEM;
@@ -807,8 +816,7 @@ static int perf_copy_chunk(struct perf_thread *pthr,
 	}
 	unmap->to_cnt = 1;
 
-	unmap->addr[1] = dma_map_page(dma_dev, virt_to_page(dst),
-				      offset_in_page(dst), len, DMA_FROM_DEVICE);
+	unmap->addr[1] = dst_dma_addr;
 	if (dma_mapping_error(dma_dev, unmap->addr[1])) {
 		ret = -EIO;
 		goto err_free_resource;
 	}
@@ -865,6 +873,7 @@ static int perf_init_test(struct perf_thread *pthr)
 {
 	struct perf_ctx *perf = pthr->perf;
 	dma_cap_mask_t dma_mask;
+	struct perf_peer *peer = pthr->perf->test_peer;
 
 	pthr->src = kmalloc_node(perf->test_peer->outbuf_size, GFP_KERNEL,
 				 dev_to_node(&perf->ntb->dev));
@@ -882,15 +891,33 @@ static int perf_init_test(struct perf_thread *pthr)
 	if (!pthr->dma_chan) {
 		dev_err(&perf->ntb->dev, "%d: Failed to get DMA channel\n",
 			pthr->tidx);
-		atomic_dec(&perf->tsync);
-		wake_up(&perf->twait);
-		kfree(pthr->src);
-		return -ENODEV;
+		goto err_free;
 	}
+	peer->dma_dst_addr =
+		dma_map_resource(pthr->dma_chan->device->dev,
+				 peer->out_phys_addr, peer->outbuf_size,
+				 DMA_FROM_DEVICE, 0);
+	if (dma_mapping_error(pthr->dma_chan->device->dev,
+			      peer->dma_dst_addr)) {
+		dev_err(pthr->dma_chan->device->dev, "%d: Failed to map DMA addr\n",
+			pthr->tidx);
+		peer->dma_dst_addr = 0;
+		dma_release_channel(pthr->dma_chan);
+		goto err_free;
+	}
+	dev_dbg(pthr->dma_chan->device->dev, "%d: Map MMIO %pa to DMA addr %pad\n",
+		pthr->tidx,
+		&peer->out_phys_addr,
+		&peer->dma_dst_addr);
 
 	atomic_set(&pthr->dma_sync, 0);
-
 	return 0;
+
+err_free:
+	atomic_dec(&perf->tsync);
+	wake_up(&perf->twait);
+	kfree(pthr->src);
+	return -ENODEV;
 }
 
 static int perf_run_test(struct perf_thread *pthr)
@@ -978,8 +1005,13 @@ static void perf_clear_test(struct perf_thread *pthr)
 	 * We call it anyway just to be sure of the transfers completion.
 	 */
 	(void)dmaengine_terminate_sync(pthr->dma_chan);
-
-	dma_release_channel(pthr->dma_chan);
+	if (pthr->perf->test_peer->dma_dst_addr)
+		dma_unmap_resource(pthr->dma_chan->device->dev,
+				   pthr->perf->test_peer->dma_dst_addr,
+				   pthr->perf->test_peer->outbuf_size,
+				   DMA_FROM_DEVICE, 0);
+	if (pthr->dma_chan)
+		dma_release_channel(pthr->dma_chan);
 
 no_dma_notify:
 	atomic_dec(&perf->tsync);
@@ -1195,6 +1227,9 @@ static ssize_t perf_dbgfs_read_info(struct file *filep, char __user *ubuf,
 			"\tOut buffer addr 0x%pK\n", peer->outbuf);
 
 		pos += scnprintf(buf + pos, buf_size - pos,
+			"\tOut buff phys addr %pa[p]\n", &peer->out_phys_addr);
+
+		pos += scnprintf(buf + pos, buf_size - pos,
 			"\tOut buffer size %pa\n", &peer->outbuf_size);
 
 		pos += scnprintf(buf + pos, buf_size - pos,
@@ -1388,6 +1423,8 @@ static int perf_setup_peer_mw(struct perf_peer *peer)
 	if (!peer->outbuf)
 		return -ENOMEM;
 
+	peer->out_phys_addr = phys_addr;
+
 	if (max_mw_size && peer->outbuf_size > max_mw_size) {
 		peer->outbuf_size = max_mw_size;
 		dev_warn(&peer->perf->ntb->dev,
diff --git a/drivers/ntb/test/ntb_tool.c b/drivers/ntb/test/ntb_tool.c
index d592c0ffbd19..69da758fe64c 100644
--- a/drivers/ntb/test/ntb_tool.c
+++ b/drivers/ntb/test/ntb_tool.c
@@ -678,19 +678,19 @@ static ssize_t tool_mw_trans_read(struct file *filep, char __user *ubuf,
 			 &inmw->dma_base);
 
 	off += scnprintf(buf + off, buf_size - off,
-			 "Window Size \t%pa[p]\n",
+			 "Window Size \t%pap\n",
 			 &inmw->size);
 
 	off += scnprintf(buf + off, buf_size - off,
-			 "Alignment \t%pa[p]\n",
+			 "Alignment \t%pap\n",
 			 &addr_align);
 
 	off += scnprintf(buf + off, buf_size - off,
-			 "Size Alignment \t%pa[p]\n",
+			 "Size Alignment \t%pap\n",
 			 &size_align);
 
 	off += scnprintf(buf + off, buf_size - off,
-			 "Size Max \t%pa[p]\n",
+			 "Size Max \t%pap\n",
 			 &size_max);
 
 	ret = simple_read_from_buffer(ubuf, size, offp, buf, off);
@@ -907,16 +907,16 @@ static ssize_t tool_peer_mw_trans_read(struct file *filep, char __user *ubuf,
 			 "Virtual address \t0x%pK\n", outmw->io_base);
 
 	off += scnprintf(buf + off, buf_size - off,
-			 "Phys Address \t%pa[p]\n", &map_base);
+			 "Phys Address \t%pap\n", &map_base);
 
 	off += scnprintf(buf + off, buf_size - off,
-			 "Mapping Size \t%pa[p]\n", &map_size);
+			 "Mapping Size \t%pap\n", &map_size);
 
 	off += scnprintf(buf + off, buf_size - off,
 			 "Translation Address \t0x%016llx\n", outmw->tr_base);
 
 	off += scnprintf(buf + off, buf_size - off,
-			 "Window Size \t%pa[p]\n", &outmw->size);
+			 "Window Size \t%pap\n", &outmw->size);
 
 	ret = simple_read_from_buffer(ubuf, size, offp, buf, off);
 
 	kfree(buf);