author     Tom Rini    2019-04-12 12:22:43 -0400
committer  Tom Rini    2019-04-12 12:22:43 -0400
commit     40a9546c7b6217a78a3a010a0142529a837e46b6 (patch)
tree       87ef70a898d33b0f2b9194f3f96f1283a6325c16 /drivers
parent     02f173ca156cee8526dff87603d5e446b443cde3 (diff)
parent     d2c05f50e12f87128597a28146de7092aaa847c3 (diff)
Merge branch '2019-04-11-ti-master-imports'
- Improve Keystone 3 SoC support (DMA, TI SCI); a consumer-side sketch of the
  new DMA uclass usage follows below.
- Improve Keystone 2 SoC support (PHY fixes on various platforms).
- Improve am335x family support (new platforms, more boot-mode options in SPL
  via DM).
- General DaVinci and OMAP5 fixes.
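
The K3 UDMA driver brought in by this merge hooks into U-Boot's generic DMA
uclass (see the udma_ops table at the end of k3-udma.c below) rather than
exposing a private API, so client drivers use the standard dma_* helpers from
include/dma.h. The following is a minimal consumer-side RX sketch under that
assumption; the device, the channel name "rx", and the buffer handling are
illustrative only, not taken from this merge:

	/* Hypothetical DMA uclass client; "rx" must match a "dma-names"
	 * entry in the consumer's device tree node. */
	#include <common.h>
	#include <dma.h>

	static int example_receive(struct udevice *dev, void *buf, size_t len)
	{
		struct dma dma_rx;
		void *pkt;
		int ret;

		/* Resolve the channel via the node's dmas/dma-names */
		ret = dma_get_by_name(dev, "rx", &dma_rx);
		if (ret)
			return ret;

		ret = dma_enable(&dma_rx);
		if (ret)
			goto free;

		/* Hand one buffer to the driver for incoming data */
		ret = dma_prepare_rcv_buf(&dma_rx, buf, len);
		if (ret)
			goto disable;

		/* Returns packet length, 0 if nothing pending, <0 on error */
		ret = dma_receive(&dma_rx, &pkt, NULL);

	disable:
		dma_disable(&dma_rx);
	free:
		dma_free(&dma_rx);
		return ret;
	}

On the RX side this maps directly onto the new driver's ops:
dma_prepare_rcv_buf() pushes the buffer onto the flow's free-descriptor ring
(udma_prepare_rcv_buf) and dma_receive() pops the completed host descriptor
from the receive ring (udma_receive).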
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Kconfig                            |    2
-rw-r--r--  drivers/dma/Kconfig                        |    2
-rw-r--r--  drivers/dma/Makefile                       |    2
-rw-r--r--  drivers/dma/ti/Kconfig                     |   14
-rw-r--r--  drivers/dma/ti/Makefile                    |    3
-rw-r--r--  drivers/dma/ti/k3-udma-hwdef.h             |  184
-rw-r--r--  drivers/dma/ti/k3-udma.c                   | 1730
-rw-r--r--  drivers/firmware/ti_sci.c                  |  813
-rw-r--r--  drivers/firmware/ti_sci.h                  |  661
-rw-r--r--  drivers/mmc/omap_hsmmc.c                   |   21
-rw-r--r--  drivers/net/ti/cpsw-common.c               |  127
-rw-r--r--  drivers/net/ti/cpsw.c                      |  202
-rw-r--r--  drivers/net/ti/keystone_net.c              |   22
-rw-r--r--  drivers/power/regulator/pbias_regulator.c  |    2
-rw-r--r--  drivers/soc/Kconfig                        |    5
-rw-r--r--  drivers/soc/Makefile                       |    2
-rw-r--r--  drivers/soc/keystone/Makefile              |    3
-rw-r--r--  drivers/soc/ti/Kconfig                     |   26
-rw-r--r--  drivers/soc/ti/Makefile                    |    4
-rw-r--r--  drivers/soc/ti/k3-navss-ringacc.c          | 1057
-rw-r--r--  drivers/soc/ti/keystone_serdes.c (renamed from drivers/soc/keystone/keystone_serdes.c) | 0
21 files changed, 4676 insertions, 206 deletions
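
One mechanism worth noting before the diff itself: a type-15 TR in
udma_prep_dma_memcpy() (in the new drivers/dma/ti/k3-udma.c below) carries at
most 64 KiB per icnt0 dimension, so larger copies are split into two TRs —
tr0 moves tr0_cnt1 blocks of just under 64 KiB (the block size is shaved by
the common source/destination alignment, capped at 8 bytes), and tr1 moves
the tail. A standalone, compilable sketch of that size computation, with
names local to this example:

	#include <stdio.h>
	#include <stdint.h>
	#include <stddef.h>

	#define SZ_64K 0x10000UL

	/* Mirrors the TR split in udma_prep_dma_memcpy(); the counts are
	 * u16 fields in the real TR descriptor. */
	static int udma_split_sketch(uintptr_t src, uintptr_t dest, size_t len)
	{
		unsigned int tr0_cnt0, tr0_cnt1, tr1_cnt0 = 0;
		int num_tr;

		if (len < SZ_64K) {
			num_tr = 1;
			tr0_cnt0 = len;	/* one TR covers the whole copy */
			tr0_cnt1 = 1;
		} else {
			/* Lowest set bit of src|dest gives the common
			 * alignment (__ffs() in the driver); guard the
			 * degenerate all-zero case for this sketch. */
			unsigned long align_to = (src | dest) ?
				__builtin_ctzl((unsigned long)(src | dest)) : 3;

			if (align_to > 3)
				align_to = 3;
			num_tr = 2;
			tr0_cnt0 = SZ_64K - (1UL << align_to);
			if (len / tr0_cnt0 >= SZ_64K)
				return -1;	/* driver: "size not supported" */
			tr0_cnt1 = len / tr0_cnt0;	/* full blocks in tr0 */
			tr1_cnt0 = len % tr0_cnt0;	/* remainder in tr1 */
		}

		printf("len=%zu -> %d TR(s): %u x %u bytes + %u bytes\n",
		       len, num_tr, tr0_cnt1, tr0_cnt0, tr1_cnt0);
		return 0;
	}

	int main(void)
	{
		/* 200000 bytes at 8-byte-aligned addresses:
		 * 3 blocks of 65528 bytes plus a 3416-byte tail */
		return udma_split_sketch(0x80000000, 0x81000000, 200000);
	}

The len / tr0_cnt0 >= SZ_64K guard rejects copies that would need more than
65535 blocks, which the driver reports as an unsupported size.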
diff --git a/drivers/Kconfig b/drivers/Kconfig index f24351ac4f1..e6702eced46 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig @@ -98,6 +98,8 @@ source "drivers/smem/Kconfig" source "drivers/sound/Kconfig" +source "drivers/soc/Kconfig" + source "drivers/spi/Kconfig" source "drivers/spmi/Kconfig" diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 1820676d7a1..4f37ba7d35e 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -57,4 +57,6 @@ config APBH_DMA_BURST8 endif +source "drivers/dma/ti/Kconfig" + endmenu # menu "DMA Support" diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index b5f9147e0a5..afab324461b 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -13,3 +13,5 @@ obj-$(CONFIG_SANDBOX_DMA) += sandbox-dma-test.o obj-$(CONFIG_TI_KSNAV) += keystone_nav.o keystone_nav_cfg.o obj-$(CONFIG_TI_EDMA3) += ti-edma3.o obj-$(CONFIG_DMA_LPC32XX) += lpc32xx_dma.o + +obj-y += ti/ diff --git a/drivers/dma/ti/Kconfig b/drivers/dma/ti/Kconfig new file mode 100644 index 00000000000..3d5498326c4 --- /dev/null +++ b/drivers/dma/ti/Kconfig @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: GPL-2.0+ + +if ARCH_K3 + +config TI_K3_NAVSS_UDMA + bool "Texas Instruments UDMA" + depends on ARCH_K3 + select DMA + select TI_K3_NAVSS_RINGACC + select TI_K3_NAVSS_PSILCFG + default n + help + Support for UDMA used in K3 devices. +endif diff --git a/drivers/dma/ti/Makefile b/drivers/dma/ti/Makefile new file mode 100644 index 00000000000..de2f9ac91a4 --- /dev/null +++ b/drivers/dma/ti/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0+ + +obj-$(CONFIG_TI_K3_NAVSS_UDMA) += k3-udma.o diff --git a/drivers/dma/ti/k3-udma-hwdef.h b/drivers/dma/ti/k3-udma-hwdef.h new file mode 100644 index 00000000000..c88399a815e --- /dev/null +++ b/drivers/dma/ti/k3-udma-hwdef.h @@ -0,0 +1,184 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#ifndef K3_NAVSS_UDMA_HWDEF_H_ +#define K3_NAVSS_UDMA_HWDEF_H_ + +#define UDMA_PSIL_DST_THREAD_ID_OFFSET 0x8000 + +/* Global registers */ +#define UDMA_REV_REG 0x0 +#define UDMA_PERF_CTL_REG 0x4 +#define UDMA_EMU_CTL_REG 0x8 +#define UDMA_PSIL_TO_REG 0x10 +#define UDMA_UTC_CTL_REG 0x1c +#define UDMA_CAP_REG(i) (0x20 + (i * 4)) +#define UDMA_RX_FLOW_ID_FW_OES_REG 0x80 +#define UDMA_RX_FLOW_ID_FW_STATUS_REG 0x88 + +/* RX Flow regs */ +#define UDMA_RFLOW_RFA_REG 0x0 +#define UDMA_RFLOW_RFB_REG 0x4 +#define UDMA_RFLOW_RFC_REG 0x8 +#define UDMA_RFLOW_RFD_REG 0xc +#define UDMA_RFLOW_RFE_REG 0x10 +#define UDMA_RFLOW_RFF_REG 0x14 +#define UDMA_RFLOW_RFG_REG 0x18 +#define UDMA_RFLOW_RFH_REG 0x1c + +#define UDMA_RFLOW_REG(x) (UDMA_RFLOW_RF##x##_REG) + +/* TX chan regs */ +#define UDMA_TCHAN_TCFG_REG 0x0 +#define UDMA_TCHAN_TCREDIT_REG 0x4 +#define UDMA_TCHAN_TCQ_REG 0x14 +#define UDMA_TCHAN_TOES_REG(i) (0x20 + (i) * 4) +#define UDMA_TCHAN_TEOES_REG 0x60 +#define UDMA_TCHAN_TPRI_CTRL_REG 0x64 +#define UDMA_TCHAN_THREAD_ID_REG 0x68 +#define UDMA_TCHAN_TFIFO_DEPTH_REG 0x70 +#define UDMA_TCHAN_TST_SCHED_REG 0x80 + +/* RX chan regs */ +#define UDMA_RCHAN_RCFG_REG 0x0 +#define UDMA_RCHAN_RCQ_REG 0x14 +#define UDMA_RCHAN_ROES_REG(i) (0x20 + (i) * 4) +#define UDMA_RCHAN_REOES_REG 0x60 +#define UDMA_RCHAN_RPRI_CTRL_REG 0x64 +#define UDMA_RCHAN_THREAD_ID_REG 0x68 +#define UDMA_RCHAN_RST_SCHED_REG 0x80 +#define UDMA_RCHAN_RFLOW_RNG_REG 0xf0 + +/* TX chan RT regs */ +#define UDMA_TCHAN_RT_CTL_REG 0x0 +#define UDMA_TCHAN_RT_SWTRIG_REG 0x8 +#define UDMA_TCHAN_RT_STDATA_REG 0x80 + +#define UDMA_TCHAN_RT_PEERn_REG(i) (0x200 + (i * 0x4)) +#define UDMA_TCHAN_RT_PEER_STATIC_TR_XY_REG \ + UDMA_TCHAN_RT_PEERn_REG(0) /* PSI-L: 0x400 */ +#define UDMA_TCHAN_RT_PEER_STATIC_TR_Z_REG \ + UDMA_TCHAN_RT_PEERn_REG(1) /* PSI-L: 0x401 */ +#define UDMA_TCHAN_RT_PEER_BCNT_REG \ + UDMA_TCHAN_RT_PEERn_REG(4) /* PSI-L: 0x404 */ +#define UDMA_TCHAN_RT_PEER_RT_EN_REG \ + UDMA_TCHAN_RT_PEERn_REG(8) /* PSI-L: 0x408 */ + +#define UDMA_TCHAN_RT_PCNT_REG 0x400 +#define UDMA_TCHAN_RT_BCNT_REG 0x408 +#define UDMA_TCHAN_RT_SBCNT_REG 0x410 + +/* RX chan RT regs */ +#define UDMA_RCHAN_RT_CTL_REG 0x0 +#define UDMA_RCHAN_RT_SWTRIG_REG 0x8 +#define UDMA_RCHAN_RT_STDATA_REG 0x80 + +#define UDMA_RCHAN_RT_PEERn_REG(i) (0x200 + (i * 0x4)) +#define UDMA_RCHAN_RT_PEER_STATIC_TR_XY_REG \ + UDMA_RCHAN_RT_PEERn_REG(0) /* PSI-L: 0x400 */ +#define UDMA_RCHAN_RT_PEER_STATIC_TR_Z_REG \ + UDMA_RCHAN_RT_PEERn_REG(1) /* PSI-L: 0x401 */ +#define UDMA_RCHAN_RT_PEER_BCNT_REG \ + UDMA_RCHAN_RT_PEERn_REG(4) /* PSI-L: 0x404 */ +#define UDMA_RCHAN_RT_PEER_RT_EN_REG \ + UDMA_RCHAN_RT_PEERn_REG(8) /* PSI-L: 0x408 */ + +#define UDMA_RCHAN_RT_PCNT_REG 0x400 +#define UDMA_RCHAN_RT_BCNT_REG 0x408 +#define UDMA_RCHAN_RT_SBCNT_REG 0x410 + +/* UDMA_TCHAN_TCFG_REG/UDMA_RCHAN_RCFG_REG */ +#define UDMA_CHAN_CFG_PAUSE_ON_ERR BIT(31) +#define UDMA_TCHAN_CFG_FILT_EINFO BIT(30) +#define UDMA_TCHAN_CFG_FILT_PSWORDS BIT(29) +#define UDMA_CHAN_CFG_ATYPE_MASK GENMASK(25, 24) +#define UDMA_CHAN_CFG_ATYPE_SHIFT 24 +#define UDMA_CHAN_CFG_CHAN_TYPE_MASK GENMASK(19, 16) +#define UDMA_CHAN_CFG_CHAN_TYPE_SHIFT 16 +/* + * PBVR - using pass by value rings + * PBRR - using pass by reference rings + * 3RDP - Third Party DMA + * BC - Block Copy + * SB - single buffer packet mode enabled + */ +#define UDMA_CHAN_CFG_CHAN_TYPE_PACKET_PBRR \ + (2 << UDMA_CHAN_CFG_CHAN_TYPE_SHIFT) +#define UDMA_CHAN_CFG_CHAN_TYPE_PACKET_SB_PBRR \ + (3 << UDMA_CHAN_CFG_CHAN_TYPE_SHIFT) +#define 
UDMA_CHAN_CFG_CHAN_TYPE_3RDP_PBRR \ + (10 << UDMA_CHAN_CFG_CHAN_TYPE_SHIFT) +#define UDMA_CHAN_CFG_CHAN_TYPE_3RDP_PBVR \ + (11 << UDMA_CHAN_CFG_CHAN_TYPE_SHIFT) +#define UDMA_CHAN_CFG_CHAN_TYPE_3RDP_BC_PBRR \ + (12 << UDMA_CHAN_CFG_CHAN_TYPE_SHIFT) +#define UDMA_RCHAN_CFG_IGNORE_SHORT BIT(15) +#define UDMA_RCHAN_CFG_IGNORE_LONG BIT(14) +#define UDMA_TCHAN_CFG_SUPR_TDPKT BIT(8) +#define UDMA_CHAN_CFG_FETCH_SIZE_MASK GENMASK(6, 0) +#define UDMA_CHAN_CFG_FETCH_SIZE_SHIFT 0 + +/* UDMA_TCHAN_RT_CTL_REG/UDMA_RCHAN_RT_CTL_REG */ +#define UDMA_CHAN_RT_CTL_EN BIT(31) +#define UDMA_CHAN_RT_CTL_TDOWN BIT(30) +#define UDMA_CHAN_RT_CTL_PAUSE BIT(29) +#define UDMA_CHAN_RT_CTL_FTDOWN BIT(28) +#define UDMA_CHAN_RT_CTL_ERROR BIT(0) + +/* UDMA_TCHAN_RT_PEER_RT_EN_REG/UDMA_RCHAN_RT_PEER_RT_EN_REG (PSI-L: 0x408) */ +#define UDMA_PEER_RT_EN_ENABLE BIT(31) +#define UDMA_PEER_RT_EN_TEARDOWN BIT(30) +#define UDMA_PEER_RT_EN_PAUSE BIT(29) +#define UDMA_PEER_RT_EN_FLUSH BIT(28) +#define UDMA_PEER_RT_EN_IDLE BIT(1) + +/* RX Flow reg RFA */ +#define UDMA_RFLOW_RFA_EINFO BIT(30) +#define UDMA_RFLOW_RFA_PSINFO BIT(29) +#define UDMA_RFLOW_RFA_ERR_HANDLING BIT(28) +#define UDMA_RFLOW_RFA_DESC_TYPE_MASK GENMASK(27, 26) +#define UDMA_RFLOW_RFA_DESC_TYPE_SHIFT 26 +#define UDMA_RFLOW_RFA_PS_LOC BIT(25) +#define UDMA_RFLOW_RFA_SOP_OFF_MASK GENMASK(24, 16) +#define UDMA_RFLOW_RFA_SOP_OFF_SHIFT 16 +#define UDMA_RFLOW_RFA_DEST_QNUM_MASK GENMASK(15, 0) +#define UDMA_RFLOW_RFA_DEST_QNUM_SHIFT 0 + +/* RX Flow reg RFC */ +#define UDMA_RFLOW_RFC_SRC_TAG_HI_SEL_SHIFT 28 +#define UDMA_RFLOW_RFC_SRC_TAG_LO_SEL_SHIFT 24 +#define UDMA_RFLOW_RFC_DST_TAG_HI_SEL_SHIFT 20 +#define UDMA_RFLOW_RFC_DST_TAG_LO_SE_SHIFT 16 + +/* + * UDMA_TCHAN_RT_PEER_STATIC_TR_XY_REG / + * UDMA_RCHAN_RT_PEER_STATIC_TR_XY_REG + */ +#define PDMA_STATIC_TR_X_MASK GENMASK(26, 24) +#define PDMA_STATIC_TR_X_SHIFT (24) +#define PDMA_STATIC_TR_Y_MASK GENMASK(11, 0) +#define PDMA_STATIC_TR_Y_SHIFT (0) + +#define PDMA_STATIC_TR_Y(x) \ + (((x) << PDMA_STATIC_TR_Y_SHIFT) & PDMA_STATIC_TR_Y_MASK) +#define PDMA_STATIC_TR_X(x) \ + (((x) << PDMA_STATIC_TR_X_SHIFT) & PDMA_STATIC_TR_X_MASK) + +/* + * UDMA_TCHAN_RT_PEER_STATIC_TR_Z_REG / + * UDMA_RCHAN_RT_PEER_STATIC_TR_Z_REG + */ +#define PDMA_STATIC_TR_Z_MASK GENMASK(11, 0) +#define PDMA_STATIC_TR_Z_SHIFT (0) +#define PDMA_STATIC_TR_Z(x) \ + (((x) << PDMA_STATIC_TR_Z_SHIFT) & PDMA_STATIC_TR_Z_MASK) + +#endif /* K3_NAVSS_UDMA_HWDEF_H_ */ diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c new file mode 100644 index 00000000000..f78a01aa8f8 --- /dev/null +++ b/drivers/dma/ti/k3-udma.c @@ -0,0 +1,1730 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com + * Author: Peter Ujfalusi <peter.ujfalusi@ti.com> + */ +#define pr_fmt(fmt) "udma: " fmt + +#include <common.h> +#include <asm/io.h> +#include <asm/bitops.h> +#include <malloc.h> +#include <asm/dma-mapping.h> +#include <dm.h> +#include <dm/read.h> +#include <dm/of_access.h> +#include <dma.h> +#include <dma-uclass.h> +#include <linux/delay.h> +#include <dt-bindings/dma/k3-udma.h> +#include <linux/soc/ti/k3-navss-ringacc.h> +#include <linux/soc/ti/cppi5.h> +#include <linux/soc/ti/ti-udma.h> +#include <linux/soc/ti/ti_sci_protocol.h> + +#include "k3-udma-hwdef.h" + +#if BITS_PER_LONG == 64 +#define RINGACC_RING_USE_PROXY (0) +#else +#define RINGACC_RING_USE_PROXY (1) +#endif + +struct udma_chan; + +enum udma_mmr { + MMR_GCFG = 0, + MMR_RCHANRT, + MMR_TCHANRT, + MMR_LAST, +}; + +static const char * const 
mmr_names[] = { + "gcfg", "rchanrt", "tchanrt" +}; + +struct udma_tchan { + void __iomem *reg_rt; + + int id; + struct k3_nav_ring *t_ring; /* Transmit ring */ + struct k3_nav_ring *tc_ring; /* Transmit Completion ring */ +}; + +struct udma_rchan { + void __iomem *reg_rt; + + int id; + struct k3_nav_ring *fd_ring; /* Free Descriptor ring */ + struct k3_nav_ring *r_ring; /* Receive ring*/ +}; + +struct udma_rflow { + int id; +}; + +struct udma_dev { + struct device *dev; + void __iomem *mmrs[MMR_LAST]; + + struct k3_nav_ringacc *ringacc; + + u32 features; + + int tchan_cnt; + int echan_cnt; + int rchan_cnt; + int rflow_cnt; + unsigned long *tchan_map; + unsigned long *rchan_map; + unsigned long *rflow_map; + + struct udma_tchan *tchans; + struct udma_rchan *rchans; + struct udma_rflow *rflows; + + struct udma_chan *channels; + u32 psil_base; + + u32 ch_count; + const struct ti_sci_handle *tisci; + const struct ti_sci_rm_udmap_ops *tisci_udmap_ops; + const struct ti_sci_rm_psil_ops *tisci_psil_ops; + u32 tisci_dev_id; + u32 tisci_navss_dev_id; + bool is_coherent; +}; + +struct udma_chan { + struct udma_dev *ud; + char name[20]; + + struct udma_tchan *tchan; + struct udma_rchan *rchan; + struct udma_rflow *rflow; + + u32 bcnt; /* number of bytes completed since the start of the channel */ + + bool pkt_mode; /* TR or packet */ + bool needs_epib; /* EPIB is needed for the communication or not */ + u32 psd_size; /* size of Protocol Specific Data */ + u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */ + int slave_thread_id; + u32 src_thread; + u32 dst_thread; + u32 static_tr_type; + + u32 id; + enum dma_direction dir; + + struct cppi5_host_desc_t *desc_tx; + u32 hdesc_size; + bool in_use; + void *desc_rx; + u32 num_rx_bufs; + u32 desc_rx_cur; + +}; + +#define UDMA_CH_1000(ch) (ch * 0x1000) +#define UDMA_CH_100(ch) (ch * 0x100) +#define UDMA_CH_40(ch) (ch * 0x40) + +#ifdef PKTBUFSRX +#define UDMA_RX_DESC_NUM PKTBUFSRX +#else +#define UDMA_RX_DESC_NUM 4 +#endif + +/* Generic register access functions */ +static inline u32 udma_read(void __iomem *base, int reg) +{ + u32 v; + + v = __raw_readl(base + reg); + pr_debug("READL(32): v(%08X)<--reg(%p)\n", v, base + reg); + return v; +} + +static inline void udma_write(void __iomem *base, int reg, u32 val) +{ + pr_debug("WRITEL(32): v(%08X)-->reg(%p)\n", val, base + reg); + __raw_writel(val, base + reg); +} + +static inline void udma_update_bits(void __iomem *base, int reg, + u32 mask, u32 val) +{ + u32 tmp, orig; + + orig = udma_read(base, reg); + tmp = orig & ~mask; + tmp |= (val & mask); + + if (tmp != orig) + udma_write(base, reg, tmp); +} + +/* TCHANRT */ +static inline u32 udma_tchanrt_read(struct udma_tchan *tchan, int reg) +{ + if (!tchan) + return 0; + return udma_read(tchan->reg_rt, reg); +} + +static inline void udma_tchanrt_write(struct udma_tchan *tchan, + int reg, u32 val) +{ + if (!tchan) + return; + udma_write(tchan->reg_rt, reg, val); +} + +/* RCHANRT */ +static inline u32 udma_rchanrt_read(struct udma_rchan *rchan, int reg) +{ + if (!rchan) + return 0; + return udma_read(rchan->reg_rt, reg); +} + +static inline void udma_rchanrt_write(struct udma_rchan *rchan, + int reg, u32 val) +{ + if (!rchan) + return; + udma_write(rchan->reg_rt, reg, val); +} + +static inline int udma_navss_psil_pair(struct udma_dev *ud, u32 src_thread, + u32 dst_thread) +{ + dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET; + return ud->tisci_psil_ops->pair(ud->tisci, + ud->tisci_navss_dev_id, + src_thread, dst_thread); +} + +static inline int 
udma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread, + u32 dst_thread) +{ + dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET; + return ud->tisci_psil_ops->unpair(ud->tisci, + ud->tisci_navss_dev_id, + src_thread, dst_thread); +} + +static inline char *udma_get_dir_text(enum dma_direction dir) +{ + switch (dir) { + case DMA_DEV_TO_MEM: + return "DEV_TO_MEM"; + case DMA_MEM_TO_DEV: + return "MEM_TO_DEV"; + case DMA_MEM_TO_MEM: + return "MEM_TO_MEM"; + case DMA_DEV_TO_DEV: + return "DEV_TO_DEV"; + default: + break; + } + + return "invalid"; +} + +static inline bool udma_is_chan_running(struct udma_chan *uc) +{ + u32 trt_ctl = 0; + u32 rrt_ctl = 0; + + switch (uc->dir) { + case DMA_DEV_TO_MEM: + rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG); + pr_debug("%s: rrt_ctl: 0x%08x (peer: 0x%08x)\n", + __func__, rrt_ctl, + udma_rchanrt_read(uc->rchan, + UDMA_RCHAN_RT_PEER_RT_EN_REG)); + break; + case DMA_MEM_TO_DEV: + trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG); + pr_debug("%s: trt_ctl: 0x%08x (peer: 0x%08x)\n", + __func__, trt_ctl, + udma_tchanrt_read(uc->tchan, + UDMA_TCHAN_RT_PEER_RT_EN_REG)); + break; + case DMA_MEM_TO_MEM: + trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG); + rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG); + break; + default: + break; + } + + if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN) + return true; + + return false; +} + +static int udma_is_coherent(struct udma_chan *uc) +{ + return uc->ud->is_coherent; +} + +static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr) +{ + struct k3_nav_ring *ring = NULL; + int ret = -ENOENT; + + switch (uc->dir) { + case DMA_DEV_TO_MEM: + ring = uc->rchan->r_ring; + break; + case DMA_MEM_TO_DEV: + ring = uc->tchan->tc_ring; + break; + case DMA_MEM_TO_MEM: + ring = uc->tchan->tc_ring; + break; + default: + break; + } + + if (ring && k3_nav_ringacc_ring_get_occ(ring)) + ret = k3_nav_ringacc_ring_pop(ring, addr); + + return ret; +} + +static void udma_reset_rings(struct udma_chan *uc) +{ + struct k3_nav_ring *ring1 = NULL; + struct k3_nav_ring *ring2 = NULL; + + switch (uc->dir) { + case DMA_DEV_TO_MEM: + ring1 = uc->rchan->fd_ring; + ring2 = uc->rchan->r_ring; + break; + case DMA_MEM_TO_DEV: + ring1 = uc->tchan->t_ring; + ring2 = uc->tchan->tc_ring; + break; + case DMA_MEM_TO_MEM: + ring1 = uc->tchan->t_ring; + ring2 = uc->tchan->tc_ring; + break; + default: + break; + } + + if (ring1) + k3_nav_ringacc_ring_reset_dma(ring1, 0); + if (ring2) + k3_nav_ringacc_ring_reset(ring2); +} + +static void udma_reset_counters(struct udma_chan *uc) +{ + u32 val; + + if (uc->tchan) { + val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_BCNT_REG, val); + + val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG, val); + + val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PCNT_REG); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PCNT_REG, val); + + val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG, val); + } + + if (uc->rchan) { + val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_BCNT_REG); + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_BCNT_REG, val); + + val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG); + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG, val); + + val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PCNT_REG); + udma_rchanrt_write(uc->rchan, 
UDMA_RCHAN_RT_PCNT_REG, val); + + val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG); + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG, val); + } + + uc->bcnt = 0; +} + +static inline int udma_stop_hard(struct udma_chan *uc) +{ + pr_debug("%s: ENTER (chan%d)\n", __func__, uc->id); + + switch (uc->dir) { + case DMA_DEV_TO_MEM: + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, 0); + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0); + break; + case DMA_MEM_TO_DEV: + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, 0); + break; + case DMA_MEM_TO_MEM: + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0); + break; + default: + return -EINVAL; + } + + return 0; +} + +static int udma_start(struct udma_chan *uc) +{ + /* Channel is already running, no need to proceed further */ + if (udma_is_chan_running(uc)) + goto out; + + pr_debug("%s: chan:%d dir:%s (static_tr_type: %d)\n", + __func__, uc->id, udma_get_dir_text(uc->dir), + uc->static_tr_type); + + /* Make sure that we clear the teardown bit, if it is set */ + udma_stop_hard(uc); + + /* Reset all counters */ + udma_reset_counters(uc); + + switch (uc->dir) { + case DMA_DEV_TO_MEM: + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_EN); + + /* Enable remote */ + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, + UDMA_PEER_RT_EN_ENABLE); + + pr_debug("%s(rx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n", + __func__, + udma_rchanrt_read(uc->rchan, + UDMA_RCHAN_RT_CTL_REG), + udma_rchanrt_read(uc->rchan, + UDMA_RCHAN_RT_PEER_RT_EN_REG)); + break; + case DMA_MEM_TO_DEV: + /* Enable remote */ + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, + UDMA_PEER_RT_EN_ENABLE); + + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_EN); + + pr_debug("%s(tx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n", + __func__, + udma_rchanrt_read(uc->rchan, + UDMA_TCHAN_RT_CTL_REG), + udma_rchanrt_read(uc->rchan, + UDMA_TCHAN_RT_PEER_RT_EN_REG)); + break; + case DMA_MEM_TO_MEM: + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_EN); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_EN); + + break; + default: + return -EINVAL; + } + + pr_debug("%s: DONE chan:%d\n", __func__, uc->id); +out: + return 0; +} + +static inline void udma_stop_mem2dev(struct udma_chan *uc, bool sync) +{ + int i = 0; + u32 val; + + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_EN | + UDMA_CHAN_RT_CTL_TDOWN); + + val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG); + + while (sync && (val & UDMA_CHAN_RT_CTL_EN)) { + val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG); + udelay(1); + if (i > 1000) { + printf(" %s TIMEOUT !\n", __func__); + break; + } + i++; + } + + val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG); + if (val & UDMA_PEER_RT_EN_ENABLE) + printf("%s: peer not stopped TIMEOUT !\n", __func__); +} + +static inline void udma_stop_dev2mem(struct udma_chan *uc, bool sync) +{ + int i = 0; + u32 val; + + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, + UDMA_PEER_RT_EN_ENABLE | + UDMA_PEER_RT_EN_TEARDOWN); + + val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG); + + while (sync && (val & UDMA_CHAN_RT_CTL_EN)) { + val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG); + udelay(1); + if (i > 1000) { + printf("%s TIMEOUT !\n", __func__); + break; + } + 
i++; + } + + val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG); + if (val & UDMA_PEER_RT_EN_ENABLE) + printf("%s: peer not stopped TIMEOUT !\n", __func__); +} + +static inline int udma_stop(struct udma_chan *uc) +{ + pr_debug("%s: chan:%d dir:%s\n", + __func__, uc->id, udma_get_dir_text(uc->dir)); + + udma_reset_counters(uc); + switch (uc->dir) { + case DMA_DEV_TO_MEM: + udma_stop_dev2mem(uc, true); + break; + case DMA_MEM_TO_DEV: + udma_stop_mem2dev(uc, true); + break; + case DMA_MEM_TO_MEM: + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0); + break; + default: + return -EINVAL; + } + + return 0; +} + +static void udma_poll_completion(struct udma_chan *uc, dma_addr_t *paddr) +{ + int i = 1; + + while (udma_pop_from_ring(uc, paddr)) { + udelay(1); + if (!(i % 1000000)) + printf("."); + i++; + } +} + +#define UDMA_RESERVE_RESOURCE(res) \ +static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \ + int id) \ +{ \ + if (id >= 0) { \ + if (test_bit(id, ud->res##_map)) { \ + dev_err(ud->dev, "res##%d is in use\n", id); \ + return ERR_PTR(-ENOENT); \ + } \ + } else { \ + id = find_first_zero_bit(ud->res##_map, ud->res##_cnt); \ + if (id == ud->res##_cnt) { \ + return ERR_PTR(-ENOENT); \ + } \ + } \ + \ + __set_bit(id, ud->res##_map); \ + return &ud->res##s[id]; \ +} + +UDMA_RESERVE_RESOURCE(tchan); +UDMA_RESERVE_RESOURCE(rchan); +UDMA_RESERVE_RESOURCE(rflow); + +static int udma_get_tchan(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + + if (uc->tchan) { + dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n", + uc->id, uc->tchan->id); + return 0; + } + + uc->tchan = __udma_reserve_tchan(ud, -1); + if (IS_ERR(uc->tchan)) + return PTR_ERR(uc->tchan); + + pr_debug("chan%d: got tchan%d\n", uc->id, uc->tchan->id); + + if (udma_is_chan_running(uc)) { + dev_warn(ud->dev, "chan%d: tchan%d is running!\n", uc->id, + uc->tchan->id); + udma_stop(uc); + if (udma_is_chan_running(uc)) + dev_err(ud->dev, "chan%d: won't stop!\n", uc->id); + } + + return 0; +} + +static int udma_get_rchan(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + + if (uc->rchan) { + dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n", + uc->id, uc->rchan->id); + return 0; + } + + uc->rchan = __udma_reserve_rchan(ud, -1); + if (IS_ERR(uc->rchan)) + return PTR_ERR(uc->rchan); + + pr_debug("chan%d: got rchan%d\n", uc->id, uc->rchan->id); + + if (udma_is_chan_running(uc)) { + dev_warn(ud->dev, "chan%d: rchan%d is running!\n", uc->id, + uc->rchan->id); + udma_stop(uc); + if (udma_is_chan_running(uc)) + dev_err(ud->dev, "chan%d: won't stop!\n", uc->id); + } + + return 0; +} + +static int udma_get_chan_pair(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + int chan_id, end; + + if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) { + dev_info(ud->dev, "chan%d: already have %d pair allocated\n", + uc->id, uc->tchan->id); + return 0; + } + + if (uc->tchan) { + dev_err(ud->dev, "chan%d: already have tchan%d allocated\n", + uc->id, uc->tchan->id); + return -EBUSY; + } else if (uc->rchan) { + dev_err(ud->dev, "chan%d: already have rchan%d allocated\n", + uc->id, uc->rchan->id); + return -EBUSY; + } + + /* Can be optimized, but let's have it like this for now */ + end = min(ud->tchan_cnt, ud->rchan_cnt); + for (chan_id = 0; chan_id < end; chan_id++) { + if (!test_bit(chan_id, ud->tchan_map) && + !test_bit(chan_id, ud->rchan_map)) + break; + } + + if (chan_id == end) + return -ENOENT; + + 
__set_bit(chan_id, ud->tchan_map); + __set_bit(chan_id, ud->rchan_map); + uc->tchan = &ud->tchans[chan_id]; + uc->rchan = &ud->rchans[chan_id]; + + pr_debug("chan%d: got t/rchan%d pair\n", uc->id, chan_id); + + if (udma_is_chan_running(uc)) { + dev_warn(ud->dev, "chan%d: t/rchan%d pair is running!\n", + uc->id, chan_id); + udma_stop(uc); + if (udma_is_chan_running(uc)) + dev_err(ud->dev, "chan%d: won't stop!\n", uc->id); + } + + return 0; +} + +static int udma_get_rflow(struct udma_chan *uc, int flow_id) +{ + struct udma_dev *ud = uc->ud; + + if (uc->rflow) { + dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n", + uc->id, uc->rflow->id); + return 0; + } + + if (!uc->rchan) + dev_warn(ud->dev, "chan%d: does not have rchan??\n", uc->id); + + uc->rflow = __udma_reserve_rflow(ud, flow_id); + if (IS_ERR(uc->rflow)) + return PTR_ERR(uc->rflow); + + pr_debug("chan%d: got rflow%d\n", uc->id, uc->rflow->id); + return 0; +} + +static void udma_put_rchan(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + + if (uc->rchan) { + dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id, + uc->rchan->id); + __clear_bit(uc->rchan->id, ud->rchan_map); + uc->rchan = NULL; + } +} + +static void udma_put_tchan(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + + if (uc->tchan) { + dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id, + uc->tchan->id); + __clear_bit(uc->tchan->id, ud->tchan_map); + uc->tchan = NULL; + } +} + +static void udma_put_rflow(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + + if (uc->rflow) { + dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id, + uc->rflow->id); + __clear_bit(uc->rflow->id, ud->rflow_map); + uc->rflow = NULL; + } +} + +static void udma_free_tx_resources(struct udma_chan *uc) +{ + if (!uc->tchan) + return; + + k3_nav_ringacc_ring_free(uc->tchan->t_ring); + k3_nav_ringacc_ring_free(uc->tchan->tc_ring); + uc->tchan->t_ring = NULL; + uc->tchan->tc_ring = NULL; + + udma_put_tchan(uc); +} + +static int udma_alloc_tx_resources(struct udma_chan *uc) +{ + struct k3_nav_ring_cfg ring_cfg; + struct udma_dev *ud = uc->ud; + int ret; + + ret = udma_get_tchan(uc); + if (ret) + return ret; + + uc->tchan->t_ring = k3_nav_ringacc_request_ring( + ud->ringacc, uc->tchan->id, + RINGACC_RING_USE_PROXY); + if (!uc->tchan->t_ring) { + ret = -EBUSY; + goto err_tx_ring; + } + + uc->tchan->tc_ring = k3_nav_ringacc_request_ring( + ud->ringacc, -1, RINGACC_RING_USE_PROXY); + if (!uc->tchan->tc_ring) { + ret = -EBUSY; + goto err_txc_ring; + } + + memset(&ring_cfg, 0, sizeof(ring_cfg)); + ring_cfg.size = 16; + ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8; + ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_MESSAGE; + + ret = k3_nav_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg); + ret |= k3_nav_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg); + + if (ret) + goto err_ringcfg; + + return 0; + +err_ringcfg: + k3_nav_ringacc_ring_free(uc->tchan->tc_ring); + uc->tchan->tc_ring = NULL; +err_txc_ring: + k3_nav_ringacc_ring_free(uc->tchan->t_ring); + uc->tchan->t_ring = NULL; +err_tx_ring: + udma_put_tchan(uc); + + return ret; +} + +static void udma_free_rx_resources(struct udma_chan *uc) +{ + if (!uc->rchan) + return; + + k3_nav_ringacc_ring_free(uc->rchan->fd_ring); + k3_nav_ringacc_ring_free(uc->rchan->r_ring); + uc->rchan->fd_ring = NULL; + uc->rchan->r_ring = NULL; + + udma_put_rflow(uc); + udma_put_rchan(uc); +} + +static int udma_alloc_rx_resources(struct udma_chan *uc) +{ + struct k3_nav_ring_cfg ring_cfg; + struct udma_dev *ud = uc->ud; + int fd_ring_id; + int ret; + + ret = 
udma_get_rchan(uc); + if (ret) + return ret; + + /* For MEM_TO_MEM we don't need rflow or rings */ + if (uc->dir == DMA_MEM_TO_MEM) + return 0; + + ret = udma_get_rflow(uc, uc->rchan->id); + if (ret) { + ret = -EBUSY; + goto err_rflow; + } + + fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id; + + uc->rchan->fd_ring = k3_nav_ringacc_request_ring( + ud->ringacc, fd_ring_id, + RINGACC_RING_USE_PROXY); + if (!uc->rchan->fd_ring) { + ret = -EBUSY; + goto err_rx_ring; + } + + uc->rchan->r_ring = k3_nav_ringacc_request_ring( + ud->ringacc, -1, RINGACC_RING_USE_PROXY); + if (!uc->rchan->r_ring) { + ret = -EBUSY; + goto err_rxc_ring; + } + + memset(&ring_cfg, 0, sizeof(ring_cfg)); + ring_cfg.size = 16; + ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8; + ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_MESSAGE; + + ret = k3_nav_ringacc_ring_cfg(uc->rchan->fd_ring, &ring_cfg); + ret |= k3_nav_ringacc_ring_cfg(uc->rchan->r_ring, &ring_cfg); + + if (ret) + goto err_ringcfg; + + return 0; + +err_ringcfg: + k3_nav_ringacc_ring_free(uc->rchan->r_ring); + uc->rchan->r_ring = NULL; +err_rxc_ring: + k3_nav_ringacc_ring_free(uc->rchan->fd_ring); + uc->rchan->fd_ring = NULL; +err_rx_ring: + udma_put_rflow(uc); +err_rflow: + udma_put_rchan(uc); + + return ret; +} + +static int udma_alloc_tchan_sci_req(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring); + struct ti_sci_msg_rm_udmap_tx_ch_cfg req; + u32 mode; + int ret; + + if (uc->pkt_mode) + mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR; + else + mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR; + + req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID; + req.nav_id = ud->tisci_dev_id; + req.index = uc->tchan->id; + req.tx_chan_type = mode; + if (uc->dir == DMA_MEM_TO_MEM) + req.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2; + else + req.tx_fetch_size = cppi5_hdesc_calc_size(uc->needs_epib, + uc->psd_size, + 0) >> 2; + req.txcq_qnum = tc_ring; + + ret = ud->tisci_udmap_ops->tx_ch_cfg(ud->tisci, &req); + if (ret) + dev_err(ud->dev, "tisci tx alloc failed %d\n", ret); + + return ret; +} + +static int udma_alloc_rchan_sci_req(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + int fd_ring = k3_nav_ringacc_get_ring_id(uc->rchan->fd_ring); + int rx_ring = k3_nav_ringacc_get_ring_id(uc->rchan->r_ring); + int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring); + struct ti_sci_msg_rm_udmap_rx_ch_cfg req = { 0 }; + struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 }; + u32 mode; + int ret; + + if (uc->pkt_mode) + mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR; + else + mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR; + + req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID; + req.nav_id = ud->tisci_dev_id; + req.index = uc->rchan->id; + req.rx_chan_type = mode; + if (uc->dir == DMA_MEM_TO_MEM) { + req.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2; + req.rxcq_qnum = tc_ring; + } else { + req.rx_fetch_size = cppi5_hdesc_calc_size(uc->needs_epib, + uc->psd_size, + 0) >> 2; + req.rxcq_qnum = rx_ring; + } + if (uc->rflow->id != uc->rchan->id && uc->dir != DMA_MEM_TO_MEM) { + req.flowid_start = uc->rflow->id; + req.flowid_cnt = 1; + req.valid_params |= + TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID; + } + + ret = 
ud->tisci_udmap_ops->rx_ch_cfg(ud->tisci, &req); + if (ret) { + dev_err(ud->dev, "tisci rx %u cfg failed %d\n", + uc->rchan->id, ret); + return ret; + } + if (uc->dir == DMA_MEM_TO_MEM) + return ret; + + flow_req.valid_params = + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PS_LOCATION_VALID; + + flow_req.nav_id = ud->tisci_dev_id; + flow_req.flow_index = uc->rflow->id; + + if (uc->needs_epib) + flow_req.rx_einfo_present = 1; + else + flow_req.rx_einfo_present = 0; + + if (uc->psd_size) + flow_req.rx_psinfo_present = 1; + else + flow_req.rx_psinfo_present = 0; + + flow_req.rx_error_handling = 0; + flow_req.rx_desc_type = 0; + flow_req.rx_dest_qnum = rx_ring; + flow_req.rx_src_tag_hi_sel = 2; + flow_req.rx_src_tag_lo_sel = 4; + flow_req.rx_dest_tag_hi_sel = 5; + flow_req.rx_dest_tag_lo_sel = 4; + flow_req.rx_fdq0_sz0_qnum = fd_ring; + flow_req.rx_fdq1_qnum = fd_ring; + flow_req.rx_fdq2_qnum = fd_ring; + flow_req.rx_fdq3_qnum = fd_ring; + flow_req.rx_ps_location = 0; + + ret = ud->tisci_udmap_ops->rx_flow_cfg(ud->tisci, &flow_req); + if (ret) + dev_err(ud->dev, "tisci rx %u flow %u cfg failed %d\n", + uc->rchan->id, uc->rflow->id, ret); + + return ret; +} + +static int udma_alloc_chan_resources(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + int ret; + + pr_debug("%s: chan:%d as %s\n", + __func__, uc->id, udma_get_dir_text(uc->dir)); + + switch (uc->dir) { + case DMA_MEM_TO_MEM: + /* Non synchronized - mem to mem type of transfer */ + ret = udma_get_chan_pair(uc); + if (ret) + return ret; + + ret = udma_alloc_tx_resources(uc); + if (ret) + goto err_free_res; + + ret = udma_alloc_rx_resources(uc); + if (ret) + goto err_free_res; + + uc->src_thread = ud->psil_base + uc->tchan->id; + uc->dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000; + break; + case DMA_MEM_TO_DEV: + /* Slave transfer synchronized - mem to dev (TX) trasnfer */ + ret = udma_alloc_tx_resources(uc); + if (ret) + goto err_free_res; + + uc->src_thread = ud->psil_base + uc->tchan->id; + uc->dst_thread = uc->slave_thread_id; + if (!(uc->dst_thread & 0x8000)) + uc->dst_thread |= 0x8000; + + break; + case DMA_DEV_TO_MEM: + /* Slave transfer synchronized - dev to mem (RX) trasnfer */ + ret = udma_alloc_rx_resources(uc); + if (ret) + goto err_free_res; + + uc->src_thread = uc->slave_thread_id; + uc->dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000; + + break; + default: + /* Can not happen */ + pr_debug("%s: chan:%d invalid direction (%u)\n", + __func__, uc->id, uc->dir); + return -EINVAL; + } + + /* We have channel indexes and rings */ + if (uc->dir == DMA_MEM_TO_MEM) { + ret = udma_alloc_tchan_sci_req(uc); + if (ret) + goto err_free_res; + + ret = udma_alloc_rchan_sci_req(uc); + if (ret) + goto err_free_res; + } else { + /* Slave transfer */ + if (uc->dir == DMA_MEM_TO_DEV) { + ret = udma_alloc_tchan_sci_req(uc); + if (ret) + goto 
err_free_res; + } else { + ret = udma_alloc_rchan_sci_req(uc); + if (ret) + goto err_free_res; + } + } + + /* PSI-L pairing */ + ret = udma_navss_psil_pair(ud, uc->src_thread, uc->dst_thread); + if (ret) { + dev_err(ud->dev, "k3_nav_psil_request_link fail\n"); + goto err_free_res; + } + + return 0; + +err_free_res: + udma_free_tx_resources(uc); + udma_free_rx_resources(uc); + uc->slave_thread_id = -1; + return ret; +} + +static void udma_free_chan_resources(struct udma_chan *uc) +{ + /* Some configuration to UDMA-P channel: disable, reset, whatever */ + + /* Release PSI-L pairing */ + udma_navss_psil_unpair(uc->ud, uc->src_thread, uc->dst_thread); + + /* Reset the rings for a new start */ + udma_reset_rings(uc); + udma_free_tx_resources(uc); + udma_free_rx_resources(uc); + + uc->slave_thread_id = -1; + uc->dir = DMA_MEM_TO_MEM; +} + +static int udma_get_mmrs(struct udevice *dev) +{ + struct udma_dev *ud = dev_get_priv(dev); + int i; + + for (i = 0; i < MMR_LAST; i++) { + ud->mmrs[i] = (uint32_t *)devfdt_get_addr_name(dev, + mmr_names[i]); + if (!ud->mmrs[i]) + return -EINVAL; + } + + return 0; +} + +#define UDMA_MAX_CHANNELS 192 + +static int udma_probe(struct udevice *dev) +{ + struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev); + struct udma_dev *ud = dev_get_priv(dev); + int i, ret; + u32 cap2, cap3; + struct udevice *tmp; + struct udevice *tisci_dev = NULL; + + ret = udma_get_mmrs(dev); + if (ret) + return ret; + + ret = uclass_get_device_by_phandle(UCLASS_MISC, dev, + "ti,ringacc", &tmp); + ud->ringacc = dev_get_priv(tmp); + if (IS_ERR(ud->ringacc)) + return PTR_ERR(ud->ringacc); + + ud->psil_base = dev_read_u32_default(dev, "ti,psil-base", 0); + if (!ud->psil_base) { + dev_info(dev, + "Missing ti,psil-base property, using %d.\n", ret); + return -EINVAL; + } + + ret = uclass_get_device_by_name(UCLASS_FIRMWARE, "dmsc", &tisci_dev); + if (ret) { + debug("TISCI RA RM get failed (%d)\n", ret); + ud->tisci = NULL; + return 0; + } + ud->tisci = (struct ti_sci_handle *) + (ti_sci_get_handle_from_sysfw(tisci_dev)); + + ret = dev_read_u32_default(dev, "ti,sci", 0); + if (!ret) { + dev_err(dev, "TISCI RA RM disabled\n"); + ud->tisci = NULL; + } + + if (ud->tisci) { + ofnode navss_ofnode = ofnode_get_parent(dev_ofnode(dev)); + + ud->tisci_dev_id = -1; + ret = dev_read_u32(dev, "ti,sci-dev-id", &ud->tisci_dev_id); + if (ret) { + dev_err(dev, "ti,sci-dev-id read failure %d\n", ret); + return ret; + } + + ud->tisci_navss_dev_id = -1; + ret = ofnode_read_u32(navss_ofnode, "ti,sci-dev-id", + &ud->tisci_navss_dev_id); + if (ret) { + dev_err(dev, "navss sci-dev-id read failure %d\n", ret); + return ret; + } + + ud->tisci_udmap_ops = &ud->tisci->ops.rm_udmap_ops; + ud->tisci_psil_ops = &ud->tisci->ops.rm_psil_ops; + } + + ud->is_coherent = dev_read_bool(dev, "dma-coherent"); + + cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28); + cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c); + + ud->rflow_cnt = cap3 & 0x3fff; + ud->tchan_cnt = cap2 & 0x1ff; + ud->echan_cnt = (cap2 >> 9) & 0x1ff; + ud->rchan_cnt = (cap2 >> 18) & 0x1ff; + ud->ch_count = ud->tchan_cnt + ud->rchan_cnt; + + dev_info(dev, + "Number of channels: %u (tchan: %u, echan: %u, rchan: %u dev-id %u)\n", + ud->ch_count, ud->tchan_cnt, ud->echan_cnt, ud->rchan_cnt, + ud->tisci_dev_id); + dev_info(dev, "Number of rflows: %u\n", ud->rflow_cnt); + + ud->channels = devm_kcalloc(dev, ud->ch_count, sizeof(*ud->channels), + GFP_KERNEL); + ud->tchan_map = devm_kcalloc(dev, BITS_TO_LONGS(ud->tchan_cnt), + sizeof(unsigned long), GFP_KERNEL); + ud->tchans = 
devm_kcalloc(dev, ud->tchan_cnt, + sizeof(*ud->tchans), GFP_KERNEL); + ud->rchan_map = devm_kcalloc(dev, BITS_TO_LONGS(ud->rchan_cnt), + sizeof(unsigned long), GFP_KERNEL); + ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, + sizeof(*ud->rchans), GFP_KERNEL); + ud->rflow_map = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt), + sizeof(unsigned long), GFP_KERNEL); + ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, + sizeof(*ud->rflows), GFP_KERNEL); + + if (!ud->channels || !ud->tchan_map || !ud->rchan_map || + !ud->rflow_map || !ud->tchans || !ud->rchans || !ud->rflows) + return -ENOMEM; + + for (i = 0; i < ud->tchan_cnt; i++) { + struct udma_tchan *tchan = &ud->tchans[i]; + + tchan->id = i; + tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + UDMA_CH_1000(i); + } + + for (i = 0; i < ud->rchan_cnt; i++) { + struct udma_rchan *rchan = &ud->rchans[i]; + + rchan->id = i; + rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + UDMA_CH_1000(i); + } + + for (i = 0; i < ud->rflow_cnt; i++) { + struct udma_rflow *rflow = &ud->rflows[i]; + + rflow->id = i; + } + + for (i = 0; i < ud->ch_count; i++) { + struct udma_chan *uc = &ud->channels[i]; + + uc->ud = ud; + uc->id = i; + uc->slave_thread_id = -1; + uc->tchan = NULL; + uc->rchan = NULL; + uc->dir = DMA_MEM_TO_MEM; + sprintf(uc->name, "UDMA chan%d\n", i); + if (!i) + uc->in_use = true; + } + + pr_debug("UDMA(rev: 0x%08x) CAP0-3: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n", + udma_read(ud->mmrs[MMR_GCFG], 0), + udma_read(ud->mmrs[MMR_GCFG], 0x20), + udma_read(ud->mmrs[MMR_GCFG], 0x24), + udma_read(ud->mmrs[MMR_GCFG], 0x28), + udma_read(ud->mmrs[MMR_GCFG], 0x2c)); + + uc_priv->supported = DMA_SUPPORTS_MEM_TO_MEM | DMA_SUPPORTS_MEM_TO_DEV; + + return ret; +} + +static int *udma_prep_dma_memcpy(struct udma_chan *uc, dma_addr_t dest, + dma_addr_t src, size_t len) +{ + u32 tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring); + struct cppi5_tr_type15_t *tr_req; + int num_tr; + size_t tr_size = sizeof(struct cppi5_tr_type15_t); + u16 tr0_cnt0, tr0_cnt1, tr1_cnt0; + unsigned long dummy; + void *tr_desc; + size_t desc_size; + + if (len < SZ_64K) { + num_tr = 1; + tr0_cnt0 = len; + tr0_cnt1 = 1; + } else { + unsigned long align_to = __ffs(src | dest); + + if (align_to > 3) + align_to = 3; + /* + * Keep simple: tr0: SZ_64K-alignment blocks, + * tr1: the remaining + */ + num_tr = 2; + tr0_cnt0 = (SZ_64K - BIT(align_to)); + if (len / tr0_cnt0 >= SZ_64K) { + dev_err(uc->ud->dev, "size %zu is not supported\n", + len); + return NULL; + } + + tr0_cnt1 = len / tr0_cnt0; + tr1_cnt0 = len % tr0_cnt0; + } + + desc_size = cppi5_trdesc_calc_size(num_tr, tr_size); + tr_desc = dma_alloc_coherent(desc_size, &dummy); + if (!tr_desc) + return NULL; + memset(tr_desc, 0, desc_size); + + cppi5_trdesc_init(tr_desc, num_tr, tr_size, 0, 0); + cppi5_desc_set_pktids(tr_desc, uc->id, 0x3fff); + cppi5_desc_set_retpolicy(tr_desc, 0, tc_ring_id); + + tr_req = tr_desc + tr_size; + + cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true, + CPPI5_TR_EVENT_SIZE_COMPLETION, 1); + cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT); + + tr_req[0].addr = src; + tr_req[0].icnt0 = tr0_cnt0; + tr_req[0].icnt1 = tr0_cnt1; + tr_req[0].icnt2 = 1; + tr_req[0].icnt3 = 1; + tr_req[0].dim1 = tr0_cnt0; + + tr_req[0].daddr = dest; + tr_req[0].dicnt0 = tr0_cnt0; + tr_req[0].dicnt1 = tr0_cnt1; + tr_req[0].dicnt2 = 1; + tr_req[0].dicnt3 = 1; + tr_req[0].ddim1 = tr0_cnt0; + + if (num_tr == 2) { + cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true, + CPPI5_TR_EVENT_SIZE_COMPLETION, 0); + cppi5_tr_csf_set(&tr_req[1].flags, 
CPPI5_TR_CSF_SUPR_EVT); + + tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0; + tr_req[1].icnt0 = tr1_cnt0; + tr_req[1].icnt1 = 1; + tr_req[1].icnt2 = 1; + tr_req[1].icnt3 = 1; + + tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0; + tr_req[1].dicnt0 = tr1_cnt0; + tr_req[1].dicnt1 = 1; + tr_req[1].dicnt2 = 1; + tr_req[1].dicnt3 = 1; + } + + cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP); + + if (!udma_is_coherent(uc)) { + flush_dcache_range((u64)tr_desc, + ALIGN((u64)tr_desc + desc_size, + ARCH_DMA_MINALIGN)); + } + + k3_nav_ringacc_ring_push(uc->tchan->t_ring, &tr_desc); + + return 0; +} + +static int udma_transfer(struct udevice *dev, int direction, + void *dst, void *src, size_t len) +{ + struct udma_dev *ud = dev_get_priv(dev); + /* Channel0 is reserved for memcpy */ + struct udma_chan *uc = &ud->channels[0]; + dma_addr_t paddr = 0; + int ret; + + ret = udma_alloc_chan_resources(uc); + if (ret) + return ret; + + udma_prep_dma_memcpy(uc, (dma_addr_t)dst, (dma_addr_t)src, len); + udma_start(uc); + udma_poll_completion(uc, &paddr); + udma_stop(uc); + + udma_free_chan_resources(uc); + return 0; +} + +static int udma_request(struct dma *dma) +{ + struct udma_dev *ud = dev_get_priv(dma->dev); + struct udma_chan *uc; + unsigned long dummy; + int ret; + + if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) { + dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id); + return -EINVAL; + } + + uc = &ud->channels[dma->id]; + ret = udma_alloc_chan_resources(uc); + if (ret) { + dev_err(dma->dev, "alloc dma res failed %d\n", ret); + return -EINVAL; + } + + uc->hdesc_size = cppi5_hdesc_calc_size(uc->needs_epib, + uc->psd_size, 0); + uc->hdesc_size = ALIGN(uc->hdesc_size, ARCH_DMA_MINALIGN); + + if (uc->dir == DMA_MEM_TO_DEV) { + uc->desc_tx = dma_alloc_coherent(uc->hdesc_size, &dummy); + memset(uc->desc_tx, 0, uc->hdesc_size); + } else { + uc->desc_rx = dma_alloc_coherent( + uc->hdesc_size * UDMA_RX_DESC_NUM, &dummy); + memset(uc->desc_rx, 0, uc->hdesc_size * UDMA_RX_DESC_NUM); + } + + uc->in_use = true; + uc->desc_rx_cur = 0; + uc->num_rx_bufs = 0; + + return 0; +} + +static int udma_free(struct dma *dma) +{ + struct udma_dev *ud = dev_get_priv(dma->dev); + struct udma_chan *uc; + + if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) { + dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id); + return -EINVAL; + } + uc = &ud->channels[dma->id]; + + if (udma_is_chan_running(uc)) + udma_stop(uc); + udma_free_chan_resources(uc); + + uc->in_use = false; + + return 0; +} + +static int udma_enable(struct dma *dma) +{ + struct udma_dev *ud = dev_get_priv(dma->dev); + struct udma_chan *uc; + int ret; + + if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) { + dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id); + return -EINVAL; + } + uc = &ud->channels[dma->id]; + + ret = udma_start(uc); + + return ret; +} + +static int udma_disable(struct dma *dma) +{ + struct udma_dev *ud = dev_get_priv(dma->dev); + struct udma_chan *uc; + int ret = 0; + + if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) { + dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id); + return -EINVAL; + } + uc = &ud->channels[dma->id]; + + if (udma_is_chan_running(uc)) + ret = udma_stop(uc); + else + dev_err(dma->dev, "%s not running\n", __func__); + + return ret; +} + +static int udma_send(struct dma *dma, void *src, size_t len, void *metadata) +{ + struct udma_dev *ud = dev_get_priv(dma->dev); + struct cppi5_host_desc_t *desc_tx; + dma_addr_t dma_src = (dma_addr_t)src; + struct ti_udma_drv_packet_data packet_data = { 0 }; + dma_addr_t paddr; + struct 
udma_chan *uc; + u32 tc_ring_id; + int ret; + + if (!metadata) + packet_data = *((struct ti_udma_drv_packet_data *)metadata); + + if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) { + dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id); + return -EINVAL; + } + uc = &ud->channels[dma->id]; + + if (uc->dir != DMA_MEM_TO_DEV) + return -EINVAL; + + tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring); + + desc_tx = uc->desc_tx; + + cppi5_hdesc_reset_hbdesc(desc_tx); + + cppi5_hdesc_init(desc_tx, + uc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0, + uc->psd_size); + cppi5_hdesc_set_pktlen(desc_tx, len); + cppi5_hdesc_attach_buf(desc_tx, dma_src, len, dma_src, len); + cppi5_desc_set_pktids(&desc_tx->hdr, uc->id, 0x3fff); + cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, tc_ring_id); + /* pass below information from caller */ + cppi5_hdesc_set_pkttype(desc_tx, packet_data.pkt_type); + cppi5_desc_set_tags_ids(&desc_tx->hdr, 0, packet_data.dest_tag); + + if (!udma_is_coherent(uc)) { + flush_dcache_range((u64)dma_src, + ALIGN((u64)dma_src + len, + ARCH_DMA_MINALIGN)); + flush_dcache_range((u64)desc_tx, + ALIGN((u64)desc_tx + uc->hdesc_size, + ARCH_DMA_MINALIGN)); + } + + ret = k3_nav_ringacc_ring_push(uc->tchan->t_ring, &uc->desc_tx); + if (ret) { + dev_err(dma->dev, "TX dma push fail ch_id %lu %d\n", + dma->id, ret); + return ret; + } + + udma_poll_completion(uc, &paddr); + + return 0; +} + +static int udma_receive(struct dma *dma, void **dst, void *metadata) +{ + struct udma_dev *ud = dev_get_priv(dma->dev); + struct cppi5_host_desc_t *desc_rx; + dma_addr_t buf_dma; + struct udma_chan *uc; + u32 buf_dma_len, pkt_len; + u32 port_id = 0; + int ret; + + if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) { + dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id); + return -EINVAL; + } + uc = &ud->channels[dma->id]; + + if (uc->dir != DMA_DEV_TO_MEM) + return -EINVAL; + if (!uc->num_rx_bufs) + return -EINVAL; + + ret = k3_nav_ringacc_ring_pop(uc->rchan->r_ring, &desc_rx); + if (ret && ret != -ENODATA) { + dev_err(dma->dev, "rx dma fail ch_id:%lu %d\n", dma->id, ret); + return ret; + } else if (ret == -ENODATA) { + return 0; + } + + /* invalidate cache data */ + if (!udma_is_coherent(uc)) { + invalidate_dcache_range((ulong)desc_rx, + (ulong)(desc_rx + uc->hdesc_size)); + } + + cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len); + pkt_len = cppi5_hdesc_get_pktlen(desc_rx); + + /* invalidate cache data */ + if (!udma_is_coherent(uc)) { + invalidate_dcache_range((ulong)buf_dma, + (ulong)(buf_dma + buf_dma_len)); + } + + cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL); + + *dst = (void *)buf_dma; + uc->num_rx_bufs--; + + return pkt_len; +} + +static int udma_of_xlate(struct dma *dma, struct ofnode_phandle_args *args) +{ + struct udma_dev *ud = dev_get_priv(dma->dev); + struct udma_chan *uc = &ud->channels[0]; + ofnode chconf_node, slave_node; + char prop[50]; + u32 val; + + for (val = 0; val < ud->ch_count; val++) { + uc = &ud->channels[val]; + if (!uc->in_use) + break; + } + + if (val == ud->ch_count) + return -EBUSY; + + uc->dir = DMA_DEV_TO_MEM; + if (args->args[2] == UDMA_DIR_TX) + uc->dir = DMA_MEM_TO_DEV; + + slave_node = ofnode_get_by_phandle(args->args[0]); + if (!ofnode_valid(slave_node)) { + dev_err(ud->dev, "slave node is missing\n"); + return -EINVAL; + } + + snprintf(prop, sizeof(prop), "ti,psil-config%u", args->args[1]); + chconf_node = ofnode_find_subnode(slave_node, prop); + if (!ofnode_valid(chconf_node)) { + dev_err(ud->dev, "Channel configuration node is missing\n"); + return 
-EINVAL; + } + + if (!ofnode_read_u32(chconf_node, "linux,udma-mode", &val)) { + if (val == UDMA_PKT_MODE) + uc->pkt_mode = true; + } + + if (!ofnode_read_u32(chconf_node, "statictr-type", &val)) + uc->static_tr_type = val; + + uc->needs_epib = ofnode_read_bool(chconf_node, "ti,needs-epib"); + if (!ofnode_read_u32(chconf_node, "ti,psd-size", &val)) + uc->psd_size = val; + uc->metadata_size = (uc->needs_epib ? 16 : 0) + uc->psd_size; + + if (ofnode_read_u32(slave_node, "ti,psil-base", &val)) { + dev_err(ud->dev, "ti,psil-base is missing\n"); + return -EINVAL; + } + + uc->slave_thread_id = val + args->args[1]; + + dma->id = uc->id; + pr_debug("Allocated dma chn:%lu epib:%d psdata:%u meta:%u thread_id:%x\n", + dma->id, uc->needs_epib, + uc->psd_size, uc->metadata_size, + uc->slave_thread_id); + + return 0; +} + +int udma_prepare_rcv_buf(struct dma *dma, void *dst, size_t size) +{ + struct udma_dev *ud = dev_get_priv(dma->dev); + struct cppi5_host_desc_t *desc_rx; + dma_addr_t dma_dst; + struct udma_chan *uc; + u32 desc_num; + + if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) { + dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id); + return -EINVAL; + } + uc = &ud->channels[dma->id]; + + if (uc->dir != DMA_DEV_TO_MEM) + return -EINVAL; + + if (uc->num_rx_bufs >= UDMA_RX_DESC_NUM) + return -EINVAL; + + desc_num = uc->desc_rx_cur % UDMA_RX_DESC_NUM; + desc_rx = uc->desc_rx + (desc_num * uc->hdesc_size); + dma_dst = (dma_addr_t)dst; + + cppi5_hdesc_reset_hbdesc(desc_rx); + + cppi5_hdesc_init(desc_rx, + uc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0, + uc->psd_size); + cppi5_hdesc_set_pktlen(desc_rx, size); + cppi5_hdesc_attach_buf(desc_rx, dma_dst, size, dma_dst, size); + + if (!udma_is_coherent(uc)) { + flush_dcache_range((u64)desc_rx, + ALIGN((u64)desc_rx + uc->hdesc_size, + ARCH_DMA_MINALIGN)); + } + + k3_nav_ringacc_ring_push(uc->rchan->fd_ring, &desc_rx); + + uc->num_rx_bufs++; + uc->desc_rx_cur++; + + return 0; +} + +static const struct dma_ops udma_ops = { + .transfer = udma_transfer, + .of_xlate = udma_of_xlate, + .request = udma_request, + .free = udma_free, + .enable = udma_enable, + .disable = udma_disable, + .send = udma_send, + .receive = udma_receive, + .prepare_rcv_buf = udma_prepare_rcv_buf, +}; + +static const struct udevice_id udma_ids[] = { + { .compatible = "ti,k3-navss-udmap" }, + { } +}; + +U_BOOT_DRIVER(ti_edma3) = { + .name = "ti-udma", + .id = UCLASS_DMA, + .of_match = udma_ids, + .ops = &udma_ops, + .probe = udma_probe, + .priv_auto_alloc_size = sizeof(struct udma_dev), +}; diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c index 91481260411..d47d22fff3e 100644 --- a/drivers/firmware/ti_sci.c +++ b/drivers/firmware/ti_sci.c @@ -12,6 +12,7 @@ #include <errno.h> #include <mailbox.h> #include <dm/device.h> +#include <linux/compat.h> #include <linux/err.h> #include <linux/soc/ti/k3-sec-proxy.h> #include <linux/soc/ti/ti_sci_protocol.h> @@ -32,15 +33,36 @@ struct ti_sci_xfer { }; /** + * struct ti_sci_rm_type_map - Structure representing TISCI Resource + * management representation of dev_ids. + * @dev_id: TISCI device ID + * @type: Corresponding id as identified by TISCI RM. + * + * Note: This is used only as a work around for using RM range apis + * for AM654 SoC. For future SoCs dev_id will be used as type + * for RM range APIs. In order to maintain ABI backward compatibility + * type is not being changed for AM654 SoC. 
+ */ +struct ti_sci_rm_type_map { + u32 dev_id; + u16 type; +}; + +/** * struct ti_sci_desc - Description of SoC integration - * @host_id: Host identifier representing the compute entity - * @max_rx_timeout_us: Timeout for communication with SoC (in Microseconds) - * @max_msg_size: Maximum size of data per message that can be handled. + * @default_host_id: Host identifier representing the compute entity + * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds) + * @max_msgs: Maximum number of messages that can be pending + * simultaneously in the system + * @max_msg_size: Maximum size of data per message that can be handled. + * @rm_type_map: RM resource type mapping structure. */ struct ti_sci_desc { - u8 host_id; - int max_rx_timeout_us; + u8 default_host_id; + int max_rx_timeout_ms; + int max_msgs; int max_msg_size; + struct ti_sci_rm_type_map *rm_type_map; }; /** @@ -136,7 +158,7 @@ static inline int ti_sci_get_response(struct ti_sci_info *info, int ret; /* Receive the response */ - ret = mbox_recv(chan, msg, info->desc->max_rx_timeout_us); + ret = mbox_recv(chan, msg, info->desc->max_rx_timeout_ms); if (ret) { dev_err(info->dev, "%s: Message receive failed. ret = %d\n", __func__, ret); @@ -1441,6 +1463,199 @@ static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle) return ret; } +static int ti_sci_get_resource_type(struct ti_sci_info *info, u16 dev_id, + u16 *type) +{ + struct ti_sci_rm_type_map *rm_type_map = info->desc->rm_type_map; + bool found = false; + int i; + + /* If map is not provided then assume dev_id is used as type */ + if (!rm_type_map) { + *type = dev_id; + return 0; + } + + for (i = 0; rm_type_map[i].dev_id; i++) { + if (rm_type_map[i].dev_id == dev_id) { + *type = rm_type_map[i].type; + found = true; + break; + } + } + + if (!found) + return -EINVAL; + + return 0; +} + +/** + * ti_sci_get_resource_range - Helper to get a range of resources assigned + * to a host. Resource is uniquely identified by + * type and subtype. + * @handle: Pointer to TISCI handle. + * @dev_id: TISCI device ID. + * @subtype: Resource assignment subtype that is being requested + * from the given device. + * @s_host: Host processor ID to which the resources are allocated + * @range_start: Start index of the resource range + * @range_num: Number of resources in the range + * + * Return: 0 if all went fine, else return appropriate error. 
+ */ +static int ti_sci_get_resource_range(const struct ti_sci_handle *handle, + u32 dev_id, u8 subtype, u8 s_host, + u16 *range_start, u16 *range_num) +{ + struct ti_sci_msg_resp_get_resource_range *resp; + struct ti_sci_msg_req_get_resource_range req; + struct ti_sci_xfer *xfer; + struct ti_sci_info *info; + u16 type; + int ret = 0; + + if (IS_ERR(handle)) + return PTR_ERR(handle); + if (!handle) + return -EINVAL; + + info = handle_to_ti_sci_info(handle); + + xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_RESOURCE_RANGE, + TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, + (u32 *)&req, sizeof(req), sizeof(*resp)); + if (IS_ERR(xfer)) { + ret = PTR_ERR(xfer); + dev_err(dev, "Message alloc failed(%d)\n", ret); + return ret; + } + + ret = ti_sci_get_resource_type(info, dev_id, &type); + if (ret) { + dev_err(dev, "rm type lookup failed for %u\n", dev_id); + goto fail; + } + + req.secondary_host = s_host; + req.type = type & MSG_RM_RESOURCE_TYPE_MASK; + req.subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK; + + ret = ti_sci_do_xfer(info, xfer); + if (ret) { + dev_err(dev, "Mbox send fail %d\n", ret); + goto fail; + } + + resp = (struct ti_sci_msg_resp_get_resource_range *)xfer->tx_message.buf; + if (!ti_sci_is_response_ack(resp)) { + ret = -ENODEV; + } else if (!resp->range_start && !resp->range_num) { + ret = -ENODEV; + } else { + *range_start = resp->range_start; + *range_num = resp->range_num; + }; + +fail: + return ret; +} + +/** + * ti_sci_cmd_get_resource_range - Get a range of resources assigned to host + * that is same as ti sci interface host. + * @handle: Pointer to TISCI handle. + * @dev_id: TISCI device ID. + * @subtype: Resource assignment subtype that is being requested + * from the given device. + * @range_start: Start index of the resource range + * @range_num: Number of resources in the range + * + * Return: 0 if all went fine, else return appropriate error. + */ +static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle, + u32 dev_id, u8 subtype, + u16 *range_start, u16 *range_num) +{ + return ti_sci_get_resource_range(handle, dev_id, subtype, + TI_SCI_IRQ_SECONDARY_HOST_INVALID, + range_start, range_num); +} + +/** + * ti_sci_cmd_get_resource_range_from_shost - Get a range of resources + * assigned to a specified host. + * @handle: Pointer to TISCI handle. + * @dev_id: TISCI device ID. + * @subtype: Resource assignment subtype that is being requested + * from the given device. + * @s_host: Host processor ID to which the resources are allocated + * @range_start: Start index of the resource range + * @range_num: Number of resources in the range + * + * Return: 0 if all went fine, else return appropriate error. + */ +static +int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle, + u32 dev_id, u8 subtype, u8 s_host, + u16 *range_start, u16 *range_num) +{ + return ti_sci_get_resource_range(handle, dev_id, subtype, s_host, + range_start, range_num); +} + +/** + * ti_sci_cmd_query_msmc() - Command to query currently available msmc memory + * @handle: pointer to TI SCI handle + * @msms_start: MSMC start as returned by tisci + * @msmc_end: MSMC end as returned by tisci + * + * Return: 0 if all went well, else returns appropriate error value. 
+/**
+ * ti_sci_cmd_query_msmc() - Command to query currently available msmc memory
+ * @handle:	pointer to TI SCI handle
+ * @msmc_start:	MSMC start as returned by tisci
+ * @msmc_end:	MSMC end as returned by tisci
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_query_msmc(const struct ti_sci_handle *handle,
+				 u64 *msmc_start, u64 *msmc_end)
+{
+	struct ti_sci_msg_resp_query_msmc *resp;
+	struct ti_sci_msg_hdr req;
+	struct ti_sci_info *info;
+	struct ti_sci_xfer *xfer;
+	int ret = 0;
+
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+	if (!handle)
+		return -EINVAL;
+
+	info = handle_to_ti_sci_info(handle);
+
+	xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_QUERY_MSMC,
+				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				     (u32 *)&req, sizeof(req), sizeof(*resp));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
+		return ret;
+	}
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(info->dev, "Mbox send fail %d\n", ret);
+		return ret;
+	}
+
+	resp = (struct ti_sci_msg_resp_query_msmc *)xfer->tx_message.buf;
+
+	if (!ti_sci_is_response_ack(resp))
+		return -ENODEV;
+
+	*msmc_start = ((u64)resp->msmc_start_high << TISCI_ADDR_HIGH_SHIFT) |
+			resp->msmc_start_low;
+	*msmc_end = ((u64)resp->msmc_end_high << TISCI_ADDR_HIGH_SHIFT) |
+			resp->msmc_end_low;
+
+	return ret;
+}
+
 /**
  * ti_sci_cmd_proc_request() - Command to request a physical processor control
  * @handle:	Pointer to TI SCI handle
@@ -1803,6 +2018,416 @@ static int ti_sci_cmd_get_proc_boot_status(const struct ti_sci_handle *handle,
 	return ret;
 }
 
+/**
+ * ti_sci_cmd_ring_config() - configure RA ring
+ * @handle:	pointer to TI SCI handle
+ * @valid_params: Bitfield defining validity of ring configuration parameters.
+ * @nav_id:	Device ID of Navigator Subsystem from which the ring is
+ *		allocated
+ * @index:	Ring index.
+ * @addr_lo:	The ring base address lo 32 bits
+ * @addr_hi:	The ring base address hi 32 bits
+ * @count:	Number of ring elements.
+ * @mode:	The mode of the ring
+ * @size:	The ring element size.
+ * @order_id:	Specifies the ring's bus order ID.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ *
+ * See @ti_sci_msg_rm_ring_cfg_req for more info.
+ */
+static int ti_sci_cmd_ring_config(const struct ti_sci_handle *handle,
+				  u32 valid_params, u16 nav_id, u16 index,
+				  u32 addr_lo, u32 addr_hi, u32 count,
+				  u8 mode, u8 size, u8 order_id)
+{
+	struct ti_sci_msg_rm_ring_cfg_resp *resp;
+	struct ti_sci_msg_rm_ring_cfg_req req;
+	struct ti_sci_xfer *xfer;
+	struct ti_sci_info *info;
+	int ret = 0;
+
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+	if (!handle)
+		return -EINVAL;
+
+	info = handle_to_ti_sci_info(handle);
+
+	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_RING_CFG,
+				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				     (u32 *)&req, sizeof(req), sizeof(*resp));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(info->dev, "RM_RA:Message config failed(%d)\n", ret);
+		return ret;
+	}
+	req.valid_params = valid_params;
+	req.nav_id = nav_id;
+	req.index = index;
+	req.addr_lo = addr_lo;
+	req.addr_hi = addr_hi;
+	req.count = count;
+	req.mode = mode;
+	req.size = size;
+	req.order_id = order_id;
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(info->dev, "RM_RA:Mbox config send fail %d\n", ret);
+		goto fail;
+	}
+
+	resp = (struct ti_sci_msg_rm_ring_cfg_resp *)xfer->tx_message.buf;
+
+	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+	dev_dbg(info->dev, "RM_RA:config ring %u ret:%d\n", index, ret);
+	return ret;
+}
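Callers drive ti_sci_cmd_ring_config() through the rm_ring_ops vector. A sketch of a ring configuration under stated assumptions: BIT(), lower_32_bits() and upper_32_bits() come from U-Boot's linux-compat headers, the valid_params bits 0-4 follow the ti_sci_msg_rm_ring_cfg_req documentation in ti_sci.h, and the mode/order_id values are illustrative only:

	static int example_ring_cfg(const struct ti_sci_handle *handle,
				    u16 nav_id, u16 ring_idx, u64 base,
				    u32 elems)
	{
		/* addr_lo/addr_hi/count/mode/size valid; order_id left unset */
		u32 valid = BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4);

		/* 8-byte ring elements: encoded size = log2(8) - 2 = 1 */
		return handle->ops.rm_ring_ops.config(handle, valid, nav_id,
						      ring_idx,
						      lower_32_bits(base),
						      upper_32_bits(base),
						      elems, /* mode */ 0,
						      /* size */ 1,
						      /* order_id */ 0);
	}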
+/**
+ * ti_sci_cmd_ring_get_config() - get RA ring configuration
+ * @handle:	pointer to TI SCI handle
+ * @nav_id:	Device ID of Navigator Subsystem from which the ring is
+ *		allocated
+ * @index:	Ring index.
+ * @addr_lo:	returns ring's base address lo 32 bits
+ * @addr_hi:	returns ring's base address hi 32 bits
+ * @count:	returns number of ring elements.
+ * @mode:	returns mode of the ring
+ * @size:	returns ring element size.
+ * @order_id:	returns ring's bus order ID.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ *
+ * See @ti_sci_msg_rm_ring_get_cfg_req for more info.
+ */
+static int ti_sci_cmd_ring_get_config(const struct ti_sci_handle *handle,
+				      u32 nav_id, u32 index, u8 *mode,
+				      u32 *addr_lo, u32 *addr_hi,
+				      u32 *count, u8 *size, u8 *order_id)
+{
+	struct ti_sci_msg_rm_ring_get_cfg_resp *resp;
+	struct ti_sci_msg_rm_ring_get_cfg_req req;
+	struct ti_sci_xfer *xfer;
+	struct ti_sci_info *info;
+	int ret = 0;
+
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+	if (!handle)
+		return -EINVAL;
+
+	info = handle_to_ti_sci_info(handle);
+
+	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_RING_GET_CFG,
+				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				     (u32 *)&req, sizeof(req), sizeof(*resp));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(info->dev,
+			"RM_RA:Message get config failed(%d)\n", ret);
+		return ret;
+	}
+	req.nav_id = nav_id;
+	req.index = index;
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(info->dev, "RM_RA:Mbox get config send fail %d\n", ret);
+		goto fail;
+	}
+
+	resp = (struct ti_sci_msg_rm_ring_get_cfg_resp *)xfer->tx_message.buf;
+
+	if (!ti_sci_is_response_ack(resp)) {
+		ret = -ENODEV;
+	} else {
+		if (mode)
+			*mode = resp->mode;
+		if (addr_lo)
+			*addr_lo = resp->addr_lo;
+		if (addr_hi)
+			*addr_hi = resp->addr_hi;
+		if (count)
+			*count = resp->count;
+		if (size)
+			*size = resp->size;
+		if (order_id)
+			*order_id = resp->order_id;
+	}
+
+fail:
+	dev_dbg(info->dev, "RM_RA:get config ring %u ret:%d\n", index, ret);
+	return ret;
+}
+
+static int ti_sci_cmd_rm_psil_pair(const struct ti_sci_handle *handle,
+				   u32 nav_id, u32 src_thread, u32 dst_thread)
+{
+	struct ti_sci_msg_hdr *resp;
+	struct ti_sci_msg_psil_pair req;
+	struct ti_sci_xfer *xfer;
+	struct ti_sci_info *info;
+	int ret = 0;
+
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+	if (!handle)
+		return -EINVAL;
+
+	info = handle_to_ti_sci_info(handle);
+
+	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_PSIL_PAIR,
+				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				     (u32 *)&req, sizeof(req), sizeof(*resp));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(info->dev, "RM_PSIL:Message alloc failed(%d)\n", ret);
+		return ret;
+	}
+	req.nav_id = nav_id;
+	req.src_thread = src_thread;
+	req.dst_thread = dst_thread;
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(info->dev, "RM_PSIL:Mbox send fail %d\n", ret);
+		goto fail;
+	}
+
+	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
+	ret = ti_sci_is_response_ack(resp) ?
0 : -ENODEV; + +fail: + dev_dbg(info->dev, "RM_PSIL: nav: %u link pair %u->%u ret:%u\n", + nav_id, src_thread, dst_thread, ret); + return ret; +} + +static int ti_sci_cmd_rm_psil_unpair(const struct ti_sci_handle *handle, + u32 nav_id, u32 src_thread, u32 dst_thread) +{ + struct ti_sci_msg_hdr *resp; + struct ti_sci_msg_psil_unpair req; + struct ti_sci_xfer *xfer; + struct ti_sci_info *info; + int ret = 0; + + if (IS_ERR(handle)) + return PTR_ERR(handle); + if (!handle) + return -EINVAL; + + info = handle_to_ti_sci_info(handle); + + xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_PSIL_UNPAIR, + TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, + (u32 *)&req, sizeof(req), sizeof(*resp)); + if (IS_ERR(xfer)) { + ret = PTR_ERR(xfer); + dev_err(info->dev, "RM_PSIL:Message alloc failed(%d)\n", ret); + return ret; + } + req.nav_id = nav_id; + req.src_thread = src_thread; + req.dst_thread = dst_thread; + + ret = ti_sci_do_xfer(info, xfer); + if (ret) { + dev_err(info->dev, "RM_PSIL:Mbox send fail %d\n", ret); + goto fail; + } + + resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf; + ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV; + +fail: + dev_dbg(info->dev, "RM_PSIL: link unpair %u->%u ret:%u\n", + src_thread, dst_thread, ret); + return ret; +} + +static int ti_sci_cmd_rm_udmap_tx_ch_cfg( + const struct ti_sci_handle *handle, + const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params) +{ + struct ti_sci_msg_rm_udmap_tx_ch_cfg_resp *resp; + struct ti_sci_msg_rm_udmap_tx_ch_cfg_req req; + struct ti_sci_xfer *xfer; + struct ti_sci_info *info; + int ret = 0; + + if (IS_ERR(handle)) + return PTR_ERR(handle); + if (!handle) + return -EINVAL; + + info = handle_to_ti_sci_info(handle); + + xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_RM_UDMAP_TX_CH_CFG, + TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, + (u32 *)&req, sizeof(req), sizeof(*resp)); + if (IS_ERR(xfer)) { + ret = PTR_ERR(xfer); + dev_err(info->dev, "Message TX_CH_CFG alloc failed(%d)\n", ret); + return ret; + } + req.valid_params = params->valid_params; + req.nav_id = params->nav_id; + req.index = params->index; + req.tx_pause_on_err = params->tx_pause_on_err; + req.tx_filt_einfo = params->tx_filt_einfo; + req.tx_filt_pswords = params->tx_filt_pswords; + req.tx_atype = params->tx_atype; + req.tx_chan_type = params->tx_chan_type; + req.tx_supr_tdpkt = params->tx_supr_tdpkt; + req.tx_fetch_size = params->tx_fetch_size; + req.tx_credit_count = params->tx_credit_count; + req.txcq_qnum = params->txcq_qnum; + req.tx_priority = params->tx_priority; + req.tx_qos = params->tx_qos; + req.tx_orderid = params->tx_orderid; + req.fdepth = params->fdepth; + req.tx_sched_priority = params->tx_sched_priority; + + ret = ti_sci_do_xfer(info, xfer); + if (ret) { + dev_err(info->dev, "Mbox send TX_CH_CFG fail %d\n", ret); + goto fail; + } + + resp = + (struct ti_sci_msg_rm_udmap_tx_ch_cfg_resp *)xfer->tx_message.buf; + ret = ti_sci_is_response_ack(resp) ? 
0 : -EINVAL;
+
+fail:
+	dev_dbg(info->dev, "TX_CH_CFG: chn %u ret:%d\n", params->index, ret);
+	return ret;
+}
+
+static int ti_sci_cmd_rm_udmap_rx_ch_cfg(
+			const struct ti_sci_handle *handle,
+			const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params)
+{
+	struct ti_sci_msg_rm_udmap_rx_ch_cfg_resp *resp;
+	struct ti_sci_msg_rm_udmap_rx_ch_cfg_req req;
+	struct ti_sci_xfer *xfer;
+	struct ti_sci_info *info;
+	int ret = 0;
+
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+	if (!handle)
+		return -EINVAL;
+
+	info = handle_to_ti_sci_info(handle);
+
+	xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_RM_UDMAP_RX_CH_CFG,
+				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				     (u32 *)&req, sizeof(req), sizeof(*resp));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(info->dev, "Message RX_CH_CFG alloc failed(%d)\n", ret);
+		return ret;
+	}
+
+	req.valid_params = params->valid_params;
+	req.nav_id = params->nav_id;
+	req.index = params->index;
+	req.rx_fetch_size = params->rx_fetch_size;
+	req.rxcq_qnum = params->rxcq_qnum;
+	req.rx_priority = params->rx_priority;
+	req.rx_qos = params->rx_qos;
+	req.rx_orderid = params->rx_orderid;
+	req.rx_sched_priority = params->rx_sched_priority;
+	req.flowid_start = params->flowid_start;
+	req.flowid_cnt = params->flowid_cnt;
+	req.rx_pause_on_err = params->rx_pause_on_err;
+	req.rx_atype = params->rx_atype;
+	req.rx_chan_type = params->rx_chan_type;
+	req.rx_ignore_short = params->rx_ignore_short;
+	req.rx_ignore_long = params->rx_ignore_long;
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(info->dev, "Mbox send RX_CH_CFG fail %d\n", ret);
+		goto fail;
+	}
+
+	resp =
+	      (struct ti_sci_msg_rm_udmap_rx_ch_cfg_resp *)xfer->tx_message.buf;
+	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
+
+fail:
+	dev_dbg(info->dev, "RX_CH_CFG: chn %u ret:%d\n", params->index, ret);
+	return ret;
+}
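A transmit channel is typically brought up by pairing its PSI-L thread to the peer endpoint and then configuring the channel, in that order. A sketch under stated assumptions: the thread IDs, nav_id, channel type and fetch size are placeholders, the 0x8000 destination-thread offset comes from the PSI-L pairing documentation in ti_sci.h, and the valid_params bits follow the tx_ch_cfg documentation there:

	static int example_udma_tx_bringup(const struct ti_sci_handle *h,
					   u32 nav_id, u32 src_thread,
					   u32 dst_thread, u16 tchan, u16 tcq)
	{
		struct ti_sci_msg_rm_udmap_tx_ch_cfg cfg = {
			/* chan_type, fetch_size, txcq_qnum valid (bits 2-4) */
			.valid_params = BIT(2) | BIT(3) | BIT(4),
			.nav_id = nav_id,
			.index = tchan,
			.tx_chan_type = 2,	/* placeholder value */
			.tx_fetch_size = 16,	/* placeholder: 16 words */
			.txcq_qnum = tcq,
		};
		int ret;

		/* destination threads live at 0x8000 and above */
		ret = h->ops.rm_psil_ops.pair(h, nav_id, src_thread,
					      0x8000 | dst_thread);
		if (ret)
			return ret;

		return h->ops.rm_udmap_ops.tx_ch_cfg(h, &cfg);
	}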
+static int ti_sci_cmd_rm_udmap_rx_flow_cfg(
+			const struct ti_sci_handle *handle,
+			const struct ti_sci_msg_rm_udmap_flow_cfg *params)
+{
+	struct ti_sci_msg_rm_udmap_flow_cfg_resp *resp;
+	struct ti_sci_msg_rm_udmap_flow_cfg_req req;
+	struct ti_sci_xfer *xfer;
+	struct ti_sci_info *info;
+	int ret = 0;
+
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+	if (!handle)
+		return -EINVAL;
+
+	info = handle_to_ti_sci_info(handle);
+
+	xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_RM_UDMAP_FLOW_CFG,
+				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				     (u32 *)&req, sizeof(req), sizeof(*resp));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(info->dev, "RX_FL_CFG: Message alloc failed(%d)\n",
+			ret);
+		return ret;
+	}
+
+	req.valid_params = params->valid_params;
+	req.nav_id = params->nav_id;
+	req.flow_index = params->flow_index;
+	req.rx_einfo_present = params->rx_einfo_present;
+	req.rx_psinfo_present = params->rx_psinfo_present;
+	req.rx_error_handling = params->rx_error_handling;
+	req.rx_desc_type = params->rx_desc_type;
+	req.rx_sop_offset = params->rx_sop_offset;
+	req.rx_dest_qnum = params->rx_dest_qnum;
+	req.rx_src_tag_hi = params->rx_src_tag_hi;
+	req.rx_src_tag_lo = params->rx_src_tag_lo;
+	req.rx_dest_tag_hi = params->rx_dest_tag_hi;
+	req.rx_dest_tag_lo = params->rx_dest_tag_lo;
+	req.rx_src_tag_hi_sel = params->rx_src_tag_hi_sel;
+	req.rx_src_tag_lo_sel = params->rx_src_tag_lo_sel;
+	req.rx_dest_tag_hi_sel = params->rx_dest_tag_hi_sel;
+	req.rx_dest_tag_lo_sel = params->rx_dest_tag_lo_sel;
+	req.rx_fdq0_sz0_qnum = params->rx_fdq0_sz0_qnum;
+	req.rx_fdq1_qnum = params->rx_fdq1_qnum;
+	req.rx_fdq2_qnum = params->rx_fdq2_qnum;
+	req.rx_fdq3_qnum = params->rx_fdq3_qnum;
+	req.rx_ps_location = params->rx_ps_location;
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(info->dev, "RX_FL_CFG: Mbox send fail %d\n", ret);
+		goto fail;
+	}
+
+	resp =
+	       (struct ti_sci_msg_rm_udmap_flow_cfg_resp *)xfer->tx_message.buf;
+	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
+
+fail:
+	dev_dbg(info->dev, "RX_FL_CFG: %u ret:%d\n", params->flow_index, ret);
+	return ret;
+}
+
 /*
  * ti_sci_setup_ops() - Setup the operations structures
  * @info:	pointer to TISCI pointer
@@ -1814,7 +2439,11 @@ static void ti_sci_setup_ops(struct ti_sci_info *info)
 	struct ti_sci_dev_ops *dops = &ops->dev_ops;
 	struct ti_sci_clk_ops *cops = &ops->clk_ops;
 	struct ti_sci_core_ops *core_ops = &ops->core_ops;
+	struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops;
 	struct ti_sci_proc_ops *pops = &ops->proc_ops;
+	struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops;
+	struct ti_sci_rm_psil_ops *psilops = &ops->rm_psil_ops;
+	struct ti_sci_rm_udmap_ops *udmap_ops = &ops->rm_udmap_ops;
 
 	bops->board_config = ti_sci_cmd_set_board_config;
 	bops->board_config_rm = ti_sci_cmd_set_board_config_rm;
@@ -1849,6 +2478,11 @@ static void ti_sci_setup_ops(struct ti_sci_info *info)
 	cops->get_freq = ti_sci_cmd_clk_get_freq;
 
 	core_ops->reboot_device = ti_sci_cmd_core_reboot;
+	core_ops->query_msmc = ti_sci_cmd_query_msmc;
+
+	rm_core_ops->get_range = ti_sci_cmd_get_resource_range;
+	rm_core_ops->get_range_from_shost =
+				ti_sci_cmd_get_resource_range_from_shost;
 
 	pops->proc_request = ti_sci_cmd_proc_request;
 	pops->proc_release = ti_sci_cmd_proc_release;
@@ -1857,6 +2491,16 @@ static void ti_sci_setup_ops(struct ti_sci_info *info)
 	pops->set_proc_boot_ctrl = ti_sci_cmd_set_proc_boot_ctrl;
 	pops->proc_auth_boot_image = ti_sci_cmd_proc_auth_boot_image;
 	pops->get_proc_boot_status = ti_sci_cmd_get_proc_boot_status;
+
+	rops->config = ti_sci_cmd_ring_config;
+	rops->get_config = ti_sci_cmd_ring_get_config;
+
+	psilops->pair = ti_sci_cmd_rm_psil_pair;
+	psilops->unpair = ti_sci_cmd_rm_psil_unpair;
+
+	udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg;
+	udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg;
+	udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg;
 }
 
 /**
@@ -1969,7 +2613,7 @@ static int ti_sci_of_to_info(struct udevice *dev, struct ti_sci_info *info)
 	}
 
 	info->host_id = dev_read_u32_default(dev, "ti,host-id",
-					     info->desc->host_id);
+					     info->desc->default_host_id);
 
 	info->is_secure = dev_read_bool(dev, "ti,secure-host");
 
@@ -2009,17 +2653,164 @@ static int ti_sci_probe(struct udevice *dev)
 	return ret;
 }
 
+/**
+ * ti_sci_get_free_resource() - Get a free resource from a TISCI resource.
+ * @res:	Pointer to the TISCI resource
+ *
+ * Return: resource num if all went ok else TI_SCI_RESOURCE_NULL.
+ */
+u16 ti_sci_get_free_resource(struct ti_sci_resource *res)
+{
+	u16 set, free_bit;
+
+	for (set = 0; set < res->sets; set++) {
+		free_bit = find_first_zero_bit(res->desc[set].res_map,
+					       res->desc[set].num);
+		if (free_bit != res->desc[set].num) {
+			set_bit(free_bit, res->desc[set].res_map);
+			return res->desc[set].start + free_bit;
+		}
+	}
+
+	return TI_SCI_RESOURCE_NULL;
+}
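The allocator above hands out one resource index at a time from the bitmaps built by devm_ti_sci_get_of_resource() below. A sketch of the allocate/use/release cycle a client driver would follow:

	static int example_use_resource(struct ti_sci_resource *res)
	{
		u16 id = ti_sci_get_free_resource(res);

		if (id == TI_SCI_RESOURCE_NULL)
			return -ENOENT;

		/* ... program the ring/channel/flow identified by "id" ... */

		ti_sci_release_resource(res, id);
		return 0;
	}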
+
+/**
+ * ti_sci_release_resource() - Release a resource from a TISCI resource.
+ * @res:	Pointer to the TISCI resource
+ * @id:		Resource id to be released.
+ */
+void ti_sci_release_resource(struct ti_sci_resource *res, u16 id)
+{
+	u16 set;
+
+	for (set = 0; set < res->sets; set++) {
+		if (res->desc[set].start <= id &&
+		    (res->desc[set].num + res->desc[set].start) > id)
+			clear_bit(id - res->desc[set].start,
+				  res->desc[set].res_map);
+	}
+}
+
+/**
+ * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device
+ * @handle:	TISCI handle
+ * @dev:	Device pointer to which the resource is assigned
+ * @dev_id:	TISCI device ID of the device owning the resource
+ * @of_prop:	property name by which the resources are represented
+ *
+ * Note: This function expects of_prop to be an array of subtype values.
+ * It allocates and initializes a ti_sci_resource structure for of_prop.
+ * Client drivers can then call the ti_sci_(get_free, release)_resource
+ * APIs directly to handle the resource.
+ *
+ * Return: Pointer to ti_sci_resource if all went well, else appropriate
+ *	   error pointer.
+ */
+struct ti_sci_resource *
+devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
+			    struct udevice *dev, u32 dev_id, char *of_prop)
+{
+	u32 resource_subtype;
+	u16 resource_type;
+	struct ti_sci_resource *res;
+	int sets, i, ret;
+	u32 *temp;
+
+	res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
+	if (!res)
+		return ERR_PTR(-ENOMEM);
+
+	sets = dev_read_size(dev, of_prop);
+	if (sets < 0) {
+		dev_err(dev, "%s resource type ids not available\n", of_prop);
+		return ERR_PTR(sets);
+	}
+	temp = malloc(sets);
+	if (!temp)
+		return ERR_PTR(-ENOMEM);
+	sets /= sizeof(u32);
+	res->sets = sets;
+
+	res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc),
+				 GFP_KERNEL);
+	if (!res->desc)
+		return ERR_PTR(-ENOMEM);
+
+	ret = ti_sci_get_resource_type(handle_to_ti_sci_info(handle), dev_id,
+				       &resource_type);
+	if (ret) {
+		dev_err(dev, "No valid resource type for %u\n", dev_id);
+		return ERR_PTR(-EINVAL);
+	}
+
+	ret = dev_read_u32_array(dev, of_prop, temp, res->sets);
+	if (ret)
+		return ERR_PTR(-EINVAL);
+
+	for (i = 0; i < res->sets; i++) {
+		resource_subtype = temp[i];
+		ret = handle->ops.rm_core_ops.get_range(handle, dev_id,
+							resource_subtype,
+							&res->desc[i].start,
+							&res->desc[i].num);
+		if (ret) {
+			dev_err(dev, "type %d subtype %d not allocated for host %d\n",
+				resource_type, resource_subtype,
+				handle_to_ti_sci_info(handle)->host_id);
+			return ERR_PTR(ret);
+		}
+
+		dev_dbg(dev, "res type = %d, subtype = %d, start = %d, num = %d\n",
+			resource_type, resource_subtype, res->desc[i].start,
+			res->desc[i].num);
+
+		res->desc[i].res_map =
+			devm_kzalloc(dev, BITS_TO_LONGS(res->desc[i].num) *
+				     sizeof(*res->desc[i].res_map), GFP_KERNEL);
+		if (!res->desc[i].res_map)
+			return ERR_PTR(-ENOMEM);
+	}
+
+	return res;
+}
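From a client driver's probe path the flow is: fetch the resource descriptor once, then allocate from it as needed. A sketch, assuming a handle obtained elsewhere; dev_id 188 (MAIN_NAV_UDMAP) comes from the AM654 map below, and the property name is illustrative rather than taken from a binding in this patch:

	static int example_probe_resources(struct udevice *dev,
					   const struct ti_sci_handle *handle)
	{
		struct ti_sci_resource *res;

		res = devm_ti_sci_get_of_resource(handle, dev, 188 /* UDMAP */,
						  "ti,sci-rm-range-rchan");
		if (IS_ERR(res))
			return PTR_ERR(res);

		/* res is now ready for ti_sci_get_free_resource() */
		return 0;
	}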
+
+/* Description for K2G */
+static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
+	.default_host_id = 2,
+	/* Conservative duration */
+	.max_rx_timeout_ms = 10000,
+	/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
+	.max_msgs = 20,
+	.max_msg_size = 64,
+	.rm_type_map = NULL,
+};
+
+static struct ti_sci_rm_type_map ti_sci_am654_rm_type_map[] = {
+	{.dev_id = 56, .type = 0x00b},	/* GIC_IRQ */
+	{.dev_id = 179, .type = 0x000},	/* MAIN_NAV_UDMASS_IA0 */
+	{.dev_id = 187, .type = 0x009},	/* MAIN_NAV_RA */
+	{.dev_id = 188, .type = 0x006},	/* MAIN_NAV_UDMAP */
+	{.dev_id = 194, .type = 0x007},	/* MCU_NAV_UDMAP */
+	{.dev_id = 195, .type = 0x00a},	/* MCU_NAV_RA */
+	{.dev_id = 0, .type = 0x000},	/* end of table */
+};
+
 /* Description for AM654 */
-static const struct ti_sci_desc ti_sci_sysfw_am654_desc = {
-	.host_id = 4,
-	.max_rx_timeout_us = 1000000,
+static const struct ti_sci_desc ti_sci_pmmc_am654_desc = {
+	.default_host_id = 12,
+	/* Conservative duration */
+	.max_rx_timeout_ms = 10000,
+	/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
+	.max_msgs = 20,
 	.max_msg_size = 60,
+	.rm_type_map = ti_sci_am654_rm_type_map,
 };
 
 static const struct udevice_id ti_sci_ids[] = {
 	{
 		.compatible = "ti,k2g-sci",
-		.data = (ulong)&ti_sci_sysfw_am654_desc
+		.data = (ulong)&ti_sci_pmmc_k2g_desc
+	},
+	{
+		.compatible = "ti,am654-sci",
+		.data = (ulong)&ti_sci_pmmc_am654_desc
 	},
 	{ /* Sentinel */ },
 };
diff --git a/drivers/firmware/ti_sci.h b/drivers/firmware/ti_sci.h
index 81591fb0c71..2d87cdd2cf9 100644
--- a/drivers/firmware/ti_sci.h
+++ b/drivers/firmware/ti_sci.h
@@ -25,6 +25,7 @@
 #define TI_SCI_MSG_BOARD_CONFIG_RM	0x000c
 #define TI_SCI_MSG_BOARD_CONFIG_SECURITY	0x000d
 #define TI_SCI_MSG_BOARD_CONFIG_PM	0x000e
+#define TISCI_MSG_QUERY_MSMC		0x0020
 
 /* Device requests */
 #define TI_SCI_MSG_SET_DEVICE_STATE	0x0200
@@ -50,6 +51,34 @@
 #define TISCI_MSG_PROC_AUTH_BOOT_IMIAGE	0xc120
 #define TISCI_MSG_GET_PROC_BOOT_STATUS	0xc400
 
+/* Resource Management Requests */
+#define TI_SCI_MSG_GET_RESOURCE_RANGE	0x1500
+
+/* NAVSS resource management */
+/* Ringacc requests */
+#define TI_SCI_MSG_RM_RING_CFG		0x1110
+#define TI_SCI_MSG_RM_RING_GET_CFG	0x1111
+
+/* PSI-L requests */
+#define TI_SCI_MSG_RM_PSIL_PAIR		0x1280
+#define TI_SCI_MSG_RM_PSIL_UNPAIR	0x1281
+
+#define TI_SCI_MSG_RM_UDMAP_TX_ALLOC	0x1200
+#define TI_SCI_MSG_RM_UDMAP_TX_FREE	0x1201
+#define TI_SCI_MSG_RM_UDMAP_RX_ALLOC	0x1210
+#define TI_SCI_MSG_RM_UDMAP_RX_FREE	0x1211
+#define TI_SCI_MSG_RM_UDMAP_FLOW_CFG	0x1220
+#define TI_SCI_MSG_RM_UDMAP_OPT_FLOW_CFG	0x1221
+
+#define TISCI_MSG_RM_UDMAP_TX_CH_CFG		0x1205
+#define TISCI_MSG_RM_UDMAP_TX_CH_GET_CFG	0x1206
+#define TISCI_MSG_RM_UDMAP_RX_CH_CFG		0x1215
+#define TISCI_MSG_RM_UDMAP_RX_CH_GET_CFG	0x1216
+#define TISCI_MSG_RM_UDMAP_FLOW_CFG		0x1230
+#define TISCI_MSG_RM_UDMAP_FLOW_SIZE_THRESH_CFG	0x1231
+#define TISCI_MSG_RM_UDMAP_FLOW_GET_CFG		0x1232
+#define TISCI_MSG_RM_UDMAP_FLOW_SIZE_THRESH_GET_CFG	0x1233
+
 /**
  * struct ti_sci_msg_hdr - Generic Message Header for All messages and responses
  * @type:	Type of messages: One of TI_SCI_MSG* values
@@ -134,6 +163,24 @@ struct ti_sci_msg_board_config {
 } __packed;
 
 /**
+ * struct ti_sci_msg_resp_query_msmc - Query msmc message response structure
+ * @hdr:		Generic Header
+ * @msmc_start_low:	Lower 32 bit of msmc start
+ * @msmc_start_high:	Upper 32 bit of msmc start
+ * @msmc_end_low:	Lower 32 bit of msmc end
+ * @msmc_end_high:	Upper 32 bit of msmc end
+ *
+ * Response to a generic message with message type TISCI_MSG_QUERY_MSMC
+ */
+struct ti_sci_msg_resp_query_msmc {
+	struct ti_sci_msg_hdr hdr;
+	u32 msmc_start_low;
+	u32 msmc_start_high;
+	u32 msmc_end_low;
+	u32 msmc_end_high;
+} __packed;
+
+/**
  * struct ti_sci_msg_req_set_device_state - Set the desired state of the device
  * @hdr:	Generic header
  * @id:	Indicates which device to modify
@@ -505,6 +552,45 @@ struct ti_sci_msg_resp_get_clock_freq {
 	u64 freq_hz;
 } __packed;
 
+#define TI_SCI_IRQ_SECONDARY_HOST_INVALID	0xff
+
+/**
+ * struct ti_sci_msg_req_get_resource_range - Request to get a host's assigned
+ *					      range of resources.
+ * @hdr:		Generic Header
+ * @type:		Unique resource assignment type
+ * @subtype:		Resource assignment subtype within the resource type.
+ * @secondary_host:	Host processing entity to which the resources are
+ *			allocated. This is required only when the destination
+ *			host id is different from the ti sci interface host id,
+ *			else TI_SCI_IRQ_SECONDARY_HOST_INVALID can be passed.
+ *
+ * Request type is TI_SCI_MSG_GET_RESOURCE_RANGE. Responded with the requested
+ * resource range, which is of type TI_SCI_MSG_GET_RESOURCE_RANGE.
+ */
+struct ti_sci_msg_req_get_resource_range {
+	struct ti_sci_msg_hdr hdr;
+#define MSG_RM_RESOURCE_TYPE_MASK	GENMASK(9, 0)
+#define MSG_RM_RESOURCE_SUBTYPE_MASK	GENMASK(5, 0)
+	u16 type;
+	u8 subtype;
+	u8 secondary_host;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_get_resource_range - Response to resource get range.
+ * @hdr:		Generic Header
+ * @range_start:	Start index of the resource range.
+ * @range_num:		Number of resources in the range.
+ *
+ * Response to request TI_SCI_MSG_GET_RESOURCE_RANGE.
+ */
+struct ti_sci_msg_resp_get_resource_range {
+	struct ti_sci_msg_hdr hdr;
+	u16 range_start;
+	u16 range_num;
+} __packed;
+
 #define TISCI_ADDR_LOW_MASK		GENMASK_ULL(31, 0)
 #define TISCI_ADDR_HIGH_MASK		GENMASK_ULL(63, 32)
 #define TISCI_ADDR_HIGH_SHIFT		32
@@ -677,4 +763,579 @@ struct ti_sci_msg_resp_get_proc_boot_status {
 	u32 status_flags;
 } __packed;
 
+/**
+ * struct ti_sci_msg_rm_ring_cfg_req - Configure a Navigator Subsystem ring
+ *
+ * Configures the non-real-time registers of a Navigator Subsystem ring.
+ * @hdr: Generic Header
+ * @valid_params: Bitfield defining validity of ring configuration parameters.
+ *	The ring configuration fields are not valid, and will not be used for
+ *	ring configuration, if their corresponding valid bit is zero.
+ *	Valid bit usage:
+ *	0 - Valid bit for @tisci_msg_rm_ring_cfg_req addr_lo
+ *	1 - Valid bit for @tisci_msg_rm_ring_cfg_req addr_hi
+ *	2 - Valid bit for @tisci_msg_rm_ring_cfg_req count
+ *	3 - Valid bit for @tisci_msg_rm_ring_cfg_req mode
+ *	4 - Valid bit for @tisci_msg_rm_ring_cfg_req size
+ *	5 - Valid bit for @tisci_msg_rm_ring_cfg_req order_id
+ * @nav_id: Device ID of Navigator Subsystem from which the ring is allocated
+ * @index: ring index to be configured.
+ * @addr_lo: 32 LSBs of ring base address to be programmed into the ring's
+ *	RING_BA_LO register
+ * @addr_hi: 16 MSBs of ring base address to be programmed into the ring's
+ *	RING_BA_HI register.
+ * @count: Number of ring elements. Must be even if mode is CREDENTIALS or QM
+ *	mode.
+ * @mode: Specifies the mode in which the ring is to be configured.
+ * @size: Specifies the encoded ring element size. To calculate the encoded
+ *	size use the formula (log2(size_bytes) - 2), where size_bytes cannot
+ *	be greater than 256.
+ * @order_id: Specifies the ring's bus order ID.
+ */
+struct ti_sci_msg_rm_ring_cfg_req {
+	struct ti_sci_msg_hdr hdr;
+	u32 valid_params;
+	u16 nav_id;
+	u16 index;
+	u32 addr_lo;
+	u32 addr_hi;
+	u32 count;
+	u8 mode;
+	u8 size;
+	u8 order_id;
+} __packed;
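The size encoding called out above is easy to get wrong, so a small helper is worth sketching; valid for power-of-two element sizes between 4 and 256 bytes, and relying on the GCC __builtin_ctz() builtin that U-Boot builds permit (not part of this patch):

	static inline u8 example_ring_elsize_enc(u32 size_bytes)
	{
		/* log2(size_bytes) - 2: 4 -> 0, 8 -> 1, 16 -> 2, ... 256 -> 6 */
		return (u8)(__builtin_ctz(size_bytes) - 2);
	}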
+
+/**
+ * struct ti_sci_msg_rm_ring_cfg_resp - Response to configuring a ring.
+ *
+ * @hdr:	Generic Header
+ */
+struct ti_sci_msg_rm_ring_cfg_resp {
+	struct ti_sci_msg_hdr hdr;
+} __packed;
+
+/**
+ * struct ti_sci_msg_rm_ring_get_cfg_req - Get RA ring's configuration
+ *
+ * Gets the configuration of the non-real-time register fields of a ring. The
+ * host, or a supervisor of the host, who owns the ring must be the requesting
+ * host. The values of the non-real-time registers are returned in
+ * @ti_sci_msg_rm_ring_get_cfg_resp.
+ *
+ * @hdr: Generic Header
+ * @nav_id: Device ID of Navigator Subsystem from which the ring is allocated
+ * @index: ring index.
+ */
+struct ti_sci_msg_rm_ring_get_cfg_req {
+	struct ti_sci_msg_hdr hdr;
+	u16 nav_id;
+	u16 index;
+} __packed;
+
+/**
+ * struct ti_sci_msg_rm_ring_get_cfg_resp - Ring get configuration response
+ *
+ * Response received by the host processor after RM has handled
+ * @ti_sci_msg_rm_ring_get_cfg_req. The response contains the ring's
+ * non-real-time register values.
+ *
+ * @hdr: Generic Header
+ * @addr_lo: Ring 32 LSBs of base address
+ * @addr_hi: Ring 16 MSBs of base address.
+ * @count: Ring number of elements.
+ * @mode: Ring mode.
+ * @size: Encoded ring element size
+ * @order_id: Ring order ID.
+ */
+struct ti_sci_msg_rm_ring_get_cfg_resp {
+	struct ti_sci_msg_hdr hdr;
+	u32 addr_lo;
+	u32 addr_hi;
+	u32 count;
+	u8 mode;
+	u8 size;
+	u8 order_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_psil_pair - Pairs a PSI-L source thread to a destination
+ *				 thread
+ * @hdr:	Generic Header
+ * @nav_id:	SoC Navigator Subsystem device ID whose PSI-L config proxy is
+ *		used to pair the source and destination threads.
+ * @src_thread:	PSI-L source thread ID within the PSI-L System thread map.
+ *
+ * UDMAP transmit channels mapped to source threads will have their
+ * TCHAN_THRD_ID register programmed with the destination thread if the pairing
+ * is successful.
+ *
+ * @dst_thread: PSI-L destination thread ID within the PSI-L System thread map.
+ * PSI-L destination threads start at index 0x8000. The request is NACK'd if
+ * the destination thread is not greater than or equal to 0x8000.
+ *
+ * UDMAP receive channels mapped to destination threads will have their
+ * RCHAN_THRD_ID register programmed with the source thread if the pairing
+ * is successful.
+ *
+ * Request type is TI_SCI_MSG_RM_PSIL_PAIR, response is a generic ACK or NACK
+ * message.
+ */
+struct ti_sci_msg_psil_pair {
+	struct ti_sci_msg_hdr hdr;
+	u32 nav_id;
+	u32 src_thread;
+	u32 dst_thread;
+} __packed;
+
+/**
+ * struct ti_sci_msg_psil_unpair - Unpairs a PSI-L source thread from a
+ *				   destination thread
+ * @hdr:	Generic Header
+ * @nav_id:	SoC Navigator Subsystem device ID whose PSI-L config proxy is
+ *		used to unpair the source and destination threads.
+ * @src_thread:	PSI-L source thread ID within the PSI-L System thread map.
+ *
+ * UDMAP transmit channels mapped to source threads will have their
+ * TCHAN_THRD_ID register cleared if the unpairing is successful.
+ *
+ * @dst_thread: PSI-L destination thread ID within the PSI-L System thread map.
+ * PSI-L destination threads start at index 0x8000. The request is NACK'd if
+ * the destination thread is not greater than or equal to 0x8000.
+ *
+ * UDMAP receive channels mapped to destination threads will have their
+ * RCHAN_THRD_ID register cleared if the unpairing is successful.
+ *
+ * Request type is TI_SCI_MSG_RM_PSIL_UNPAIR, response is a generic ACK or NACK
+ * message.
+ */ +struct ti_sci_msg_psil_unpair { + struct ti_sci_msg_hdr hdr; + u32 nav_id; + u32 src_thread; + u32 dst_thread; +} __packed; + +/** + * Configures a Navigator Subsystem UDMAP transmit channel + * + * Configures the non-real-time registers of a Navigator Subsystem UDMAP + * transmit channel. The channel index must be assigned to the host defined + * in the TISCI header via the RM board configuration resource assignment + * range list. + * + * @hdr: Generic Header + * + * @valid_params: Bitfield defining validity of tx channel configuration + * parameters. The tx channel configuration fields are not valid, and will not + * be used for ch configuration, if their corresponding valid bit is zero. + * Valid bit usage: + * 0 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_pause_on_err + * 1 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_atype + * 2 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_chan_type + * 3 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_fetch_size + * 4 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::txcq_qnum + * 5 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_priority + * 6 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_qos + * 7 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_orderid + * 8 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_sched_priority + * 9 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_filt_einfo + * 10 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_filt_pswords + * 11 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_supr_tdpkt + * 12 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_credit_count + * 13 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::fdepth + * + * @nav_id: SoC device ID of Navigator Subsystem where tx channel is located + * + * @index: UDMAP transmit channel index. + * + * @tx_pause_on_err: UDMAP transmit channel pause on error configuration to + * be programmed into the tx_pause_on_err field of the channel's TCHAN_TCFG + * register. + * + * @tx_filt_einfo: UDMAP transmit channel extended packet information passing + * configuration to be programmed into the tx_filt_einfo field of the + * channel's TCHAN_TCFG register. + * + * @tx_filt_pswords: UDMAP transmit channel protocol specific word passing + * configuration to be programmed into the tx_filt_pswords field of the + * channel's TCHAN_TCFG register. + * + * @tx_atype: UDMAP transmit channel non Ring Accelerator access pointer + * interpretation configuration to be programmed into the tx_atype field of + * the channel's TCHAN_TCFG register. + * + * @tx_chan_type: UDMAP transmit channel functional channel type and work + * passing mechanism configuration to be programmed into the tx_chan_type + * field of the channel's TCHAN_TCFG register. + * + * @tx_supr_tdpkt: UDMAP transmit channel teardown packet generation suppression + * configuration to be programmed into the tx_supr_tdpkt field of the channel's + * TCHAN_TCFG register. + * + * @tx_fetch_size: UDMAP transmit channel number of 32-bit descriptor words to + * fetch configuration to be programmed into the tx_fetch_size field of the + * channel's TCHAN_TCFG register. The user must make sure to set the maximum + * word count that can pass through the channel for any allowed descriptor type. + * + * @tx_credit_count: UDMAP transmit channel transfer request credit count + * configuration to be programmed into the count field of the TCHAN_TCREDIT + * register. Specifies how many credits for complete TRs are available. 
+ * + * @txcq_qnum: UDMAP transmit channel completion queue configuration to be + * programmed into the txcq_qnum field of the TCHAN_TCQ register. The specified + * completion queue must be assigned to the host, or a subordinate of the host, + * requesting configuration of the transmit channel. + * + * @tx_priority: UDMAP transmit channel transmit priority value to be programmed + * into the priority field of the channel's TCHAN_TPRI_CTRL register. + * + * @tx_qos: UDMAP transmit channel transmit qos value to be programmed into the + * qos field of the channel's TCHAN_TPRI_CTRL register. + * + * @tx_orderid: UDMAP transmit channel bus order id value to be programmed into + * the orderid field of the channel's TCHAN_TPRI_CTRL register. + * + * @fdepth: UDMAP transmit channel FIFO depth configuration to be programmed + * into the fdepth field of the TCHAN_TFIFO_DEPTH register. Sets the number of + * Tx FIFO bytes which are allowed to be stored for the channel. Check the UDMAP + * section of the TRM for restrictions regarding this parameter. + * + * @tx_sched_priority: UDMAP transmit channel tx scheduling priority + * configuration to be programmed into the priority field of the channel's + * TCHAN_TST_SCHED register. + */ +struct ti_sci_msg_rm_udmap_tx_ch_cfg_req { + struct ti_sci_msg_hdr hdr; + u32 valid_params; + u16 nav_id; + u16 index; + u8 tx_pause_on_err; + u8 tx_filt_einfo; + u8 tx_filt_pswords; + u8 tx_atype; + u8 tx_chan_type; + u8 tx_supr_tdpkt; + u16 tx_fetch_size; + u8 tx_credit_count; + u16 txcq_qnum; + u8 tx_priority; + u8 tx_qos; + u8 tx_orderid; + u16 fdepth; + u8 tx_sched_priority; +} __packed; + +/** + * Response to configuring a UDMAP transmit channel. + * + * @hdr: Standard TISCI header + */ +struct ti_sci_msg_rm_udmap_tx_ch_cfg_resp { + struct ti_sci_msg_hdr hdr; +} __packed; + +/** + * Configures a Navigator Subsystem UDMAP receive channel + * + * Configures the non-real-time registers of a Navigator Subsystem UDMAP + * receive channel. The channel index must be assigned to the host defined + * in the TISCI header via the RM board configuration resource assignment + * range list. + * + * @hdr: Generic Header + * + * @valid_params: Bitfield defining validity of rx channel configuration + * parameters. + * The rx channel configuration fields are not valid, and will not be used for + * ch configuration, if their corresponding valid bit is zero. + * Valid bit usage: + * 0 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_pause_on_err + * 1 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_atype + * 2 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_chan_type + * 3 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_fetch_size + * 4 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rxcq_qnum + * 5 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_priority + * 6 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_qos + * 7 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_orderid + * 8 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_sched_priority + * 9 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::flowid_start + * 10 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::flowid_cnt + * 11 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_ignore_short + * 12 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_ignore_long + * + * @nav_id: SoC device ID of Navigator Subsystem where rx channel is located + * + * @index: UDMAP receive channel index. 
+ *
+ * @rx_fetch_size: UDMAP receive channel number of 32-bit descriptor words to
+ * fetch configuration to be programmed into the rx_fetch_size field of the
+ * channel's RCHAN_RCFG register.
+ *
+ * @rxcq_qnum: UDMAP receive channel completion queue configuration to be
+ * programmed into the rxcq_qnum field of the RCHAN_RCQ register.
+ * The specified completion queue must be assigned to the host, or a
+ * subordinate of the host, requesting configuration of the receive channel.
+ *
+ * @rx_priority: UDMAP receive channel receive priority value to be programmed
+ * into the priority field of the channel's RCHAN_RPRI_CTRL register.
+ *
+ * @rx_qos: UDMAP receive channel receive qos value to be programmed into the
+ * qos field of the channel's RCHAN_RPRI_CTRL register.
+ *
+ * @rx_orderid: UDMAP receive channel bus order id value to be programmed into
+ * the orderid field of the channel's RCHAN_RPRI_CTRL register.
+ *
+ * @rx_sched_priority: UDMAP receive channel rx scheduling priority
+ * configuration to be programmed into the priority field of the channel's
+ * RCHAN_RST_SCHED register.
+ *
+ * @flowid_start: UDMAP receive channel additional flows starting index
+ * configuration to program into the flow_start field of the RCHAN_RFLOW_RNG
+ * register. Specifies the starting index for flow IDs the receive channel is
+ * to make use of beyond the default flow. flowid_start and @ref flowid_cnt
+ * must be set as valid and configured together. The starting flow ID set by
+ * @ref flowid_start must be a flow index within the Navigator Subsystem's
+ * subset of flows beyond the default flows statically mapped to receive
+ * channels. The additional flows must be assigned to the host, or a
+ * subordinate of the host, requesting configuration of the receive channel.
+ *
+ * @flowid_cnt: UDMAP receive channel additional flows count configuration to
+ * program into the flowid_cnt field of the RCHAN_RFLOW_RNG register.
+ * This field specifies how many flow IDs are in the additional contiguous
+ * range of legal flow IDs for the channel. @ref flowid_start and flowid_cnt
+ * must be set as valid and configured together. Disabling the valid_params
+ * field bit for flowid_cnt indicates no flow IDs other than the default are
+ * to be allocated and used by the receive channel. @ref flowid_start plus
+ * flowid_cnt cannot be greater than the number of receive flows in the
+ * receive channel's Navigator Subsystem. The additional flows must be
+ * assigned to the host, or a subordinate of the host, requesting
+ * configuration of the receive channel.
+ *
+ * @rx_pause_on_err: UDMAP receive channel pause on error configuration to be
+ * programmed into the rx_pause_on_err field of the channel's RCHAN_RCFG
+ * register.
+ *
+ * @rx_atype: UDMAP receive channel non Ring Accelerator access pointer
+ * interpretation configuration to be programmed into the rx_atype field of
+ * the channel's RCHAN_RCFG register.
+ *
+ * @rx_chan_type: UDMAP receive channel functional channel type and work
+ * passing mechanism configuration to be programmed into the rx_chan_type
+ * field of the channel's RCHAN_RCFG register.
+ *
+ * @rx_ignore_short: UDMAP receive channel short packet treatment configuration
+ * to be programmed into the rx_ignore_short field of the RCHAN_RCFG register.
+ *
+ * @rx_ignore_long: UDMAP receive channel long packet treatment configuration
+ * to be programmed into the rx_ignore_long field of the RCHAN_RCFG register.
+ */
+struct ti_sci_msg_rm_udmap_rx_ch_cfg_req {
+	struct ti_sci_msg_hdr hdr;
+	u32 valid_params;
+	u16 nav_id;
+	u16 index;
+	u16 rx_fetch_size;
+	u16 rxcq_qnum;
+	u8 rx_priority;
+	u8 rx_qos;
+	u8 rx_orderid;
+	u8 rx_sched_priority;
+	u16 flowid_start;
+	u16 flowid_cnt;
+	u8 rx_pause_on_err;
+	u8 rx_atype;
+	u8 rx_chan_type;
+	u8 rx_ignore_short;
+	u8 rx_ignore_long;
+} __packed;
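As a worked example of the flowid_start/flowid_cnt pairing rule above, a sketch of an rx channel claiming two extra flows; nav_id, channel and flow values are placeholders, and only bits 9 and 10 of valid_params are set, per the valid-bit list above:

	static int example_rx_ch_claim_flows(const struct ti_sci_handle *h,
					     u16 nav_id, u16 rchan,
					     u16 first_flow)
	{
		struct ti_sci_msg_rm_udmap_rx_ch_cfg cfg = {
			.valid_params = BIT(9) | BIT(10), /* flowid start+cnt */
			.nav_id = nav_id,
			.index = rchan,
			.flowid_start = first_flow,
			.flowid_cnt = 2,
		};

		return h->ops.rm_udmap_ops.rx_ch_cfg(h, &cfg);
	}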
+
+/**
+ * Response to configuring a UDMAP receive channel.
+ *
+ * @hdr:	Standard TISCI header
+ */
+struct ti_sci_msg_rm_udmap_rx_ch_cfg_resp {
+	struct ti_sci_msg_hdr hdr;
+} __packed;
+
+/**
+ * Configures a Navigator Subsystem UDMAP receive flow
+ *
+ * Configures a Navigator Subsystem UDMAP receive flow's registers.
+ * Configuration does not include the flow registers which handle size-based
+ * free descriptor queue routing.
+ *
+ * The flow index must be assigned to the host defined in the TISCI header via
+ * the RM board configuration resource assignment range list.
+ *
+ * @hdr: Standard TISCI header
+ *
+ * @valid_params
+ * Bitfield defining validity of rx flow configuration parameters. The
+ * rx flow configuration fields are not valid, and will not be used for flow
+ * configuration, if their corresponding valid bit is zero. Valid bit usage:
+ *     0 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_einfo_present
+ *     1 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_psinfo_present
+ *     2 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_error_handling
+ *     3 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_desc_type
+ *     4 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_sop_offset
+ *     5 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_dest_qnum
+ *     6 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_src_tag_hi
+ *     7 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_src_tag_lo
+ *     8 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_dest_tag_hi
+ *     9 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_dest_tag_lo
+ *    10 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_src_tag_hi_sel
+ *    11 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_src_tag_lo_sel
+ *    12 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_dest_tag_hi_sel
+ *    13 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_dest_tag_lo_sel
+ *    14 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_fdq0_sz0_qnum
+ *    15 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_fdq1_qnum
+ *    16 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_fdq2_qnum
+ *    17 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_fdq3_qnum
+ *    18 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_ps_location
+ *
+ * @nav_id: SoC device ID of Navigator Subsystem from which the receive flow
+ * is allocated
+ *
+ * @flow_index: UDMAP receive flow index for non-optional configuration.
+ *
+ * @rx_einfo_present:
+ * UDMAP receive flow extended packet info present configuration to be
+ * programmed into the rx_einfo_present field of the flow's RFLOW_RFA register.
+ *
+ * @rx_psinfo_present:
+ * UDMAP receive flow PS words present configuration to be programmed into the
+ * rx_psinfo_present field of the flow's RFLOW_RFA register.
+ *
+ * @rx_error_handling:
+ * UDMAP receive flow error handling configuration to be programmed into the
+ * rx_error_handling field of the flow's RFLOW_RFA register.
+ *
+ * @rx_desc_type:
+ * UDMAP receive flow descriptor type configuration to be programmed into the
+ * rx_desc_type field of the flow's RFLOW_RFA register.
+ * + * @rx_sop_offset: + * UDMAP receive flow start of packet offset configuration to be programmed + * into the rx_sop_offset field of the RFLOW_RFA register. See the UDMAP + * section of the TRM for more information on this setting. Valid values for + * this field are 0-255 bytes. + * + * @rx_dest_qnum: + * UDMAP receive flow destination queue configuration to be programmed into the + * rx_dest_qnum field of the flow's RFLOW_RFA register. The specified + * destination queue must be valid within the Navigator Subsystem and must be + * owned by the host, or a subordinate of the host, requesting allocation and + * configuration of the receive flow. + * + * @rx_src_tag_hi: + * UDMAP receive flow source tag high byte constant configuration to be + * programmed into the rx_src_tag_hi field of the flow's RFLOW_RFB register. + * See the UDMAP section of the TRM for more information on this setting. + * + * @rx_src_tag_lo: + * UDMAP receive flow source tag low byte constant configuration to be + * programmed into the rx_src_tag_lo field of the flow's RFLOW_RFB register. + * See the UDMAP section of the TRM for more information on this setting. + * + * @rx_dest_tag_hi: + * UDMAP receive flow destination tag high byte constant configuration to be + * programmed into the rx_dest_tag_hi field of the flow's RFLOW_RFB register. + * See the UDMAP section of the TRM for more information on this setting. + * + * @rx_dest_tag_lo: + * UDMAP receive flow destination tag low byte constant configuration to be + * programmed into the rx_dest_tag_lo field of the flow's RFLOW_RFB register. + * See the UDMAP section of the TRM for more information on this setting. + * + * @rx_src_tag_hi_sel: + * UDMAP receive flow source tag high byte selector configuration to be + * programmed into the rx_src_tag_hi_sel field of the RFLOW_RFC register. See + * the UDMAP section of the TRM for more information on this setting. + * + * @rx_src_tag_lo_sel: + * UDMAP receive flow source tag low byte selector configuration to be + * programmed into the rx_src_tag_lo_sel field of the RFLOW_RFC register. See + * the UDMAP section of the TRM for more information on this setting. + * + * @rx_dest_tag_hi_sel: + * UDMAP receive flow destination tag high byte selector configuration to be + * programmed into the rx_dest_tag_hi_sel field of the RFLOW_RFC register. See + * the UDMAP section of the TRM for more information on this setting. + * + * @rx_dest_tag_lo_sel: + * UDMAP receive flow destination tag low byte selector configuration to be + * programmed into the rx_dest_tag_lo_sel field of the RFLOW_RFC register. See + * the UDMAP section of the TRM for more information on this setting. + * + * @rx_fdq0_sz0_qnum: + * UDMAP receive flow free descriptor queue 0 configuration to be programmed + * into the rx_fdq0_sz0_qnum field of the flow's RFLOW_RFD register. See the + * UDMAP section of the TRM for more information on this setting. The specified + * free queue must be valid within the Navigator Subsystem and must be owned + * by the host, or a subordinate of the host, requesting allocation and + * configuration of the receive flow. + * + * @rx_fdq1_qnum: + * UDMAP receive flow free descriptor queue 1 configuration to be programmed + * into the rx_fdq1_qnum field of the flow's RFLOW_RFD register. See the + * UDMAP section of the TRM for more information on this setting. 
The specified + * free queue must be valid within the Navigator Subsystem and must be owned + * by the host, or a subordinate of the host, requesting allocation and + * configuration of the receive flow. + * + * @rx_fdq2_qnum: + * UDMAP receive flow free descriptor queue 2 configuration to be programmed + * into the rx_fdq2_qnum field of the flow's RFLOW_RFE register. See the + * UDMAP section of the TRM for more information on this setting. The specified + * free queue must be valid within the Navigator Subsystem and must be owned + * by the host, or a subordinate of the host, requesting allocation and + * configuration of the receive flow. + * + * @rx_fdq3_qnum: + * UDMAP receive flow free descriptor queue 3 configuration to be programmed + * into the rx_fdq3_qnum field of the flow's RFLOW_RFE register. See the + * UDMAP section of the TRM for more information on this setting. The specified + * free queue must be valid within the Navigator Subsystem and must be owned + * by the host, or a subordinate of the host, requesting allocation and + * configuration of the receive flow. + * + * @rx_ps_location: + * UDMAP receive flow PS words location configuration to be programmed into the + * rx_ps_location field of the flow's RFLOW_RFA register. + */ +struct ti_sci_msg_rm_udmap_flow_cfg_req { + struct ti_sci_msg_hdr hdr; + u32 valid_params; + u16 nav_id; + u16 flow_index; + u8 rx_einfo_present; + u8 rx_psinfo_present; + u8 rx_error_handling; + u8 rx_desc_type; + u16 rx_sop_offset; + u16 rx_dest_qnum; + u8 rx_src_tag_hi; + u8 rx_src_tag_lo; + u8 rx_dest_tag_hi; + u8 rx_dest_tag_lo; + u8 rx_src_tag_hi_sel; + u8 rx_src_tag_lo_sel; + u8 rx_dest_tag_hi_sel; + u8 rx_dest_tag_lo_sel; + u16 rx_fdq0_sz0_qnum; + u16 rx_fdq1_qnum; + u16 rx_fdq2_qnum; + u16 rx_fdq3_qnum; + u8 rx_ps_location; +} __packed; + +/** + * Response to configuring a Navigator Subsystem UDMAP receive flow + * + * @hdr: Standard TISCI header + */ +struct ti_sci_msg_rm_udmap_flow_cfg_resp { + struct ti_sci_msg_hdr hdr; +} __packed; + #endif /* __TI_SCI_H */ diff --git a/drivers/mmc/omap_hsmmc.c b/drivers/mmc/omap_hsmmc.c index 826a39fad72..133cdc13527 100644 --- a/drivers/mmc/omap_hsmmc.c +++ b/drivers/mmc/omap_hsmmc.c @@ -264,7 +264,7 @@ static unsigned char mmc_board_init(struct mmc *mmc) !CONFIG_IS_ENABLED(DM_REGULATOR) /* PBIAS config needed for MMC1 only */ if (mmc_get_blk_desc(mmc)->devnum == 0) - vmmc_pbias_config(LDO_VOLT_3V0); + vmmc_pbias_config(LDO_VOLT_3V3); #endif return 0; @@ -418,7 +418,7 @@ static void omap_hsmmc_conf_bus_power(struct mmc *mmc, uint signal_voltage) switch (signal_voltage) { case MMC_SIGNAL_VOLTAGE_330: - hctl |= SDVS_3V0; + hctl |= SDVS_3V3; break; case MMC_SIGNAL_VOLTAGE_180: hctl |= SDVS_1V8; @@ -514,10 +514,9 @@ static int omap_hsmmc_set_signal_voltage(struct mmc *mmc) return -EINVAL; if (mmc->signal_voltage == MMC_SIGNAL_VOLTAGE_330) { - /* Use 3.0V rather than 3.3V */ - mv = 3000; - capa_mask = VS30_3V0SUP; - palmas_ldo_volt = LDO_VOLT_3V0; + mv = 3300; + capa_mask = VS33_3V3SUP; + palmas_ldo_volt = LDO_VOLT_3V3; } else if (mmc->signal_voltage == MMC_SIGNAL_VOLTAGE_180) { capa_mask = VS18_1V8SUP; palmas_ldo_volt = LDO_VOLT_1V8; @@ -556,13 +555,13 @@ static uint32_t omap_hsmmc_set_capabilities(struct mmc *mmc) val = readl(&mmc_base->capa); if (priv->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) { - val |= (VS30_3V0SUP | VS18_1V8SUP); + val |= (VS33_3V3SUP | VS18_1V8SUP); } else if (priv->controller_flags & OMAP_HSMMC_NO_1_8_V) { - val |= VS30_3V0SUP; + val |= VS33_3V3SUP; val &= ~VS18_1V8SUP; } 
else { val |= VS18_1V8SUP; - val &= ~VS30_3V0SUP; + val &= ~VS33_3V3SUP; } writel(val, &mmc_base->capa); @@ -842,11 +841,11 @@ static int omap_hsmmc_init_setup(struct mmc *mmc) #if CONFIG_IS_ENABLED(DM_MMC) reg_val = omap_hsmmc_set_capabilities(mmc); - omap_hsmmc_conf_bus_power(mmc, (reg_val & VS30_3V0SUP) ? + omap_hsmmc_conf_bus_power(mmc, (reg_val & VS33_3V3SUP) ? MMC_SIGNAL_VOLTAGE_330 : MMC_SIGNAL_VOLTAGE_180); #else writel(DTW_1_BITMODE | SDBP_PWROFF | SDVS_3V0, &mmc_base->hctl); - writel(readl(&mmc_base->capa) | VS30_3V0SUP | VS18_1V8SUP, + writel(readl(&mmc_base->capa) | VS33_3V3SUP | VS18_1V8SUP, &mmc_base->capa); #endif diff --git a/drivers/net/ti/cpsw-common.c b/drivers/net/ti/cpsw-common.c index 6c8ddbd9361..ac12cfe9b86 100644 --- a/drivers/net/ti/cpsw-common.c +++ b/drivers/net/ti/cpsw-common.c @@ -16,35 +16,11 @@ DECLARE_GLOBAL_DATA_PTR; #define CTRL_MAC_REG(offset, id) ((offset) + 0x8 * (id)) -static int davinci_emac_3517_get_macid(struct udevice *dev, u16 offset, - int slave, u8 *mac_addr) +static void davinci_emac_3517_get_macid(u32 addr, u8 *mac_addr) { - void *fdt = (void *)gd->fdt_blob; - int node = dev_of_offset(dev); - u32 macid_lsb; - u32 macid_msb; - fdt32_t gmii = 0; - int syscon; - u32 addr; - - syscon = fdtdec_lookup_phandle(fdt, node, "syscon"); - if (syscon < 0) { - pr_err("Syscon offset not found\n"); - return -ENOENT; - } - - addr = (u32)map_physmem(fdt_translate_address(fdt, syscon, &gmii), - sizeof(u32), MAP_NOCACHE); - if (addr == FDT_ADDR_T_NONE) { - pr_err("Not able to get syscon address to get mac efuse address\n"); - return -ENOENT; - } - - addr += CTRL_MAC_REG(offset, slave); - /* try reading mac address from efuse */ - macid_lsb = readl(addr); - macid_msb = readl(addr + 4); + u32 macid_lsb = readl(addr); + u32 macid_msb = readl(addr + 4); mac_addr[0] = (macid_msb >> 16) & 0xff; mac_addr[1] = (macid_msb >> 8) & 0xff; @@ -52,20 +28,62 @@ static int davinci_emac_3517_get_macid(struct udevice *dev, u16 offset, mac_addr[3] = (macid_lsb >> 16) & 0xff; mac_addr[4] = (macid_lsb >> 8) & 0xff; mac_addr[5] = macid_lsb & 0xff; +} - return 0; +static void cpsw_am33xx_cm_get_macid(u32 addr, u8 *mac_addr) +{ + /* try reading mac address from efuse */ + u32 macid_lo = readl(addr); + u32 macid_hi = readl(addr + 4); + + mac_addr[5] = (macid_lo >> 8) & 0xff; + mac_addr[4] = macid_lo & 0xff; + mac_addr[3] = (macid_hi >> 24) & 0xff; + mac_addr[2] = (macid_hi >> 16) & 0xff; + mac_addr[1] = (macid_hi >> 8) & 0xff; + mac_addr[0] = macid_hi & 0xff; +} + +void ti_cm_get_macid(struct udevice *dev, struct cpsw_platform_data *data, + u8 *mac_addr) +{ + if (!strcmp(data->macid_sel_compat, "cpsw,am33xx")) + cpsw_am33xx_cm_get_macid(data->syscon_addr, mac_addr); + else if (!strcmp(data->macid_sel_compat, "davinci,emac")) + davinci_emac_3517_get_macid(data->syscon_addr, mac_addr); } -static int cpsw_am33xx_cm_get_macid(struct udevice *dev, u16 offset, int slave, - u8 *mac_addr) +int ti_cm_get_macid_addr(struct udevice *dev, int slave, + struct cpsw_platform_data *data) { void *fdt = (void *)gd->fdt_blob; int node = dev_of_offset(dev); - u32 macid_lo; - u32 macid_hi; fdt32_t gmii = 0; int syscon; - u32 addr; + u16 offset; + + if (of_machine_is_compatible("ti,dm8148")) { + offset = 0x630; + data->macid_sel_compat = "cpsw,am33xx"; + } else if (of_machine_is_compatible("ti,am33xx")) { + offset = 0x630; + data->macid_sel_compat = "cpsw,am33xx"; + } else if (device_is_compatible(dev, "ti,am3517-emac")) { + offset = 0x110; + data->macid_sel_compat = "davinci,emac"; + } else if 
(device_is_compatible(dev, "ti,dm816-emac")) { + offset = 0x30; + data->macid_sel_compat = "cpsw,am33xx"; + } else if (of_machine_is_compatible("ti,am43")) { + offset = 0x630; + data->macid_sel_compat = "cpsw,am33xx"; + } else if (of_machine_is_compatible("ti,dra7")) { + offset = 0x514; + data->macid_sel_compat = "davinci,emac"; + } else { + dev_err(dev, "incompatible machine/device type for reading mac address\n"); + return -ENOENT; + } syscon = fdtdec_lookup_phandle(fdt, node, "syscon"); if (syscon < 0) { @@ -73,49 +91,16 @@ static int cpsw_am33xx_cm_get_macid(struct udevice *dev, u16 offset, int slave, return -ENOENT; } - addr = (u32)map_physmem(fdt_translate_address(fdt, syscon, &gmii), - sizeof(u32), MAP_NOCACHE); - if (addr == FDT_ADDR_T_NONE) { + data->syscon_addr = (u32)map_physmem(fdt_translate_address(fdt, syscon, + &gmii), + sizeof(u32), MAP_NOCACHE); + if (data->syscon_addr == FDT_ADDR_T_NONE) { pr_err("Not able to get syscon address to get mac efuse address\n"); return -ENOENT; } - addr += CTRL_MAC_REG(offset, slave); - - /* try reading mac address from efuse */ - macid_lo = readl(addr); - macid_hi = readl(addr + 4); - - mac_addr[5] = (macid_lo >> 8) & 0xff; - mac_addr[4] = macid_lo & 0xff; - mac_addr[3] = (macid_hi >> 24) & 0xff; - mac_addr[2] = (macid_hi >> 16) & 0xff; - mac_addr[1] = (macid_hi >> 8) & 0xff; - mac_addr[0] = macid_hi & 0xff; + data->syscon_addr += CTRL_MAC_REG(offset, slave); return 0; -} - -int ti_cm_get_macid(struct udevice *dev, int slave, u8 *mac_addr) -{ - if (of_machine_is_compatible("ti,dm8148")) - return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr); - - if (of_machine_is_compatible("ti,am33xx")) - return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr); - - if (device_is_compatible(dev, "ti,am3517-emac")) - return davinci_emac_3517_get_macid(dev, 0x110, slave, mac_addr); - - if (device_is_compatible(dev, "ti,dm816-emac")) - return cpsw_am33xx_cm_get_macid(dev, 0x30, slave, mac_addr); - - if (of_machine_is_compatible("ti,am43")) - return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr); - - if (of_machine_is_compatible("ti,dra7")) - return davinci_emac_3517_get_macid(dev, 0x514, slave, mac_addr); - dev_err(dev, "incompatible machine/device type for reading mac address\n"); - return -ENOENT; } diff --git a/drivers/net/ti/cpsw.c b/drivers/net/ti/cpsw.c index f5fd02efe1a..20ddb44dd89 100644 --- a/drivers/net/ti/cpsw.c +++ b/drivers/net/ti/cpsw.c @@ -33,24 +33,6 @@ DECLARE_GLOBAL_DATA_PTR; #define GIGABITEN BIT(7) #define FULLDUPLEXEN BIT(0) #define MIIEN BIT(15) - -/* reg offset */ -#define CPSW_HOST_PORT_OFFSET 0x108 -#define CPSW_SLAVE0_OFFSET 0x208 -#define CPSW_SLAVE1_OFFSET 0x308 -#define CPSW_SLAVE_SIZE 0x100 -#define CPSW_CPDMA_OFFSET 0x800 -#define CPSW_HW_STATS 0x900 -#define CPSW_STATERAM_OFFSET 0xa00 -#define CPSW_CPTS_OFFSET 0xc00 -#define CPSW_ALE_OFFSET 0xd00 -#define CPSW_SLIVER0_OFFSET 0xd80 -#define CPSW_SLIVER1_OFFSET 0xdc0 -#define CPSW_BD_OFFSET 0x2000 -#define CPSW_MDIO_DIV 0xff - -#define AM335X_GMII_SEL_OFFSET 0x630 - /* DMA Registers */ #define CPDMA_TXCONTROL 0x004 #define CPDMA_RXCONTROL 0x014 @@ -209,10 +191,10 @@ struct cpdma_chan { #define chan_read_ptr(chan, fld) ((void *)__raw_readl((chan)->fld)) #define for_active_slave(slave, priv) \ - slave = (priv)->slaves + (priv)->data.active_slave; if (slave) + slave = (priv)->slaves + ((priv)->data)->active_slave; if (slave) #define for_each_slave(slave, priv) \ for (slave = (priv)->slaves; slave != (priv)->slaves + \ - (priv)->data.slaves; slave++) + 
((priv)->data)->slaves; slave++) struct cpsw_priv { #ifdef CONFIG_DM_ETH @@ -220,7 +202,7 @@ struct cpsw_priv { #else struct eth_device *dev; #endif - struct cpsw_platform_data data; + struct cpsw_platform_data *data; int host_port; struct cpsw_regs *regs; @@ -327,7 +309,7 @@ static int cpsw_ale_match_addr(struct cpsw_priv *priv, const u8 *addr) u32 ale_entry[ALE_ENTRY_WORDS]; int type, idx; - for (idx = 0; idx < priv->data.ale_entries; idx++) { + for (idx = 0; idx < priv->data->ale_entries; idx++) { u8 entry_addr[6]; cpsw_ale_read(priv, idx, ale_entry); @@ -346,7 +328,7 @@ static int cpsw_ale_match_free(struct cpsw_priv *priv) u32 ale_entry[ALE_ENTRY_WORDS]; int type, idx; - for (idx = 0; idx < priv->data.ale_entries; idx++) { + for (idx = 0; idx < priv->data->ale_entries; idx++) { cpsw_ale_read(priv, idx, ale_entry); type = cpsw_ale_get_entry_type(ale_entry); if (type == ALE_TYPE_FREE) @@ -360,7 +342,7 @@ static int cpsw_ale_find_ageable(struct cpsw_priv *priv) u32 ale_entry[ALE_ENTRY_WORDS]; int type, idx; - for (idx = 0; idx < priv->data.ale_entries; idx++) { + for (idx = 0; idx < priv->data->ale_entries; idx++) { cpsw_ale_read(priv, idx, ale_entry); type = cpsw_ale_get_entry_type(ale_entry); if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR) @@ -500,7 +482,7 @@ static int cpsw_slave_update_link(struct cpsw_slave *slave, *link = phy->link; if (phy->link) { /* link up */ - mac_control = priv->data.mac_control; + mac_control = priv->data->mac_control; if (phy->speed == 1000) mac_control |= GIGABITEN; if (phy->duplex == DUPLEX_FULL) @@ -710,7 +692,7 @@ static int _cpsw_init(struct cpsw_priv *priv, u8 *enetaddr) priv->desc_free = &priv->descs[0]; /* initialize channels */ - if (priv->data.version == CPSW_CTRL_VERSION_2) { + if (priv->data->version == CPSW_CTRL_VERSION_2) { memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan)); priv->rx_chan.hdp = priv->dma_regs + CPDMA_RXHDP_VER2; priv->rx_chan.cp = priv->dma_regs + CPDMA_RXCP_VER2; @@ -733,8 +715,8 @@ static int _cpsw_init(struct cpsw_priv *priv, u8 *enetaddr) /* clear dma state */ setbit_and_wait_for_clear32(priv->dma_regs + CPDMA_SOFTRESET); - if (priv->data.version == CPSW_CTRL_VERSION_2) { - for (i = 0; i < priv->data.channels; i++) { + if (priv->data->version == CPSW_CTRL_VERSION_2) { + for (i = 0; i < priv->data->channels; i++) { __raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER2 + 4 * i); __raw_writel(0, priv->dma_regs + CPDMA_RXFREE + 4 @@ -747,7 +729,7 @@ static int _cpsw_init(struct cpsw_priv *priv, u8 *enetaddr) * i); } } else { - for (i = 0; i < priv->data.channels; i++) { + for (i = 0; i < priv->data->channels; i++) { __raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER1 + 4 * i); __raw_writel(0, priv->dma_regs + CPDMA_RXFREE + 4 @@ -843,7 +825,7 @@ static void cpsw_slave_setup(struct cpsw_slave *slave, int slave_num, struct cpsw_priv *priv) { void *regs = priv->regs; - struct cpsw_slave_data *data = priv->data.slave_data + slave_num; + struct cpsw_slave_data *data = priv->data->slave_data + slave_num; slave->slave_num = slave_num; slave->data = data; slave->regs = regs + data->slave_reg_ofs; @@ -879,7 +861,7 @@ static int cpsw_phy_init(struct cpsw_priv *priv, struct cpsw_slave *slave) static void cpsw_phy_addr_update(struct cpsw_priv *priv) { - struct cpsw_platform_data *data = &priv->data; + struct cpsw_platform_data *data = priv->data; u16 alive = cpsw_mdio_get_alive(priv->bus); int active = data->active_slave; int new_addr = ffs(alive) - 1; @@ -899,7 +881,7 @@ static void cpsw_phy_addr_update(struct cpsw_priv *priv) int 
_cpsw_register(struct cpsw_priv *priv) { struct cpsw_slave *slave; - struct cpsw_platform_data *data = &priv->data; + struct cpsw_platform_data *data = priv->data; void *regs = (void *)data->cpsw_base; priv->slaves = malloc(sizeof(struct cpsw_slave) * data->slaves); @@ -988,7 +970,7 @@ int cpsw_register(struct cpsw_platform_data *data) } priv->dev = dev; - priv->data = *data; + priv->data = data; strcpy(dev->name, "cpsw"); dev->iobase = 0; @@ -1048,16 +1030,6 @@ static void cpsw_eth_stop(struct udevice *dev) return _cpsw_halt(priv); } - -static int cpsw_eth_probe(struct udevice *dev) -{ - struct cpsw_priv *priv = dev_get_priv(dev); - - priv->dev = dev; - - return _cpsw_register(priv); -} - static const struct eth_ops cpsw_eth_ops = { .start = cpsw_eth_start, .send = cpsw_eth_send, @@ -1079,9 +1051,9 @@ static void cpsw_gmii_sel_am3352(struct cpsw_priv *priv, u32 mask; u32 mode = 0; bool rgmii_id = false; - int slave = priv->data.active_slave; + int slave = priv->data->active_slave; - reg = readl(priv->data.gmii_sel); + reg = readl(priv->data->gmii_sel); switch (phy_mode) { case PHY_INTERFACE_MODE_RMII: @@ -1107,7 +1079,7 @@ static void cpsw_gmii_sel_am3352(struct cpsw_priv *priv, mask = GMII_SEL_MODE_MASK << (slave * 2) | BIT(slave + 6); mode <<= slave * 2; - if (priv->data.rmii_clock_external) { + if (priv->data->rmii_clock_external) { if (slave == 0) mode |= AM33XX_GMII_SEL_RMII1_IO_CLK_EN; else @@ -1124,7 +1096,7 @@ static void cpsw_gmii_sel_am3352(struct cpsw_priv *priv, reg &= ~mask; reg |= mode; - writel(reg, priv->data.gmii_sel); + writel(reg, priv->data->gmii_sel); } static void cpsw_gmii_sel_dra7xx(struct cpsw_priv *priv, @@ -1133,9 +1105,9 @@ static void cpsw_gmii_sel_dra7xx(struct cpsw_priv *priv, u32 reg; u32 mask; u32 mode = 0; - int slave = priv->data.active_slave; + int slave = priv->data->active_slave; - reg = readl(priv->data.gmii_sel); + reg = readl(priv->data->gmii_sel); switch (phy_mode) { case PHY_INTERFACE_MODE_RMII: @@ -1168,13 +1140,13 @@ static void cpsw_gmii_sel_dra7xx(struct cpsw_priv *priv, return; } - if (priv->data.rmii_clock_external) + if (priv->data->rmii_clock_external) dev_err(priv->dev, "RMII External clock is not supported\n"); reg &= ~mask; reg |= mode; - writel(reg, priv->data.gmii_sel); + writel(reg, priv->data->gmii_sel); } static void cpsw_phy_sel(struct cpsw_priv *priv, const char *compat, @@ -1188,13 +1160,28 @@ static void cpsw_phy_sel(struct cpsw_priv *priv, const char *compat, cpsw_gmii_sel_dra7xx(priv, phy_mode); } +static int cpsw_eth_probe(struct udevice *dev) +{ + struct cpsw_priv *priv = dev_get_priv(dev); + struct eth_pdata *pdata = dev_get_platdata(dev); + + priv->dev = dev; + priv->data = pdata->priv_pdata; + ti_cm_get_macid(dev, priv->data, pdata->enetaddr); + /* Select phy interface in control module */ + cpsw_phy_sel(priv, priv->data->phy_sel_compat, + pdata->phy_interface); + + return _cpsw_register(priv); +} + +#if CONFIG_IS_ENABLED(OF_CONTROL) static int cpsw_eth_ofdata_to_platdata(struct udevice *dev) { struct eth_pdata *pdata = dev_get_platdata(dev); - struct cpsw_priv *priv = dev_get_priv(dev); + struct cpsw_platform_data *data; struct gpio_desc *mode_gpios; const char *phy_mode; - const char *phy_sel_compat = NULL; const void *fdt = gd->fdt_blob; int node = dev_of_offset(dev); int subnode; @@ -1203,45 +1190,47 @@ static int cpsw_eth_ofdata_to_platdata(struct udevice *dev) int num_mode_gpios; int ret; + data = calloc(1, sizeof(struct cpsw_platform_data)); + pdata->priv_pdata = data; pdata->iobase = devfdt_get_addr(dev); - 
priv->data.version = CPSW_CTRL_VERSION_2; - priv->data.bd_ram_ofs = CPSW_BD_OFFSET; - priv->data.ale_reg_ofs = CPSW_ALE_OFFSET; - priv->data.cpdma_reg_ofs = CPSW_CPDMA_OFFSET; - priv->data.mdio_div = CPSW_MDIO_DIV; - priv->data.host_port_reg_ofs = CPSW_HOST_PORT_OFFSET, + data->version = CPSW_CTRL_VERSION_2; + data->bd_ram_ofs = CPSW_BD_OFFSET; + data->ale_reg_ofs = CPSW_ALE_OFFSET; + data->cpdma_reg_ofs = CPSW_CPDMA_OFFSET; + data->mdio_div = CPSW_MDIO_DIV; + data->host_port_reg_ofs = CPSW_HOST_PORT_OFFSET, pdata->phy_interface = -1; - priv->data.cpsw_base = pdata->iobase; - priv->data.channels = fdtdec_get_int(fdt, node, "cpdma_channels", -1); - if (priv->data.channels <= 0) { + data->cpsw_base = pdata->iobase; + data->channels = fdtdec_get_int(fdt, node, "cpdma_channels", -1); + if (data->channels <= 0) { printf("error: cpdma_channels not found in dt\n"); return -ENOENT; } - priv->data.slaves = fdtdec_get_int(fdt, node, "slaves", -1); - if (priv->data.slaves <= 0) { + data->slaves = fdtdec_get_int(fdt, node, "slaves", -1); + if (data->slaves <= 0) { printf("error: slaves not found in dt\n"); return -ENOENT; } - priv->data.slave_data = malloc(sizeof(struct cpsw_slave_data) * - priv->data.slaves); + data->slave_data = malloc(sizeof(struct cpsw_slave_data) * + data->slaves); - priv->data.ale_entries = fdtdec_get_int(fdt, node, "ale_entries", -1); - if (priv->data.ale_entries <= 0) { + data->ale_entries = fdtdec_get_int(fdt, node, "ale_entries", -1); + if (data->ale_entries <= 0) { printf("error: ale_entries not found in dt\n"); return -ENOENT; } - priv->data.bd_ram_ofs = fdtdec_get_int(fdt, node, "bd_ram_size", -1); - if (priv->data.bd_ram_ofs <= 0) { + data->bd_ram_ofs = fdtdec_get_int(fdt, node, "bd_ram_size", -1); + if (data->bd_ram_ofs <= 0) { printf("error: bd_ram_size not found in dt\n"); return -ENOENT; } - priv->data.mac_control = fdtdec_get_int(fdt, node, "mac_control", -1); - if (priv->data.mac_control <= 0) { + data->mac_control = fdtdec_get_int(fdt, node, "mac_control", -1); + if (data->mac_control <= 0) { printf("error: ale_entries not found in dt\n"); return -ENOENT; } @@ -1256,7 +1245,7 @@ static int cpsw_eth_ofdata_to_platdata(struct udevice *dev) } active_slave = fdtdec_get_int(fdt, node, "active_slave", 0); - priv->data.active_slave = active_slave; + data->active_slave = active_slave; fdt_for_each_subnode(subnode, fdt, node) { int len; @@ -1271,108 +1260,107 @@ static int cpsw_eth_ofdata_to_platdata(struct udevice *dev) pr_err("Not able to get MDIO address space\n"); return -ENOENT; } - priv->data.mdio_base = mdio_base; + data->mdio_base = mdio_base; } if (!strncmp(name, "slave", 5)) { u32 phy_id[2]; - if (slave_index >= priv->data.slaves) + if (slave_index >= data->slaves) continue; phy_mode = fdt_getprop(fdt, subnode, "phy-mode", NULL); if (phy_mode) - priv->data.slave_data[slave_index].phy_if = + data->slave_data[slave_index].phy_if = phy_get_interface_by_name(phy_mode); - priv->data.slave_data[slave_index].phy_of_handle = + data->slave_data[slave_index].phy_of_handle = fdtdec_lookup_phandle(fdt, subnode, "phy-handle"); - if (priv->data.slave_data[slave_index].phy_of_handle >= 0) { - priv->data.slave_data[slave_index].phy_addr = + if (data->slave_data[slave_index].phy_of_handle >= 0) { + data->slave_data[slave_index].phy_addr = fdtdec_get_int(gd->fdt_blob, - priv->data.slave_data[slave_index].phy_of_handle, + data->slave_data[slave_index].phy_of_handle, "reg", -1); } else { fdtdec_get_int_array(fdt, subnode, "phy_id", phy_id, 2); - 
priv->data.slave_data[slave_index].phy_addr = + data->slave_data[slave_index].phy_addr = phy_id[1]; } slave_index++; } if (!strncmp(name, "cpsw-phy-sel", 12)) { - priv->data.gmii_sel = cpsw_get_addr_by_node(fdt, - subnode); + data->gmii_sel = cpsw_get_addr_by_node(fdt, subnode); - if (priv->data.gmii_sel == FDT_ADDR_T_NONE) { + if (data->gmii_sel == FDT_ADDR_T_NONE) { pr_err("Not able to get gmii_sel reg address\n"); return -ENOENT; } if (fdt_get_property(fdt, subnode, "rmii-clock-ext", NULL)) - priv->data.rmii_clock_external = true; + data->rmii_clock_external = true; - phy_sel_compat = fdt_getprop(fdt, subnode, "compatible", - NULL); - if (!phy_sel_compat) { + data->phy_sel_compat = fdt_getprop(fdt, subnode, + "compatible", NULL); + if (!data->phy_sel_compat) { pr_err("Not able to get gmii_sel compatible\n"); return -ENOENT; } } } - priv->data.slave_data[0].slave_reg_ofs = CPSW_SLAVE0_OFFSET; - priv->data.slave_data[0].sliver_reg_ofs = CPSW_SLIVER0_OFFSET; + data->slave_data[0].slave_reg_ofs = CPSW_SLAVE0_OFFSET; + data->slave_data[0].sliver_reg_ofs = CPSW_SLIVER0_OFFSET; - if (priv->data.slaves == 2) { - priv->data.slave_data[1].slave_reg_ofs = CPSW_SLAVE1_OFFSET; - priv->data.slave_data[1].sliver_reg_ofs = CPSW_SLIVER1_OFFSET; + if (data->slaves == 2) { + data->slave_data[1].slave_reg_ofs = CPSW_SLAVE1_OFFSET; + data->slave_data[1].sliver_reg_ofs = CPSW_SLIVER1_OFFSET; } - ret = ti_cm_get_macid(dev, active_slave, pdata->enetaddr); + ret = ti_cm_get_macid_addr(dev, active_slave, data); if (ret < 0) { pr_err("cpsw read efuse mac failed\n"); return ret; } - pdata->phy_interface = priv->data.slave_data[active_slave].phy_if; + pdata->phy_interface = data->slave_data[active_slave].phy_if; if (pdata->phy_interface == -1) { debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode); return -EINVAL; } - /* Select phy interface in control module */ - cpsw_phy_sel(priv, phy_sel_compat, pdata->phy_interface); - return 0; } +static const struct udevice_id cpsw_eth_ids[] = { + { .compatible = "ti,cpsw" }, + { .compatible = "ti,am335x-cpsw" }, + { } +}; +#endif + int cpsw_get_slave_phy_addr(struct udevice *dev, int slave) { struct cpsw_priv *priv = dev_get_priv(dev); - struct cpsw_platform_data *data = &priv->data; + struct cpsw_platform_data *data = priv->data; return data->slave_data[slave].phy_addr; } -static const struct udevice_id cpsw_eth_ids[] = { - { .compatible = "ti,cpsw" }, - { .compatible = "ti,am335x-cpsw" }, - { } -}; - U_BOOT_DRIVER(eth_cpsw) = { .name = "eth_cpsw", .id = UCLASS_ETH, +#if CONFIG_IS_ENABLED(OF_CONTROL) .of_match = cpsw_eth_ids, .ofdata_to_platdata = cpsw_eth_ofdata_to_platdata, + .platdata_auto_alloc_size = sizeof(struct eth_pdata), +#endif .probe = cpsw_eth_probe, .ops = &cpsw_eth_ops, .priv_auto_alloc_size = sizeof(struct cpsw_priv), - .platdata_auto_alloc_size = sizeof(struct eth_pdata), - .flags = DM_FLAG_ALLOC_PRIV_DMA, + .flags = DM_FLAG_ALLOC_PRIV_DMA | DM_FLAG_PRE_RELOC, }; #endif /* CONFIG_DM_ETH */ diff --git a/drivers/net/ti/keystone_net.c b/drivers/net/ti/keystone_net.c index a3ba91cc3f5..4baeeb83f10 100644 --- a/drivers/net/ti/keystone_net.c +++ b/drivers/net/ti/keystone_net.c @@ -88,6 +88,7 @@ struct ks2_eth_priv { struct mii_dev *mdio_bus; int phy_addr; phy_interface_t phy_if; + int phy_of_handle; int sgmii_link_type; void *mdio_base; struct rx_buff_desc net_rx_buffs; @@ -588,6 +589,10 @@ static int ks2_eth_probe(struct udevice *dev) if (priv->has_mdio) { priv->phydev = phy_connect(priv->mdio_bus, priv->phy_addr, dev, priv->phy_if); +#ifdef 
CONFIG_DM_ETH + if (priv->phy_of_handle) + priv->phydev->node = offset_to_ofnode(priv->phy_of_handle); +#endif phy_config(priv->phydev); } @@ -679,6 +684,7 @@ static int ks2_eth_parse_slave_interface(int netcp, int slave, int phy; int dma_count; u32 dma_channel[8]; + const char *phy_mode; priv->slave_port = fdtdec_get_int(fdt, slave, "slave-port", -1); priv->net_rx_buffs.rx_flow = priv->slave_port * 8; @@ -700,7 +706,9 @@ static int ks2_eth_parse_slave_interface(int netcp, int slave, priv->link_type = fdtdec_get_int(fdt, slave, "link-interface", -1); phy = fdtdec_lookup_phandle(fdt, slave, "phy-handle"); + if (phy >= 0) { + priv->phy_of_handle = phy; priv->phy_addr = fdtdec_get_int(fdt, phy, "reg", -1); mdio = fdt_parent_offset(fdt, phy); @@ -717,7 +725,19 @@ static int ks2_eth_parse_slave_interface(int netcp, int slave, priv->sgmii_link_type = SGMII_LINK_MAC_PHY; priv->has_mdio = true; } else if (priv->link_type == LINK_TYPE_RGMII_LINK_MAC_PHY) { - priv->phy_if = PHY_INTERFACE_MODE_RGMII; + phy_mode = fdt_getprop(fdt, slave, "phy-mode", NULL); + if (phy_mode) { + priv->phy_if = phy_get_interface_by_name(phy_mode); + if (priv->phy_if != PHY_INTERFACE_MODE_RGMII && + priv->phy_if != PHY_INTERFACE_MODE_RGMII_ID && + priv->phy_if != PHY_INTERFACE_MODE_RGMII_RXID && + priv->phy_if != PHY_INTERFACE_MODE_RGMII_TXID) { + pr_err("invalid phy-mode\n"); + return -EINVAL; + } + } else { + priv->phy_if = PHY_INTERFACE_MODE_RGMII; + } pdata->phy_interface = priv->phy_if; priv->has_mdio = true; } diff --git a/drivers/power/regulator/pbias_regulator.c b/drivers/power/regulator/pbias_regulator.c index 4ed3c94e031..88dc9f273ae 100644 --- a/drivers/power/regulator/pbias_regulator.c +++ b/drivers/power/regulator/pbias_regulator.c @@ -238,7 +238,7 @@ static int pbias_regulator_set_value(struct udevice *dev, int uV) if (rc) return rc; - if (uV == 3000000) + if (uV == 3300000) reg |= p->vmode; else if (uV == 1800000) reg &= ~p->vmode; diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig new file mode 100644 index 00000000000..7b4e4d61308 --- /dev/null +++ b/drivers/soc/Kconfig @@ -0,0 +1,5 @@ +menu "SOC (System On Chip) specific Drivers" + +source "drivers/soc/ti/Kconfig" + +endmenu diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile index 42037f99d58..ce253b7aa88 100644 --- a/drivers/soc/Makefile +++ b/drivers/soc/Makefile @@ -2,4 +2,4 @@ # # Makefile for the U-Boot SOC specific device drivers. -obj-$(CONFIG_ARCH_KEYSTONE) += keystone/ +obj-$(CONFIG_SOC_TI) += ti/ diff --git a/drivers/soc/keystone/Makefile b/drivers/soc/keystone/Makefile deleted file mode 100644 index dfebb143e09..00000000000 --- a/drivers/soc/keystone/Makefile +++ /dev/null @@ -1,3 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0+ - -obj-$(CONFIG_TI_KEYSTONE_SERDES) += keystone_serdes.o diff --git a/drivers/soc/ti/Kconfig b/drivers/soc/ti/Kconfig new file mode 100644 index 00000000000..e4f88344487 --- /dev/null +++ b/drivers/soc/ti/Kconfig @@ -0,0 +1,26 @@ +# SPDX-License-Identifier: GPL-2.0+ + +menuconfig SOC_TI + bool "TI SOC drivers support" + +if SOC_TI + +config TI_K3_NAVSS_RINGACC + bool "K3 Ring accelerator Sub System" + depends on ARCH_K3 + select MISC + help + Say y here to support the K3 AM65x Ring accelerator module. + The Ring Accelerator (RINGACC or RA) provides hardware acceleration + to enable straightforward passing of work between a producer + and a consumer. There is one RINGACC module per NAVSS on TI AM65x SoCs + If unsure, say N. 
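[Editor's note] The help text above describes the Ring Accelerator as hardware that passes work between a producer and a consumer. As a rough illustration of the "exposed ring" (RING mode) arithmetic that the k3-navss-ringacc driver added later in this patch uses -- elements of (4 << elm_size) bytes, and software read/write indices that wrap modulo the ring size -- here is a self-contained, host-side model. All names in it (toy_ring, toy_push, toy_pop) are invented for the sketch; in the real driver the ring memory is DMA-coherent and occupancy is tracked by hardware via doorbell writes, which this model collapses into a plain counter.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct toy_ring {
	uint8_t *mem;      /* ring memory (DMA-coherent in the driver) */
	uint32_t size;     /* number of elements */
	uint32_t elm_size; /* element size code: bytes = 4 << elm_size */
	uint32_t windex;   /* producer (write) index */
	uint32_t rindex;   /* consumer (read) index */
	uint32_t occ;      /* occupancy; hardware-tracked in reality */
};

static int toy_push(struct toy_ring *r, const void *elem)
{
	if (r->occ == r->size)
		return -1;	/* ring full */
	memcpy(r->mem + r->windex * (4 << r->elm_size), elem,
	       4 << r->elm_size);
	r->windex = (r->windex + 1) % r->size;	/* same wrap as the driver */
	r->occ++;	/* the driver rings a doorbell instead */
	return 0;
}

static int toy_pop(struct toy_ring *r, void *elem)
{
	if (!r->occ)
		return -1;	/* ring empty */
	memcpy(elem, r->mem + r->rindex * (4 << r->elm_size),
	       4 << r->elm_size);
	r->rindex = (r->rindex + 1) % r->size;
	r->occ--;
	return 0;
}

int main(void)
{
	uint8_t mem[8 * 8];			/* 8 elements of 8 bytes */
	struct toy_ring r = { mem, 8, 1, 0, 0, 0 }; /* elm_size 1 -> 8 B */
	uint64_t in = 0xdeadbeef, out = 0;

	toy_push(&r, &in);
	toy_pop(&r, &out);
	printf("popped %llx\n", (unsigned long long)out);
	return 0;
}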
+ +config TI_KEYSTONE_SERDES + bool "Keystone SerDes driver for ethernet" + depends on ARCH_KEYSTONE + help + SerDes driver for Keystone SoC used for ethernet support on TI + K2 platforms. + +endif # SOC_TI diff --git a/drivers/soc/ti/Makefile b/drivers/soc/ti/Makefile new file mode 100644 index 00000000000..4ec04ee1257 --- /dev/null +++ b/drivers/soc/ti/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0+ + +obj-$(CONFIG_TI_K3_NAVSS_RINGACC) += k3-navss-ringacc.o +obj-$(CONFIG_TI_KEYSTONE_SERDES) += keystone_serdes.o diff --git a/drivers/soc/ti/k3-navss-ringacc.c b/drivers/soc/ti/k3-navss-ringacc.c new file mode 100644 index 00000000000..fcb84f7aa49 --- /dev/null +++ b/drivers/soc/ti/k3-navss-ringacc.c @@ -0,0 +1,1057 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * TI K3 AM65x NAVSS Ring accelerator Manager (RA) subsystem driver + * + * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com + */ + +#include <common.h> +#include <asm/io.h> +#include <malloc.h> +#include <asm/dma-mapping.h> +#include <asm/bitops.h> +#include <dm.h> +#include <dm/read.h> +#include <dm/uclass.h> +#include <linux/compat.h> +#include <linux/soc/ti/k3-navss-ringacc.h> +#include <linux/soc/ti/ti_sci_protocol.h> + +#define set_bit(bit, bitmap) __set_bit(bit, bitmap) +#define clear_bit(bit, bitmap) __clear_bit(bit, bitmap) +#define dma_free_coherent(dev, size, cpu_addr, dma_handle) \ + dma_free_coherent(cpu_addr) +#define dma_zalloc_coherent(dev, size, dma_handle, flag) \ +({ \ + void *ring_mem_virt; \ + ring_mem_virt = dma_alloc_coherent((size), \ + (unsigned long *)(dma_handle)); \ + if (ring_mem_virt) \ + memset(ring_mem_virt, 0, (size)); \ + ring_mem_virt; \ +}) + +static LIST_HEAD(k3_nav_ringacc_list); + +static void ringacc_writel(u32 v, void __iomem *reg) +{ + pr_debug("WRITEL(32): v(%08X)-->reg(%p)\n", v, reg); + writel(v, reg); +} + +static u32 ringacc_readl(void __iomem *reg) +{ + u32 v; + + v = readl(reg); + pr_debug("READL(32): v(%08X)<--reg(%p)\n", v, reg); + return v; +} + +#define KNAV_RINGACC_CFG_RING_SIZE_ELCNT_MASK GENMASK(19, 0) + +/** + * struct k3_nav_ring_rt_regs - The RA Control/Status Registers region + */ +struct k3_nav_ring_rt_regs { + u32 resv_16[4]; + u32 db; /* RT Ring N Doorbell Register */ + u32 resv_4[1]; + u32 occ; /* RT Ring N Occupancy Register */ + u32 indx; /* RT Ring N Current Index Register */ + u32 hwocc; /* RT Ring N Hardware Occupancy Register */ + u32 hwindx; /* RT Ring N Current Index Register */ +}; + +#define KNAV_RINGACC_RT_REGS_STEP 0x1000 + +/** + * struct k3_nav_ring_fifo_regs - The Ring Accelerator Queues Registers region + */ +struct k3_nav_ring_fifo_regs { + u32 head_data[128]; /* Ring Head Entry Data Registers */ + u32 tail_data[128]; /* Ring Tail Entry Data Registers */ + u32 peek_head_data[128]; /* Ring Peek Head Entry Data Regs */ + u32 peek_tail_data[128]; /* Ring Peek Tail Entry Data Regs */ +}; + +/** + * struct k3_ringacc_proxy_gcfg_regs - RA Proxy Global Config MMIO Region + */ +struct k3_ringacc_proxy_gcfg_regs { + u32 revision; /* Revision Register */ + u32 config; /* Config Register */ +}; + +#define K3_RINGACC_PROXY_CFG_THREADS_MASK GENMASK(15, 0) + +/** + * struct k3_ringacc_proxy_target_regs - RA Proxy Datapath MMIO Region + */ +struct k3_ringacc_proxy_target_regs { + u32 control; /* Proxy Control Register */ + u32 status; /* Proxy Status Register */ + u8 resv_512[504]; + u32 data[128]; /* Proxy Data Register */ +}; + +#define K3_RINGACC_PROXY_TARGET_STEP 0x1000 +#define K3_RINGACC_PROXY_NOT_USED (-1) + +enum 
k3_ringacc_proxy_access_mode { + PROXY_ACCESS_MODE_HEAD = 0, + PROXY_ACCESS_MODE_TAIL = 1, + PROXY_ACCESS_MODE_PEEK_HEAD = 2, + PROXY_ACCESS_MODE_PEEK_TAIL = 3, +}; + +#define KNAV_RINGACC_FIFO_WINDOW_SIZE_BYTES (512U) +#define KNAV_RINGACC_FIFO_REGS_STEP 0x1000 +#define KNAV_RINGACC_MAX_DB_RING_CNT (127U) + +/** + * struct k3_nav_ring_ops - Ring operations + */ +struct k3_nav_ring_ops { + int (*push_tail)(struct k3_nav_ring *ring, void *elm); + int (*push_head)(struct k3_nav_ring *ring, void *elm); + int (*pop_tail)(struct k3_nav_ring *ring, void *elm); + int (*pop_head)(struct k3_nav_ring *ring, void *elm); +}; + +/** + * struct k3_nav_ring - RA Ring descriptor + * + * @rt - Ring control/status registers + * @fifos - Ring queues registers + * @proxy - Ring Proxy Datapath registers + * @ring_mem_dma - Ring buffer dma address + * @ring_mem_virt - Ring buffer virt address + * @ops - Ring operations + * @size - Ring size in elements + * @elm_size - Size of the ring element + * @mode - Ring mode + * @flags - flags + * @free - Number of free elements + * @occ - Ring occupancy + * @windex - Write index (only for @K3_NAV_RINGACC_RING_MODE_RING) + * @rindex - Read index (only for @K3_NAV_RINGACC_RING_MODE_RING) + * @ring_id - Ring Id + * @parent - Pointer on struct @k3_nav_ringacc + * @use_count - Use count for shared rings + * @proxy_id - RA Ring Proxy Id (only if @K3_NAV_RINGACC_RING_USE_PROXY) + */ +struct k3_nav_ring { + struct k3_nav_ring_rt_regs __iomem *rt; + struct k3_nav_ring_fifo_regs __iomem *fifos; + struct k3_ringacc_proxy_target_regs __iomem *proxy; + dma_addr_t ring_mem_dma; + void *ring_mem_virt; + struct k3_nav_ring_ops *ops; + u32 size; + enum k3_nav_ring_size elm_size; + enum k3_nav_ring_mode mode; + u32 flags; +#define KNAV_RING_FLAG_BUSY BIT(1) +#define K3_NAV_RING_FLAG_SHARED BIT(2) + u32 free; + u32 occ; + u32 windex; + u32 rindex; + u32 ring_id; + struct k3_nav_ringacc *parent; + u32 use_count; + int proxy_id; +}; + +/** + * struct k3_nav_ringacc - Rings accelerator descriptor + * + * @dev - pointer on RA device + * @proxy_gcfg - RA proxy global config registers + * @proxy_target_base - RA proxy datapath region + * @num_rings - number of ring in RA + * @rm_gp_range - general purpose rings range from tisci + * @dma_ring_reset_quirk - DMA reset w/a enable + * @num_proxies - number of RA proxies + * @rings - array of rings descriptors (struct @k3_nav_ring) + * @list - list of RAs in the system + * @tisci - pointer ti-sci handle + * @tisci_ring_ops - ti-sci rings ops + * @tisci_dev_id - ti-sci device id + */ +struct k3_nav_ringacc { + struct udevice *dev; + struct k3_ringacc_proxy_gcfg_regs __iomem *proxy_gcfg; + void __iomem *proxy_target_base; + u32 num_rings; /* number of rings in Ringacc module */ + unsigned long *rings_inuse; + struct ti_sci_resource *rm_gp_range; + bool dma_ring_reset_quirk; + u32 num_proxies; + unsigned long *proxy_inuse; + + struct k3_nav_ring *rings; + struct list_head list; + + const struct ti_sci_handle *tisci; + const struct ti_sci_rm_ringacc_ops *tisci_ring_ops; + u32 tisci_dev_id; +}; + +static long k3_nav_ringacc_ring_get_fifo_pos(struct k3_nav_ring *ring) +{ + return KNAV_RINGACC_FIFO_WINDOW_SIZE_BYTES - + (4 << ring->elm_size); +} + +static void *k3_nav_ringacc_get_elm_addr(struct k3_nav_ring *ring, u32 idx) +{ + return (idx * (4 << ring->elm_size) + ring->ring_mem_virt); +} + +static int k3_nav_ringacc_ring_push_mem(struct k3_nav_ring *ring, void *elem); +static int k3_nav_ringacc_ring_pop_mem(struct k3_nav_ring *ring, void *elem); + +static 
struct k3_nav_ring_ops k3_nav_mode_ring_ops = { + .push_tail = k3_nav_ringacc_ring_push_mem, + .pop_head = k3_nav_ringacc_ring_pop_mem, +}; + +static int k3_nav_ringacc_ring_push_io(struct k3_nav_ring *ring, void *elem); +static int k3_nav_ringacc_ring_pop_io(struct k3_nav_ring *ring, void *elem); +static int k3_nav_ringacc_ring_push_head_io(struct k3_nav_ring *ring, + void *elem); +static int k3_nav_ringacc_ring_pop_tail_io(struct k3_nav_ring *ring, + void *elem); + +static struct k3_nav_ring_ops k3_nav_mode_msg_ops = { + .push_tail = k3_nav_ringacc_ring_push_io, + .push_head = k3_nav_ringacc_ring_push_head_io, + .pop_tail = k3_nav_ringacc_ring_pop_tail_io, + .pop_head = k3_nav_ringacc_ring_pop_io, +}; + +static int k3_ringacc_ring_push_head_proxy(struct k3_nav_ring *ring, + void *elem); +static int k3_ringacc_ring_push_tail_proxy(struct k3_nav_ring *ring, + void *elem); +static int k3_ringacc_ring_pop_head_proxy(struct k3_nav_ring *ring, void *elem); +static int k3_ringacc_ring_pop_tail_proxy(struct k3_nav_ring *ring, void *elem); + +static struct k3_nav_ring_ops k3_nav_mode_proxy_ops = { + .push_tail = k3_ringacc_ring_push_tail_proxy, + .push_head = k3_ringacc_ring_push_head_proxy, + .pop_tail = k3_ringacc_ring_pop_tail_proxy, + .pop_head = k3_ringacc_ring_pop_head_proxy, +}; + +struct udevice *k3_nav_ringacc_get_dev(struct k3_nav_ringacc *ringacc) +{ + return ringacc->dev; +} + +struct k3_nav_ring *k3_nav_ringacc_request_ring(struct k3_nav_ringacc *ringacc, + int id, u32 flags) +{ + int proxy_id = K3_RINGACC_PROXY_NOT_USED; + + if (id == K3_NAV_RINGACC_RING_ID_ANY) { + /* Request for any general purpose ring */ + struct ti_sci_resource_desc *gp_rings = + &ringacc->rm_gp_range->desc[0]; + unsigned long size; + + size = gp_rings->start + gp_rings->num; + id = find_next_zero_bit(ringacc->rings_inuse, + size, gp_rings->start); + if (id == size) + goto error; + } else if (id < 0) { + goto error; + } + + if (test_bit(id, ringacc->rings_inuse) && + !(ringacc->rings[id].flags & K3_NAV_RING_FLAG_SHARED)) + goto error; + else if (ringacc->rings[id].flags & K3_NAV_RING_FLAG_SHARED) + goto out; + + if (flags & K3_NAV_RINGACC_RING_USE_PROXY) { + proxy_id = find_next_zero_bit(ringacc->proxy_inuse, + ringacc->num_proxies, 0); + if (proxy_id == ringacc->num_proxies) + goto error; + } + + if (!try_module_get(ringacc->dev->driver->owner)) + goto error; + + if (proxy_id != K3_RINGACC_PROXY_NOT_USED) { + set_bit(proxy_id, ringacc->proxy_inuse); + ringacc->rings[id].proxy_id = proxy_id; + pr_debug("Giving ring#%d proxy#%d\n", + id, proxy_id); + } else { + pr_debug("Giving ring#%d\n", id); + } + + set_bit(id, ringacc->rings_inuse); +out: + ringacc->rings[id].use_count++; + return &ringacc->rings[id]; + +error: + return NULL; +} + +static void k3_ringacc_ring_reset_sci(struct k3_nav_ring *ring) +{ + struct k3_nav_ringacc *ringacc = ring->parent; + int ret; + + ret = ringacc->tisci_ring_ops->config( + ringacc->tisci, + TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID, + ringacc->tisci_dev_id, + ring->ring_id, + 0, + 0, + ring->size, + 0, + 0, + 0); + if (ret) + dev_err(ringacc->dev, "TISCI reset ring fail (%d) ring_idx %d\n", + ret, ring->ring_id); +} + +void k3_nav_ringacc_ring_reset(struct k3_nav_ring *ring) +{ + if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY)) + return; + + ring->occ = 0; + ring->free = 0; + ring->rindex = 0; + ring->windex = 0; + + k3_ringacc_ring_reset_sci(ring); +} + +static void k3_ringacc_ring_reconfig_qmode_sci(struct k3_nav_ring *ring, + enum k3_nav_ring_mode mode) +{ + struct k3_nav_ringacc 
*ringacc = ring->parent; + int ret; + + ret = ringacc->tisci_ring_ops->config( + ringacc->tisci, + TI_SCI_MSG_VALUE_RM_RING_MODE_VALID, + ringacc->tisci_dev_id, + ring->ring_id, + 0, + 0, + 0, + mode, + 0, + 0); + if (ret) + dev_err(ringacc->dev, "TISCI reconf qmode fail (%d) ring_idx %d\n", + ret, ring->ring_id); +} + +void k3_nav_ringacc_ring_reset_dma(struct k3_nav_ring *ring, u32 occ) +{ + if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY)) + return; + + if (!ring->parent->dma_ring_reset_quirk) + return; + + if (!occ) + occ = ringacc_readl(&ring->rt->occ); + + if (occ) { + u32 db_ring_cnt, db_ring_cnt_cur; + + pr_debug("%s %u occ: %u\n", __func__, + ring->ring_id, occ); + /* 2. Reset the ring */ + k3_ringacc_ring_reset_sci(ring); + + /* + * 3. Setup the ring in ring/doorbell mode + * (if not already in this mode) + */ + if (ring->mode != K3_NAV_RINGACC_RING_MODE_RING) + k3_ringacc_ring_reconfig_qmode_sci( + ring, K3_NAV_RINGACC_RING_MODE_RING); + /* + * 4. Ring the doorbell 2**22 – ringOcc times. + * This will wrap the internal UDMAP ring state occupancy + * counter (which is 21-bits wide) to 0. + */ + db_ring_cnt = (1U << 22) - occ; + + while (db_ring_cnt != 0) { + /* + * Ring the doorbell with the maximum count each + * iteration if possible to minimize the total + * of writes + */ + if (db_ring_cnt > KNAV_RINGACC_MAX_DB_RING_CNT) + db_ring_cnt_cur = KNAV_RINGACC_MAX_DB_RING_CNT; + else + db_ring_cnt_cur = db_ring_cnt; + + writel(db_ring_cnt_cur, &ring->rt->db); + db_ring_cnt -= db_ring_cnt_cur; + } + + /* 5. Restore the original ring mode (if not ring mode) */ + if (ring->mode != K3_NAV_RINGACC_RING_MODE_RING) + k3_ringacc_ring_reconfig_qmode_sci(ring, ring->mode); + } + + /* 2. Reset the ring */ + k3_nav_ringacc_ring_reset(ring); +} + +static void k3_ringacc_ring_free_sci(struct k3_nav_ring *ring) +{ + struct k3_nav_ringacc *ringacc = ring->parent; + int ret; + + ret = ringacc->tisci_ring_ops->config( + ringacc->tisci, + TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER, + ringacc->tisci_dev_id, + ring->ring_id, + 0, + 0, + 0, + 0, + 0, + 0); + if (ret) + dev_err(ringacc->dev, "TISCI ring free fail (%d) ring_idx %d\n", + ret, ring->ring_id); +} + +int k3_nav_ringacc_ring_free(struct k3_nav_ring *ring) +{ + struct k3_nav_ringacc *ringacc; + + if (!ring) + return -EINVAL; + + ringacc = ring->parent; + + pr_debug("%s flags: 0x%08x\n", __func__, ring->flags); + + if (!test_bit(ring->ring_id, ringacc->rings_inuse)) + return -EINVAL; + + if (--ring->use_count) + goto out; + + if (!(ring->flags & KNAV_RING_FLAG_BUSY)) + goto no_init; + + k3_ringacc_ring_free_sci(ring); + + dma_free_coherent(ringacc->dev, + ring->size * (4 << ring->elm_size), + ring->ring_mem_virt, ring->ring_mem_dma); + ring->flags &= ~KNAV_RING_FLAG_BUSY; + ring->ops = NULL; + if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED) { + clear_bit(ring->proxy_id, ringacc->proxy_inuse); + ring->proxy = NULL; + ring->proxy_id = K3_RINGACC_PROXY_NOT_USED; + } + +no_init: + clear_bit(ring->ring_id, ringacc->rings_inuse); + + module_put(ringacc->dev->driver->owner); + +out: + return 0; +} + +u32 k3_nav_ringacc_get_ring_id(struct k3_nav_ring *ring) +{ + if (!ring) + return -EINVAL; + + return ring->ring_id; +} + +static int k3_nav_ringacc_ring_cfg_sci(struct k3_nav_ring *ring) +{ + struct k3_nav_ringacc *ringacc = ring->parent; + u32 ring_idx; + int ret; + + if (!ringacc->tisci) + return -EINVAL; + + ring_idx = ring->ring_id; + ret = ringacc->tisci_ring_ops->config( + ringacc->tisci, + TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER, + ringacc->tisci_dev_id, + 
ring_idx, + lower_32_bits(ring->ring_mem_dma), + upper_32_bits(ring->ring_mem_dma), + ring->size, + ring->mode, + ring->elm_size, + 0); + if (ret) + dev_err(ringacc->dev, "TISCI config ring fail (%d) ring_idx %d\n", + ret, ring_idx); + + return ret; +} + +int k3_nav_ringacc_ring_cfg(struct k3_nav_ring *ring, + struct k3_nav_ring_cfg *cfg) +{ + struct k3_nav_ringacc *ringacc = ring->parent; + int ret = 0; + + if (!ring || !cfg) + return -EINVAL; + if (cfg->elm_size > K3_NAV_RINGACC_RING_ELSIZE_256 || + cfg->mode > K3_NAV_RINGACC_RING_MODE_QM || + cfg->size & ~KNAV_RINGACC_CFG_RING_SIZE_ELCNT_MASK || + !test_bit(ring->ring_id, ringacc->rings_inuse)) + return -EINVAL; + + if (ring->use_count != 1) + return 0; + + ring->size = cfg->size; + ring->elm_size = cfg->elm_size; + ring->mode = cfg->mode; + ring->occ = 0; + ring->free = 0; + ring->rindex = 0; + ring->windex = 0; + + if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED) + ring->proxy = ringacc->proxy_target_base + + ring->proxy_id * K3_RINGACC_PROXY_TARGET_STEP; + + switch (ring->mode) { + case K3_NAV_RINGACC_RING_MODE_RING: + ring->ops = &k3_nav_mode_ring_ops; + break; + case K3_NAV_RINGACC_RING_MODE_QM: + /* + * In Queue mode elm_size can be 8 only and each operation + * uses 2 element slots + */ + if (cfg->elm_size != K3_NAV_RINGACC_RING_ELSIZE_8 || + cfg->size % 2) + goto err_free_proxy; + case K3_NAV_RINGACC_RING_MODE_MESSAGE: + if (ring->proxy) + ring->ops = &k3_nav_mode_proxy_ops; + else + ring->ops = &k3_nav_mode_msg_ops; + break; + default: + ring->ops = NULL; + ret = -EINVAL; + goto err_free_proxy; + }; + + ring->ring_mem_virt = + dma_zalloc_coherent(ringacc->dev, + ring->size * (4 << ring->elm_size), + &ring->ring_mem_dma, GFP_KERNEL); + if (!ring->ring_mem_virt) { + dev_err(ringacc->dev, "Failed to alloc ring mem\n"); + ret = -ENOMEM; + goto err_free_ops; + } + + ret = k3_nav_ringacc_ring_cfg_sci(ring); + + if (ret) + goto err_free_mem; + + ring->flags |= KNAV_RING_FLAG_BUSY; + ring->flags |= (cfg->flags & K3_NAV_RINGACC_RING_SHARED) ? 
+ K3_NAV_RING_FLAG_SHARED : 0; + + return 0; + +err_free_mem: + dma_free_coherent(ringacc->dev, + ring->size * (4 << ring->elm_size), + ring->ring_mem_virt, + ring->ring_mem_dma); +err_free_ops: + ring->ops = NULL; +err_free_proxy: + ring->proxy = NULL; + return ret; +} + +u32 k3_nav_ringacc_ring_get_size(struct k3_nav_ring *ring) +{ + if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY)) + return -EINVAL; + + return ring->size; +} + +u32 k3_nav_ringacc_ring_get_free(struct k3_nav_ring *ring) +{ + if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY)) + return -EINVAL; + + if (!ring->free) + ring->free = ring->size - ringacc_readl(&ring->rt->occ); + + return ring->free; +} + +u32 k3_nav_ringacc_ring_get_occ(struct k3_nav_ring *ring) +{ + if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY)) + return -EINVAL; + + return ringacc_readl(&ring->rt->occ); +} + +u32 k3_nav_ringacc_ring_is_full(struct k3_nav_ring *ring) +{ + return !k3_nav_ringacc_ring_get_free(ring); +} + +enum k3_ringacc_access_mode { + K3_RINGACC_ACCESS_MODE_PUSH_HEAD, + K3_RINGACC_ACCESS_MODE_POP_HEAD, + K3_RINGACC_ACCESS_MODE_PUSH_TAIL, + K3_RINGACC_ACCESS_MODE_POP_TAIL, + K3_RINGACC_ACCESS_MODE_PEEK_HEAD, + K3_RINGACC_ACCESS_MODE_PEEK_TAIL, +}; + +static int k3_ringacc_ring_cfg_proxy(struct k3_nav_ring *ring, + enum k3_ringacc_proxy_access_mode mode) +{ + u32 val; + + val = ring->ring_id; + val |= mode << 16; + val |= ring->elm_size << 24; + ringacc_writel(val, &ring->proxy->control); + return 0; +} + +static int k3_nav_ringacc_ring_access_proxy( + struct k3_nav_ring *ring, void *elem, + enum k3_ringacc_access_mode access_mode) +{ + void __iomem *ptr; + + ptr = (void __iomem *)&ring->proxy->data; + + switch (access_mode) { + case K3_RINGACC_ACCESS_MODE_PUSH_HEAD: + case K3_RINGACC_ACCESS_MODE_POP_HEAD: + k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_HEAD); + break; + case K3_RINGACC_ACCESS_MODE_PUSH_TAIL: + case K3_RINGACC_ACCESS_MODE_POP_TAIL: + k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_TAIL); + break; + default: + return -EINVAL; + } + + ptr += k3_nav_ringacc_ring_get_fifo_pos(ring); + + switch (access_mode) { + case K3_RINGACC_ACCESS_MODE_POP_HEAD: + case K3_RINGACC_ACCESS_MODE_POP_TAIL: + pr_debug("proxy:memcpy_fromio(x): --> ptr(%p), mode:%d\n", + ptr, access_mode); + memcpy_fromio(elem, ptr, (4 << ring->elm_size)); + ring->occ--; + break; + case K3_RINGACC_ACCESS_MODE_PUSH_TAIL: + case K3_RINGACC_ACCESS_MODE_PUSH_HEAD: + pr_debug("proxy:memcpy_toio(x): --> ptr(%p), mode:%d\n", + ptr, access_mode); + memcpy_toio(ptr, elem, (4 << ring->elm_size)); + ring->free--; + break; + default: + return -EINVAL; + } + + pr_debug("proxy: free%d occ%d\n", + ring->free, ring->occ); + return 0; +} + +static int k3_ringacc_ring_push_head_proxy(struct k3_nav_ring *ring, void *elem) +{ + return k3_nav_ringacc_ring_access_proxy( + ring, elem, K3_RINGACC_ACCESS_MODE_PUSH_HEAD); +} + +static int k3_ringacc_ring_push_tail_proxy(struct k3_nav_ring *ring, void *elem) +{ + return k3_nav_ringacc_ring_access_proxy( + ring, elem, K3_RINGACC_ACCESS_MODE_PUSH_TAIL); +} + +static int k3_ringacc_ring_pop_head_proxy(struct k3_nav_ring *ring, void *elem) +{ + return k3_nav_ringacc_ring_access_proxy( + ring, elem, K3_RINGACC_ACCESS_MODE_POP_HEAD); +} + +static int k3_ringacc_ring_pop_tail_proxy(struct k3_nav_ring *ring, void *elem) +{ + return k3_nav_ringacc_ring_access_proxy( + ring, elem, K3_RINGACC_ACCESS_MODE_POP_HEAD); +} + +static int k3_nav_ringacc_ring_access_io( + struct k3_nav_ring *ring, void *elem, + enum k3_ringacc_access_mode access_mode) 
+{ + void __iomem *ptr; + + switch (access_mode) { + case K3_RINGACC_ACCESS_MODE_PUSH_HEAD: + case K3_RINGACC_ACCESS_MODE_POP_HEAD: + ptr = (void __iomem *)&ring->fifos->head_data; + break; + case K3_RINGACC_ACCESS_MODE_PUSH_TAIL: + case K3_RINGACC_ACCESS_MODE_POP_TAIL: + ptr = (void __iomem *)&ring->fifos->tail_data; + break; + default: + return -EINVAL; + } + + ptr += k3_nav_ringacc_ring_get_fifo_pos(ring); + + switch (access_mode) { + case K3_RINGACC_ACCESS_MODE_POP_HEAD: + case K3_RINGACC_ACCESS_MODE_POP_TAIL: + pr_debug("memcpy_fromio(x): --> ptr(%p), mode:%d\n", + ptr, access_mode); + memcpy_fromio(elem, ptr, (4 << ring->elm_size)); + ring->occ--; + break; + case K3_RINGACC_ACCESS_MODE_PUSH_TAIL: + case K3_RINGACC_ACCESS_MODE_PUSH_HEAD: + pr_debug("memcpy_toio(x): --> ptr(%p), mode:%d\n", + ptr, access_mode); + memcpy_toio(ptr, elem, (4 << ring->elm_size)); + ring->free--; + break; + default: + return -EINVAL; + } + + pr_debug("free%d index%d occ%d index%d\n", + ring->free, ring->windex, ring->occ, ring->rindex); + return 0; +} + +static int k3_nav_ringacc_ring_push_head_io(struct k3_nav_ring *ring, + void *elem) +{ + return k3_nav_ringacc_ring_access_io( + ring, elem, K3_RINGACC_ACCESS_MODE_PUSH_HEAD); +} + +static int k3_nav_ringacc_ring_push_io(struct k3_nav_ring *ring, void *elem) +{ + return k3_nav_ringacc_ring_access_io( + ring, elem, K3_RINGACC_ACCESS_MODE_PUSH_TAIL); +} + +static int k3_nav_ringacc_ring_pop_io(struct k3_nav_ring *ring, void *elem) +{ + return k3_nav_ringacc_ring_access_io( + ring, elem, K3_RINGACC_ACCESS_MODE_POP_HEAD); +} + +static int k3_nav_ringacc_ring_pop_tail_io(struct k3_nav_ring *ring, void *elem) +{ + return k3_nav_ringacc_ring_access_io( + ring, elem, K3_RINGACC_ACCESS_MODE_POP_HEAD); +} + +static int k3_nav_ringacc_ring_push_mem(struct k3_nav_ring *ring, void *elem) +{ + void *elem_ptr; + + elem_ptr = k3_nav_ringacc_get_elm_addr(ring, ring->windex); + + memcpy(elem_ptr, elem, (4 << ring->elm_size)); + + ring->windex = (ring->windex + 1) % ring->size; + ring->free--; + ringacc_writel(1, &ring->rt->db); + + pr_debug("ring_push_mem: free%d index%d\n", + ring->free, ring->windex); + + return 0; +} + +static int k3_nav_ringacc_ring_pop_mem(struct k3_nav_ring *ring, void *elem) +{ + void *elem_ptr; + + elem_ptr = k3_nav_ringacc_get_elm_addr(ring, ring->rindex); + + memcpy(elem, elem_ptr, (4 << ring->elm_size)); + + ring->rindex = (ring->rindex + 1) % ring->size; + ring->occ--; + ringacc_writel(-1, &ring->rt->db); + + pr_debug("ring_pop_mem: occ%d index%d pos_ptr%p\n", + ring->occ, ring->rindex, elem_ptr); + return 0; +} + +int k3_nav_ringacc_ring_push(struct k3_nav_ring *ring, void *elem) +{ + int ret = -EOPNOTSUPP; + + if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY)) + return -EINVAL; + + pr_debug("ring_push%d: free%d index%d\n", + ring->ring_id, ring->free, ring->windex); + + if (k3_nav_ringacc_ring_is_full(ring)) + return -ENOMEM; + + if (ring->ops && ring->ops->push_tail) + ret = ring->ops->push_tail(ring, elem); + + return ret; +} + +int k3_nav_ringacc_ring_push_head(struct k3_nav_ring *ring, void *elem) +{ + int ret = -EOPNOTSUPP; + + if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY)) + return -EINVAL; + + pr_debug("ring_push_head: free%d index%d\n", + ring->free, ring->windex); + + if (k3_nav_ringacc_ring_is_full(ring)) + return -ENOMEM; + + if (ring->ops && ring->ops->push_head) + ret = ring->ops->push_head(ring, elem); + + return ret; +} + +int k3_nav_ringacc_ring_pop(struct k3_nav_ring *ring, void *elem) +{ + int ret = -EOPNOTSUPP; + + if 
(!ring || !(ring->flags & KNAV_RING_FLAG_BUSY)) + return -EINVAL; + + if (!ring->occ) + ring->occ = k3_nav_ringacc_ring_get_occ(ring); + + pr_debug("ring_pop%d: occ%d index%d\n", + ring->ring_id, ring->occ, ring->rindex); + + if (!ring->occ) + return -ENODATA; + + if (ring->ops && ring->ops->pop_head) + ret = ring->ops->pop_head(ring, elem); + + return ret; +} + +int k3_nav_ringacc_ring_pop_tail(struct k3_nav_ring *ring, void *elem) +{ + int ret = -EOPNOTSUPP; + + if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY)) + return -EINVAL; + + if (!ring->occ) + ring->occ = k3_nav_ringacc_ring_get_occ(ring); + + pr_debug("ring_pop_tail: occ%d index%d\n", + ring->occ, ring->rindex); + + if (!ring->occ) + return -ENODATA; + + if (ring->ops && ring->ops->pop_tail) + ret = ring->ops->pop_tail(ring, elem); + + return ret; +} + +static int k3_nav_ringacc_probe_dt(struct k3_nav_ringacc *ringacc) +{ + struct udevice *dev = ringacc->dev; + struct udevice *tisci_dev = NULL; + int ret; + + ringacc->num_rings = dev_read_u32_default(dev, "ti,num-rings", 0); + if (!ringacc->num_rings) { + dev_err(dev, "ti,num-rings read failure %d\n", ret); + return -EINVAL; + } + + ringacc->dma_ring_reset_quirk = + dev_read_bool(dev, "ti,dma-ring-reset-quirk"); + + ret = uclass_get_device_by_name(UCLASS_FIRMWARE, "dmsc", &tisci_dev); + if (ret) { + pr_debug("TISCI RA RM get failed (%d)\n", ret); + ringacc->tisci = NULL; + return -ENODEV; + } + ringacc->tisci = (struct ti_sci_handle *) + (ti_sci_get_handle_from_sysfw(tisci_dev)); + + ret = dev_read_u32_default(dev, "ti,sci", 0); + if (!ret) { + dev_err(dev, "TISCI RA RM disabled\n"); + ringacc->tisci = NULL; + return ret; + } + + ret = dev_read_u32(dev, "ti,sci-dev-id", &ringacc->tisci_dev_id); + if (ret) { + dev_err(dev, "ti,sci-dev-id read failure %d\n", ret); + ringacc->tisci = NULL; + return ret; + } + + ringacc->rm_gp_range = devm_ti_sci_get_of_resource( + ringacc->tisci, dev, + ringacc->tisci_dev_id, + "ti,sci-rm-range-gp-rings"); + if (IS_ERR(ringacc->rm_gp_range)) + ret = PTR_ERR(ringacc->rm_gp_range); + + return 0; +} + +static int k3_nav_ringacc_probe(struct udevice *dev) +{ + struct k3_nav_ringacc *ringacc; + void __iomem *base_fifo, *base_rt; + int ret, i; + + ringacc = dev_get_priv(dev); + if (!ringacc) + return -ENOMEM; + + ringacc->dev = dev; + + ret = k3_nav_ringacc_probe_dt(ringacc); + if (ret) + return ret; + + base_rt = (uint32_t *)devfdt_get_addr_name(dev, "rt"); + pr_debug("rt %p\n", base_rt); + if (IS_ERR(base_rt)) + return PTR_ERR(base_rt); + + base_fifo = (uint32_t *)devfdt_get_addr_name(dev, "fifos"); + pr_debug("fifos %p\n", base_fifo); + if (IS_ERR(base_fifo)) + return PTR_ERR(base_fifo); + + ringacc->proxy_gcfg = (struct k3_ringacc_proxy_gcfg_regs __iomem *) + devfdt_get_addr_name(dev, "proxy_gcfg"); + if (IS_ERR(ringacc->proxy_gcfg)) + return PTR_ERR(ringacc->proxy_gcfg); + ringacc->proxy_target_base = + (struct k3_ringacc_proxy_gcfg_regs __iomem *) + devfdt_get_addr_name(dev, "proxy_target"); + if (IS_ERR(ringacc->proxy_target_base)) + return PTR_ERR(ringacc->proxy_target_base); + + ringacc->num_proxies = ringacc_readl(&ringacc->proxy_gcfg->config) & + K3_RINGACC_PROXY_CFG_THREADS_MASK; + + ringacc->rings = devm_kzalloc(dev, + sizeof(*ringacc->rings) * + ringacc->num_rings, + GFP_KERNEL); + ringacc->rings_inuse = devm_kcalloc(dev, + BITS_TO_LONGS(ringacc->num_rings), + sizeof(unsigned long), GFP_KERNEL); + ringacc->proxy_inuse = devm_kcalloc(dev, + BITS_TO_LONGS(ringacc->num_proxies), + sizeof(unsigned long), GFP_KERNEL); + + if (!ringacc->rings || 
!ringacc->rings_inuse || !ringacc->proxy_inuse) + return -ENOMEM; + + for (i = 0; i < ringacc->num_rings; i++) { + ringacc->rings[i].rt = base_rt + + KNAV_RINGACC_RT_REGS_STEP * i; + ringacc->rings[i].fifos = base_fifo + + KNAV_RINGACC_FIFO_REGS_STEP * i; + ringacc->rings[i].parent = ringacc; + ringacc->rings[i].ring_id = i; + ringacc->rings[i].proxy_id = K3_RINGACC_PROXY_NOT_USED; + } + dev_set_drvdata(dev, ringacc); + + ringacc->tisci_ring_ops = &ringacc->tisci->ops.rm_ring_ops; + + list_add_tail(&ringacc->list, &k3_nav_ringacc_list); + + dev_info(dev, "Ring Accelerator probed rings:%u, gp-rings[%u,%u] sci-dev-id:%u\n", + ringacc->num_rings, + ringacc->rm_gp_range->desc[0].start, + ringacc->rm_gp_range->desc[0].num, + ringacc->tisci_dev_id); + dev_info(dev, "dma-ring-reset-quirk: %s\n", + ringacc->dma_ring_reset_quirk ? "enabled" : "disabled"); + dev_info(dev, "RA Proxy rev. %08x, num_proxies:%u\n", + ringacc_readl(&ringacc->proxy_gcfg->revision), + ringacc->num_proxies); + return 0; +} + +static const struct udevice_id knav_ringacc_ids[] = { + { .compatible = "ti,am654-navss-ringacc" }, + {}, +}; + +U_BOOT_DRIVER(k3_navss_ringacc) = { + .name = "k3-navss-ringacc", + .id = UCLASS_MISC, + .of_match = knav_ringacc_ids, + .probe = k3_nav_ringacc_probe, + .priv_auto_alloc_size = sizeof(struct k3_nav_ringacc), +}; diff --git a/drivers/soc/keystone/keystone_serdes.c b/drivers/soc/ti/keystone_serdes.c index 7907e6f9773..7907e6f9773 100644 --- a/drivers/soc/keystone/keystone_serdes.c +++ b/drivers/soc/ti/keystone_serdes.c |
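[Editor's note] The cpsw-common.c changes earlier in this diff decode a MAC address from two 32-bit control-module efuse words. A standalone sketch of the am33xx byte layout used by cpsw_am33xx_cm_get_macid() follows; the register values passed in below are arbitrary demo inputs, not real efuse contents.

#include <stdint.h>
#include <stdio.h>

/*
 * Mirror of the am33xx byte ordering in cpsw_am33xx_cm_get_macid():
 * the low word supplies the last two MAC bytes, the high word the
 * first four.
 */
static void am33xx_macid_to_mac(uint32_t macid_lo, uint32_t macid_hi,
				uint8_t mac[6])
{
	mac[5] = (macid_lo >> 8) & 0xff;
	mac[4] = macid_lo & 0xff;
	mac[3] = (macid_hi >> 24) & 0xff;
	mac[2] = (macid_hi >> 16) & 0xff;
	mac[1] = (macid_hi >> 8) & 0xff;
	mac[0] = macid_hi & 0xff;
}

int main(void)
{
	uint8_t mac[6];

	/* arbitrary example words, not real efuse data */
	am33xx_macid_to_mac(0x0000cdef, 0x89abcdef, mac);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}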
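[Editor's note] k3_nav_ringacc_ring_reset_dma() above works around stale UDMAP occupancy by ringing the doorbell (2**22 - occ) times, in chunks of at most KNAV_RINGACC_MAX_DB_RING_CNT per register write, so the internal occupancy counter wraps back to zero. A minimal model of just that arithmetic, with the writel() to the doorbell register reduced to a write counter:

#include <stdint.h>
#include <stdio.h>

#define MAX_DB_RING_CNT 127U	/* KNAV_RINGACC_MAX_DB_RING_CNT above */

/* Returns how many doorbell writes the driver's reset loop would issue. */
static unsigned int db_writes_for_occ(uint32_t occ)
{
	uint32_t db_ring_cnt = (1U << 22) - occ;
	unsigned int writes = 0;

	while (db_ring_cnt) {
		uint32_t cur = db_ring_cnt > MAX_DB_RING_CNT ?
			       MAX_DB_RING_CNT : db_ring_cnt;
		/* the driver does: writel(cur, &ring->rt->db); */
		db_ring_cnt -= cur;
		writes++;
	}
	return writes;
}

int main(void)
{
	printf("occ=5 -> %u doorbell writes\n", db_writes_for_occ(5));
	return 0;
}

Ringing with the maximum count per iteration, as the driver comment notes, minimizes the total number of register writes needed to wrap the counter.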
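[Editor's note] Proxy-mode accesses in k3_ringacc_ring_cfg_proxy() above steer the datapath with a single packed control word before each head/tail transfer. A small sketch of that packing, with the field positions read off the driver code (illustration only, not a register specification beyond what the function shows):

#include <stdint.h>
#include <stdio.h>

/*
 * Per k3_ringacc_ring_cfg_proxy(): ring id in bits [15:0], proxy
 * access mode in bits [23:16], element size code in bits [31:24].
 */
static uint32_t proxy_control_word(uint32_t ring_id, uint32_t mode,
				   uint32_t elm_size)
{
	return ring_id | (mode << 16) | (elm_size << 24);
}

int main(void)
{
	/* ring 3, PROXY_ACCESS_MODE_TAIL (1), 8-byte elements (code 1) */
	printf("control = 0x%08x\n", proxy_control_word(3, 1, 1));
	return 0;
}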