-rw-r--r--   MAINTAINERS                     |   7
-rw-r--r--   drivers/net/wan/Kconfig         |  12
-rw-r--r--   drivers/net/wan/Makefile        |   1
-rw-r--r--   drivers/net/wan/fsl_qmc_hdlc.c  | 797
-rw-r--r--   include/linux/bitmap.h          | 101
-rw-r--r--   lib/test_bitmap.c               |  42
6 files changed, 960 insertions, 0 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 90a58c73dbde..fd66be5046f9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8601,6 +8601,13 @@ F:	Documentation/devicetree/bindings/soc/fsl/cpm_qe/fsl,cpm1-scc-qmc.yaml
 F:	drivers/soc/fsl/qe/qmc.c
 F:	include/soc/fsl/qe/qmc.h
 
+FREESCALE QUICC ENGINE QMC HDLC DRIVER
+M:	Herve Codina <herve.codina@bootlin.com>
+L:	netdev@vger.kernel.org
+L:	linuxppc-dev@lists.ozlabs.org
+S:	Maintained
+F:	drivers/net/wan/fsl_qmc_hdlc.c
+
 FREESCALE QUICC ENGINE TSA DRIVER
 M:	Herve Codina <herve.codina@bootlin.com>
 L:	linuxppc-dev@lists.ozlabs.org
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 7dda87756d3f..31ab2136cdf1 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -197,6 +197,18 @@ config FARSYNC
 	  To compile this driver as a module, choose M here: the
 	  module will be called farsync.
 
+config FSL_QMC_HDLC
+	tristate "Freescale QMC HDLC support"
+	depends on HDLC
+	depends on CPM_QMC
+	help
+	  HDLC support using the Freescale QUICC Multichannel Controller (QMC).
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called fsl_qmc_hdlc.
+
+	  If unsure, say N.
+
 config FSL_UCC_HDLC
 	tristate "Freescale QUICC Engine HDLC support"
 	depends on HDLC
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
index 8119b49d1da9..00e9b7ee1e01 100644
--- a/drivers/net/wan/Makefile
+++ b/drivers/net/wan/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_WANXL)		+= wanxl.o
 obj-$(CONFIG_PCI200SYN)		+= pci200syn.o
 obj-$(CONFIG_PC300TOO)		+= pc300too.o
 obj-$(CONFIG_IXP4XX_HSS)	+= ixp4xx_hss.o
+obj-$(CONFIG_FSL_QMC_HDLC)	+= fsl_qmc_hdlc.o
 obj-$(CONFIG_FSL_UCC_HDLC)	+= fsl_ucc_hdlc.o
 obj-$(CONFIG_SLIC_DS26522)	+= slic_ds26522.o
diff --git a/drivers/net/wan/fsl_qmc_hdlc.c b/drivers/net/wan/fsl_qmc_hdlc.c
new file mode 100644
index 000000000000..960371df470a
--- /dev/null
+++ b/drivers/net/wan/fsl_qmc_hdlc.c
@@ -0,0 +1,797 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Freescale QMC HDLC Device Driver
+ *
+ * Copyright 2023 CS GROUP France
+ *
+ * Author: Herve Codina <herve.codina@bootlin.com>
+ */
+
+#include <linux/array_size.h>
+#include <linux/bug.h>
+#include <linux/cleanup.h>
+#include <linux/bitmap.h>
+#include <linux/dma-mapping.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/framer/framer.h>
+#include <linux/hdlc.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <soc/fsl/qe/qmc.h>
+
+struct qmc_hdlc_desc {
+	struct net_device *netdev;
+	struct sk_buff *skb; /* NULL if the descriptor is not in use */
+	dma_addr_t dma_addr;
+	size_t dma_size;
+};
+
+struct qmc_hdlc {
+	struct device *dev;
+	struct qmc_chan *qmc_chan;
+	struct net_device *netdev;
+	struct framer *framer;
+	spinlock_t carrier_lock; /* Protect carrier detection */
+	struct notifier_block nb;
+	bool is_crc32;
+	spinlock_t tx_lock; /* Protect tx descriptors */
+	struct qmc_hdlc_desc tx_descs[8];
+	unsigned int tx_out;
+	struct qmc_hdlc_desc rx_descs[4];
+	u32 slot_map;
+};
+
+static struct qmc_hdlc *netdev_to_qmc_hdlc(struct net_device *netdev)
+{
+	return dev_to_hdlc(netdev)->priv;
+}
+
+static int qmc_hdlc_framer_set_carrier(struct qmc_hdlc *qmc_hdlc)
+{
+	struct framer_status framer_status;
+	int ret;
+
+	if (!qmc_hdlc->framer)
+		return 0;
+
+	guard(spinlock_irqsave)(&qmc_hdlc->carrier_lock);
+
+	ret = framer_get_status(qmc_hdlc->framer, &framer_status);
+	if (ret) {
dev_err(qmc_hdlc->dev, "get framer status failed (%d)\n", ret); + return ret; + } + if (framer_status.link_is_on) + netif_carrier_on(qmc_hdlc->netdev); + else + netif_carrier_off(qmc_hdlc->netdev); + + return 0; +} + +static int qmc_hdlc_framer_notifier(struct notifier_block *nb, unsigned long action, + void *data) +{ + struct qmc_hdlc *qmc_hdlc = container_of(nb, struct qmc_hdlc, nb); + int ret; + + if (action != FRAMER_EVENT_STATUS) + return NOTIFY_DONE; + + ret = qmc_hdlc_framer_set_carrier(qmc_hdlc); + return ret ? NOTIFY_DONE : NOTIFY_OK; +} + +static int qmc_hdlc_framer_start(struct qmc_hdlc *qmc_hdlc) +{ + struct framer_status framer_status; + int ret; + + if (!qmc_hdlc->framer) + return 0; + + ret = framer_power_on(qmc_hdlc->framer); + if (ret) { + dev_err(qmc_hdlc->dev, "framer power-on failed (%d)\n", ret); + return ret; + } + + /* Be sure that get_status is supported */ + ret = framer_get_status(qmc_hdlc->framer, &framer_status); + if (ret) { + dev_err(qmc_hdlc->dev, "get framer status failed (%d)\n", ret); + goto framer_power_off; + } + + qmc_hdlc->nb.notifier_call = qmc_hdlc_framer_notifier; + ret = framer_notifier_register(qmc_hdlc->framer, &qmc_hdlc->nb); + if (ret) { + dev_err(qmc_hdlc->dev, "framer notifier register failed (%d)\n", ret); + goto framer_power_off; + } + + return 0; + +framer_power_off: + framer_power_off(qmc_hdlc->framer); + return ret; +} + +static void qmc_hdlc_framer_stop(struct qmc_hdlc *qmc_hdlc) +{ + if (!qmc_hdlc->framer) + return; + + framer_notifier_unregister(qmc_hdlc->framer, &qmc_hdlc->nb); + framer_power_off(qmc_hdlc->framer); +} + +static int qmc_hdlc_framer_set_iface(struct qmc_hdlc *qmc_hdlc, int if_iface, + const te1_settings *te1) +{ + struct framer_config config; + int ret; + + if (!qmc_hdlc->framer) + return 0; + + ret = framer_get_config(qmc_hdlc->framer, &config); + if (ret) + return ret; + + switch (if_iface) { + case IF_IFACE_E1: + config.iface = FRAMER_IFACE_E1; + break; + case IF_IFACE_T1: + config.iface = FRAMER_IFACE_T1; + break; + default: + return -EINVAL; + } + + switch (te1->clock_type) { + case CLOCK_DEFAULT: + /* Keep current value */ + break; + case CLOCK_EXT: + config.clock_type = FRAMER_CLOCK_EXT; + break; + case CLOCK_INT: + config.clock_type = FRAMER_CLOCK_INT; + break; + default: + return -EINVAL; + } + config.line_clock_rate = te1->clock_rate; + + return framer_set_config(qmc_hdlc->framer, &config); +} + +static int qmc_hdlc_framer_get_iface(struct qmc_hdlc *qmc_hdlc, int *if_iface, te1_settings *te1) +{ + struct framer_config config; + int ret; + + if (!qmc_hdlc->framer) { + *if_iface = IF_IFACE_E1; + return 0; + } + + ret = framer_get_config(qmc_hdlc->framer, &config); + if (ret) + return ret; + + switch (config.iface) { + case FRAMER_IFACE_E1: + *if_iface = IF_IFACE_E1; + break; + case FRAMER_IFACE_T1: + *if_iface = IF_IFACE_T1; + break; + } + + if (!te1) + return 0; /* Only iface type requested */ + + switch (config.clock_type) { + case FRAMER_CLOCK_EXT: + te1->clock_type = CLOCK_EXT; + break; + case FRAMER_CLOCK_INT: + te1->clock_type = CLOCK_INT; + break; + default: + return -EINVAL; + } + te1->clock_rate = config.line_clock_rate; + return 0; +} + +static int qmc_hdlc_framer_init(struct qmc_hdlc *qmc_hdlc) +{ + int ret; + + if (!qmc_hdlc->framer) + return 0; + + ret = framer_init(qmc_hdlc->framer); + if (ret) { + dev_err(qmc_hdlc->dev, "framer init failed (%d)\n", ret); + return ret; + } + + return 0; +} + +static void qmc_hdlc_framer_exit(struct qmc_hdlc *qmc_hdlc) +{ + if (!qmc_hdlc->framer) + return; + + 
+	framer_exit(qmc_hdlc->framer);
+}
+
+static int qmc_hdlc_recv_queue(struct qmc_hdlc *qmc_hdlc, struct qmc_hdlc_desc *desc, size_t size);
+
+#define QMC_HDLC_RX_ERROR_FLAGS				\
+	(QMC_RX_FLAG_HDLC_OVF | QMC_RX_FLAG_HDLC_UNA |	\
+	 QMC_RX_FLAG_HDLC_CRC | QMC_RX_FLAG_HDLC_ABORT)
+
+static void qmc_hdlc_recv_complete(void *context, size_t length, unsigned int flags)
+{
+	struct qmc_hdlc_desc *desc = context;
+	struct net_device *netdev;
+	struct qmc_hdlc *qmc_hdlc;
+	int ret;
+
+	netdev = desc->netdev;
+	qmc_hdlc = netdev_to_qmc_hdlc(netdev);
+
+	dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size, DMA_FROM_DEVICE);
+
+	if (flags & QMC_HDLC_RX_ERROR_FLAGS) {
+		netdev->stats.rx_errors++;
+		if (flags & QMC_RX_FLAG_HDLC_OVF) /* Data overflow */
+			netdev->stats.rx_over_errors++;
+		if (flags & QMC_RX_FLAG_HDLC_UNA) /* Received bits not a multiple of 8 */
+			netdev->stats.rx_frame_errors++;
+		if (flags & QMC_RX_FLAG_HDLC_ABORT) /* Received an abort sequence */
+			netdev->stats.rx_frame_errors++;
+		if (flags & QMC_RX_FLAG_HDLC_CRC) /* CRC error */
+			netdev->stats.rx_crc_errors++;
+		kfree_skb(desc->skb);
+	} else {
+		netdev->stats.rx_packets++;
+		netdev->stats.rx_bytes += length;
+
+		skb_put(desc->skb, length);
+		desc->skb->protocol = hdlc_type_trans(desc->skb, netdev);
+		netif_rx(desc->skb);
+	}
+
+	/* Re-queue a transfer using the same descriptor */
+	ret = qmc_hdlc_recv_queue(qmc_hdlc, desc, desc->dma_size);
+	if (ret) {
+		dev_err(qmc_hdlc->dev, "queue recv desc failed (%d)\n", ret);
+		netdev->stats.rx_errors++;
+	}
+}
+
+static int qmc_hdlc_recv_queue(struct qmc_hdlc *qmc_hdlc, struct qmc_hdlc_desc *desc, size_t size)
+{
+	int ret;
+
+	desc->skb = dev_alloc_skb(size);
+	if (!desc->skb)
+		return -ENOMEM;
+
+	desc->dma_size = size;
+	desc->dma_addr = dma_map_single(qmc_hdlc->dev, desc->skb->data,
+					desc->dma_size, DMA_FROM_DEVICE);
+	ret = dma_mapping_error(qmc_hdlc->dev, desc->dma_addr);
+	if (ret)
+		goto free_skb;
+
+	ret = qmc_chan_read_submit(qmc_hdlc->qmc_chan, desc->dma_addr, desc->dma_size,
+				   qmc_hdlc_recv_complete, desc);
+	if (ret)
+		goto dma_unmap;
+
+	return 0;
+
+dma_unmap:
+	dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size, DMA_FROM_DEVICE);
+free_skb:
+	kfree_skb(desc->skb);
+	desc->skb = NULL;
+	return ret;
+}
+
+static void qmc_hdlc_xmit_complete(void *context)
+{
+	struct qmc_hdlc_desc *desc = context;
+	struct net_device *netdev;
+	struct qmc_hdlc *qmc_hdlc;
+	struct sk_buff *skb;
+
+	netdev = desc->netdev;
+	qmc_hdlc = netdev_to_qmc_hdlc(netdev);
+
+	scoped_guard(spinlock_irqsave, &qmc_hdlc->tx_lock) {
+		dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size, DMA_TO_DEVICE);
+		skb = desc->skb;
+		desc->skb = NULL; /* Release the descriptor */
+		if (netif_queue_stopped(netdev))
+			netif_wake_queue(netdev);
+	}
+
+	netdev->stats.tx_packets++;
+	netdev->stats.tx_bytes += skb->len;
+
+	dev_consume_skb_any(skb);
+}
+
+static int qmc_hdlc_xmit_queue(struct qmc_hdlc *qmc_hdlc, struct qmc_hdlc_desc *desc)
+{
+	int ret;
+
+	desc->dma_addr = dma_map_single(qmc_hdlc->dev, desc->skb->data,
+					desc->dma_size, DMA_TO_DEVICE);
+	ret = dma_mapping_error(qmc_hdlc->dev, desc->dma_addr);
+	if (ret) {
+		dev_err(qmc_hdlc->dev, "failed to map skb\n");
+		return ret;
+	}
+
+	ret = qmc_chan_write_submit(qmc_hdlc->qmc_chan, desc->dma_addr, desc->dma_size,
+				    qmc_hdlc_xmit_complete, desc);
+	if (ret) {
+		dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size, DMA_TO_DEVICE);
+		dev_err(qmc_hdlc->dev, "qmc chan write returns %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static netdev_tx_t qmc_hdlc_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct qmc_hdlc *qmc_hdlc = netdev_to_qmc_hdlc(netdev);
+	struct qmc_hdlc_desc *desc;
+	int err;
+
+	guard(spinlock_irqsave)(&qmc_hdlc->tx_lock);
+
+	desc = &qmc_hdlc->tx_descs[qmc_hdlc->tx_out];
+	if (WARN_ONCE(desc->skb, "No tx descriptors available\n")) {
+		/* Should never happen.
+		 * Previous xmit should have already stopped the queue.
+		 */
+		netif_stop_queue(netdev);
+		return NETDEV_TX_BUSY;
+	}
+
+	desc->netdev = netdev;
+	desc->dma_size = skb->len;
+	desc->skb = skb;
+	err = qmc_hdlc_xmit_queue(qmc_hdlc, desc);
+	if (err) {
+		desc->skb = NULL; /* Release the descriptor */
+		if (err == -EBUSY) {
+			netif_stop_queue(netdev);
+			return NETDEV_TX_BUSY;
+		}
+		dev_kfree_skb(skb);
+		netdev->stats.tx_dropped++;
+		return NETDEV_TX_OK;
+	}
+
+	qmc_hdlc->tx_out = (qmc_hdlc->tx_out + 1) % ARRAY_SIZE(qmc_hdlc->tx_descs);
+
+	if (qmc_hdlc->tx_descs[qmc_hdlc->tx_out].skb)
+		netif_stop_queue(netdev);
+
+	return NETDEV_TX_OK;
+}
+
+static int qmc_hdlc_xlate_slot_map(struct qmc_hdlc *qmc_hdlc,
+				   u32 slot_map, struct qmc_chan_ts_info *ts_info)
+{
+	DECLARE_BITMAP(ts_mask_avail, 64);
+	DECLARE_BITMAP(ts_mask, 64);
+	DECLARE_BITMAP(map, 64);
+
+	/* Tx and Rx available masks must be identical */
+	if (ts_info->rx_ts_mask_avail != ts_info->tx_ts_mask_avail) {
+		dev_err(qmc_hdlc->dev, "tx and rx available timeslots mismatch (0x%llx, 0x%llx)\n",
+			ts_info->rx_ts_mask_avail, ts_info->tx_ts_mask_avail);
+		return -EINVAL;
+	}
+
+	bitmap_from_u64(ts_mask_avail, ts_info->rx_ts_mask_avail);
+	bitmap_from_u64(map, slot_map);
+	bitmap_scatter(ts_mask, map, ts_mask_avail, 64);
+
+	if (bitmap_weight(ts_mask, 64) != bitmap_weight(map, 64)) {
+		dev_err(qmc_hdlc->dev, "Cannot translate timeslots %64pb -> (%64pb, %64pb)\n",
+			map, ts_mask_avail, ts_mask);
+		return -EINVAL;
+	}
+
+	bitmap_to_arr64(&ts_info->tx_ts_mask, ts_mask, 64);
+	ts_info->rx_ts_mask = ts_info->tx_ts_mask;
+	return 0;
+}
+
+static int qmc_hdlc_xlate_ts_info(struct qmc_hdlc *qmc_hdlc,
+				  const struct qmc_chan_ts_info *ts_info, u32 *slot_map)
+{
+	DECLARE_BITMAP(ts_mask_avail, 64);
+	DECLARE_BITMAP(ts_mask, 64);
+	DECLARE_BITMAP(map, 64);
+	u32 slot_array[2];
+
+	/* Tx and Rx masks and available masks must be identical */
+	if (ts_info->rx_ts_mask_avail != ts_info->tx_ts_mask_avail) {
+		dev_err(qmc_hdlc->dev, "tx and rx available timeslots mismatch (0x%llx, 0x%llx)\n",
+			ts_info->rx_ts_mask_avail, ts_info->tx_ts_mask_avail);
+		return -EINVAL;
+	}
+	if (ts_info->rx_ts_mask != ts_info->tx_ts_mask) {
+		dev_err(qmc_hdlc->dev, "tx and rx timeslots mismatch (0x%llx, 0x%llx)\n",
+			ts_info->rx_ts_mask, ts_info->tx_ts_mask);
+		return -EINVAL;
+	}
+
+	bitmap_from_u64(ts_mask_avail, ts_info->rx_ts_mask_avail);
+	bitmap_from_u64(ts_mask, ts_info->rx_ts_mask);
+	bitmap_gather(map, ts_mask, ts_mask_avail, 64);
+
+	if (bitmap_weight(ts_mask, 64) != bitmap_weight(map, 64)) {
+		dev_err(qmc_hdlc->dev, "Cannot translate timeslots (%64pb, %64pb) -> %64pb\n",
+			ts_mask_avail, ts_mask, map);
+		return -EINVAL;
+	}
+
+	bitmap_to_arr32(slot_array, map, 64);
+	if (slot_array[1]) {
+		dev_err(qmc_hdlc->dev, "Slot map out of 32bit (%64pb, %64pb) -> %64pb\n",
+			ts_mask_avail, ts_mask, map);
+		return -EINVAL;
+	}
+
+	*slot_map = slot_array[0];
+	return 0;
+}
+
+static int qmc_hdlc_set_iface(struct qmc_hdlc *qmc_hdlc, int if_iface, const te1_settings *te1)
+{
+	struct qmc_chan_ts_info ts_info;
+	int ret;
+
+	ret = qmc_chan_get_ts_info(qmc_hdlc->qmc_chan, &ts_info);
+	if (ret) {
"get QMC channel ts info failed %d\n", ret); + return ret; + } + ret = qmc_hdlc_xlate_slot_map(qmc_hdlc, te1->slot_map, &ts_info); + if (ret) + return ret; + + ret = qmc_chan_set_ts_info(qmc_hdlc->qmc_chan, &ts_info); + if (ret) { + dev_err(qmc_hdlc->dev, "set QMC channel ts info failed %d\n", ret); + return ret; + } + + qmc_hdlc->slot_map = te1->slot_map; + + ret = qmc_hdlc_framer_set_iface(qmc_hdlc, if_iface, te1); + if (ret) { + dev_err(qmc_hdlc->dev, "framer set iface failed %d\n", ret); + return ret; + } + + return 0; +} + +static int qmc_hdlc_ioctl(struct net_device *netdev, struct if_settings *ifs) +{ + struct qmc_hdlc *qmc_hdlc = netdev_to_qmc_hdlc(netdev); + te1_settings te1; + int ret; + + switch (ifs->type) { + case IF_GET_IFACE: + if (ifs->size < sizeof(te1)) { + /* Retrieve type only */ + ret = qmc_hdlc_framer_get_iface(qmc_hdlc, &ifs->type, NULL); + if (ret) + return ret; + + if (!ifs->size) + return 0; /* only type requested */ + + ifs->size = sizeof(te1); /* data size wanted */ + return -ENOBUFS; + } + + memset(&te1, 0, sizeof(te1)); + + /* Retrieve info from framer */ + ret = qmc_hdlc_framer_get_iface(qmc_hdlc, &ifs->type, &te1); + if (ret) + return ret; + + /* Update slot_map */ + te1.slot_map = qmc_hdlc->slot_map; + + if (copy_to_user(ifs->ifs_ifsu.te1, &te1, sizeof(te1))) + return -EFAULT; + return 0; + + case IF_IFACE_E1: + case IF_IFACE_T1: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (netdev->flags & IFF_UP) + return -EBUSY; + + if (copy_from_user(&te1, ifs->ifs_ifsu.te1, sizeof(te1))) + return -EFAULT; + + return qmc_hdlc_set_iface(qmc_hdlc, ifs->type, &te1); + + default: + return hdlc_ioctl(netdev, ifs); + } +} + +static int qmc_hdlc_open(struct net_device *netdev) +{ + struct qmc_hdlc *qmc_hdlc = netdev_to_qmc_hdlc(netdev); + struct qmc_chan_param chan_param; + struct qmc_hdlc_desc *desc; + int ret; + int i; + + ret = qmc_hdlc_framer_start(qmc_hdlc); + if (ret) + return ret; + + ret = hdlc_open(netdev); + if (ret) + goto framer_stop; + + /* Update carrier */ + qmc_hdlc_framer_set_carrier(qmc_hdlc); + + chan_param.mode = QMC_HDLC; + /* HDLC_MAX_MRU + 4 for the CRC + * HDLC_MAX_MRU + 4 + 8 for the CRC and some extraspace needed by the QMC + */ + chan_param.hdlc.max_rx_buf_size = HDLC_MAX_MRU + 4 + 8; + chan_param.hdlc.max_rx_frame_size = HDLC_MAX_MRU + 4; + chan_param.hdlc.is_crc32 = qmc_hdlc->is_crc32; + ret = qmc_chan_set_param(qmc_hdlc->qmc_chan, &chan_param); + if (ret) { + dev_err(qmc_hdlc->dev, "failed to set param (%d)\n", ret); + goto hdlc_close; + } + + /* Queue as many recv descriptors as possible */ + for (i = 0; i < ARRAY_SIZE(qmc_hdlc->rx_descs); i++) { + desc = &qmc_hdlc->rx_descs[i]; + + desc->netdev = netdev; + ret = qmc_hdlc_recv_queue(qmc_hdlc, desc, chan_param.hdlc.max_rx_buf_size); + if (ret == -EBUSY && i != 0) + break; /* We use all the QMC chan capability */ + if (ret) + goto free_desc; + } + + ret = qmc_chan_start(qmc_hdlc->qmc_chan, QMC_CHAN_ALL); + if (ret) { + dev_err(qmc_hdlc->dev, "qmc chan start failed (%d)\n", ret); + goto free_desc; + } + + netif_start_queue(netdev); + + return 0; + +free_desc: + qmc_chan_reset(qmc_hdlc->qmc_chan, QMC_CHAN_ALL); + while (i--) { + desc = &qmc_hdlc->rx_descs[i]; + dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size, + DMA_FROM_DEVICE); + kfree_skb(desc->skb); + desc->skb = NULL; + } +hdlc_close: + hdlc_close(netdev); +framer_stop: + qmc_hdlc_framer_stop(qmc_hdlc); + return ret; +} + +static int qmc_hdlc_close(struct net_device *netdev) +{ + struct qmc_hdlc *qmc_hdlc = 
+	struct qmc_hdlc *qmc_hdlc = netdev_to_qmc_hdlc(netdev);
+	struct qmc_hdlc_desc *desc;
+	int i;
+
+	qmc_chan_stop(qmc_hdlc->qmc_chan, QMC_CHAN_ALL);
+	qmc_chan_reset(qmc_hdlc->qmc_chan, QMC_CHAN_ALL);
+
+	netif_stop_queue(netdev);
+
+	for (i = 0; i < ARRAY_SIZE(qmc_hdlc->tx_descs); i++) {
+		desc = &qmc_hdlc->tx_descs[i];
+		if (!desc->skb)
+			continue;
+		dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size,
+				 DMA_TO_DEVICE);
+		kfree_skb(desc->skb);
+		desc->skb = NULL;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(qmc_hdlc->rx_descs); i++) {
+		desc = &qmc_hdlc->rx_descs[i];
+		if (!desc->skb)
+			continue;
+		dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size,
+				 DMA_FROM_DEVICE);
+		kfree_skb(desc->skb);
+		desc->skb = NULL;
+	}
+
+	hdlc_close(netdev);
+	qmc_hdlc_framer_stop(qmc_hdlc);
+	return 0;
+}
+
+static int qmc_hdlc_attach(struct net_device *netdev, unsigned short encoding,
+			   unsigned short parity)
+{
+	struct qmc_hdlc *qmc_hdlc = netdev_to_qmc_hdlc(netdev);
+
+	if (encoding != ENCODING_NRZ)
+		return -EINVAL;
+
+	switch (parity) {
+	case PARITY_CRC16_PR1_CCITT:
+		qmc_hdlc->is_crc32 = false;
+		break;
+	case PARITY_CRC32_PR1_CCITT:
+		qmc_hdlc->is_crc32 = true;
+		break;
+	default:
+		dev_err(qmc_hdlc->dev, "unsupported parity %u\n", parity);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static const struct net_device_ops qmc_hdlc_netdev_ops = {
+	.ndo_open = qmc_hdlc_open,
+	.ndo_stop = qmc_hdlc_close,
+	.ndo_start_xmit = hdlc_start_xmit,
+	.ndo_siocwandev = qmc_hdlc_ioctl,
+};
+
+static int qmc_hdlc_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct qmc_chan_ts_info ts_info;
+	struct qmc_hdlc *qmc_hdlc;
+	struct qmc_chan_info info;
+	hdlc_device *hdlc;
+	int ret;
+
+	qmc_hdlc = devm_kzalloc(dev, sizeof(*qmc_hdlc), GFP_KERNEL);
+	if (!qmc_hdlc)
+		return -ENOMEM;
+
+	qmc_hdlc->dev = dev;
+	spin_lock_init(&qmc_hdlc->tx_lock);
+	spin_lock_init(&qmc_hdlc->carrier_lock);
+
+	qmc_hdlc->qmc_chan = devm_qmc_chan_get_bychild(dev, dev->of_node);
+	if (IS_ERR(qmc_hdlc->qmc_chan))
+		return dev_err_probe(dev, PTR_ERR(qmc_hdlc->qmc_chan),
+				     "get QMC channel failed\n");
+
+	ret = qmc_chan_get_info(qmc_hdlc->qmc_chan, &info);
+	if (ret)
+		return dev_err_probe(dev, ret, "get QMC channel info failed\n");
+
+	if (info.mode != QMC_HDLC)
+		return dev_err_probe(dev, -EINVAL, "QMC chan mode %d is not QMC_HDLC\n",
+				     info.mode);
+
+	ret = qmc_chan_get_ts_info(qmc_hdlc->qmc_chan, &ts_info);
+	if (ret)
+		return dev_err_probe(dev, ret, "get QMC channel ts info failed\n");
+
+	ret = qmc_hdlc_xlate_ts_info(qmc_hdlc, &ts_info, &qmc_hdlc->slot_map);
+	if (ret)
+		return ret;
+
+	qmc_hdlc->framer = devm_framer_optional_get(dev, "fsl,framer");
+	if (IS_ERR(qmc_hdlc->framer))
+		return PTR_ERR(qmc_hdlc->framer);
+
+	ret = qmc_hdlc_framer_init(qmc_hdlc);
+	if (ret)
+		return ret;
+
+	qmc_hdlc->netdev = alloc_hdlcdev(qmc_hdlc);
+	if (!qmc_hdlc->netdev) {
+		ret = -ENOMEM;
+		goto framer_exit;
+	}
+
+	hdlc = dev_to_hdlc(qmc_hdlc->netdev);
+	hdlc->attach = qmc_hdlc_attach;
+	hdlc->xmit = qmc_hdlc_xmit;
+	SET_NETDEV_DEV(qmc_hdlc->netdev, dev);
+	qmc_hdlc->netdev->tx_queue_len = ARRAY_SIZE(qmc_hdlc->tx_descs);
+	qmc_hdlc->netdev->netdev_ops = &qmc_hdlc_netdev_ops;
+	ret = register_hdlc_device(qmc_hdlc->netdev);
+	if (ret) {
+		dev_err_probe(dev, ret, "failed to register hdlc device\n");
+		goto free_netdev;
+	}
+
+	platform_set_drvdata(pdev, qmc_hdlc);
+	return 0;
+
+free_netdev:
+	free_netdev(qmc_hdlc->netdev);
+framer_exit:
+	qmc_hdlc_framer_exit(qmc_hdlc);
+	return ret;
+}
+
+static int qmc_hdlc_remove(struct platform_device *pdev)
+{
+	struct qmc_hdlc *qmc_hdlc = platform_get_drvdata(pdev);
+
+	unregister_hdlc_device(qmc_hdlc->netdev);
+	free_netdev(qmc_hdlc->netdev);
+	qmc_hdlc_framer_exit(qmc_hdlc);
+
+	return 0;
+}
+
+static const struct of_device_id qmc_hdlc_id_table[] = {
+	{ .compatible = "fsl,qmc-hdlc" },
+	{} /* sentinel */
+};
+MODULE_DEVICE_TABLE(of, qmc_hdlc_id_table);
+
+static struct platform_driver qmc_hdlc_driver = {
+	.driver = {
+		.name = "fsl-qmc-hdlc",
+		.of_match_table = qmc_hdlc_id_table,
+	},
+	.probe = qmc_hdlc_probe,
+	.remove = qmc_hdlc_remove,
+};
+module_platform_driver(qmc_hdlc_driver);
+
+MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
+MODULE_DESCRIPTION("QMC HDLC driver");
+MODULE_LICENSE("GPL");
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 5814e9ee40ba..00b8c6d4355c 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -63,6 +63,8 @@ struct device;
  *  bitmap_shift_left(dst, src, n, nbits)       *dst = *src << n
  *  bitmap_cut(dst, src, first, n, nbits)       Cut n bits from first, copy rest
  *  bitmap_replace(dst, old, new, mask, nbits)  *dst = (*old & ~(*mask)) | (*new & *mask)
+ *  bitmap_scatter(dst, src, mask, nbits)       *dst = map(dense, sparse)(src)
+ *  bitmap_gather(dst, src, mask, nbits)        *dst = map(sparse, dense)(src)
  *  bitmap_remap(dst, src, old, new, nbits)     *dst = map(old, new)(src)
  *  bitmap_bitremap(oldbit, old, new, nbits)    newbit = map(old, new)(oldbit)
  *  bitmap_onto(dst, orig, relmap, nbits)       *dst = orig relative to relmap
@@ -499,6 +501,105 @@ static inline void bitmap_replace(unsigned long *dst,
 	__bitmap_replace(dst, old, new, mask, nbits);
 }
 
+/**
+ * bitmap_scatter - Scatter a bitmap according to the given mask
+ * @dst: scattered bitmap
+ * @src: gathered bitmap
+ * @mask: mask representing bits to assign to in the scattered bitmap
+ * @nbits: number of bits in each of these bitmaps
+ *
+ * Scatters bitmap with sequential bits according to the given @mask.
+ *
+ * Example:
+ * If @src bitmap = 0x005a, with @mask = 0x1313, @dst will be 0x0302.
+ *
+ * Or in binary form
+ * @src			@mask			@dst
+ * 0000000001011010	0001001100010011	0000001100000010
+ *
+ * (Bits 0, 1, 2, 3, 4, 5 are copied to the bits 0, 1, 4, 8, 9, 12)
+ *
+ * A more 'visual' description of the operation:
+ * src:  0000000001011010
+ *                 ||||||
+ *          +------+|||||
+ *          |  +----+||||
+ *          |  |+----+|||
+ *          |  ||   +-+||
+ *          |  ||   |  ||
+ * mask: ...v..vv...v..vv
+ *       ...0..11...0..10
+ * dst:  0000001100000010
+ *
+ * A relationship exists between bitmap_scatter() and bitmap_gather().
+ * bitmap_gather() can be seen as the 'reverse' bitmap_scatter() operation.
+ * See bitmap_gather() for details related to this relationship.
+ */
+static inline void bitmap_scatter(unsigned long *dst, const unsigned long *src,
+				  const unsigned long *mask, unsigned int nbits)
+{
+	unsigned int n = 0;
+	unsigned int bit;
+
+	bitmap_zero(dst, nbits);
+
+	for_each_set_bit(bit, mask, nbits)
+		__assign_bit(bit, dst, test_bit(n++, src));
+}
+
+/**
+ * bitmap_gather - Gather a bitmap according to given mask
+ * @dst: gathered bitmap
+ * @src: scattered bitmap
+ * @mask: mask representing bits to extract from in the scattered bitmap
+ * @nbits: number of bits in each of these bitmaps
+ *
+ * Gathers bitmap with sparse bits according to the given @mask.
+ *
+ * Example:
+ * If @src bitmap = 0x0302, with @mask = 0x1313, @dst will be 0x001a.
+ *
+ * Or in binary form
+ * @src			@mask			@dst
+ * 0000001100000010	0001001100010011	0000000000011010
+ *
+ * (Bits 0, 1, 4, 8, 9, 12 are copied to the bits 0, 1, 2, 3, 4, 5)
+ *
+ * A more 'visual' description of the operation:
+ * mask: ...v..vv...v..vv
+ * src:  0000001100000010
+ *          ^  ^^   ^  0
+ *          |  ||   | 10
+ *          |  ||   > 010
+ *          |  |+--> 1010
+ *          |  +--> 11010
+ *          +----> 011010
+ * dst:  0000000000011010
+ *
+ * A relationship exists between bitmap_gather() and bitmap_scatter(). See
+ * bitmap_scatter() for the bitmap scatter detailed operations.
+ * Suppose scattered is computed using bitmap_scatter(scattered, src, mask, n).
+ * The operation bitmap_gather(result, scattered, mask, n) leads to a result
+ * equal or equivalent to src.
+ *
+ * The result can be 'equivalent' because bitmap_scatter() and bitmap_gather()
+ * are not bijective.
+ * The result and src values are equivalent in the sense that a call to
+ * bitmap_scatter(res, src, mask, n) and a call to
+ * bitmap_scatter(res, result, mask, n) will lead to the same res value.
+ */
+static inline void bitmap_gather(unsigned long *dst, const unsigned long *src,
+				 const unsigned long *mask, unsigned int nbits)
+{
+	unsigned int n = 0;
+	unsigned int bit;
+
+	bitmap_zero(dst, nbits);
+
+	for_each_set_bit(bit, mask, nbits)
+		__assign_bit(n++, dst, test_bit(bit, src));
+}
+
 static inline void bitmap_next_set_region(unsigned long *bitmap,
 					  unsigned int *rs, unsigned int *re,
 					  unsigned int end)
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index 65f22c2578b0..6b2b33579f56 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -380,6 +380,47 @@ static void __init test_replace(void)
 	expect_eq_bitmap(bmap, exp3_1_0, nbits);
 }
 
+static const unsigned long sg_mask[] __initconst = {
+	BITMAP_FROM_U64(0x000000000000035aULL),
+};
+
+static const unsigned long sg_src[] __initconst = {
+	BITMAP_FROM_U64(0x0000000000000667ULL),
+};
+
+static const unsigned long sg_gather_exp[] __initconst = {
+	BITMAP_FROM_U64(0x0000000000000029ULL),
+};
+
+static const unsigned long sg_scatter_exp[] __initconst = {
+	BITMAP_FROM_U64(0x000000000000021aULL),
+};
+
+static void __init test_bitmap_sg(void)
+{
+	unsigned int nbits = 64;
+	DECLARE_BITMAP(bmap_gather, 100);
+	DECLARE_BITMAP(bmap_scatter, 100);
+	DECLARE_BITMAP(bmap_tmp, 100);
+	DECLARE_BITMAP(bmap_res, 100);
+
+	/* Simple gather call */
+	bitmap_zero(bmap_gather, 100);
+	bitmap_gather(bmap_gather, sg_src, sg_mask, nbits);
+	expect_eq_bitmap(sg_gather_exp, bmap_gather, nbits);
+
+	/* Simple scatter call */
+	bitmap_zero(bmap_scatter, 100);
+	bitmap_scatter(bmap_scatter, sg_src, sg_mask, nbits);
+	expect_eq_bitmap(sg_scatter_exp, bmap_scatter, nbits);
+
+	/* Scatter/gather relationship */
+	bitmap_zero(bmap_tmp, 100);
+	bitmap_gather(bmap_tmp, bmap_scatter, sg_mask, nbits);
+	bitmap_scatter(bmap_res, bmap_tmp, sg_mask, nbits);
+	expect_eq_bitmap(bmap_scatter, bmap_res, nbits);
+}
+
 #define PARSE_TIME	0x1
 #define NO_LEN		0x2
@@ -1252,6 +1293,7 @@ static void __init selftest(void)
 	test_copy();
 	test_bitmap_region();
 	test_replace();
+	test_bitmap_sg();
 	test_bitmap_arr32();
 	test_bitmap_arr64();
 	test_bitmap_parse();
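The driver relies on this scatter/gather pair in two places: qmc_hdlc_xlate_slot_map() scatters a dense ifconfig-style slot map onto the channel's available-timeslot mask, and qmc_hdlc_xlate_ts_info() gathers a timeslot mask back into a dense slot map. The standalone C sketch below mirrors those semantics over plain uint64_t values so the translation can be checked outside the kernel. It is illustrative only: scatter64()/gather64() are stand-ins for the kernel's bitmap_scatter()/bitmap_gather(), and the avail/slot_map values are made up for the demo, not taken from the patch.

/* Userspace sketch of the bitmap_scatter()/bitmap_gather() semantics above. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Scatter: bit n of src is written to the n-th set position of mask */
static uint64_t scatter64(uint64_t src, uint64_t mask)
{
	uint64_t dst = 0;
	unsigned int n = 0;

	for (unsigned int bit = 0; bit < 64; bit++) {
		if (mask & (1ULL << bit)) {
			if (src & (1ULL << n))
				dst |= 1ULL << bit;
			n++;
		}
	}
	return dst;
}

/* Gather: the n-th set position of mask is read back into bit n of dst */
static uint64_t gather64(uint64_t src, uint64_t mask)
{
	uint64_t dst = 0;
	unsigned int n = 0;

	for (unsigned int bit = 0; bit < 64; bit++) {
		if (mask & (1ULL << bit)) {
			if (src & (1ULL << bit))
				dst |= 1ULL << n;
			n++;
		}
	}
	return dst;
}

int main(void)
{
	/* Values from the bitmap.h kernel-doc examples */
	assert(scatter64(0x005a, 0x1313) == 0x0302);
	assert(gather64(0x0302, 0x1313) == 0x001a);

	/*
	 * Driver-style use: a dense slot map (timeslots 0-2) is scattered
	 * over the available-timeslot mask reported by the QMC, then
	 * gathered back. The avail value is hypothetical.
	 */
	uint64_t avail = 0xf0;   /* hypothetical rx/tx_ts_mask_avail */
	uint64_t slot_map = 0x7; /* use the first three usable timeslots */
	uint64_t ts_mask = scatter64(slot_map, avail);

	printf("ts_mask = 0x%llx\n", (unsigned long long)ts_mask); /* 0x70 */
	assert(gather64(ts_mask, avail) == slot_map);
	return 0;
}

Scatter followed by gather returns the original map as long as every set bit of the map lands within the popcount of the mask; the bitmap_weight() comparison in both xlate helpers rejects exactly the cases where bits would otherwise be silently dropped, which is also what the scatter/gather relationship check in test_bitmap_sg() exercises.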