Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/net/can/Makefile     |   3
-rw-r--r-- | drivers/net/can/flexcan.c    | 419
-rw-r--r-- | drivers/net/can/rx-offload.c | 289
3 files changed, 534 insertions, 177 deletions
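
Usage sketch (not part of the commit): the patch below adds a generic rx-offload helper that a CAN driver hooks into by providing a mailbox_read() callback and registering it with can_rx_offload_add_fifo() (in-order hardware FIFO) or can_rx_offload_add_timestamp() (per-mailbox timestamps, sorted in software); received frames are then queued from the IRQ handler and delivered through NAPI. The foo_* names below are hypothetical, only the API calls and the callback signature are taken from the diff that follows.

#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct foo_priv {
	struct can_priv can;		/* must stay first for the CAN core */
	struct can_rx_offload offload;
};

/* Called by the rx-offload helper for each pending mailbox 'n'.
 * Fill *cf and *timestamp; return 0 if the mailbox turned out to be empty.
 */
static unsigned int foo_mailbox_read(struct can_rx_offload *offload,
				     struct can_frame *cf,
				     u32 *timestamp, unsigned int n)
{
	/* hypothetical hardware access: read mailbox n into *cf */
	cf->can_id = 0x123;
	cf->can_dlc = 0;
	*timestamp = 0;		/* unused in FIFO mode */
	return 1;
}

static int foo_setup_offload(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	priv->offload.mailbox_read = foo_mailbox_read;

	/* FIFO mode: frames already arrive in order, no sorting needed.
	 * For timestamp mode, set offload.mb_first/mb_last and call
	 * can_rx_offload_add_timestamp() instead.
	 */
	return can_rx_offload_add_fifo(dev, &priv->offload, 64);
}

static irqreturn_t foo_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct foo_priv *priv = netdev_priv(dev);

	/* queue everything pending in the hardware FIFO;
	 * the NAPI poll of the helper delivers the skbs later
	 */
	can_rx_offload_irq_offload_fifo(&priv->offload);

	return IRQ_HANDLED;
}
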
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile index 7a85495dbb0c..0da4f2f5c7e3 100644 --- a/drivers/net/can/Makefile +++ b/drivers/net/can/Makefile @@ -6,7 +6,8 @@ obj-$(CONFIG_CAN_VCAN) += vcan.o obj-$(CONFIG_CAN_SLCAN) += slcan.o obj-$(CONFIG_CAN_DEV) += can-dev.o -can-dev-y := dev.o +can-dev-y += dev.o +can-dev-y += rx-offload.o can-dev-$(CONFIG_CAN_LEDS) += led.o diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 43cfce8b076b..ea57fed375c6 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -3,7 +3,8 @@ * * Copyright (c) 2005-2006 Varma Electronics Oy * Copyright (c) 2009 Sascha Hauer, Pengutronix - * Copyright (c) 2010 Marc Kleine-Budde, Pengutronix + * Copyright (c) 2010-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de> + * Copyright (c) 2014 David Jander, Protonic Holland * * Based on code originally by Andrey Volkov <avolkov@varma-el.com> * @@ -24,6 +25,7 @@ #include <linux/can/dev.h> #include <linux/can/error.h> #include <linux/can/led.h> +#include <linux/can/rx-offload.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/interrupt.h> @@ -55,9 +57,10 @@ #define FLEXCAN_MCR_WAK_SRC BIT(19) #define FLEXCAN_MCR_DOZE BIT(18) #define FLEXCAN_MCR_SRX_DIS BIT(17) -#define FLEXCAN_MCR_BCC BIT(16) +#define FLEXCAN_MCR_IRMQ BIT(16) #define FLEXCAN_MCR_LPRIO_EN BIT(13) #define FLEXCAN_MCR_AEN BIT(12) +/* MCR_MAXMB: maximum used MBs is MAXMB + 1 */ #define FLEXCAN_MCR_MAXMB(x) ((x) & 0x7f) #define FLEXCAN_MCR_IDAM_A (0x0 << 8) #define FLEXCAN_MCR_IDAM_B (0x1 << 8) @@ -143,17 +146,20 @@ /* FLEXCAN interrupt flag register (IFLAG) bits */ /* Errata ERR005829 step7: Reserve first valid MB */ -#define FLEXCAN_TX_BUF_RESERVED 8 -#define FLEXCAN_TX_BUF_ID 9 -#define FLEXCAN_IFLAG_BUF(x) BIT(x) +#define FLEXCAN_TX_MB_RESERVED_OFF_FIFO 8 +#define FLEXCAN_TX_MB_OFF_FIFO 9 +#define FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP 0 +#define FLEXCAN_TX_MB_OFF_TIMESTAMP 1 +#define FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST (FLEXCAN_TX_MB_OFF_TIMESTAMP + 1) +#define FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST 63 +#define FLEXCAN_IFLAG_MB(x) BIT(x) #define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7) #define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6) #define FLEXCAN_IFLAG_RX_FIFO_AVAILABLE BIT(5) -#define FLEXCAN_IFLAG_DEFAULT \ - (FLEXCAN_IFLAG_RX_FIFO_OVERFLOW | FLEXCAN_IFLAG_RX_FIFO_AVAILABLE | \ - FLEXCAN_IFLAG_BUF(FLEXCAN_TX_BUF_ID)) /* FLEXCAN message buffers */ +#define FLEXCAN_MB_CODE_MASK (0xf << 24) +#define FLEXCAN_MB_CODE_RX_BUSY_BIT (0x1 << 24) #define FLEXCAN_MB_CODE_RX_INACTIVE (0x0 << 24) #define FLEXCAN_MB_CODE_RX_EMPTY (0x4 << 24) #define FLEXCAN_MB_CODE_RX_FULL (0x2 << 24) @@ -189,7 +195,9 @@ */ #define FLEXCAN_QUIRK_BROKEN_ERR_STATE BIT(1) /* [TR]WRN_INT not connected */ #define FLEXCAN_QUIRK_DISABLE_RXFG BIT(2) /* Disable RX FIFO Global mask */ -#define FLEXCAN_QUIRK_DISABLE_MECR BIT(3) /* Disble Memory error detection */ +#define FLEXCAN_QUIRK_ENABLE_EACEN_RRS BIT(3) /* Enable EACEN and RRS bit in ctrl2 */ +#define FLEXCAN_QUIRK_DISABLE_MECR BIT(4) /* Disble Memory error detection */ +#define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP BIT(5) /* Use timestamp based offloading */ /* Structure of the message buffer */ struct flexcan_mb { @@ -213,7 +221,10 @@ struct flexcan_regs { u32 imask1; /* 0x28 */ u32 iflag2; /* 0x2c */ u32 iflag1; /* 0x30 */ - u32 ctrl2; /* 0x34 */ + union { /* 0x34 */ + u32 gfwr_mx28; /* MX28, MX53 */ + u32 ctrl2; /* MX6, VF610 */ + }; u32 esr2; /* 0x38 */ u32 imeur; /* 0x3c */ u32 lrfr; /* 0x40 */ @@ -232,7 +243,11 @@ struct 
flexcan_regs { * size conf'ed via ctrl2::RFFN * (mx6, vf610) */ - u32 _reserved4[408]; + u32 _reserved4[256]; /* 0x480 */ + u32 rximr[64]; /* 0x880 */ + u32 _reserved5[24]; /* 0x980 */ + u32 gfwr_mx6; /* 0x9e0 - MX6 */ + u32 _reserved6[63]; /* 0x9e4 */ u32 mecr; /* 0xae0 */ u32 erriar; /* 0xae4 */ u32 erridpr; /* 0xae8 */ @@ -249,31 +264,36 @@ struct flexcan_devtype_data { struct flexcan_priv { struct can_priv can; - struct napi_struct napi; + struct can_rx_offload offload; struct flexcan_regs __iomem *regs; - u32 reg_esr; + struct flexcan_mb __iomem *tx_mb; + struct flexcan_mb __iomem *tx_mb_reserved; + u8 tx_mb_idx; u32 reg_ctrl_default; + u32 reg_imask1_default; + u32 reg_imask2_default; struct clk *clk_ipg; struct clk *clk_per; - struct flexcan_platform_data *pdata; const struct flexcan_devtype_data *devtype_data; struct regulator *reg_xceiver; }; -static struct flexcan_devtype_data fsl_p1010_devtype_data = { +static const struct flexcan_devtype_data fsl_p1010_devtype_data = { .quirks = FLEXCAN_QUIRK_BROKEN_ERR_STATE, }; -static struct flexcan_devtype_data fsl_imx28_devtype_data; +static const struct flexcan_devtype_data fsl_imx28_devtype_data; -static struct flexcan_devtype_data fsl_imx6q_devtype_data = { - .quirks = FLEXCAN_QUIRK_DISABLE_RXFG, +static const struct flexcan_devtype_data fsl_imx6q_devtype_data = { + .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | + FLEXCAN_QUIRK_USE_OFF_TIMESTAMP, }; -static struct flexcan_devtype_data fsl_vf610_devtype_data = { - .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_DISABLE_MECR, +static const struct flexcan_devtype_data fsl_vf610_devtype_data = { + .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | + FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP, }; static const struct can_bittiming_const flexcan_bittiming_const = { @@ -331,13 +351,6 @@ static inline int flexcan_transceiver_disable(const struct flexcan_priv *priv) return regulator_disable(priv->reg_xceiver); } -static inline int flexcan_has_and_handle_berr(const struct flexcan_priv *priv, - u32 reg_esr) -{ - return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) && - (reg_esr & FLEXCAN_ESR_ERR_BUS); -} - static int flexcan_chip_enable(struct flexcan_priv *priv) { struct flexcan_regs __iomem *regs = priv->regs; @@ -468,7 +481,6 @@ static int flexcan_get_berr_counter(const struct net_device *dev, static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev) { const struct flexcan_priv *priv = netdev_priv(dev); - struct flexcan_regs __iomem *regs = priv->regs; struct can_frame *cf = (struct can_frame *)skb->data; u32 can_id; u32 data; @@ -491,68 +503,73 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev) if (cf->can_dlc > 0) { data = be32_to_cpup((__be32 *)&cf->data[0]); - flexcan_write(data, ®s->mb[FLEXCAN_TX_BUF_ID].data[0]); + flexcan_write(data, &priv->tx_mb->data[0]); } if (cf->can_dlc > 3) { data = be32_to_cpup((__be32 *)&cf->data[4]); - flexcan_write(data, ®s->mb[FLEXCAN_TX_BUF_ID].data[1]); + flexcan_write(data, &priv->tx_mb->data[1]); } can_put_echo_skb(skb, dev, 0); - flexcan_write(can_id, ®s->mb[FLEXCAN_TX_BUF_ID].can_id); - flexcan_write(ctrl, ®s->mb[FLEXCAN_TX_BUF_ID].can_ctrl); + flexcan_write(can_id, &priv->tx_mb->can_id); + flexcan_write(ctrl, &priv->tx_mb->can_ctrl); /* Errata ERR005829 step8: * Write twice INACTIVE(0x8) code to first MB. 
*/ flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, - ®s->mb[FLEXCAN_TX_BUF_RESERVED].can_ctrl); + &priv->tx_mb_reserved->can_ctrl); flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, - ®s->mb[FLEXCAN_TX_BUF_RESERVED].can_ctrl); + &priv->tx_mb_reserved->can_ctrl); return NETDEV_TX_OK; } -static void do_bus_err(struct net_device *dev, - struct can_frame *cf, u32 reg_esr) +static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr) { struct flexcan_priv *priv = netdev_priv(dev); - int rx_errors = 0, tx_errors = 0; + struct sk_buff *skb; + struct can_frame *cf; + bool rx_errors = false, tx_errors = false; + + skb = alloc_can_err_skb(dev, &cf); + if (unlikely(!skb)) + return; cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; if (reg_esr & FLEXCAN_ESR_BIT1_ERR) { netdev_dbg(dev, "BIT1_ERR irq\n"); cf->data[2] |= CAN_ERR_PROT_BIT1; - tx_errors = 1; + tx_errors = true; } if (reg_esr & FLEXCAN_ESR_BIT0_ERR) { netdev_dbg(dev, "BIT0_ERR irq\n"); cf->data[2] |= CAN_ERR_PROT_BIT0; - tx_errors = 1; + tx_errors = true; } if (reg_esr & FLEXCAN_ESR_ACK_ERR) { netdev_dbg(dev, "ACK_ERR irq\n"); cf->can_id |= CAN_ERR_ACK; cf->data[3] = CAN_ERR_PROT_LOC_ACK; - tx_errors = 1; + tx_errors = true; } if (reg_esr & FLEXCAN_ESR_CRC_ERR) { netdev_dbg(dev, "CRC_ERR irq\n"); cf->data[2] |= CAN_ERR_PROT_BIT; cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; - rx_errors = 1; + rx_errors = true; } if (reg_esr & FLEXCAN_ESR_FRM_ERR) { netdev_dbg(dev, "FRM_ERR irq\n"); cf->data[2] |= CAN_ERR_PROT_FORM; - rx_errors = 1; + rx_errors = true; } if (reg_esr & FLEXCAN_ESR_STF_ERR) { netdev_dbg(dev, "STF_ERR irq\n"); cf->data[2] |= CAN_ERR_PROT_STUFF; - rx_errors = 1; + rx_errors = true; } priv->can.can_stats.bus_error++; @@ -560,32 +577,16 @@ static void do_bus_err(struct net_device *dev, dev->stats.rx_errors++; if (tx_errors) dev->stats.tx_errors++; -} - -static int flexcan_poll_bus_err(struct net_device *dev, u32 reg_esr) -{ - struct sk_buff *skb; - struct can_frame *cf; - - skb = alloc_can_err_skb(dev, &cf); - if (unlikely(!skb)) - return 0; - - do_bus_err(dev, cf, reg_esr); - - dev->stats.rx_packets++; - dev->stats.rx_bytes += cf->can_dlc; - netif_receive_skb(skb); - return 1; + can_rx_offload_irq_queue_err_skb(&priv->offload, skb); } -static int flexcan_poll_state(struct net_device *dev, u32 reg_esr) +static void flexcan_irq_state(struct net_device *dev, u32 reg_esr) { struct flexcan_priv *priv = netdev_priv(dev); struct sk_buff *skb; struct can_frame *cf; - enum can_state new_state = 0, rx_state = 0, tx_state = 0; + enum can_state new_state, rx_state, tx_state; int flt; struct can_berr_counter bec; @@ -606,33 +607,63 @@ static int flexcan_poll_state(struct net_device *dev, u32 reg_esr) /* state hasn't changed */ if (likely(new_state == priv->can.state)) - return 0; + return; skb = alloc_can_err_skb(dev, &cf); if (unlikely(!skb)) - return 0; + return; can_change_state(dev, cf, tx_state, rx_state); if (unlikely(new_state == CAN_STATE_BUS_OFF)) can_bus_off(dev); - dev->stats.rx_packets++; - dev->stats.rx_bytes += cf->can_dlc; - netif_receive_skb(skb); + can_rx_offload_irq_queue_err_skb(&priv->offload, skb); +} - return 1; +static inline struct flexcan_priv *rx_offload_to_priv(struct can_rx_offload *offload) +{ + return container_of(offload, struct flexcan_priv, offload); } -static void flexcan_read_fifo(const struct net_device *dev, - struct can_frame *cf) +static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload, + struct can_frame *cf, + u32 *timestamp, unsigned int n) { - const struct flexcan_priv *priv = netdev_priv(dev); + 
struct flexcan_priv *priv = rx_offload_to_priv(offload); struct flexcan_regs __iomem *regs = priv->regs; - struct flexcan_mb __iomem *mb = ®s->mb[0]; - u32 reg_ctrl, reg_id; + struct flexcan_mb __iomem *mb = ®s->mb[n]; + u32 reg_ctrl, reg_id, reg_iflag1; + + if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { + u32 code; + + do { + reg_ctrl = flexcan_read(&mb->can_ctrl); + } while (reg_ctrl & FLEXCAN_MB_CODE_RX_BUSY_BIT); + + /* is this MB empty? */ + code = reg_ctrl & FLEXCAN_MB_CODE_MASK; + if ((code != FLEXCAN_MB_CODE_RX_FULL) && + (code != FLEXCAN_MB_CODE_RX_OVERRUN)) + return 0; + + if (code == FLEXCAN_MB_CODE_RX_OVERRUN) { + /* This MB was overrun, we lost data */ + offload->dev->stats.rx_over_errors++; + offload->dev->stats.rx_errors++; + } + } else { + reg_iflag1 = flexcan_read(®s->iflag1); + if (!(reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE)) + return 0; + + reg_ctrl = flexcan_read(&mb->can_ctrl); + } + + /* increase timstamp to full 32 bit */ + *timestamp = reg_ctrl << 16; - reg_ctrl = flexcan_read(&mb->can_ctrl); reg_id = flexcan_read(&mb->can_id); if (reg_ctrl & FLEXCAN_MB_CNT_IDE) cf->can_id = ((reg_id >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG; @@ -647,69 +678,31 @@ static void flexcan_read_fifo(const struct net_device *dev, *(__be32 *)(cf->data + 4) = cpu_to_be32(flexcan_read(&mb->data[1])); /* mark as read */ - flexcan_write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, ®s->iflag1); - flexcan_read(®s->timer); -} - -static int flexcan_read_frame(struct net_device *dev) -{ - struct net_device_stats *stats = &dev->stats; - struct can_frame *cf; - struct sk_buff *skb; - - skb = alloc_can_skb(dev, &cf); - if (unlikely(!skb)) { - stats->rx_dropped++; - return 0; + if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { + /* Clear IRQ */ + if (n < 32) + flexcan_write(BIT(n), ®s->iflag1); + else + flexcan_write(BIT(n - 32), ®s->iflag2); + } else { + flexcan_write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, ®s->iflag1); + flexcan_read(®s->timer); } - flexcan_read_fifo(dev, cf); - - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; - netif_receive_skb(skb); - - can_led_event(dev, CAN_LED_EVENT_RX); - return 1; } -static int flexcan_poll(struct napi_struct *napi, int quota) + +static inline u64 flexcan_read_reg_iflag_rx(struct flexcan_priv *priv) { - struct net_device *dev = napi->dev; - const struct flexcan_priv *priv = netdev_priv(dev); struct flexcan_regs __iomem *regs = priv->regs; - u32 reg_iflag1, reg_esr; - int work_done = 0; - - /* The error bits are cleared on read, - * use saved value from irq handler. 
- */ - reg_esr = flexcan_read(®s->esr) | priv->reg_esr; - - /* handle state changes */ - work_done += flexcan_poll_state(dev, reg_esr); + u32 iflag1, iflag2; - /* handle RX-FIFO */ - reg_iflag1 = flexcan_read(®s->iflag1); - while (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE && - work_done < quota) { - work_done += flexcan_read_frame(dev); - reg_iflag1 = flexcan_read(®s->iflag1); - } - - /* report bus errors */ - if (flexcan_has_and_handle_berr(priv, reg_esr) && work_done < quota) - work_done += flexcan_poll_bus_err(dev, reg_esr); + iflag2 = flexcan_read(®s->iflag2) & priv->reg_imask2_default; + iflag1 = flexcan_read(®s->iflag1) & priv->reg_imask1_default & + ~FLEXCAN_IFLAG_MB(priv->tx_mb_idx); - if (work_done < quota) { - napi_complete_done(napi, work_done); - /* enable IRQs */ - flexcan_write(FLEXCAN_IFLAG_DEFAULT, ®s->imask1); - flexcan_write(priv->reg_ctrl_default, ®s->ctrl); - } - - return work_done; + return (u64)iflag2 << 32 | iflag1; } static irqreturn_t flexcan_irq(int irq, void *dev_id) @@ -718,55 +711,70 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) struct net_device_stats *stats = &dev->stats; struct flexcan_priv *priv = netdev_priv(dev); struct flexcan_regs __iomem *regs = priv->regs; + irqreturn_t handled = IRQ_NONE; u32 reg_iflag1, reg_esr; reg_iflag1 = flexcan_read(®s->iflag1); - reg_esr = flexcan_read(®s->esr); - /* ACK all bus error and state change IRQ sources */ - if (reg_esr & FLEXCAN_ESR_ALL_INT) - flexcan_write(reg_esr & FLEXCAN_ESR_ALL_INT, ®s->esr); - - /* schedule NAPI in case of: - * - rx IRQ - * - state change IRQ - * - bus error IRQ and bus error reporting is activated - */ - if ((reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE) || - (reg_esr & FLEXCAN_ESR_ERR_STATE) || - flexcan_has_and_handle_berr(priv, reg_esr)) { - /* The error bits are cleared on read, - * save them for later use. 
- */ - priv->reg_esr = reg_esr & FLEXCAN_ESR_ERR_BUS; - flexcan_write(FLEXCAN_IFLAG_DEFAULT & - ~FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, ®s->imask1); - flexcan_write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL, - ®s->ctrl); - napi_schedule(&priv->napi); - } + /* reception interrupt */ + if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { + u64 reg_iflag; + int ret; + + while ((reg_iflag = flexcan_read_reg_iflag_rx(priv))) { + handled = IRQ_HANDLED; + ret = can_rx_offload_irq_offload_timestamp(&priv->offload, + reg_iflag); + if (!ret) + break; + } + } else { + if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE) { + handled = IRQ_HANDLED; + can_rx_offload_irq_offload_fifo(&priv->offload); + } - /* FIFO overflow */ - if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_OVERFLOW) { - flexcan_write(FLEXCAN_IFLAG_RX_FIFO_OVERFLOW, ®s->iflag1); - dev->stats.rx_over_errors++; - dev->stats.rx_errors++; + /* FIFO overflow interrupt */ + if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_OVERFLOW) { + handled = IRQ_HANDLED; + flexcan_write(FLEXCAN_IFLAG_RX_FIFO_OVERFLOW, ®s->iflag1); + dev->stats.rx_over_errors++; + dev->stats.rx_errors++; + } } /* transmission complete interrupt */ - if (reg_iflag1 & (1 << FLEXCAN_TX_BUF_ID)) { + if (reg_iflag1 & FLEXCAN_IFLAG_MB(priv->tx_mb_idx)) { + handled = IRQ_HANDLED; stats->tx_bytes += can_get_echo_skb(dev, 0); stats->tx_packets++; can_led_event(dev, CAN_LED_EVENT_TX); /* after sending a RTR frame MB is in RX mode */ flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, - ®s->mb[FLEXCAN_TX_BUF_ID].can_ctrl); - flexcan_write((1 << FLEXCAN_TX_BUF_ID), ®s->iflag1); + &priv->tx_mb->can_ctrl); + flexcan_write(FLEXCAN_IFLAG_MB(priv->tx_mb_idx), ®s->iflag1); netif_wake_queue(dev); } - return IRQ_HANDLED; + reg_esr = flexcan_read(®s->esr); + + /* ACK all bus error and state change IRQ sources */ + if (reg_esr & FLEXCAN_ESR_ALL_INT) { + handled = IRQ_HANDLED; + flexcan_write(reg_esr & FLEXCAN_ESR_ALL_INT, ®s->esr); + } + + /* state change interrupt */ + if (reg_esr & FLEXCAN_ESR_ERR_STATE) + flexcan_irq_state(dev, reg_esr); + + /* bus error IRQ - handle if bus error reporting is activated */ + if ((reg_esr & FLEXCAN_ESR_ERR_BUS) && + (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) + flexcan_irq_bus_err(dev, reg_esr); + + return handled; } static void flexcan_set_bittiming(struct net_device *dev) @@ -839,14 +847,23 @@ static int flexcan_chip_start(struct net_device *dev) * only supervisor access * enable warning int * disable local echo + * enable individual RX masking * choose format C * set max mailbox number */ reg_mcr = flexcan_read(®s->mcr); reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff); - reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_FEN | FLEXCAN_MCR_HALT | - FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_SRX_DIS | - FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_MAXMB(FLEXCAN_TX_BUF_ID); + reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | FLEXCAN_MCR_SUPV | + FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_SRX_DIS | FLEXCAN_MCR_IRMQ | + FLEXCAN_MCR_IDAM_C; + + if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { + reg_mcr &= ~FLEXCAN_MCR_FEN; + reg_mcr |= FLEXCAN_MCR_MAXMB(priv->offload.mb_last); + } else { + reg_mcr |= FLEXCAN_MCR_FEN | + FLEXCAN_MCR_MAXMB(priv->tx_mb_idx); + } netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr); flexcan_write(reg_mcr, ®s->mcr); @@ -883,19 +900,31 @@ static int flexcan_chip_start(struct net_device *dev) netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl); flexcan_write(reg_ctrl, ®s->ctrl); + if ((priv->devtype_data->quirks & 
FLEXCAN_QUIRK_ENABLE_EACEN_RRS)) { + reg_ctrl2 = flexcan_read(®s->ctrl2); + reg_ctrl2 |= FLEXCAN_CTRL2_EACEN | FLEXCAN_CTRL2_RRS; + flexcan_write(reg_ctrl2, ®s->ctrl2); + } + /* clear and invalidate all mailboxes first */ - for (i = FLEXCAN_TX_BUF_ID; i < ARRAY_SIZE(regs->mb); i++) { + for (i = priv->tx_mb_idx; i < ARRAY_SIZE(regs->mb); i++) { flexcan_write(FLEXCAN_MB_CODE_RX_INACTIVE, ®s->mb[i].can_ctrl); } + if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { + for (i = priv->offload.mb_first; i <= priv->offload.mb_last; i++) + flexcan_write(FLEXCAN_MB_CODE_RX_EMPTY, + ®s->mb[i].can_ctrl); + } + /* Errata ERR005829: mark first TX mailbox as INACTIVE */ flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, - ®s->mb[FLEXCAN_TX_BUF_RESERVED].can_ctrl); + &priv->tx_mb_reserved->can_ctrl); /* mark TX mailbox as INACTIVE */ flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, - ®s->mb[FLEXCAN_TX_BUF_ID].can_ctrl); + &priv->tx_mb->can_ctrl); /* acceptance mask/acceptance code (accept everything) */ flexcan_write(0x0, ®s->rxgmask); @@ -905,6 +934,10 @@ static int flexcan_chip_start(struct net_device *dev) if (priv->devtype_data->quirks & FLEXCAN_QUIRK_DISABLE_RXFG) flexcan_write(0x0, ®s->rxfgmask); + /* clear acceptance filters */ + for (i = 0; i < ARRAY_SIZE(regs->mb); i++) + flexcan_write(0, ®s->rximr[i]); + /* On Vybrid, disable memory error detection interrupts * and freeze mode. * This also works around errata e5295 which generates @@ -942,7 +975,8 @@ static int flexcan_chip_start(struct net_device *dev) /* enable interrupts atomically */ disable_irq(dev->irq); flexcan_write(priv->reg_ctrl_default, ®s->ctrl); - flexcan_write(FLEXCAN_IFLAG_DEFAULT, ®s->imask1); + flexcan_write(priv->reg_imask1_default, ®s->imask1); + flexcan_write(priv->reg_imask2_default, ®s->imask2); enable_irq(dev->irq); /* print chip status */ @@ -972,6 +1006,7 @@ static void flexcan_chip_stop(struct net_device *dev) flexcan_chip_disable(priv); /* Disable all interrupts */ + flexcan_write(0, ®s->imask2); flexcan_write(0, ®s->imask1); flexcan_write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL, ®s->ctrl); @@ -1008,7 +1043,7 @@ static int flexcan_open(struct net_device *dev) can_led_event(dev, CAN_LED_EVENT_OPEN); - napi_enable(&priv->napi); + can_rx_offload_enable(&priv->offload); netif_start_queue(dev); return 0; @@ -1030,7 +1065,7 @@ static int flexcan_close(struct net_device *dev) struct flexcan_priv *priv = netdev_priv(dev); netif_stop_queue(dev); - napi_disable(&priv->napi); + can_rx_offload_disable(&priv->offload); flexcan_chip_stop(dev); free_irq(dev->irq, dev); @@ -1104,8 +1139,9 @@ static int register_flexcandev(struct net_device *dev) flexcan_write(reg, ®s->mcr); /* Currently we only support newer versions of this core - * featuring a RX FIFO. Older cores found on some Coldfire - * derivates are not yet supported. + * featuring a RX hardware FIFO (although this driver doesn't + * make use of it on some cores). Older cores, found on some + * Coldfire derivates are not tested. 
*/ reg = flexcan_read(®s->mcr); if (!(reg & FLEXCAN_MCR_FEN)) { @@ -1208,6 +1244,9 @@ static int flexcan_probe(struct platform_device *pdev) if (!dev) return -ENOMEM; + platform_set_drvdata(pdev, dev); + SET_NETDEV_DEV(dev, &pdev->dev); + dev->netdev_ops = &flexcan_netdev_ops; dev->irq = irq; dev->flags |= IFF_ECHO; @@ -1223,14 +1262,41 @@ static int flexcan_probe(struct platform_device *pdev) priv->regs = regs; priv->clk_ipg = clk_ipg; priv->clk_per = clk_per; - priv->pdata = dev_get_platdata(&pdev->dev); priv->devtype_data = devtype_data; priv->reg_xceiver = reg_xceiver; - netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT); + if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { + priv->tx_mb_idx = FLEXCAN_TX_MB_OFF_TIMESTAMP; + priv->tx_mb_reserved = ®s->mb[FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP]; + } else { + priv->tx_mb_idx = FLEXCAN_TX_MB_OFF_FIFO; + priv->tx_mb_reserved = ®s->mb[FLEXCAN_TX_MB_RESERVED_OFF_FIFO]; + } + priv->tx_mb = ®s->mb[priv->tx_mb_idx]; - platform_set_drvdata(pdev, dev); - SET_NETDEV_DEV(dev, &pdev->dev); + priv->reg_imask1_default = FLEXCAN_IFLAG_MB(priv->tx_mb_idx); + priv->reg_imask2_default = 0; + + priv->offload.mailbox_read = flexcan_mailbox_read; + + if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { + u64 imask; + + priv->offload.mb_first = FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST; + priv->offload.mb_last = FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST; + + imask = GENMASK_ULL(priv->offload.mb_last, priv->offload.mb_first); + priv->reg_imask1_default |= imask; + priv->reg_imask2_default |= imask >> 32; + + err = can_rx_offload_add_timestamp(dev, &priv->offload); + } else { + priv->reg_imask1_default |= FLEXCAN_IFLAG_RX_FIFO_OVERFLOW | + FLEXCAN_IFLAG_RX_FIFO_AVAILABLE; + err = can_rx_offload_add_fifo(dev, &priv->offload, FLEXCAN_NAPI_WEIGHT); + } + if (err) + goto failed_offload; err = register_flexcandev(dev); if (err) { @@ -1245,6 +1311,7 @@ static int flexcan_probe(struct platform_device *pdev) return 0; + failed_offload: failed_register: free_candev(dev); return err; @@ -1256,7 +1323,7 @@ static int flexcan_remove(struct platform_device *pdev) struct flexcan_priv *priv = netdev_priv(dev); unregister_flexcandev(dev); - netif_napi_del(&priv->napi); + can_rx_offload_del(&priv->offload); free_candev(dev); return 0; diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c new file mode 100644 index 000000000000..f394f77d7528 --- /dev/null +++ b/drivers/net/can/rx-offload.c @@ -0,0 +1,289 @@ +/* + * Copyright (c) 2014 David Jander, Protonic Holland + * Copyright (C) 2014-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the version 2 of the GNU General Public License + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include <linux/can/dev.h> +#include <linux/can/rx-offload.h> + +struct can_rx_offload_cb { + u32 timestamp; +}; + +static inline struct can_rx_offload_cb *can_rx_offload_get_cb(struct sk_buff *skb) +{ + BUILD_BUG_ON(sizeof(struct can_rx_offload_cb) > sizeof(skb->cb)); + + return (struct can_rx_offload_cb *)skb->cb; +} + +static inline bool can_rx_offload_le(struct can_rx_offload *offload, unsigned int a, unsigned int b) +{ + if (offload->inc) + return a <= b; + else + return a >= b; +} + +static inline unsigned int can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val) +{ + if (offload->inc) + return (*val)++; + else + return (*val)--; +} + +static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota) +{ + struct can_rx_offload *offload = container_of(napi, struct can_rx_offload, napi); + struct net_device *dev = offload->dev; + struct net_device_stats *stats = &dev->stats; + struct sk_buff *skb; + int work_done = 0; + + while ((work_done < quota) && + (skb = skb_dequeue(&offload->skb_queue))) { + struct can_frame *cf = (struct can_frame *)skb->data; + + work_done++; + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + netif_receive_skb(skb); + } + + if (work_done < quota) { + napi_complete_done(napi, work_done); + + /* Check if there was another interrupt */ + if (!skb_queue_empty(&offload->skb_queue)) + napi_reschedule(&offload->napi); + } + + can_led_event(offload->dev, CAN_LED_EVENT_RX); + + return work_done; +} + +static inline void __skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new, + int (*compare)(struct sk_buff *a, struct sk_buff *b)) +{ + struct sk_buff *pos, *insert = (struct sk_buff *)head; + + skb_queue_reverse_walk(head, pos) { + const struct can_rx_offload_cb *cb_pos, *cb_new; + + cb_pos = can_rx_offload_get_cb(pos); + cb_new = can_rx_offload_get_cb(new); + + netdev_dbg(new->dev, + "%s: pos=0x%08x, new=0x%08x, diff=%10d, queue_len=%d\n", + __func__, + cb_pos->timestamp, cb_new->timestamp, + cb_new->timestamp - cb_pos->timestamp, + skb_queue_len(head)); + + if (compare(pos, new) < 0) + continue; + insert = pos; + break; + } + + __skb_queue_after(head, insert, new); +} + +static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b) +{ + const struct can_rx_offload_cb *cb_a, *cb_b; + + cb_a = can_rx_offload_get_cb(a); + cb_b = can_rx_offload_get_cb(b); + + /* Substract two u32 and return result as int, to keep + * difference steady around the u32 overflow. 
+ */ + return cb_b->timestamp - cb_a->timestamp; +} + +static struct sk_buff *can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n) +{ + struct sk_buff *skb = NULL; + struct can_rx_offload_cb *cb; + struct can_frame *cf; + int ret; + + /* If queue is full or skb not available, read to discard mailbox */ + if (likely(skb_queue_len(&offload->skb_queue) <= + offload->skb_queue_len_max)) + skb = alloc_can_skb(offload->dev, &cf); + + if (!skb) { + struct can_frame cf_overflow; + u32 timestamp; + + ret = offload->mailbox_read(offload, &cf_overflow, + ×tamp, n); + if (ret) + offload->dev->stats.rx_dropped++; + + return NULL; + } + + cb = can_rx_offload_get_cb(skb); + ret = offload->mailbox_read(offload, cf, &cb->timestamp, n); + if (!ret) { + kfree_skb(skb); + return NULL; + } + + return skb; +} + +int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 pending) +{ + struct sk_buff_head skb_queue; + unsigned int i; + + __skb_queue_head_init(&skb_queue); + + for (i = offload->mb_first; + can_rx_offload_le(offload, i, offload->mb_last); + can_rx_offload_inc(offload, &i)) { + struct sk_buff *skb; + + if (!(pending & BIT_ULL(i))) + continue; + + skb = can_rx_offload_offload_one(offload, i); + if (!skb) + break; + + __skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare); + } + + if (!skb_queue_empty(&skb_queue)) { + unsigned long flags; + u32 queue_len; + + spin_lock_irqsave(&offload->skb_queue.lock, flags); + skb_queue_splice_tail(&skb_queue, &offload->skb_queue); + spin_unlock_irqrestore(&offload->skb_queue.lock, flags); + + if ((queue_len = skb_queue_len(&offload->skb_queue)) > + (offload->skb_queue_len_max / 8)) + netdev_dbg(offload->dev, "%s: queue_len=%d\n", + __func__, queue_len); + + can_rx_offload_schedule(offload); + } + + return skb_queue_len(&skb_queue); +} +EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_timestamp); + +int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload) +{ + struct sk_buff *skb; + int received = 0; + + while ((skb = can_rx_offload_offload_one(offload, 0))) { + skb_queue_tail(&offload->skb_queue, skb); + received++; + } + + if (received) + can_rx_offload_schedule(offload); + + return received; +} +EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo); + +int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb) +{ + if (skb_queue_len(&offload->skb_queue) > + offload->skb_queue_len_max) + return -ENOMEM; + + skb_queue_tail(&offload->skb_queue, skb); + can_rx_offload_schedule(offload); + + return 0; +} +EXPORT_SYMBOL_GPL(can_rx_offload_irq_queue_err_skb); + +static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight) +{ + offload->dev = dev; + + /* Limit queue len to 4x the weight (rounted to next power of two) */ + offload->skb_queue_len_max = 2 << fls(weight); + offload->skb_queue_len_max *= 4; + skb_queue_head_init(&offload->skb_queue); + + can_rx_offload_reset(offload); + netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight); + + dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n", + __func__, offload->skb_queue_len_max); + + return 0; +} + +int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *offload) +{ + unsigned int weight; + + if (offload->mb_first > BITS_PER_LONG_LONG || + offload->mb_last > BITS_PER_LONG_LONG || !offload->mailbox_read) + return -EINVAL; + + if (offload->mb_first < offload->mb_last) { + offload->inc = true; + weight = offload->mb_last - offload->mb_first; + } 
else { + offload->inc = false; + weight = offload->mb_first - offload->mb_last; + } + + return can_rx_offload_init_queue(dev, offload, weight);; +} +EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp); + +int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight) +{ + if (!offload->mailbox_read) + return -EINVAL; + + return can_rx_offload_init_queue(dev, offload, weight); +} +EXPORT_SYMBOL_GPL(can_rx_offload_add_fifo); + +void can_rx_offload_enable(struct can_rx_offload *offload) +{ + can_rx_offload_reset(offload); + napi_enable(&offload->napi); +} +EXPORT_SYMBOL_GPL(can_rx_offload_enable); + +void can_rx_offload_del(struct can_rx_offload *offload) +{ + netif_napi_del(&offload->napi); + skb_queue_purge(&offload->skb_queue); +} +EXPORT_SYMBOL_GPL(can_rx_offload_del); + +void can_rx_offload_reset(struct can_rx_offload *offload) +{ +} +EXPORT_SYMBOL_GPL(can_rx_offload_reset); |
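
One detail worth calling out in the new rx-offload code above: can_rx_offload_compare() subtracts two u32 timestamps and returns the result as an int, so the ordering used by __skb_queue_add_sort() stays correct even when the 32-bit timestamp counter wraps around. A standalone illustration of that trick (plain userspace C, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* same idea as can_rx_offload_compare(): b - a, interpreted as signed */
static int compare_ts(uint32_t a, uint32_t b)
{
	return (int32_t)(b - a);
}

int main(void)
{
	uint32_t before_wrap = 0xfffffff0u;	/* just before the counter overflows */
	uint32_t after_wrap  = 0x00000010u;	/* just after the overflow */

	/* after_wrap is "newer", and the signed difference says so
	 * despite the wrap-around of the raw values
	 */
	printf("%d\n", compare_ts(before_wrap, after_wrap));	/* prints 32 */
	printf("%d\n", compare_ts(after_wrap, before_wrap));	/* prints -32 */
	return 0;
}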