Diffstat (limited to 'drivers/net/wireless/mediatek/mt76/dma.c')
-rw-r--r--	drivers/net/wireless/mediatek/mt76/dma.c | 162
1 file changed, 106 insertions, 56 deletions
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 6c25859dd386..214fc95b8a33 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -7,6 +7,76 @@
 #include "mt76.h"
 #include "dma.h"
 
+static struct mt76_txwi_cache *
+mt76_alloc_txwi(struct mt76_dev *dev)
+{
+	struct mt76_txwi_cache *t;
+	dma_addr_t addr;
+	u8 *txwi;
+	int size;
+
+	size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
+	txwi = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
+	if (!txwi)
+		return NULL;
+
+	addr = dma_map_single(dev->dev, txwi, dev->drv->txwi_size,
+			      DMA_TO_DEVICE);
+	t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
+	t->dma_addr = addr;
+
+	return t;
+}
+
+static struct mt76_txwi_cache *
+__mt76_get_txwi(struct mt76_dev *dev)
+{
+	struct mt76_txwi_cache *t = NULL;
+
+	spin_lock(&dev->lock);
+	if (!list_empty(&dev->txwi_cache)) {
+		t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
+				     list);
+		list_del(&t->list);
+	}
+	spin_unlock(&dev->lock);
+
+	return t;
+}
+
+static struct mt76_txwi_cache *
+mt76_get_txwi(struct mt76_dev *dev)
+{
+	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);
+
+	if (t)
+		return t;
+
+	return mt76_alloc_txwi(dev);
+}
+
+void
+mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+{
+	if (!t)
+		return;
+
+	spin_lock(&dev->lock);
+	list_add(&t->list, &dev->txwi_cache);
+	spin_unlock(&dev->lock);
+}
+EXPORT_SYMBOL_GPL(mt76_put_txwi);
+
+static void
+mt76_free_pending_txwi(struct mt76_dev *dev)
+{
+	struct mt76_txwi_cache *t;
+
+	while ((t = __mt76_get_txwi(dev)) != NULL)
+		dma_unmap_single(dev->dev, t->dma_addr, dev->drv->txwi_size,
+				 DMA_TO_DEVICE);
+}
+
 static int
 mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
 		     int idx, int n_desc, int bufsize,
@@ -49,6 +119,7 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
 		 struct mt76_queue_buf *buf, int nbufs, u32 info,
 		 struct sk_buff *skb, void *txwi)
 {
+	struct mt76_queue_entry *entry;
 	struct mt76_desc *desc;
 	u32 ctrl;
 	int i, idx = -1;
@@ -61,10 +132,27 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
 	for (i = 0; i < nbufs; i += 2, buf += 2) {
 		u32 buf0 = buf[0].addr, buf1 = 0;
 
+		idx = q->head;
+		q->head = (q->head + 1) % q->ndesc;
+
+		desc = &q->desc[idx];
+		entry = &q->entry[idx];
+
+		if (buf[0].skip_unmap)
+			entry->skip_buf0 = true;
+		entry->skip_buf1 = i == nbufs - 1;
+
+		entry->dma_addr[0] = buf[0].addr;
+		entry->dma_len[0] = buf[0].len;
+
 		ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
 		if (i < nbufs - 1) {
+			entry->dma_addr[1] = buf[1].addr;
+			entry->dma_len[1] = buf[1].len;
 			buf1 = buf[1].addr;
 			ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
+			if (buf[1].skip_unmap)
+				entry->skip_buf1 = true;
 		}
 
 		if (i == nbufs - 1)
@@ -72,11 +160,6 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
 		else if (i == nbufs - 2)
 			ctrl |= MT_DMA_CTL_LAST_SEC1;
 
-		idx = q->head;
-		q->head = (q->head + 1) % q->ndesc;
-
-		desc = &q->desc[idx];
-
 		WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
 		WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
 		WRITE_ONCE(desc->info, cpu_to_le32(info));
@@ -96,24 +179,14 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 		       struct mt76_queue_entry *prev_e)
 {
 	struct mt76_queue_entry *e = &q->entry[idx];
-	__le32 __ctrl = READ_ONCE(q->desc[idx].ctrl);
-	u32 ctrl = le32_to_cpu(__ctrl);
 
-	if (!e->skip_buf0) {
-		__le32 addr = READ_ONCE(q->desc[idx].buf0);
-		u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
-
-		dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
+	if (!e->skip_buf0)
+		dma_unmap_single(dev->dev, e->dma_addr[0], e->dma_len[0],
 				 DMA_TO_DEVICE);
-	}
 
-	if (!(ctrl & MT_DMA_CTL_LAST_SEC0)) {
-		__le32 addr = READ_ONCE(q->desc[idx].buf1);
-		u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN1, ctrl);
-
-		dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
+	if (!e->skip_buf1)
+		dma_unmap_single(dev->dev, e->dma_addr[1], e->dma_len[1],
 				 DMA_TO_DEVICE);
-	}
 
 	if (e->txwi == DMA_DUMMY_DATA)
 		e->txwi = NULL;
@@ -137,19 +210,17 @@ mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
 static void
 mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
 {
+	wmb();
 	writel(q->head, &q->regs->cpu_idx);
 }
 
 static void
 mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
 {
-	struct mt76_sw_queue *sq = &dev->q_tx[qid];
-	struct mt76_queue *q = sq->q;
+	struct mt76_queue *q = dev->q_tx[qid];
 	struct mt76_queue_entry entry;
-	unsigned int n_swq_queued[8] = {};
-	unsigned int n_queued = 0;
 	bool wake = false;
-	int i, last;
+	int last;
 
 	if (!q)
 		return;
@@ -159,16 +230,9 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
 	else
 		last = readl(&q->regs->dma_idx);
 
-	while ((q->queued > n_queued) && q->tail != last) {
+	while (q->queued > 0 && q->tail != last) {
 		mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
-		if (entry.schedule)
-			n_swq_queued[entry.qid]++;
-
-		q->tail = (q->tail + 1) % q->ndesc;
-		n_queued++;
-
-		if (entry.skb)
-			dev->drv->tx_complete_skb(dev, qid, &entry);
+		mt76_queue_tx_complete(dev, q, &entry);
 
 		if (entry.txwi) {
 			if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
@@ -178,29 +242,14 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
 
 		if (!flush && q->tail == last)
 			last = readl(&q->regs->dma_idx);
-	}
-
-	spin_lock_bh(&q->lock);
-
-	q->queued -= n_queued;
-	for (i = 0; i < 4; i++) {
-		if (!n_swq_queued[i])
-			continue;
-
-		dev->q_tx[i].swq_queued -= n_swq_queued[i];
-	}
-
-	/* ext PHY */
-	for (i = 0; i < 4; i++) {
-		if (!n_swq_queued[i])
-			continue;
-
-		dev->q_tx[__MT_TXQ_MAX + i].swq_queued -= n_swq_queued[4 + i];
 	}
 
 	if (flush) {
+		spin_lock_bh(&q->lock);
 		mt76_dma_sync_idx(dev, q);
 		mt76_dma_kick_queue(dev, q);
+		spin_unlock_bh(&q->lock);
 	}
 
 	wake = wake && q->stopped &&
@@ -211,8 +260,6 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
 	if (!q->queued)
 		wake_up(&dev->tx_wait);
 
-	spin_unlock_bh(&q->lock);
-
 	if (wake)
 		ieee80211_wake_queue(dev->hw, qid);
 }
@@ -227,7 +274,7 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 	void *buf = e->buf;
 	int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
 
-	buf_addr = le32_to_cpu(READ_ONCE(desc->buf0));
+	buf_addr = e->dma_addr[0];
 	if (len) {
 		u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
 		*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctl);
@@ -268,7 +315,7 @@ static int
 mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
 			  struct sk_buff *skb, u32 tx_info)
 {
-	struct mt76_queue *q = dev->q_tx[qid].q;
+	struct mt76_queue *q = dev->q_tx[qid];
 	struct mt76_queue_buf buf;
 	dma_addr_t addr;
 
@@ -300,7 +347,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
 		      struct sk_buff *skb, struct mt76_wcid *wcid,
 		      struct ieee80211_sta *sta)
 {
-	struct mt76_queue *q = dev->q_tx[qid].q;
+	struct mt76_queue *q = dev->q_tx[qid];
 	struct mt76_tx_info tx_info = {
 		.skb = skb,
 	};
@@ -378,7 +425,7 @@ free:
 	e.skb = tx_info.skb;
 	e.txwi = t;
-	dev->drv->tx_complete_skb(dev, qid, &e);
+	dev->drv->tx_complete_skb(dev, &e);
 	mt76_put_txwi(dev, t);
 	return ret;
 }
@@ -612,6 +659,7 @@ void
 mt76_dma_cleanup(struct mt76_dev *dev)
 {
 	int i;
 
+	mt76_worker_disable(&dev->tx_worker);
 	netif_napi_del(&dev->tx_napi);
 	for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++)
 		mt76_dma_tx_cleanup(dev, i, true);
@@ -620,5 +668,7 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
 		netif_napi_del(&dev->napi[i]);
 		mt76_dma_rx_cleanup(dev, &dev->q_rx[i]);
 	}
+
+	mt76_free_pending_txwi(dev);
 }
 EXPORT_SYMBOL_GPL(mt76_dma_cleanup);
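
The added mt76_get_txwi()/mt76_put_txwi() pair implements a freelist: completed TXWI buffers go back onto dev->txwi_cache still DMA-mapped, so the hot path pops a ready-to-use buffer and only falls back to mt76_alloc_txwi() on a miss, and mt76_free_pending_txwi() drains and unmaps the list at teardown. Below is a minimal userspace sketch of the same pattern, with a pthread mutex standing in for dev->lock and malloc() standing in for devm_kzalloc() plus dma_map_single(); every name in it is illustrative, not part of the driver:

	#include <pthread.h>
	#include <stdlib.h>

	struct txwi_node {
		struct txwi_node *next;	/* freelist link, like t->list */
		void *buf;		/* stands in for the DMA-mapped txwi */
	};

	static struct txwi_node *free_list;	/* like dev->txwi_cache */
	static pthread_mutex_t free_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Pop a cached node, or NULL on a miss (cf. __mt76_get_txwi()). */
	static struct txwi_node *get_cached(void)
	{
		struct txwi_node *t;

		pthread_mutex_lock(&free_lock);
		t = free_list;
		if (t)
			free_list = t->next;
		pthread_mutex_unlock(&free_lock);
		return t;
	}

	/* A cache hit skips allocation entirely (cf. mt76_get_txwi()). */
	static struct txwi_node *get_node(size_t bufsize)
	{
		struct txwi_node *t = get_cached();

		if (t)
			return t;
		t = malloc(sizeof(*t));
		if (t)
			t->buf = malloc(bufsize);
		return t;
	}

	/* Recycle instead of freeing (cf. mt76_put_txwi()). */
	static void put_node(struct txwi_node *t)
	{
		if (!t)
			return;
		pthread_mutex_lock(&free_lock);
		t->next = free_list;
		free_list = t;
		pthread_mutex_unlock(&free_lock);
	}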
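The rewritten mt76_dma_tx_cleanup_idx() no longer reads addresses and lengths back out of the hardware descriptor with READ_ONCE()/le32_to_cpu(). Instead, mt76_dma_add_buf() records dma_addr[]/dma_len[] in the CPU-side struct mt76_queue_entry at enqueue time, and the completion path unmaps from that cached copy, honoring the new skip_buf0/skip_buf1 flags. A small sketch of the record-at-enqueue, unmap-at-completion pattern; the types and helpers here are illustrative stand-ins, not the driver's:

	#include <stdbool.h>
	#include <stdint.h>

	typedef uint64_t dma_addr_t;	/* placeholder for the kernel type */

	/* CPU-side bookkeeping, written once at enqueue time. The hardware
	 * descriptor carries the same values in little-endian form, but the
	 * completion path never has to touch DMA memory to recover them. */
	struct ring_entry {
		dma_addr_t dma_addr[2];
		uint16_t dma_len[2];
		bool skip_buf0;
		bool skip_buf1;
	};

	/* Stub for dma_unmap_single(); illustrative only. */
	static void unmap_buf(dma_addr_t addr, uint16_t len)
	{
		(void)addr;
		(void)len;
	}

	/* Enqueue half: record what the completion path will need. */
	static void record_entry(struct ring_entry *e, dma_addr_t a0, uint16_t l0,
				 bool have_buf1, dma_addr_t a1, uint16_t l1)
	{
		e->dma_addr[0] = a0;
		e->dma_len[0] = l0;
		e->skip_buf0 = false;
		e->skip_buf1 = !have_buf1;	/* single-buffer entries skip buf1 */
		if (have_buf1) {
			e->dma_addr[1] = a1;
			e->dma_len[1] = l1;
		}
	}

	/* Completion half: unmap from the cached copy only
	 * (cf. mt76_dma_tx_cleanup_idx()). */
	static void cleanup_entry(const struct ring_entry *e)
	{
		if (!e->skip_buf0)
			unmap_buf(e->dma_addr[0], e->dma_len[0]);
		if (!e->skip_buf1)
			unmap_buf(e->dma_addr[1], e->dma_len[1]);
	}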
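The one-line wmb() added to mt76_dma_kick_queue() is an ordering guarantee: the WRITE_ONCE() descriptor stores in mt76_dma_add_buf() must be visible in memory before the cpu_idx doorbell write lets the device start fetching those descriptors. On many platforms writel() already orders prior stores to coherent DMA memory, so the explicit barrier is a conservative, portable statement of the requirement. The same two lines, annotated (the comments are mine, not the commit's):

	static void
	mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
	{
		wmb();	/* descriptor writes must land before the doorbell */
		writel(q->head, &q->regs->cpu_idx);	/* device may DMA-read from here on */
	}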