author     David Vrabel     2016-10-04 10:29:17 +0100
committer  David S. Miller  2016-10-06 20:37:36 -0400
commit     a37f12298c251a48bc74d4012e07bf0d78175f46
tree       8610270f8e73fa214527d58e36f9915f1d4e7607  /drivers/net/xen-netback/rx.c
parent     98f6d57ced73b723551568262019f1d6c8771f20
xen-netback: batch copies for multiple to-guest rx packets
Instead of flushing the copy ops when an packet is complete, complete
packets when their copy ops are done. This improves performance by
reducing the number of grant copy hypercalls.
Latency is still limited by the relatively small size of the copy
batch.
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
[re-based]
Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
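
The pattern is easy to see in miniature. The following stand-alone C sketch
(hypothetical stand-ins only: struct batch, complete_packet(), flush() and
the hypercall counter are illustrative, not netback code) defers per-packet
completion onto a list and performs one flush per batch, which is where the
hypercall saving comes from:

/* batch_sketch.c - minimal sketch of the batching pattern this patch
 * adopts. Per-packet completion only defers the packet onto a
 * "completed" list; a single flush per batch performs the (simulated)
 * hypercall, pushes responses once, and frees every deferred packet.
 */
#include <stdio.h>
#include <stdlib.h>

#define BATCH_MAX 8

struct batch {
	int ops[BATCH_MAX];		/* stand-in for grant copy ops */
	int num_ops;
	char *completed[BATCH_MAX];	/* packets awaiting their copy ops */
	int num_completed;
	int hypercalls;			/* counts simulated hypercalls */
};

/* Old scheme: flush (one hypercall) per packet.
 * New scheme: just defer the packet; the flush happens per batch. */
static void complete_packet(struct batch *b, char *pkt)
{
	b->completed[b->num_completed++] = pkt;
}

static void flush(struct batch *b)
{
	int i;

	b->hypercalls++;		/* one gnttab_batch_copy() equivalent */
	b->num_ops = 0;

	/* One response push + notify for the whole batch would go here. */

	for (i = 0; i < b->num_completed; i++)
		free(b->completed[i]);	/* dev_kfree_skb() equivalent */
	b->num_completed = 0;
}

int main(void)
{
	struct batch b = { .hypercalls = 0 };
	int i;

	for (i = 0; i < BATCH_MAX; i++) {
		b.ops[b.num_ops++] = i;		/* queue a copy op */
		complete_packet(&b, malloc(1));	/* defer the packet */
	}
	flush(&b);				/* single flush for 8 packets */

	printf("hypercalls: %d (was %d with flush-per-packet)\n",
	       b.hypercalls, BATCH_MAX);
	return 0;
}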
Diffstat (limited to 'drivers/net/xen-netback/rx.c')
 drivers/net/xen-netback/rx.c | 27 +++++++++++++++++----------
 1 file changed, 17 insertions(+), 10 deletions(-)
diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c
index ae822b8fa76d..8c8c5b5883eb 100644
--- a/drivers/net/xen-netback/rx.c
+++ b/drivers/net/xen-netback/rx.c
@@ -133,6 +133,7 @@ static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
 static void xenvif_rx_copy_flush(struct xenvif_queue *queue)
 {
 	unsigned int i;
+	int notify;
 
 	gnttab_batch_copy(queue->rx_copy.op, queue->rx_copy.num);
 
@@ -154,6 +155,13 @@ static void xenvif_rx_copy_flush(struct xenvif_queue *queue)
 	}
 
 	queue->rx_copy.num = 0;
+
+	/* Push responses for all completed packets. */
+	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, notify);
+	if (notify)
+		notify_remote_via_irq(queue->rx_irq);
+
+	__skb_queue_purge(queue->rx_copy.completed);
 }
 
 static void xenvif_rx_copy_add(struct xenvif_queue *queue,
@@ -279,18 +287,10 @@ static void xenvif_rx_next_skb(struct xenvif_queue *queue,
 static void xenvif_rx_complete(struct xenvif_queue *queue,
 			       struct xenvif_pkt_state *pkt)
 {
-	int notify;
-
-	/* Complete any outstanding copy ops for this skb. */
-	xenvif_rx_copy_flush(queue);
-
-	/* Push responses and notify. */
+	/* All responses are ready to be pushed. */
 	queue->rx.rsp_prod_pvt = queue->rx.req_cons;
-	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, notify);
-	if (notify)
-		notify_remote_via_irq(queue->rx_irq);
 
-	dev_kfree_skb(pkt->skb);
+	__skb_queue_tail(queue->rx_copy.completed, pkt->skb);
 }
 
 static void xenvif_rx_next_chunk(struct xenvif_queue *queue,
@@ -429,13 +429,20 @@ void xenvif_rx_skb(struct xenvif_queue *queue)
 
 void xenvif_rx_action(struct xenvif_queue *queue)
 {
+	struct sk_buff_head completed_skbs;
 	unsigned int work_done = 0;
 
+	__skb_queue_head_init(&completed_skbs);
+	queue->rx_copy.completed = &completed_skbs;
+
 	while (xenvif_rx_ring_slots_available(queue) &&
 	       work_done < RX_BATCH_SIZE) {
 		xenvif_rx_skb(queue);
 		work_done++;
 	}
+
+	/* Flush any pending copies and complete all skbs. */
+	xenvif_rx_copy_flush(queue);
 }
 
 static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
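
Taken together, the new flow in xenvif_rx_action() is: initialize a local
completed-skb list, process up to RX_BATCH_SIZE packets (xenvif_rx_complete()
now only advances rsp_prod_pvt and defers the skb with __skb_queue_tail()),
then make a single xenvif_rx_copy_flush() call that issues the batched grant
copies, pushes all responses with at most one notify, and purges the deferred
skbs.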