author     Wei Liu             2013-08-26 12:59:38 +0100
committer  David S. Miller     2013-08-29 01:18:04 -0400
commit     b3f980bd827e6e81a050c518d60ed7811a83061d (patch)
tree       2104c030c277c9eab340ab424fa669562fc0189a /drivers/net/xen-netback/netback.c
parent     43e9d1943278e96150b449ea1fa81f4ae27729d5 (diff)
xen-netback: switch to NAPI + kthread 1:1 model
This patch implements the 1:1 model for netback, where each vif gets its
own NAPI instance and its own kthread to do the heavy lifting:
- NAPI is used for guest side TX (host side RX)
- kthread is used for guest side RX (host side TX)
Xenvif and xen_netbk are made into one structure to reduce code size.
This model provides better scheduling fairness among vifs. It is also a
prerequisite for implementing multiqueue for Xen netback (see the sketch
after the sign-offs below for how the per-vif pieces fit together).
Signed-off-by: Wei Liu <wei.liu2@citrix.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
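Before reading the diff, it helps to see how the two halves of the 1:1 model fit together. Each vif now owns one NAPI instance (guest-side TX, i.e. host-side RX) and one kernel thread (guest-side RX, i.e. host-side TX). The NAPI poll handler itself lives in interface.c and is not part of this diff, so the sketch below is an assumption about its shape, built only from the per-vif entry points this file now exports: xen_netbk_tx_action(), xen_netbk_check_rx_xenvif() and xen_netbk_kthread().

/*
 * Sketch only: the real poll handler is in interface.c, which this diff does
 * not touch. Assumes the usual netback includes (<linux/netdevice.h>, etc.).
 */
static int xenvif_poll(struct napi_struct *napi, int budget)
{
        struct xenvif *vif = container_of(napi, struct xenvif, napi);
        int work_done;

        /*
         * Guest-side TX: consume tx requests from the shared ring, batch the
         * grant copies and hand the resulting skbs to the host stack.
         */
        work_done = xen_netbk_tx_action(vif, budget);

        if (work_done < budget) {
                /*
                 * Ring drained within budget: stop polling. The follow-up
                 * check reschedules NAPI if new requests raced in, via the
                 * napi_schedule() call added to xen_netbk_check_rx_xenvif()
                 * in this patch.
                 */
                napi_complete(napi);
                xen_netbk_check_rx_xenvif(vif);
        }

        return work_done;
}

The other half, guest-side RX, is the per-vif xen_netbk_kthread() added near the end of this diff: xen_netbk_queue_tx_skb() queues guest-bound skbs on vif->rx_queue and wakes the thread through vif->wq, and the thread calls xen_netbk_rx_action() to issue the grant copies and push responses onto the RX ring.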
Diffstat (limited to 'drivers/net/xen-netback/netback.c')
-rw-r--r--  drivers/net/xen-netback/netback.c  607
1 file changed, 177 insertions, 430 deletions
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 91f163d03a49..44ccc674c02f 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -70,116 +70,25 @@ module_param(fatal_skb_slots, uint, 0444); */ #define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN -typedef unsigned int pending_ring_idx_t; -#define INVALID_PENDING_RING_IDX (~0U) - -struct pending_tx_info { - struct xen_netif_tx_request req; /* coalesced tx request */ - struct xenvif *vif; - pending_ring_idx_t head; /* head != INVALID_PENDING_RING_IDX - * if it is head of one or more tx - * reqs - */ -}; - -struct netbk_rx_meta { - int id; - int size; - int gso_size; -}; - -#define MAX_PENDING_REQS 256 - -/* Discriminate from any valid pending_idx value. */ -#define INVALID_PENDING_IDX 0xFFFF - -#define MAX_BUFFER_OFFSET PAGE_SIZE - -struct xen_netbk { - wait_queue_head_t wq; - struct task_struct *task; - - struct sk_buff_head rx_queue; - struct sk_buff_head tx_queue; - - struct timer_list net_timer; - - struct page *mmap_pages[MAX_PENDING_REQS]; - - pending_ring_idx_t pending_prod; - pending_ring_idx_t pending_cons; - struct list_head net_schedule_list; - - /* Protect the net_schedule_list in netif. */ - spinlock_t net_schedule_list_lock; - - atomic_t netfront_count; - - struct pending_tx_info pending_tx_info[MAX_PENDING_REQS]; - /* Coalescing tx requests before copying makes number of grant - * copy ops greater or equal to number of slots required. In - * worst case a tx request consumes 2 gnttab_copy. - */ - struct gnttab_copy tx_copy_ops[2*MAX_PENDING_REQS]; - - u16 pending_ring[MAX_PENDING_REQS]; - - /* - * Given MAX_BUFFER_OFFSET of 4096 the worst case is that each - * head/fragment page uses 2 copy operations because it - * straddles two buffers in the frontend. - */ - struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE]; - struct netbk_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE]; -}; - -static struct xen_netbk *xen_netbk; -static int xen_netbk_group_nr; - /* * If head != INVALID_PENDING_RING_IDX, it means this tx request is head of * one or more merged tx requests, otherwise it is the continuation of * previous tx request. 
*/ -static inline int pending_tx_is_head(struct xen_netbk *netbk, RING_IDX idx) -{ - return netbk->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX; -} - -void xen_netbk_add_xenvif(struct xenvif *vif) -{ - int i; - int min_netfront_count; - int min_group = 0; - struct xen_netbk *netbk; - - min_netfront_count = atomic_read(&xen_netbk[0].netfront_count); - for (i = 0; i < xen_netbk_group_nr; i++) { - int netfront_count = atomic_read(&xen_netbk[i].netfront_count); - if (netfront_count < min_netfront_count) { - min_group = i; - min_netfront_count = netfront_count; - } - } - - netbk = &xen_netbk[min_group]; - - vif->netbk = netbk; - atomic_inc(&netbk->netfront_count); -} - -void xen_netbk_remove_xenvif(struct xenvif *vif) +static inline int pending_tx_is_head(struct xenvif *vif, RING_IDX idx) { - struct xen_netbk *netbk = vif->netbk; - vif->netbk = NULL; - atomic_dec(&netbk->netfront_count); + return vif->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX; } -static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx, +static void xen_netbk_idx_release(struct xenvif *vif, u16 pending_idx, u8 status); static void make_tx_response(struct xenvif *vif, struct xen_netif_tx_request *txp, s8 st); + +static inline int tx_work_todo(struct xenvif *vif); +static inline int rx_work_todo(struct xenvif *vif); + static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif, u16 id, s8 st, @@ -187,16 +96,16 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif, u16 size, u16 flags); -static inline unsigned long idx_to_pfn(struct xen_netbk *netbk, +static inline unsigned long idx_to_pfn(struct xenvif *vif, u16 idx) { - return page_to_pfn(netbk->mmap_pages[idx]); + return page_to_pfn(vif->mmap_pages[idx]); } -static inline unsigned long idx_to_kaddr(struct xen_netbk *netbk, +static inline unsigned long idx_to_kaddr(struct xenvif *vif, u16 idx) { - return (unsigned long)pfn_to_kaddr(idx_to_pfn(netbk, idx)); + return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx)); } /* @@ -224,15 +133,10 @@ static inline pending_ring_idx_t pending_index(unsigned i) return i & (MAX_PENDING_REQS-1); } -static inline pending_ring_idx_t nr_pending_reqs(struct xen_netbk *netbk) +static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif) { return MAX_PENDING_REQS - - netbk->pending_prod + netbk->pending_cons; -} - -static void xen_netbk_kick_thread(struct xen_netbk *netbk) -{ - wake_up(&netbk->wq); + vif->pending_prod + vif->pending_cons; } static int max_required_rx_slots(struct xenvif *vif) @@ -364,15 +268,15 @@ struct netrx_pending_operations { unsigned copy_prod, copy_cons; unsigned meta_prod, meta_cons; struct gnttab_copy *copy; - struct netbk_rx_meta *meta; + struct xenvif_rx_meta *meta; int copy_off; grant_ref_t copy_gref; }; -static struct netbk_rx_meta *get_next_rx_buffer(struct xenvif *vif, - struct netrx_pending_operations *npo) +static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif, + struct netrx_pending_operations *npo) { - struct netbk_rx_meta *meta; + struct xenvif_rx_meta *meta; struct xen_netif_rx_request *req; req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); @@ -398,7 +302,7 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, unsigned long offset, int *head) { struct gnttab_copy *copy_gop; - struct netbk_rx_meta *meta; + struct xenvif_rx_meta *meta; unsigned long bytes; /* Data must not cross a page boundary. 
*/ @@ -434,15 +338,15 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, copy_gop = npo->copy + npo->copy_prod++; copy_gop->flags = GNTCOPY_dest_gref; + copy_gop->len = bytes; + copy_gop->source.domid = DOMID_SELF; copy_gop->source.u.gmfn = virt_to_mfn(page_address(page)); - copy_gop->source.offset = offset; - copy_gop->dest.domid = vif->domid; + copy_gop->dest.domid = vif->domid; copy_gop->dest.offset = npo->copy_off; copy_gop->dest.u.ref = npo->copy_gref; - copy_gop->len = bytes; npo->copy_off += bytes; meta->size += bytes; @@ -485,7 +389,7 @@ static int netbk_gop_skb(struct sk_buff *skb, int nr_frags = skb_shinfo(skb)->nr_frags; int i; struct xen_netif_rx_request *req; - struct netbk_rx_meta *meta; + struct xenvif_rx_meta *meta; unsigned char *data; int head = 1; int old_meta_prod; @@ -565,7 +469,7 @@ static int netbk_check_gop(struct xenvif *vif, int nr_meta_slots, } static void netbk_add_frag_responses(struct xenvif *vif, int status, - struct netbk_rx_meta *meta, + struct xenvif_rx_meta *meta, int nr_meta_slots) { int i; @@ -594,9 +498,13 @@ struct skb_cb_overlay { int meta_slots_used; }; -static void xen_netbk_rx_action(struct xen_netbk *netbk) +static void xen_netbk_kick_thread(struct xenvif *vif) +{ + wake_up(&vif->wq); +} + +void xen_netbk_rx_action(struct xenvif *vif) { - struct xenvif *vif = NULL, *tmp; s8 status; u16 flags; struct xen_netif_rx_response *resp; @@ -608,17 +516,18 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk) int count; unsigned long offset; struct skb_cb_overlay *sco; + int need_to_notify = 0; struct netrx_pending_operations npo = { - .copy = netbk->grant_copy_op, - .meta = netbk->meta, + .copy = vif->grant_copy_op, + .meta = vif->meta, }; skb_queue_head_init(&rxq); count = 0; - while ((skb = skb_dequeue(&netbk->rx_queue)) != NULL) { + while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) { vif = netdev_priv(skb->dev); nr_frags = skb_shinfo(skb)->nr_frags; @@ -635,27 +544,27 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk) break; } - BUG_ON(npo.meta_prod > ARRAY_SIZE(netbk->meta)); + BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta)); if (!npo.copy_prod) return; - BUG_ON(npo.copy_prod > ARRAY_SIZE(netbk->grant_copy_op)); - gnttab_batch_copy(netbk->grant_copy_op, npo.copy_prod); + BUG_ON(npo.copy_prod > ARRAY_SIZE(vif->grant_copy_op)); + gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod); while ((skb = __skb_dequeue(&rxq)) != NULL) { sco = (struct skb_cb_overlay *)skb->cb; vif = netdev_priv(skb->dev); - if (netbk->meta[npo.meta_cons].gso_size && vif->gso_prefix) { + if (vif->meta[npo.meta_cons].gso_size && vif->gso_prefix) { resp = RING_GET_RESPONSE(&vif->rx, - vif->rx.rsp_prod_pvt++); + vif->rx.rsp_prod_pvt++); resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data; - resp->offset = netbk->meta[npo.meta_cons].gso_size; - resp->id = netbk->meta[npo.meta_cons].id; + resp->offset = vif->meta[npo.meta_cons].gso_size; + resp->id = vif->meta[npo.meta_cons].id; resp->status = sco->meta_slots_used; npo.meta_cons++; @@ -680,12 +589,12 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk) flags |= XEN_NETRXF_data_validated; offset = 0; - resp = make_rx_response(vif, netbk->meta[npo.meta_cons].id, + resp = make_rx_response(vif, vif->meta[npo.meta_cons].id, status, offset, - netbk->meta[npo.meta_cons].size, + vif->meta[npo.meta_cons].size, flags); - if (netbk->meta[npo.meta_cons].gso_size && !vif->gso_prefix) { + if (vif->meta[npo.meta_cons].gso_size && !vif->gso_prefix) { struct xen_netif_extra_info *gso = (struct 
xen_netif_extra_info *) RING_GET_RESPONSE(&vif->rx, @@ -693,7 +602,7 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk) resp->flags |= XEN_NETRXF_extra_info; - gso->u.gso.size = netbk->meta[npo.meta_cons].gso_size; + gso->u.gso.size = vif->meta[npo.meta_cons].gso_size; gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; gso->u.gso.pad = 0; gso->u.gso.features = 0; @@ -703,112 +612,33 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk) } netbk_add_frag_responses(vif, status, - netbk->meta + npo.meta_cons + 1, + vif->meta + npo.meta_cons + 1, sco->meta_slots_used); RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret); + if (ret) + need_to_notify = 1; + xenvif_notify_tx_completion(vif); - if (ret && list_empty(&vif->notify_list)) - list_add_tail(&vif->notify_list, ¬ify); - else - xenvif_put(vif); npo.meta_cons += sco->meta_slots_used; dev_kfree_skb(skb); } - list_for_each_entry_safe(vif, tmp, ¬ify, notify_list) { + if (need_to_notify) notify_remote_via_irq(vif->rx_irq); - list_del_init(&vif->notify_list); - xenvif_put(vif); - } /* More work to do? */ - if (!skb_queue_empty(&netbk->rx_queue) && - !timer_pending(&netbk->net_timer)) - xen_netbk_kick_thread(netbk); + if (!skb_queue_empty(&vif->rx_queue)) + xen_netbk_kick_thread(vif); } void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb) { - struct xen_netbk *netbk = vif->netbk; + skb_queue_tail(&vif->rx_queue, skb); - skb_queue_tail(&netbk->rx_queue, skb); - - xen_netbk_kick_thread(netbk); -} - -static void xen_netbk_alarm(unsigned long data) -{ - struct xen_netbk *netbk = (struct xen_netbk *)data; - xen_netbk_kick_thread(netbk); -} - -static int __on_net_schedule_list(struct xenvif *vif) -{ - return !list_empty(&vif->schedule_list); -} - -/* Must be called with net_schedule_list_lock held */ -static void remove_from_net_schedule_list(struct xenvif *vif) -{ - if (likely(__on_net_schedule_list(vif))) { - list_del_init(&vif->schedule_list); - xenvif_put(vif); - } -} - -static struct xenvif *poll_net_schedule_list(struct xen_netbk *netbk) -{ - struct xenvif *vif = NULL; - - spin_lock_irq(&netbk->net_schedule_list_lock); - if (list_empty(&netbk->net_schedule_list)) - goto out; - - vif = list_first_entry(&netbk->net_schedule_list, - struct xenvif, schedule_list); - if (!vif) - goto out; - - xenvif_get(vif); - - remove_from_net_schedule_list(vif); -out: - spin_unlock_irq(&netbk->net_schedule_list_lock); - return vif; -} - -void xen_netbk_schedule_xenvif(struct xenvif *vif) -{ - unsigned long flags; - struct xen_netbk *netbk = vif->netbk; - - if (__on_net_schedule_list(vif)) - goto kick; - - spin_lock_irqsave(&netbk->net_schedule_list_lock, flags); - if (!__on_net_schedule_list(vif) && - likely(xenvif_schedulable(vif))) { - list_add_tail(&vif->schedule_list, &netbk->net_schedule_list); - xenvif_get(vif); - } - spin_unlock_irqrestore(&netbk->net_schedule_list_lock, flags); - -kick: - smp_mb(); - if ((nr_pending_reqs(netbk) < (MAX_PENDING_REQS/2)) && - !list_empty(&netbk->net_schedule_list)) - xen_netbk_kick_thread(netbk); -} - -void xen_netbk_deschedule_xenvif(struct xenvif *vif) -{ - struct xen_netbk *netbk = vif->netbk; - spin_lock_irq(&netbk->net_schedule_list_lock); - remove_from_net_schedule_list(vif); - spin_unlock_irq(&netbk->net_schedule_list_lock); + xen_netbk_kick_thread(vif); } void xen_netbk_check_rx_xenvif(struct xenvif *vif) @@ -818,7 +648,7 @@ void xen_netbk_check_rx_xenvif(struct xenvif *vif) RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do); if (more_to_do) - xen_netbk_schedule_xenvif(vif); + 
napi_schedule(&vif->napi); } static void tx_add_credit(struct xenvif *vif) @@ -860,15 +690,12 @@ static void netbk_tx_err(struct xenvif *vif, txp = RING_GET_REQUEST(&vif->tx, cons++); } while (1); vif->tx.req_cons = cons; - xen_netbk_check_rx_xenvif(vif); - xenvif_put(vif); } static void netbk_fatal_tx_err(struct xenvif *vif) { netdev_err(vif->dev, "fatal error; disabling device\n"); xenvif_carrier_off(vif); - xenvif_put(vif); } static int netbk_count_requests(struct xenvif *vif, @@ -969,19 +796,20 @@ static int netbk_count_requests(struct xenvif *vif, return slots; } -static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk, +static struct page *xen_netbk_alloc_page(struct xenvif *vif, u16 pending_idx) { struct page *page; - page = alloc_page(GFP_KERNEL|__GFP_COLD); + + page = alloc_page(GFP_ATOMIC|__GFP_COLD); if (!page) return NULL; - netbk->mmap_pages[pending_idx] = page; + vif->mmap_pages[pending_idx] = page; + return page; } -static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk, - struct xenvif *vif, +static struct gnttab_copy *xen_netbk_get_requests(struct xenvif *vif, struct sk_buff *skb, struct xen_netif_tx_request *txp, struct gnttab_copy *gop) @@ -1012,9 +840,9 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk, for (shinfo->nr_frags = slot = start; slot < nr_slots; shinfo->nr_frags++) { struct pending_tx_info *pending_tx_info = - netbk->pending_tx_info; + vif->pending_tx_info; - page = alloc_page(GFP_KERNEL|__GFP_COLD); + page = alloc_page(GFP_ATOMIC|__GFP_COLD); if (!page) goto err; @@ -1049,21 +877,18 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk, gop->len = txp->size; dst_offset += gop->len; - index = pending_index(netbk->pending_cons++); + index = pending_index(vif->pending_cons++); - pending_idx = netbk->pending_ring[index]; + pending_idx = vif->pending_ring[index]; memcpy(&pending_tx_info[pending_idx].req, txp, sizeof(*txp)); - xenvif_get(vif); - - pending_tx_info[pending_idx].vif = vif; /* Poison these fields, corresponding * fields for head tx req will be set * to correct values after the loop. */ - netbk->mmap_pages[pending_idx] = (void *)(~0UL); + vif->mmap_pages[pending_idx] = (void *)(~0UL); pending_tx_info[pending_idx].head = INVALID_PENDING_RING_IDX; @@ -1083,7 +908,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk, first->req.offset = 0; first->req.size = dst_offset; first->head = start_idx; - netbk->mmap_pages[head_idx] = page; + vif->mmap_pages[head_idx] = page; frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx); } @@ -1093,18 +918,18 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk, err: /* Unwind, freeing all pages and sending error responses. */ while (shinfo->nr_frags-- > start) { - xen_netbk_idx_release(netbk, + xen_netbk_idx_release(vif, frag_get_pending_idx(&frags[shinfo->nr_frags]), XEN_NETIF_RSP_ERROR); } /* The head too, if necessary. */ if (start) - xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR); + xen_netbk_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR); return NULL; } -static int xen_netbk_tx_check_gop(struct xen_netbk *netbk, +static int xen_netbk_tx_check_gop(struct xenvif *vif, struct sk_buff *skb, struct gnttab_copy **gopp) { @@ -1119,7 +944,7 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk, /* Check status of header. 
*/ err = gop->status; if (unlikely(err)) - xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR); + xen_netbk_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR); /* Skip first skb fragment if it is on same page as header fragment. */ start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx); @@ -1129,7 +954,7 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk, pending_ring_idx_t head; pending_idx = frag_get_pending_idx(&shinfo->frags[i]); - tx_info = &netbk->pending_tx_info[pending_idx]; + tx_info = &vif->pending_tx_info[pending_idx]; head = tx_info->head; /* Check error status: if okay then remember grant handle. */ @@ -1137,18 +962,19 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk, newerr = (++gop)->status; if (newerr) break; - peek = netbk->pending_ring[pending_index(++head)]; - } while (!pending_tx_is_head(netbk, peek)); + peek = vif->pending_ring[pending_index(++head)]; + } while (!pending_tx_is_head(vif, peek)); if (likely(!newerr)) { /* Had a previous error? Invalidate this fragment. */ if (unlikely(err)) - xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); + xen_netbk_idx_release(vif, pending_idx, + XEN_NETIF_RSP_OKAY); continue; } /* Error on this fragment: respond to client with an error. */ - xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR); + xen_netbk_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR); /* Not the first error? Preceding frags already invalidated. */ if (err) @@ -1156,10 +982,11 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk, /* First error: invalidate header and preceding fragments. */ pending_idx = *((u16 *)skb->data); - xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); + xen_netbk_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY); for (j = start; j < i; j++) { pending_idx = frag_get_pending_idx(&shinfo->frags[j]); - xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); + xen_netbk_idx_release(vif, pending_idx, + XEN_NETIF_RSP_OKAY); } /* Remember the error: invalidate all subsequent fragments. 
*/ @@ -1170,7 +997,7 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk, return err; } -static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb) +static void xen_netbk_fill_frags(struct xenvif *vif, struct sk_buff *skb) { struct skb_shared_info *shinfo = skb_shinfo(skb); int nr_frags = shinfo->nr_frags; @@ -1184,16 +1011,16 @@ static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb) pending_idx = frag_get_pending_idx(frag); - txp = &netbk->pending_tx_info[pending_idx].req; - page = virt_to_page(idx_to_kaddr(netbk, pending_idx)); + txp = &vif->pending_tx_info[pending_idx].req; + page = virt_to_page(idx_to_kaddr(vif, pending_idx)); __skb_fill_page_desc(skb, i, page, txp->offset, txp->size); skb->len += txp->size; skb->data_len += txp->size; skb->truesize += txp->size; /* Take an extra reference to offset xen_netbk_idx_release */ - get_page(netbk->mmap_pages[pending_idx]); - xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); + get_page(vif->mmap_pages[pending_idx]); + xen_netbk_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY); } } @@ -1353,16 +1180,14 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size) return false; } -static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) +static unsigned xen_netbk_tx_build_gops(struct xenvif *vif) { - struct gnttab_copy *gop = netbk->tx_copy_ops, *request_gop; + struct gnttab_copy *gop = vif->tx_copy_ops, *request_gop; struct sk_buff *skb; int ret; - while ((nr_pending_reqs(netbk) + XEN_NETBK_LEGACY_SLOTS_MAX - < MAX_PENDING_REQS) && - !list_empty(&netbk->net_schedule_list)) { - struct xenvif *vif; + while ((nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX + < MAX_PENDING_REQS)) { struct xen_netif_tx_request txreq; struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX]; struct page *page; @@ -1373,16 +1198,6 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) unsigned int data_len; pending_ring_idx_t index; - /* Get a netif from the list with work to do. */ - vif = poll_net_schedule_list(netbk); - /* This can sometimes happen because the test of - * list_empty(net_schedule_list) at the top of the - * loop is unlocked. Just go back and have another - * look. - */ - if (!vif) - continue; - if (vif->tx.sring->req_prod - vif->tx.req_cons > XEN_NETIF_TX_RING_SIZE) { netdev_err(vif->dev, @@ -1395,10 +1210,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) } RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do); - if (!work_to_do) { - xenvif_put(vif); - continue; - } + if (!work_to_do) + break; idx = vif->tx.req_cons; rmb(); /* Ensure that we see the request before we copy it. */ @@ -1406,10 +1219,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) /* Credit-based scheduling. 
*/ if (txreq.size > vif->remaining_credit && - tx_credit_exceeded(vif, txreq.size)) { - xenvif_put(vif); - continue; - } + tx_credit_exceeded(vif, txreq.size)) + break; vif->remaining_credit -= txreq.size; @@ -1422,12 +1233,12 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) work_to_do); idx = vif->tx.req_cons; if (unlikely(work_to_do < 0)) - continue; + break; } ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do); if (unlikely(ret < 0)) - continue; + break; idx += ret; @@ -1435,7 +1246,7 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) netdev_dbg(vif->dev, "Bad packet size: %d\n", txreq.size); netbk_tx_err(vif, &txreq, idx); - continue; + break; } /* No crossing a page as the payload mustn't fragment. */ @@ -1445,11 +1256,11 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) txreq.offset, txreq.size, (txreq.offset&~PAGE_MASK) + txreq.size); netbk_fatal_tx_err(vif); - continue; + break; } - index = pending_index(netbk->pending_cons); - pending_idx = netbk->pending_ring[index]; + index = pending_index(vif->pending_cons); + pending_idx = vif->pending_ring[index]; data_len = (txreq.size > PKT_PROT_LEN && ret < XEN_NETBK_LEGACY_SLOTS_MAX) ? @@ -1474,16 +1285,16 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) if (netbk_set_skb_gso(vif, skb, gso)) { /* Failure in netbk_set_skb_gso is fatal. */ kfree_skb(skb); - continue; + break; } } /* XXX could copy straight to head */ - page = xen_netbk_alloc_page(netbk, pending_idx); + page = xen_netbk_alloc_page(vif, pending_idx); if (!page) { kfree_skb(skb); netbk_tx_err(vif, &txreq, idx); - continue; + break; } gop->source.u.ref = txreq.gref; @@ -1499,10 +1310,9 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) gop++; - memcpy(&netbk->pending_tx_info[pending_idx].req, + memcpy(&vif->pending_tx_info[pending_idx].req, &txreq, sizeof(txreq)); - netbk->pending_tx_info[pending_idx].vif = vif; - netbk->pending_tx_info[pending_idx].head = index; + vif->pending_tx_info[pending_idx].head = index; *((u16 *)skb->data) = pending_idx; __skb_put(skb, data_len); @@ -1517,46 +1327,45 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) INVALID_PENDING_IDX); } - netbk->pending_cons++; + vif->pending_cons++; - request_gop = xen_netbk_get_requests(netbk, vif, - skb, txfrags, gop); + request_gop = xen_netbk_get_requests(vif, skb, txfrags, gop); if (request_gop == NULL) { kfree_skb(skb); netbk_tx_err(vif, &txreq, idx); - continue; + break; } gop = request_gop; - __skb_queue_tail(&netbk->tx_queue, skb); + __skb_queue_tail(&vif->tx_queue, skb); vif->tx.req_cons = idx; - xen_netbk_check_rx_xenvif(vif); - if ((gop-netbk->tx_copy_ops) >= ARRAY_SIZE(netbk->tx_copy_ops)) + if ((gop-vif->tx_copy_ops) >= ARRAY_SIZE(vif->tx_copy_ops)) break; } - return gop - netbk->tx_copy_ops; + return gop - vif->tx_copy_ops; } -static void xen_netbk_tx_submit(struct xen_netbk *netbk) + +static int xen_netbk_tx_submit(struct xenvif *vif, int budget) { - struct gnttab_copy *gop = netbk->tx_copy_ops; + struct gnttab_copy *gop = vif->tx_copy_ops; struct sk_buff *skb; + int work_done = 0; - while ((skb = __skb_dequeue(&netbk->tx_queue)) != NULL) { + while (work_done < budget && + (skb = __skb_dequeue(&vif->tx_queue)) != NULL) { struct xen_netif_tx_request *txp; - struct xenvif *vif; u16 pending_idx; unsigned data_len; pending_idx = *((u16 *)skb->data); - vif = netbk->pending_tx_info[pending_idx].vif; - txp = &netbk->pending_tx_info[pending_idx].req; + txp = 
&vif->pending_tx_info[pending_idx].req; /* Check the remap error code. */ - if (unlikely(xen_netbk_tx_check_gop(netbk, skb, &gop))) { + if (unlikely(xen_netbk_tx_check_gop(vif, skb, &gop))) { netdev_dbg(vif->dev, "netback grant failed.\n"); skb_shinfo(skb)->nr_frags = 0; kfree_skb(skb); @@ -1565,7 +1374,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk) data_len = skb->len; memcpy(skb->data, - (void *)(idx_to_kaddr(netbk, pending_idx)|txp->offset), + (void *)(idx_to_kaddr(vif, pending_idx)|txp->offset), data_len); if (data_len < txp->size) { /* Append the packet payload as a fragment. */ @@ -1573,7 +1382,8 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk) txp->size -= data_len; } else { /* Schedule a response immediately. */ - xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); + xen_netbk_idx_release(vif, pending_idx, + XEN_NETIF_RSP_OKAY); } if (txp->flags & XEN_NETTXF_csum_blank) @@ -1581,7 +1391,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk) else if (txp->flags & XEN_NETTXF_data_validated) skb->ip_summed = CHECKSUM_UNNECESSARY; - xen_netbk_fill_frags(netbk, skb); + xen_netbk_fill_frags(vif, skb); /* * If the initial fragment was < PKT_PROT_LEN then @@ -1609,53 +1419,61 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk) vif->dev->stats.rx_bytes += skb->len; vif->dev->stats.rx_packets++; - xenvif_receive_skb(vif, skb); + work_done++; + + netif_receive_skb(skb); } + + return work_done; } /* Called after netfront has transmitted */ -static void xen_netbk_tx_action(struct xen_netbk *netbk) +int xen_netbk_tx_action(struct xenvif *vif, int budget) { unsigned nr_gops; + int work_done; - nr_gops = xen_netbk_tx_build_gops(netbk); + if (unlikely(!tx_work_todo(vif))) + return 0; + + nr_gops = xen_netbk_tx_build_gops(vif); if (nr_gops == 0) - return; + return 0; + + gnttab_batch_copy(vif->tx_copy_ops, nr_gops); - gnttab_batch_copy(netbk->tx_copy_ops, nr_gops); + work_done = xen_netbk_tx_submit(vif, nr_gops); - xen_netbk_tx_submit(netbk); + return work_done; } -static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx, +static void xen_netbk_idx_release(struct xenvif *vif, u16 pending_idx, u8 status) { - struct xenvif *vif; struct pending_tx_info *pending_tx_info; pending_ring_idx_t head; u16 peek; /* peek into next tx request */ - BUG_ON(netbk->mmap_pages[pending_idx] == (void *)(~0UL)); + BUG_ON(vif->mmap_pages[pending_idx] == (void *)(~0UL)); /* Already complete? 
*/ - if (netbk->mmap_pages[pending_idx] == NULL) + if (vif->mmap_pages[pending_idx] == NULL) return; - pending_tx_info = &netbk->pending_tx_info[pending_idx]; + pending_tx_info = &vif->pending_tx_info[pending_idx]; - vif = pending_tx_info->vif; head = pending_tx_info->head; - BUG_ON(!pending_tx_is_head(netbk, head)); - BUG_ON(netbk->pending_ring[pending_index(head)] != pending_idx); + BUG_ON(!pending_tx_is_head(vif, head)); + BUG_ON(vif->pending_ring[pending_index(head)] != pending_idx); do { pending_ring_idx_t index; pending_ring_idx_t idx = pending_index(head); - u16 info_idx = netbk->pending_ring[idx]; + u16 info_idx = vif->pending_ring[idx]; - pending_tx_info = &netbk->pending_tx_info[info_idx]; + pending_tx_info = &vif->pending_tx_info[info_idx]; make_tx_response(vif, &pending_tx_info->req, status); /* Setting any number other than @@ -1664,18 +1482,15 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx, */ pending_tx_info->head = 0; - index = pending_index(netbk->pending_prod++); - netbk->pending_ring[index] = netbk->pending_ring[info_idx]; + index = pending_index(vif->pending_prod++); + vif->pending_ring[index] = vif->pending_ring[info_idx]; - xenvif_put(vif); + peek = vif->pending_ring[pending_index(++head)]; - peek = netbk->pending_ring[pending_index(++head)]; + } while (!pending_tx_is_head(vif, peek)); - } while (!pending_tx_is_head(netbk, peek)); - - netbk->mmap_pages[pending_idx]->mapping = 0; - put_page(netbk->mmap_pages[pending_idx]); - netbk->mmap_pages[pending_idx] = NULL; + put_page(vif->mmap_pages[pending_idx]); + vif->mmap_pages[pending_idx] = NULL; } @@ -1723,45 +1538,22 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif, return resp; } -static inline int rx_work_todo(struct xen_netbk *netbk) +static inline int rx_work_todo(struct xenvif *vif) { - return !skb_queue_empty(&netbk->rx_queue); + return !skb_queue_empty(&vif->rx_queue); } -static inline int tx_work_todo(struct xen_netbk *netbk) +static inline int tx_work_todo(struct xenvif *vif) { - if ((nr_pending_reqs(netbk) + XEN_NETBK_LEGACY_SLOTS_MAX - < MAX_PENDING_REQS) && - !list_empty(&netbk->net_schedule_list)) + if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)) && + (nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX + < MAX_PENDING_REQS)) return 1; return 0; } -static int xen_netbk_kthread(void *data) -{ - struct xen_netbk *netbk = data; - while (!kthread_should_stop()) { - wait_event_interruptible(netbk->wq, - rx_work_todo(netbk) || - tx_work_todo(netbk) || - kthread_should_stop()); - cond_resched(); - - if (kthread_should_stop()) - break; - - if (rx_work_todo(netbk)) - xen_netbk_rx_action(netbk); - - if (tx_work_todo(netbk)) - xen_netbk_tx_action(netbk); - } - - return 0; -} - void xen_netbk_unmap_frontend_rings(struct xenvif *vif) { if (vif->tx.sring) @@ -1807,11 +1599,29 @@ err: return err; } +int xen_netbk_kthread(void *data) +{ + struct xenvif *vif = data; + + while (!kthread_should_stop()) { + wait_event_interruptible(vif->wq, + rx_work_todo(vif) || + kthread_should_stop()); + if (kthread_should_stop()) + break; + + if (rx_work_todo(vif)) + xen_netbk_rx_action(vif); + + cond_resched(); + } + + return 0; +} + static int __init netback_init(void) { - int i; int rc = 0; - int group; if (!xen_domain()) return -ENODEV; @@ -1822,48 +1632,6 @@ static int __init netback_init(void) fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX; } - xen_netbk_group_nr = num_online_cpus(); - xen_netbk = vzalloc(sizeof(struct xen_netbk) * xen_netbk_group_nr); - if (!xen_netbk) - return 
-ENOMEM; - - for (group = 0; group < xen_netbk_group_nr; group++) { - struct xen_netbk *netbk = &xen_netbk[group]; - skb_queue_head_init(&netbk->rx_queue); - skb_queue_head_init(&netbk->tx_queue); - - init_timer(&netbk->net_timer); - netbk->net_timer.data = (unsigned long)netbk; - netbk->net_timer.function = xen_netbk_alarm; - - netbk->pending_cons = 0; - netbk->pending_prod = MAX_PENDING_REQS; - for (i = 0; i < MAX_PENDING_REQS; i++) - netbk->pending_ring[i] = i; - - init_waitqueue_head(&netbk->wq); - netbk->task = kthread_create(xen_netbk_kthread, - (void *)netbk, - "netback/%u", group); - - if (IS_ERR(netbk->task)) { - pr_alert("kthread_create() fails at netback\n"); - del_timer(&netbk->net_timer); - rc = PTR_ERR(netbk->task); - goto failed_init; - } - - kthread_bind(netbk->task, group); - - INIT_LIST_HEAD(&netbk->net_schedule_list); - - spin_lock_init(&netbk->net_schedule_list_lock); - - atomic_set(&netbk->netfront_count, 0); - - wake_up_process(netbk->task); - } - rc = xenvif_xenbus_init(); if (rc) goto failed_init; @@ -1871,35 +1639,14 @@ static int __init netback_init(void) return 0; failed_init: - while (--group >= 0) { - struct xen_netbk *netbk = &xen_netbk[group]; - del_timer(&netbk->net_timer); - kthread_stop(netbk->task); - } - vfree(xen_netbk); return rc; - } module_init(netback_init); static void __exit netback_fini(void) { - int i, j; - xenvif_xenbus_fini(); - - for (i = 0; i < xen_netbk_group_nr; i++) { - struct xen_netbk *netbk = &xen_netbk[i]; - del_timer_sync(&netbk->net_timer); - kthread_stop(netbk->task); - for (j = 0; j < MAX_PENDING_REQS; j++) { - if (netbk->mmap_pages[j]) - __free_page(netbk->mmap_pages[j]); - } - } - - vfree(xen_netbk); } module_exit(netback_fini); |
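For completeness, the per-vif setup that this model implies is also on the interface.c side and therefore not visible in this diff. The sketch below is an illustrative guess at what the connect path looks like under the 1:1 model: one NAPI instance plus one kthread per vif, replacing the per-CPU xen_netbk groups removed from netback_init() above. XENVIF_NAPI_WEIGHT, vif->task, the thread name and the error handling are assumptions, not the patch's actual code.

/* Illustrative sketch only, not part of this diff. */
#define XENVIF_NAPI_WEIGHT 64

static int xenvif_connect_sketch(struct xenvif *vif)
{
        /* One NAPI context per vif drives guest-side TX... */
        netif_napi_add(vif->dev, &vif->napi, xenvif_poll, XENVIF_NAPI_WEIGHT);
        napi_enable(&vif->napi);

        /* ...and one dedicated kthread per vif drives guest-side RX. */
        init_waitqueue_head(&vif->wq);
        vif->task = kthread_run(xen_netbk_kthread, vif, "vif%d", vif->domid);
        if (IS_ERR(vif->task)) {
                int err = PTR_ERR(vif->task);

                napi_disable(&vif->napi);
                netif_napi_del(&vif->napi);
                return err;
        }

        return 0;
}

Compared with the removed initialisation code above, where one xen_netbk group (kthread, timer, schedule list) was created per online CPU and xen_netbk_add_xenvif() balanced vifs across groups, the load-balancing layer disappears entirely because every vif now carries its own queues, copy-op arrays and worker.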