Diffstat (limited to 'drivers/net/xen-netback/netback.c')
-rw-r--r--	drivers/net/xen-netback/netback.c	294
1 file changed, 224 insertions, 70 deletions
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 3f44b522b831..e481f3710bd3 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -149,9 +149,20 @@ static inline pending_ring_idx_t pending_index(unsigned i)
 	return i & (MAX_PENDING_REQS-1);
 }
 
-bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed)
+static int xenvif_rx_ring_slots_needed(struct xenvif *vif)
+{
+	if (vif->gso_mask)
+		return DIV_ROUND_UP(vif->dev->gso_max_size, XEN_PAGE_SIZE) + 1;
+	else
+		return DIV_ROUND_UP(vif->dev->mtu, XEN_PAGE_SIZE);
+}
+
+static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
 {
 	RING_IDX prod, cons;
+	int needed;
+
+	needed = xenvif_rx_ring_slots_needed(queue->vif);
 
 	do {
 		prod = queue->rx.sring->req_prod;
@@ -263,6 +274,80 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
 	return meta;
 }
 
+struct gop_frag_copy {
+	struct xenvif_queue *queue;
+	struct netrx_pending_operations *npo;
+	struct xenvif_rx_meta *meta;
+	int head;
+	int gso_type;
+
+	struct page *page;
+};
+
+static void xenvif_setup_copy_gop(unsigned long gfn,
+				  unsigned int offset,
+				  unsigned int *len,
+				  struct gop_frag_copy *info)
+{
+	struct gnttab_copy *copy_gop;
+	struct xen_page_foreign *foreign;
+	/* Convenient aliases */
+	struct xenvif_queue *queue = info->queue;
+	struct netrx_pending_operations *npo = info->npo;
+	struct page *page = info->page;
+
+	BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
+
+	if (npo->copy_off == MAX_BUFFER_OFFSET)
+		info->meta = get_next_rx_buffer(queue, npo);
+
+	if (npo->copy_off + *len > MAX_BUFFER_OFFSET)
+		*len = MAX_BUFFER_OFFSET - npo->copy_off;
+
+	copy_gop = npo->copy + npo->copy_prod++;
+	copy_gop->flags = GNTCOPY_dest_gref;
+	copy_gop->len = *len;
+
+	foreign = xen_page_foreign(page);
+	if (foreign) {
+		copy_gop->source.domid = foreign->domid;
+		copy_gop->source.u.ref = foreign->gref;
+		copy_gop->flags |= GNTCOPY_source_gref;
+	} else {
+		copy_gop->source.domid = DOMID_SELF;
+		copy_gop->source.u.gmfn = gfn;
+	}
+	copy_gop->source.offset = offset;
+
+	copy_gop->dest.domid = queue->vif->domid;
+	copy_gop->dest.offset = npo->copy_off;
+	copy_gop->dest.u.ref = npo->copy_gref;
+
+	npo->copy_off += *len;
+	info->meta->size += *len;
+
+	/* Leave a gap for the GSO descriptor. */
+	if (info->head && ((1 << info->gso_type) & queue->vif->gso_mask))
+		queue->rx.req_cons++;
+
+	info->head = 0; /* There must be something in this buffer now */
+}
+
+static void xenvif_gop_frag_copy_grant(unsigned long gfn,
+				       unsigned offset,
+				       unsigned int len,
+				       void *data)
+{
+	unsigned int bytes;
+
+	while (len) {
+		bytes = len;
+		xenvif_setup_copy_gop(gfn, offset, &bytes, data);
+		offset += bytes;
+		len -= bytes;
+	}
+}
+
 /*
  * Set up the grant operations for this fragment. If it's a flipping
  * interface, we also set up the unmap request from here.
@@ -272,83 +357,52 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
 				 struct page *page, unsigned long size,
 				 unsigned long offset, int *head)
 {
-	struct gnttab_copy *copy_gop;
-	struct xenvif_rx_meta *meta;
+	struct gop_frag_copy info = {
+		.queue = queue,
+		.npo = npo,
+		.head = *head,
+		.gso_type = XEN_NETIF_GSO_TYPE_NONE,
+	};
 	unsigned long bytes;
-	int gso_type = XEN_NETIF_GSO_TYPE_NONE;
+
+	if (skb_is_gso(skb)) {
+		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
+			info.gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
+		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+			info.gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
+	}
 
 	/* Data must not cross a page boundary. */
 	BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
 
-	meta = npo->meta + npo->meta_prod - 1;
+	info.meta = npo->meta + npo->meta_prod - 1;
 
 	/* Skip unused frames from start of page */
 	page += offset >> PAGE_SHIFT;
 	offset &= ~PAGE_MASK;
 
 	while (size > 0) {
-		struct xen_page_foreign *foreign;
-
 		BUG_ON(offset >= PAGE_SIZE);
-		BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
-
-		if (npo->copy_off == MAX_BUFFER_OFFSET)
-			meta = get_next_rx_buffer(queue, npo);
 
 		bytes = PAGE_SIZE - offset;
 		if (bytes > size)
 			bytes = size;
 
-		if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
-			bytes = MAX_BUFFER_OFFSET - npo->copy_off;
-
-		copy_gop = npo->copy + npo->copy_prod++;
-		copy_gop->flags = GNTCOPY_dest_gref;
-		copy_gop->len = bytes;
-
-		foreign = xen_page_foreign(page);
-		if (foreign) {
-			copy_gop->source.domid = foreign->domid;
-			copy_gop->source.u.ref = foreign->gref;
-			copy_gop->flags |= GNTCOPY_source_gref;
-		} else {
-			copy_gop->source.domid = DOMID_SELF;
-			copy_gop->source.u.gmfn =
-				virt_to_mfn(page_address(page));
-		}
-		copy_gop->source.offset = offset;
-
-		copy_gop->dest.domid = queue->vif->domid;
-		copy_gop->dest.offset = npo->copy_off;
-		copy_gop->dest.u.ref = npo->copy_gref;
-
-		npo->copy_off += bytes;
-		meta->size += bytes;
-
-		offset += bytes;
+		info.page = page;
+		gnttab_foreach_grant_in_range(page, offset, bytes,
+					      xenvif_gop_frag_copy_grant,
+					      &info);
 		size -= bytes;
+		offset = 0;
 
-		/* Next frame */
-		if (offset == PAGE_SIZE && size) {
+		/* Next page */
+		if (size) {
 			BUG_ON(!PageCompound(page));
 			page++;
-			offset = 0;
 		}
-
-		/* Leave a gap for the GSO descriptor. */
-		if (skb_is_gso(skb)) {
-			if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
-				gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
-			else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
-				gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
-		}
-
-		if (*head && ((1 << gso_type) & queue->vif->gso_mask))
-			queue->rx.req_cons++;
-
-		*head = 0; /* There must be something in this buffer now. */
-
 	}
+
+	*head = info.head;
 }
 
 /*
@@ -513,7 +567,7 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
 
 	skb_queue_head_init(&rxq);
 
-	while (xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX)
+	while (xenvif_rx_ring_slots_available(queue)
 	       && (skb = xenvif_rx_dequeue(queue)) != NULL) {
 		queue->last_rx_time = jiffies;
 
@@ -747,7 +801,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
 		first->size -= txp->size;
 		slots++;
 
-		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
+		if (unlikely((txp->offset + txp->size) > XEN_PAGE_SIZE)) {
 			netdev_err(queue->vif->dev,
 				   "Cross page boundary, txp->offset: %u, size: %u\n",
 				   txp->offset, txp->size);
 			xenvif_fatal_tx_err(queue->vif);
@@ -1157,6 +1211,80 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
 	return false;
 }
 
+/* No locking is required in xenvif_mcast_add/del() as they are
+ * only ever invoked from NAPI poll. An RCU list is used because
+ * xenvif_mcast_match() is called asynchronously, during start_xmit.
+ */
+
+static int xenvif_mcast_add(struct xenvif *vif, const u8 *addr)
+{
+	struct xenvif_mcast_addr *mcast;
+
+	if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) {
+		if (net_ratelimit())
+			netdev_err(vif->dev,
+				   "Too many multicast addresses\n");
+		return -ENOSPC;
+	}
+
+	mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
+	if (!mcast)
+		return -ENOMEM;
+
+	ether_addr_copy(mcast->addr, addr);
+	list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr);
+	vif->fe_mcast_count++;
+
+	return 0;
+}
+
+static void xenvif_mcast_del(struct xenvif *vif, const u8 *addr)
+{
+	struct xenvif_mcast_addr *mcast;
+
+	list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
+		if (ether_addr_equal(addr, mcast->addr)) {
+			--vif->fe_mcast_count;
+			list_del_rcu(&mcast->entry);
+			kfree_rcu(mcast, rcu);
+			break;
+		}
+	}
+}
+
+bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr)
+{
+	struct xenvif_mcast_addr *mcast;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
+		if (ether_addr_equal(addr, mcast->addr)) {
+			rcu_read_unlock();
+			return true;
+		}
+	}
+	rcu_read_unlock();
+
+	return false;
+}
+
+void xenvif_mcast_addr_list_free(struct xenvif *vif)
+{
+	/* No need for locking or RCU here. NAPI poll and TX queue
+	 * are stopped.
+	 */
+	while (!list_empty(&vif->fe_mcast_addr)) {
+		struct xenvif_mcast_addr *mcast;
+
+		mcast = list_first_entry(&vif->fe_mcast_addr,
+					 struct xenvif_mcast_addr,
+					 entry);
+		--vif->fe_mcast_count;
+		list_del(&mcast->entry);
+		kfree(mcast);
+	}
+}
+
 static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 				     int budget,
 				     unsigned *copy_ops,
@@ -1215,6 +1343,31 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 			break;
 		}
 
+		if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) {
+			struct xen_netif_extra_info *extra;
+
+			extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
+			ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);
+
+			make_tx_response(queue, &txreq,
+					 (ret == 0) ?
+					 XEN_NETIF_RSP_OKAY :
+					 XEN_NETIF_RSP_ERROR);
+			push_tx_responses(queue);
+			continue;
+		}
+
+		if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) {
+			struct xen_netif_extra_info *extra;
+
+			extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
+			xenvif_mcast_del(queue->vif, extra->u.mcast.addr);
+
+			make_tx_response(queue, &txreq, XEN_NETIF_RSP_OKAY);
+			push_tx_responses(queue);
+			continue;
+		}
+
 		ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do);
 		if (unlikely(ret < 0))
 			break;
@@ -1229,11 +1382,11 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 		}
 
 		/* No crossing a page as the payload mustn't fragment. */
-		if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
+		if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
 			netdev_err(queue->vif->dev,
 				   "txreq.offset: %u, size: %u, end: %lu\n",
 				   txreq.offset, txreq.size,
-				   (unsigned long)(txreq.offset&~PAGE_MASK) + txreq.size);
+				   (unsigned long)(txreq.offset&~XEN_PAGE_MASK) + txreq.size);
 			xenvif_fatal_tx_err(queue->vif);
 			break;
 		}
@@ -1296,10 +1449,10 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 		queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
 
 		queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
-			virt_to_mfn(skb->data);
+			virt_to_gfn(skb->data);
 		queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
 		queue->tx_copy_ops[*copy_ops].dest.offset =
-			offset_in_page(skb->data);
+			offset_in_page(skb->data) & ~XEN_PAGE_MASK;
 
 		queue->tx_copy_ops[*copy_ops].len = data_len;
 		queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
@@ -1784,7 +1937,7 @@ int xenvif_map_frontend_rings(struct xenvif_queue *queue,
 		goto err;
 
 	txs = (struct xen_netif_tx_sring *)addr;
-	BACK_RING_INIT(&queue->tx, txs, PAGE_SIZE);
+	BACK_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
 
 	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
 				     &rx_ring_ref, 1, &addr);
@@ -1792,7 +1945,7 @@ int xenvif_map_frontend_rings(struct xenvif_queue *queue,
 		goto err;
 
 	rxs = (struct xen_netif_rx_sring *)addr;
-	BACK_RING_INIT(&queue->rx, rxs, PAGE_SIZE);
+	BACK_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
 
 	return 0;
 
@@ -1839,8 +1992,7 @@ static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
 	prod = queue->rx.sring->req_prod;
 	cons = queue->rx.req_cons;
 
-	return !queue->stalled
-		&& prod - cons < XEN_NETBK_RX_SLOTS_MAX
+	return !queue->stalled && prod - cons < 1
 		&& time_after(jiffies,
 			      queue->last_rx_time + queue->vif->stall_timeout);
 }
@@ -1852,14 +2004,13 @@ static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
 	prod = queue->rx.sring->req_prod;
 	cons = queue->rx.req_cons;
 
-	return queue->stalled
-		&& prod - cons >= XEN_NETBK_RX_SLOTS_MAX;
+	return queue->stalled && prod - cons >= 1;
 }
 
 static bool xenvif_have_rx_work(struct xenvif_queue *queue)
 {
 	return (!skb_queue_empty(&queue->rx_queue)
-		&& xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX))
+		&& xenvif_rx_ring_slots_available(queue))
 		|| (queue->vif->stall_timeout &&
 		    (xenvif_rx_queue_stalled(queue) ||
 		     xenvif_rx_queue_ready(queue)))
@@ -2006,8 +2157,11 @@ static int __init netback_init(void)
 	if (!xen_domain())
 		return -ENODEV;
 
-	/* Allow as many queues as there are CPUs, by default */
-	xenvif_max_queues = num_online_cpus();
+	/* Allow as many queues as there are CPUs if user has not
+	 * specified a value.
+	 */
+	if (xenvif_max_queues == 0)
+		xenvif_max_queues = num_online_cpus();
 
 	if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
 		pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
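
The central change in the RX path above is that xenvif_gop_frag_copy() no longer assumes the kernel page size equals the Xen grant size: it hands each fragment to gnttab_foreach_grant_in_range(), which walks the region in XEN_PAGE_SIZE (4KB) grant frames so the driver also works with 64KB kernel pages. Below is a minimal user-space sketch of that iteration pattern, not the kernel implementation; foreach_grant_in_range(), print_chunk() and the MODEL_* constants are invented for illustration.

/*
 * Sketch: split [offset, offset + len) of a (possibly 64KB) page into
 * 4KB grant-sized chunks and hand each chunk to a callback, the way
 * xenvif_gop_frag_copy_grant() receives them in the patch above.
 */
#include <stdio.h>

#define MODEL_PAGE_SIZE     65536u /* 64KB kernel page, as on arm64 */
#define MODEL_XEN_PAGE_SIZE 4096u  /* grants always refer to 4KB frames */

typedef void (*grant_fn)(unsigned long gfn, unsigned int offset,
			 unsigned int len, void *data);

/* Walk the range one 4KB grant frame at a time. */
static void foreach_grant_in_range(unsigned long first_gfn,
				   unsigned int offset, unsigned int len,
				   grant_fn fn, void *data)
{
	while (len) {
		unsigned long gfn = first_gfn + offset / MODEL_XEN_PAGE_SIZE;
		unsigned int frame_off = offset % MODEL_XEN_PAGE_SIZE;
		unsigned int chunk = MODEL_XEN_PAGE_SIZE - frame_off;

		if (chunk > len)
			chunk = len;
		fn(gfn, frame_off, chunk, data);
		offset += chunk;
		len -= chunk;
	}
}

static void print_chunk(unsigned long gfn, unsigned int offset,
			unsigned int len, void *data)
{
	(void)data;
	printf("gfn %lu: offset %u, len %u\n", gfn, offset, len);
}

int main(void)
{
	/* 9000 bytes starting 1KB into the page span three grant frames. */
	foreach_grant_in_range(0, 1024, 9000, print_chunk, NULL);
	return 0;
}

In the driver itself the callback, xenvif_gop_frag_copy_grant(), loops once more, because xenvif_setup_copy_gop() may shrink a chunk again at a MAX_BUFFER_OFFSET boundary in the destination buffer; that is why the length is passed by pointer.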

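The patch also sizes the RX ring checks per vif: xenvif_rx_ring_slots_needed() derives the worst-case slot count from gso_max_size (plus one slot for the GSO descriptor) or from the MTU, replacing the fixed XEN_NETBK_RX_SLOTS_MAX, which in turn lets the stall/ready tests relax to prod - cons < 1 and >= 1. A worked example of the arithmetic follows; this is an illustrative user-space sketch, where rx_slots_needed() and its gso_enabled flag stand in for the vif->gso_mask test, assuming the common 64KB gso_max_size.

#include <stdio.h>

#define XEN_PAGE_SIZE 4096u
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Worst-case number of 4KB ring slots one skb can consume. */
static unsigned int rx_slots_needed(unsigned int gso_max_size,
				    unsigned int mtu, int gso_enabled)
{
	if (gso_enabled) /* + 1 leaves room for the extra GSO descriptor */
		return DIV_ROUND_UP(gso_max_size, XEN_PAGE_SIZE) + 1;
	return DIV_ROUND_UP(mtu, XEN_PAGE_SIZE);
}

int main(void)
{
	/* GSO vif, 64KB gso_max_size: 16 + 1 = 17 slots per skb. */
	printf("%u\n", rx_slots_needed(65536, 1500, 1));
	/* Non-GSO vif with a 1500-byte MTU: a single slot suffices. */
	printf("%u\n", rx_slots_needed(65536, 1500, 0));
	return 0;
}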