Diffstat (limited to 'drivers/net/xen-netback')
-rw-r--r--  drivers/net/xen-netback/common.h    | 39
-rw-r--r--  drivers/net/xen-netback/interface.c | 62
-rw-r--r--  drivers/net/xen-netback/netback.c   | 42
-rw-r--r--  drivers/net/xen-netback/xenbus.c    | 14
4 files changed, 135 insertions(+), 22 deletions(-)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 34173b5e886f..53c2fa244c64 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -137,6 +137,20 @@ struct xenvif_queue { /* Per-queue data for xenvif */
char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
struct xenvif *vif; /* Parent VIF */
+ /*
+ * TX/RX common EOI handling.
+ * When feature-split-event-channels = 0, interrupt handler sets
+ * NETBK_COMMON_EOI, otherwise NETBK_RX_EOI and NETBK_TX_EOI are set
+ * by the RX and TX interrupt handlers.
+ * RX and TX handler threads will issue an EOI when either
+ * NETBK_COMMON_EOI or their specific bits (NETBK_RX_EOI or
+ * NETBK_TX_EOI) are set and they will reset those bits.
+ */
+ atomic_t eoi_pending;
+#define NETBK_RX_EOI 0x01
+#define NETBK_TX_EOI 0x02
+#define NETBK_COMMON_EOI 0x04
+
/* Use NAPI for guest TX */
struct napi_struct napi;
/* When feature-split-event-channels = 0, tx_irq = rx_irq. */
@@ -317,6 +331,7 @@ void xenvif_kick_thread(struct xenvif_queue *queue);
int xenvif_dealloc_kthread(void *data);
+bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread);
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
void xenvif_carrier_on(struct xenvif *vif);
@@ -353,4 +368,28 @@ void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue);
bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr);
void xenvif_mcast_addr_list_free(struct xenvif *vif);
+#include <linux/atomic.h>
+
+static inline int xenvif_atomic_fetch_or(int i, atomic_t *v)
+{
+ int c, old;
+
+ c = v->counter;
+ while ((old = cmpxchg(&v->counter, c, c | i)) != c)
+ c = old;
+
+ return c;
+}
+
+static inline int xenvif_atomic_fetch_andnot(int i, atomic_t *v)
+{
+ int c, old;
+
+ c = v->counter;
+ while ((old = cmpxchg(&v->counter, c, c & ~i)) != c)
+ c = old;
+
+ return c;
+}
+
#endif /* __XEN_NETBACK__COMMON_H__ */
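Note on the two helpers added to common.h above: they open-code atomic_fetch_or()/atomic_fetch_andnot() as a cmpxchg() retry loop, for kernels whose atomic API predates the fetch_* variants. What the interrupt handlers rely on is that the previous bit mask is returned. A minimal sketch of the same loop, assuming ordinary userspace C with the GCC/Clang __atomic builtins standing in for cmpxchg() (illustrative only, not part of the patch):

/* Illustrative only: same retry pattern as xenvif_atomic_fetch_or(),
 * built on the __atomic builtins so it compiles and runs stand-alone.
 */
#include <stdio.h>

static int fetch_or(int *v, int bits)
{
	int old = __atomic_load_n(v, __ATOMIC_RELAXED);

	/* Retry until the value we read is the one we manage to replace. */
	while (!__atomic_compare_exchange_n(v, &old, old | bits, 0,
					    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
		;	/* on failure, 'old' is refreshed with the current value */

	return old;	/* previous value, just like the kernel helper */
}

int main(void)
{
	int eoi_pending = 0;

	printf("old=%#x\n", fetch_or(&eoi_pending, 0x02));	/* old=0   */
	printf("old=%#x\n", fetch_or(&eoi_pending, 0x02));	/* old=0x2 */
	return 0;
}

The fetch_andnot helper is the identical loop with old & ~bits as the replacement value.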
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 2008c6a02b8a..93f7659e7595 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -76,12 +76,28 @@ int xenvif_schedulable(struct xenvif *vif)
!vif->disabled;
}
+static bool xenvif_handle_tx_interrupt(struct xenvif_queue *queue)
+{
+ bool rc;
+
+ rc = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
+ if (rc)
+ napi_schedule(&queue->napi);
+ return rc;
+}
+
static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
struct xenvif_queue *queue = dev_id;
+ int old;
- if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))
- napi_schedule(&queue->napi);
+ old = xenvif_atomic_fetch_or(NETBK_TX_EOI, &queue->eoi_pending);
+ WARN(old & NETBK_TX_EOI, "Interrupt while EOI pending\n");
+
+ if (!xenvif_handle_tx_interrupt(queue)) {
+ atomic_andnot(NETBK_TX_EOI, &queue->eoi_pending);
+ xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
+ }
return IRQ_HANDLED;
}
@@ -115,19 +131,48 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
return work_done;
}
+static bool xenvif_handle_rx_interrupt(struct xenvif_queue *queue)
+{
+ bool rc;
+
+ rc = xenvif_have_rx_work(queue, false);
+ if (rc)
+ xenvif_kick_thread(queue);
+ return rc;
+}
+
static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
struct xenvif_queue *queue = dev_id;
+ int old;
- xenvif_kick_thread(queue);
+ old = xenvif_atomic_fetch_or(NETBK_RX_EOI, &queue->eoi_pending);
+ WARN(old & NETBK_RX_EOI, "Interrupt while EOI pending\n");
+
+ if (!xenvif_handle_rx_interrupt(queue)) {
+ atomic_andnot(NETBK_RX_EOI, &queue->eoi_pending);
+ xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
+ }
return IRQ_HANDLED;
}
irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
- xenvif_tx_interrupt(irq, dev_id);
- xenvif_rx_interrupt(irq, dev_id);
+ struct xenvif_queue *queue = dev_id;
+ int old;
+ bool has_rx, has_tx;
+
+ old = xenvif_atomic_fetch_or(NETBK_COMMON_EOI, &queue->eoi_pending);
+ WARN(old, "Interrupt while EOI pending\n");
+
+ has_tx = xenvif_handle_tx_interrupt(queue);
+ has_rx = xenvif_handle_rx_interrupt(queue);
+
+ if (!has_rx && !has_tx) {
+ atomic_andnot(NETBK_COMMON_EOI, &queue->eoi_pending);
+ xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
+ }
return IRQ_HANDLED;
}
@@ -555,7 +600,7 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
if (tx_evtchn == rx_evtchn) {
/* feature-split-event-channels == 0 */
- err = bind_interdomain_evtchn_to_irqhandler(
+ err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
queue->name, queue);
if (err < 0)
@@ -566,7 +611,7 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
/* feature-split-event-channels == 1 */
snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
"%s-tx", queue->name);
- err = bind_interdomain_evtchn_to_irqhandler(
+ err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
queue->tx_irq_name, queue);
if (err < 0)
@@ -576,7 +621,7 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
"%s-rx", queue->name);
- err = bind_interdomain_evtchn_to_irqhandler(
+ err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
queue->rx_irq_name, queue);
if (err < 0)
@@ -619,6 +664,7 @@ err_tx_unbind:
queue->tx_irq = 0;
err_unmap:
xenvif_unmap_frontend_rings(queue);
+ netif_napi_del(&queue->napi);
err:
return err;
}
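The interface.c changes above set up the deferred-EOI contract for the lateeoi irq bindings: each handler records that an EOI is owed, EOIs immediately with XEN_EOI_FLAG_SPURIOUS when it finds no work, and otherwise leaves the EOI to the context that drains the work (NAPI for TX, the RX kthread for RX), which the netback.c hunks below take care of. A rough stand-alone sketch of that handshake, with assumed helper names and userspace stubs in place of xen_irq_lateeoi() and the ring macros (illustrative only, not driver code):

/* Illustrative only: userspace simulation of the deferred-EOI handshake. */
#include <stdbool.h>
#include <stdio.h>

#define NETBK_TX_EOI		0x02
#define NETBK_COMMON_EOI	0x04
#define EOI_FLAG_SPURIOUS	1	/* stands in for XEN_EOI_FLAG_SPURIOUS */

static int eoi_pending;		/* stands in for queue->eoi_pending */
static bool ring_has_work;	/* stands in for RING_HAS_UNCONSUMED_REQUESTS() */

static void lateeoi(unsigned int irq, unsigned int flags)
{
	printf("EOI irq %u, flags %u\n", irq, flags);	/* stub for xen_irq_lateeoi() */
}

/* Interrupt side: note that an EOI is owed, EOI at once only if spurious. */
static void tx_interrupt(unsigned int irq)
{
	__atomic_fetch_or(&eoi_pending, NETBK_TX_EOI, __ATOMIC_SEQ_CST);

	if (!ring_has_work) {
		__atomic_fetch_and(&eoi_pending, ~NETBK_TX_EOI, __ATOMIC_SEQ_CST);
		lateeoi(irq, EOI_FLAG_SPURIOUS);
	}
	/* otherwise the processing context issues the EOI once it is done */
}

/* Processing side: after draining the ring, pay back any pending EOI. */
static void tx_work_done(unsigned int irq)
{
	int old = __atomic_fetch_and(&eoi_pending,
				     ~(NETBK_TX_EOI | NETBK_COMMON_EOI),
				     __ATOMIC_SEQ_CST);

	if (old & (NETBK_TX_EOI | NETBK_COMMON_EOI))
		lateeoi(irq, 0);
}

int main(void)
{
	ring_has_work = false;
	tx_interrupt(7);	/* no work: immediate EOI, flagged spurious   */

	ring_has_work = true;
	tx_interrupt(7);	/* work found: EOI deferred ...               */
	tx_work_done(7);	/* ... and issued after the work is drained   */
	return 0;
}

The XEN_EOI_FLAG_SPURIOUS hint lets the event channel core treat interrupts that brought no work as spurious, while the deferred path ensures the EOI is only issued after the requests that raised the interrupt have actually been processed.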
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 65d37257e033..044478c9adad 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -189,11 +189,15 @@ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
spin_lock_irqsave(&queue->rx_queue.lock, flags);
- __skb_queue_tail(&queue->rx_queue, skb);
-
- queue->rx_queue_len += skb->len;
- if (queue->rx_queue_len > queue->rx_queue_max)
+ if (queue->rx_queue_len >= queue->rx_queue_max) {
netif_tx_stop_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));
+ kfree_skb(skb);
+ queue->vif->dev->stats.rx_dropped++;
+ } else {
+ __skb_queue_tail(&queue->rx_queue, skb);
+
+ queue->rx_queue_len += skb->len;
+ }
spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
}
@@ -243,6 +247,7 @@ static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
break;
xenvif_rx_dequeue(queue);
kfree_skb(skb);
+ queue->vif->dev->stats.rx_dropped++;
}
}
@@ -670,6 +675,10 @@ void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
if (more_to_do)
napi_schedule(&queue->napi);
+ else if (xenvif_atomic_fetch_andnot(NETBK_TX_EOI | NETBK_COMMON_EOI,
+ &queue->eoi_pending) &
+ (NETBK_TX_EOI | NETBK_COMMON_EOI))
+ xen_irq_lateeoi(queue->tx_irq, 0);
}
static void tx_add_credit(struct xenvif_queue *queue)
@@ -990,7 +999,7 @@ check_frags:
* the header's copy failed, and they are
* sharing a slot, send an error
*/
- if (i == 0 && sharedslot)
+ if (i == 0 && !first_shinfo && sharedslot)
xenvif_idx_release(queue, pending_idx,
XEN_NETIF_RSP_ERROR);
else
@@ -1793,7 +1802,15 @@ int xenvif_tx_action(struct xenvif_queue *queue, int budget)
NULL,
queue->pages_to_map,
nr_mops);
- BUG_ON(ret);
+ if (ret) {
+ unsigned int i;
+
+ netdev_err(queue->vif->dev, "Map fail: nr %u ret %d\n",
+ nr_mops, ret);
+ for (i = 0; i < nr_mops; ++i)
+ WARN_ON_ONCE(queue->tx_map_ops[i].status ==
+ GNTST_okay);
+ }
}
work_done = xenvif_tx_submit(queue);
@@ -2010,14 +2027,14 @@ static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
return queue->stalled && prod - cons >= 1;
}
-static bool xenvif_have_rx_work(struct xenvif_queue *queue)
+bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread)
{
return (!skb_queue_empty(&queue->rx_queue)
&& xenvif_rx_ring_slots_available(queue))
|| (queue->vif->stall_timeout &&
(xenvif_rx_queue_stalled(queue)
|| xenvif_rx_queue_ready(queue)))
- || kthread_should_stop()
+ || (test_kthread && kthread_should_stop())
|| queue->vif->disabled;
}
@@ -2048,15 +2065,20 @@ static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
{
DEFINE_WAIT(wait);
- if (xenvif_have_rx_work(queue))
+ if (xenvif_have_rx_work(queue, true))
return;
for (;;) {
long ret;
prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
- if (xenvif_have_rx_work(queue))
+ if (xenvif_have_rx_work(queue, true))
break;
+ if (xenvif_atomic_fetch_andnot(NETBK_RX_EOI | NETBK_COMMON_EOI,
+ &queue->eoi_pending) &
+ (NETBK_RX_EOI | NETBK_COMMON_EOI))
+ xen_irq_lateeoi(queue->rx_irq, 0);
+
ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
if (!ret)
break;
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 56ebd8267386..683fd8560f2b 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -697,12 +697,14 @@ static int xen_register_watchers(struct xenbus_device *dev, struct xenvif *vif)
return -ENOMEM;
snprintf(node, maxlen, "%s/rate", dev->nodename);
vif->credit_watch.node = node;
+ vif->credit_watch.will_handle = NULL;
vif->credit_watch.callback = xen_net_rate_changed;
err = register_xenbus_watch(&vif->credit_watch);
if (err) {
pr_err("Failed to set watcher %s\n", vif->credit_watch.node);
kfree(node);
vif->credit_watch.node = NULL;
+ vif->credit_watch.will_handle = NULL;
vif->credit_watch.callback = NULL;
}
return err;
@@ -847,11 +849,15 @@ static void connect(struct backend_info *be)
xenvif_carrier_on(be->vif);
unregister_hotplug_status_watch(be);
- err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
- hotplug_status_changed,
- "%s/%s", dev->nodename, "hotplug-status");
- if (!err)
+ if (xenbus_exists(XBT_NIL, dev->nodename, "hotplug-status")) {
+ err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
+ NULL, hotplug_status_changed,
+ "%s/%s", dev->nodename,
+ "hotplug-status");
+ if (err)
+ goto err;
be->have_hotplug_status_watch = 1;
+ }
netif_tx_wake_all_queues(be->vif->dev);