Diffstat (limited to 'net/core')
-rw-r--r--  net/core/dev.c            144
-rw-r--r--  net/core/net_namespace.c   32
-rw-r--r--  net/core/pktgen.c          27
-rw-r--r--  net/core/scm.c             24
-rw-r--r--  net/core/skbuff.c          12
5 files changed, 155 insertions, 84 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 868ec0ba8b77..9174c77d3112 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -924,10 +924,10 @@ int dev_change_name(struct net_device *dev, const char *newname)
strlcpy(dev->name, newname, IFNAMSIZ);
rollback:
- err = device_rename(&dev->dev, dev->name);
- if (err) {
+ ret = device_rename(&dev->dev, dev->name);
+ if (ret) {
memcpy(dev->name, oldname, IFNAMSIZ);
- return err;
+ return ret;
}
write_lock_bh(&dev_base_lock);
@@ -2218,6 +2218,9 @@ int netif_receive_skb(struct sk_buff *skb)
int ret = NET_RX_DROP;
__be16 type;
+ if (skb->vlan_tci && vlan_hwaccel_do_receive(skb))
+ return NET_RX_SUCCESS;
+
/* if we've gotten here through NAPI, check netpoll */
if (netpoll_receive_skb(skb))
return NET_RX_DROP;
@@ -3947,6 +3950,46 @@ static void netdev_init_queue_locks(struct net_device *dev)
__netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
}
+unsigned long netdev_fix_features(unsigned long features, const char *name)
+{
+ /* Fix illegal SG+CSUM combinations. */
+ if ((features & NETIF_F_SG) &&
+ !(features & NETIF_F_ALL_CSUM)) {
+ if (name)
+ printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
+ "checksum feature.\n", name);
+ features &= ~NETIF_F_SG;
+ }
+
+ /* TSO requires that SG is present as well. */
+ if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
+ if (name)
+ printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
+ "SG feature.\n", name);
+ features &= ~NETIF_F_TSO;
+ }
+
+ if (features & NETIF_F_UFO) {
+ if (!(features & NETIF_F_GEN_CSUM)) {
+ if (name)
+ printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
+ "since no NETIF_F_HW_CSUM feature.\n",
+ name);
+ features &= ~NETIF_F_UFO;
+ }
+
+ if (!(features & NETIF_F_SG)) {
+ if (name)
+ printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
+ "since no NETIF_F_SG feature.\n", name);
+ features &= ~NETIF_F_UFO;
+ }
+ }
+
+ return features;
+}
+EXPORT_SYMBOL(netdev_fix_features);
+
/**
* register_netdevice - register a network device
* @dev: device to register
@@ -4032,36 +4075,7 @@ int register_netdevice(struct net_device *dev)
dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
}
-
- /* Fix illegal SG+CSUM combinations. */
- if ((dev->features & NETIF_F_SG) &&
- !(dev->features & NETIF_F_ALL_CSUM)) {
- printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no checksum feature.\n",
- dev->name);
- dev->features &= ~NETIF_F_SG;
- }
-
- /* TSO requires that SG is present as well. */
- if ((dev->features & NETIF_F_TSO) &&
- !(dev->features & NETIF_F_SG)) {
- printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no SG feature.\n",
- dev->name);
- dev->features &= ~NETIF_F_TSO;
- }
- if (dev->features & NETIF_F_UFO) {
- if (!(dev->features & NETIF_F_HW_CSUM)) {
- printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
- "NETIF_F_HW_CSUM feature.\n",
- dev->name);
- dev->features &= ~NETIF_F_UFO;
- }
- if (!(dev->features & NETIF_F_SG)) {
- printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
- "NETIF_F_SG feature.\n",
- dev->name);
- dev->features &= ~NETIF_F_UFO;
- }
- }
+ dev->features = netdev_fix_features(dev->features, dev->name);
/* Enable software GSO if SG is supported. */
if (dev->features & NETIF_F_SG)
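The new netdev_fix_features() helper factors the SG/TSO/UFO dependency checks out of register_netdevice() so they can be reused wherever a feature set is assembled; register_netdevice() itself now just calls it, as the hunk above shows. A hedged sketch of a hypothetical driver sanitizing the features it derived from hardware capabilities (the function and hw_csum flag below are illustrative, not part of this patch):

/* Hypothetical probe-time fixup: build a feature set from hardware
 * capabilities, then let netdev_fix_features() drop any combination the
 * stack cannot honour (SG without checksum, TSO without SG, UFO without
 * generic checksum or SG).
 */
static void example_setup_features(struct net_device *dev, bool hw_csum)
{
        dev->features = NETIF_F_SG | NETIF_F_TSO;
        if (hw_csum)
                dev->features |= NETIF_F_HW_CSUM;

        /* Without hw_csum, SG is stripped here and TSO along with it. */
        dev->features = netdev_fix_features(dev->features, dev->name);
}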
@@ -4700,49 +4714,45 @@ static int __init netdev_dma_register(void) { return -ENODEV; }
#endif /* CONFIG_NET_DMA */
/**
- * netdev_compute_feature - compute conjunction of two feature sets
- * @all: first feature set
- * @one: second feature set
+ * netdev_increment_features - increment feature set by one
+ * @all: current feature set
+ * @one: new feature set
+ * @mask: mask feature set
*
* Computes a new feature set after adding a device with feature set
- * @one to the master device with current feature set @all. Returns
- * the new feature set.
+ * @one to the master device with current feature set @all. Will not
+ * enable anything that is off in @mask. Returns the new feature set.
*/
-int netdev_compute_features(unsigned long all, unsigned long one)
-{
- /* if device needs checksumming, downgrade to hw checksumming */
- if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
- all ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM;
-
- /* if device can't do all checksum, downgrade to ipv4/ipv6 */
- if (all & NETIF_F_HW_CSUM && !(one & NETIF_F_HW_CSUM))
- all ^= NETIF_F_HW_CSUM
- | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
-
- if (one & NETIF_F_GSO)
- one |= NETIF_F_GSO_SOFTWARE;
- one |= NETIF_F_GSO;
-
- /*
- * If even one device supports a GSO protocol with software fallback,
- * enable it for all.
- */
- all |= one & NETIF_F_GSO_SOFTWARE;
+unsigned long netdev_increment_features(unsigned long all, unsigned long one,
+ unsigned long mask)
+{
+ /* If device needs checksumming, downgrade to it. */
+ if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
+ all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
+ else if (mask & NETIF_F_ALL_CSUM) {
+ /* If one device supports v4/v6 checksumming, set for all. */
+ if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
+ !(all & NETIF_F_GEN_CSUM)) {
+ all &= ~NETIF_F_ALL_CSUM;
+ all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+ }
- /* If even one device supports robust GSO, enable it for all. */
- if (one & NETIF_F_GSO_ROBUST)
- all |= NETIF_F_GSO_ROBUST;
+ /* If one device supports hw checksumming, set for all. */
+ if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
+ all &= ~NETIF_F_ALL_CSUM;
+ all |= NETIF_F_HW_CSUM;
+ }
+ }
- all &= one | NETIF_F_LLTX;
+ one |= NETIF_F_ALL_CSUM;
- if (!(all & NETIF_F_ALL_CSUM))
- all &= ~NETIF_F_SG;
- if (!(all & NETIF_F_SG))
- all &= ~NETIF_F_GSO_MASK;
+ one |= all & NETIF_F_ONE_FOR_ALL;
+ all &= one | NETIF_F_LLTX | NETIF_F_GSO;
+ all |= one & mask & NETIF_F_ONE_FOR_ALL;
return all;
}
-EXPORT_SYMBOL(netdev_compute_features);
+EXPORT_SYMBOL(netdev_increment_features);
static struct hlist_head *netdev_create_hash(void)
{
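netdev_compute_features() is replaced by netdev_increment_features(), which folds one device's feature set into an aggregate under a mask and propagates NETIF_F_ONE_FOR_ALL bits whenever any contributor has them. A hedged sketch of how a bridge- or bonding-style master might recompute its features; the slave array and function name are illustrative:

/* Illustrative recomputation of an aggregate feature set: start from the
 * mask with the "one for all" bits cleared so only the slaves can
 * reintroduce them, fold each slave in, then sanitize the result.
 */
static void example_recompute_features(struct net_device *master,
                                       struct net_device **slaves, int n,
                                       unsigned long mask)
{
        unsigned long features = mask & ~NETIF_F_ONE_FOR_ALL;
        int i;

        for (i = 0; i < n; i++)
                features = netdev_increment_features(features,
                                                     slaves[i]->features,
                                                     mask);

        master->features = netdev_fix_features(features, master->name);
}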
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index f1d07b5c1e17..1895a4ca9c4f 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -325,6 +325,38 @@ void unregister_pernet_subsys(struct pernet_operations *module)
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);
+int register_pernet_gen_subsys(int *id, struct pernet_operations *ops)
+{
+ int rv;
+
+ mutex_lock(&net_mutex);
+again:
+ rv = ida_get_new_above(&net_generic_ids, 1, id);
+ if (rv < 0) {
+ if (rv == -EAGAIN) {
+ ida_pre_get(&net_generic_ids, GFP_KERNEL);
+ goto again;
+ }
+ goto out;
+ }
+ rv = register_pernet_operations(first_device, ops);
+ if (rv < 0)
+ ida_remove(&net_generic_ids, *id);
+ mutex_unlock(&net_mutex);
+out:
+ return rv;
+}
+EXPORT_SYMBOL_GPL(register_pernet_gen_subsys);
+
+void unregister_pernet_gen_subsys(int id, struct pernet_operations *ops)
+{
+ mutex_lock(&net_mutex);
+ unregister_pernet_operations(ops);
+ ida_remove(&net_generic_ids, id);
+ mutex_unlock(&net_mutex);
+}
+EXPORT_SYMBOL_GPL(unregister_pernet_gen_subsys);
+
/**
* register_pernet_device - register a network namespace device
* @ops: pernet operations structure for the subsystem
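register_pernet_gen_subsys() pairs a dynamically allocated net-generic id with the usual pernet registration, so a subsystem no longer needs a compile-time slot. A hedged sketch of a module using it; the my_net_* names are hypothetical, and the per-namespace pointer is assumed to be installed with net_assign_generic(), as generic-pernet users of this era did:

/* Hypothetical subsystem keyed by a dynamically assigned net-generic id. */
static int my_net_id;

struct my_net_state {
        int packets_seen;
};

static int __net_init my_net_init(struct net *net)
{
        struct my_net_state *st = kzalloc(sizeof(*st), GFP_KERNEL);

        if (!st)
                return -ENOMEM;
        return net_assign_generic(net, my_net_id, st);
}

static void __net_exit my_net_exit(struct net *net)
{
        kfree(net_generic(net, my_net_id));
}

static struct pernet_operations my_net_ops = {
        .init = my_net_init,
        .exit = my_net_exit,
};

static int __init my_module_init(void)
{
        return register_pernet_gen_subsys(&my_net_id, &my_net_ops);
}

static void __exit my_module_exit(void)
{
        unregister_pernet_gen_subsys(my_net_id, &my_net_ops);
}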
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 99f656d35b4f..a47f5bad110d 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -1973,28 +1973,27 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
/* make sure that we don't pick a non-existing transmit queue */
ntxq = pkt_dev->odev->real_num_tx_queues;
- if (ntxq <= num_online_cpus() && (pkt_dev->flags & F_QUEUE_MAP_CPU)) {
+ if (ntxq > num_online_cpus() && (pkt_dev->flags & F_QUEUE_MAP_CPU)) {
printk(KERN_WARNING "pktgen: WARNING: QUEUE_MAP_CPU "
- "disabled because CPU count (%d) exceeds number ",
- num_online_cpus());
- printk(KERN_WARNING "pktgen: WARNING: of tx queues "
- "(%d) on %s \n", ntxq, pkt_dev->odev->name);
+ "disabled because CPU count (%d) exceeds number "
+ "of tx queues (%d) on %s\n", num_online_cpus(), ntxq,
+ pkt_dev->odev->name);
pkt_dev->flags &= ~F_QUEUE_MAP_CPU;
}
if (ntxq <= pkt_dev->queue_map_min) {
printk(KERN_WARNING "pktgen: WARNING: Requested "
- "queue_map_min (%d) exceeds number of tx\n",
- pkt_dev->queue_map_min);
- printk(KERN_WARNING "pktgen: WARNING: queues (%d) on "
- "%s, resetting\n", ntxq, pkt_dev->odev->name);
+ "queue_map_min (zero-based) (%d) exceeds valid range "
+ "[0 - %d] for (%d) queues on %s, resetting\n",
+ pkt_dev->queue_map_min, (ntxq ?: 1)- 1, ntxq,
+ pkt_dev->odev->name);
pkt_dev->queue_map_min = ntxq - 1;
}
- if (ntxq <= pkt_dev->queue_map_max) {
+ if (pkt_dev->queue_map_max >= ntxq) {
printk(KERN_WARNING "pktgen: WARNING: Requested "
- "queue_map_max (%d) exceeds number of tx\n",
- pkt_dev->queue_map_max);
- printk(KERN_WARNING "pktgen: WARNING: queues (%d) on "
- "%s, resetting\n", ntxq, pkt_dev->odev->name);
+ "queue_map_max (zero-based) (%d) exceeds valid range "
+ "[0 - %d] for (%d) queues on %s, resetting\n",
+ pkt_dev->queue_map_max, (ntxq ?: 1)- 1, ntxq,
+ pkt_dev->odev->name);
pkt_dev->queue_map_max = ntxq - 1;
}
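Besides inverting the broken QUEUE_MAP_CPU test (warn when there are more online CPUs than tx queues, not fewer) and merging each split warning into a single printk, the new messages spell out the valid zero-based range. The (ntxq ?: 1) - 1 expression uses the GCC conditional-with-omitted-operand extension so a device that somehow reports zero queues still prints a sane range, as this illustrative helper shows:

/* "a ?: b" is GCC shorthand for "a ? a : b": the highest valid zero-based
 * queue index, printing "[0 - 0]" rather than "[0 - -1]" when ntxq is 0.
 */
static inline int highest_queue_index(int ntxq)
{
        return (ntxq ?: 1) - 1;
}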
diff --git a/net/core/scm.c b/net/core/scm.c
index 10f5c65f6a47..ab242cc1acca 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -75,6 +75,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
if (!fpl)
return -ENOMEM;
*fplp = fpl;
+ INIT_LIST_HEAD(&fpl->list);
fpl->count = 0;
}
fpp = &fpl->fp[fpl->count];
@@ -106,9 +107,25 @@ void __scm_destroy(struct scm_cookie *scm)
if (fpl) {
scm->fp = NULL;
- for (i=fpl->count-1; i>=0; i--)
- fput(fpl->fp[i]);
- kfree(fpl);
+ if (current->scm_work_list) {
+ list_add_tail(&fpl->list, current->scm_work_list);
+ } else {
+ LIST_HEAD(work_list);
+
+ current->scm_work_list = &work_list;
+
+ list_add(&fpl->list, &work_list);
+ while (!list_empty(&work_list)) {
+ fpl = list_first_entry(&work_list, struct scm_fp_list, list);
+
+ list_del(&fpl->list);
+ for (i=fpl->count-1; i>=0; i--)
+ fput(fpl->fp[i]);
+ kfree(fpl);
+ }
+
+ current->scm_work_list = NULL;
+ }
}
}
@@ -284,6 +301,7 @@ struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl)
new_fpl = kmalloc(sizeof(*fpl), GFP_KERNEL);
if (new_fpl) {
+ INIT_LIST_HEAD(&new_fpl->list);
for (i=fpl->count-1; i>=0; i--)
get_file(fpl->fp[i]);
memcpy(new_fpl, fpl, sizeof(*fpl));
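The __scm_destroy() rework avoids unbounded recursion: fput() on an AF_UNIX socket can itself drop an SCM_RIGHTS file list and re-enter __scm_destroy(), so nested lists are now parked on current->scm_work_list and drained iteratively by the outermost caller. A minimal, self-contained sketch of the same pattern in plain C (the node type and thread-local pointer are illustrative, not kernel code):

#include <stdlib.h>

struct node {
        struct node *next;
        struct node *child;     /* destroying this node may expose another */
};

/* Points at the outermost caller's pending list while a destroy is in
 * progress, NULL otherwise; mirrors current->scm_work_list.
 */
static __thread struct node **work_list;

static void destroy(struct node *n)
{
        if (work_list) {                /* nested call: just enqueue */
                n->next = *work_list;
                *work_list = n;
                return;
        }

        /* Outermost call: publish a local list head, drain iteratively. */
        struct node *pending = NULL;

        work_list = &pending;
        n->next = NULL;
        pending = n;

        while (pending) {
                struct node *cur = pending;

                pending = cur->next;
                if (cur->child)
                        destroy(cur->child);    /* re-enters, only enqueues */
                free(cur);
        }
        work_list = NULL;
}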
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 4e22e3a35359..ebb6b94f8af2 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -449,6 +449,18 @@ void kfree_skb(struct sk_buff *skb)
__kfree_skb(skb);
}
+/**
+ * skb_recycle_check - check if skb can be reused for receive
+ * @skb: buffer
+ * @skb_size: minimum receive buffer size
+ *
+ * Checks that the skb passed in is not shared or cloned, and
+ * that it is linear and its head portion at least as large as
+ * skb_size so that it can be recycled as a receive buffer.
+ * If these conditions are met, this function does any necessary
+ * reference count dropping and cleans up the skbuff as if it
+ * just came from __alloc_skb().
+ */
int skb_recycle_check(struct sk_buff *skb, int skb_size)
{
struct skb_shared_info *shinfo;
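The new skb_recycle_check() kerneldoc above describes when a buffer may be reused; a hedged sketch of a driver refill path using it (the function and size names are hypothetical):

/* Try to recycle a consumed skb back into the receive ring; fall back to
 * a fresh allocation if it is shared, cloned, nonlinear or too small.
 */
static struct sk_buff *example_rx_refill(struct net_device *dev,
                                         struct sk_buff *done_skb,
                                         unsigned int rx_buf_size)
{
        if (done_skb && skb_recycle_check(done_skb, rx_buf_size))
                return done_skb;        /* already reset as if freshly allocated */

        return netdev_alloc_skb(dev, rx_buf_size);
}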