author    Greg Kroah-Hartman <gregkh@google.com>    2019-02-08 11:50:05 +0100
committer Greg Kroah-Hartman <gregkh@google.com>    2019-02-08 11:50:05 +0100
commit    71a27400f626ab7b9fa1b6d686dba7712426044f (patch)
tree      4f27400b07a29ee42e6d4edea73b34b3b091fdbd    /net/core/skbuff.c
parent    a95e76973d5902f434d6b27345afdd731558df2d (diff)
parent    dc5e8c99975bb1a1561de884a83b3c19e4ac7ada (diff)
Merge 4.4.174 into android-4.4-p
Changes in 4.4.174
    inet: frags: change inet_frags_init_net() return value
    inet: frags: add a pointer to struct netns_frags
    inet: frags: refactor ipfrag_init()
    inet: frags: refactor ipv6_frag_init()
    inet: frags: refactor lowpan_net_frag_init()
    rhashtable: add rhashtable_lookup_get_insert_key()
    rhashtable: Add rhashtable_lookup()
    rhashtable: add schedule points
    inet: frags: use rhashtables for reassembly units
    net: ieee802154: 6lowpan: fix frag reassembly
    ipfrag: really prevent allocation on netns exit
    inet: frags: remove some helpers
    inet: frags: get rid of inet_frag_evicting()
    inet: frags: remove inet_frag_maybe_warn_overflow()
    inet: frags: break the 2GB limit for frags storage
    inet: frags: do not clone skb in ip_expire()
    ipv6: frags: rewrite ip6_expire_frag_queue()
    rhashtable: reorganize struct rhashtable layout
    inet: frags: reorganize struct netns_frags
    inet: frags: get rid of ipfrag_skb_cb/FRAG_CB
    inet: frags: fix ip6frag_low_thresh boundary
    ip: discard IPv4 datagrams with overlapping segments.
    net: modify skb_rbtree_purge to return the truesize of all purged skbs.
    ipv6: defrag: drop non-last frags smaller than min mtu
    net: pskb_trim_rcsum() and CHECKSUM_COMPLETE are friends
    ip: use rb trees for IP frag queue.
    ip: add helpers to process in-order fragments faster.
    ip: process in-order fragments efficiently
    ip: frags: fix crash in ip_do_fragment()
    ipv4: frags: precedence bug in ip_expire()
    inet: frags: better deal with smp races
    net: fix pskb_trim_rcsum_slow() with odd trim offset
    net: ipv4: do not handle duplicate fragments as overlapping
    rcu: Force boolean subscript for expedited stall warnings
    Linux 4.4.174

Change-Id: Id14aa3c02655a21b05d0c39497ac3dd33c115781
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
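Several of the commits above ("ip: discard IPv4 datagrams with overlapping segments.", "ip: use rb trees for IP frag queue.") revolve around keeping queued fragments in a structure sorted by offset, so an incoming fragment only needs to be compared against its immediate neighbours to detect an overlap. The toy user-space sketch below illustrates that idea only; the names (struct frag, frag_overlaps_neighbours) are made up for illustration and none of this is the kernel code in the diff.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for one queued fragment covering [offset, offset + len). */
struct frag {
	unsigned int offset;
	unsigned int len;
};

/* With fragments kept sorted by offset (the series uses an rbtree for
 * this), a new fragment only has to be checked against the closest
 * neighbour on each side.
 */
static bool frag_overlaps_neighbours(const struct frag *prev,
				     const struct frag *next,
				     const struct frag *new)
{
	if (prev && prev->offset + prev->len > new->offset)
		return true;	/* tail of prev runs into new */
	if (next && new->offset + new->len > next->offset)
		return true;	/* tail of new runs into next */
	return false;
}

int main(void)
{
	struct frag prev = { .offset = 0,    .len = 1200 };
	struct frag next = { .offset = 2400, .len = 1200 };
	struct frag ok   = { .offset = 1200, .len = 1200 };
	struct frag bad  = { .offset = 1000, .len = 1200 };

	printf("ok fragment overlaps:  %d\n",
	       frag_overlaps_neighbours(&prev, &next, &ok));	/* prints 0 */
	printf("bad fragment overlaps: %d\n",
	       frag_overlaps_neighbours(&prev, &next, &bad));	/* prints 1 */
	return 0;
}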
Diffstat (limited to 'net/core/skbuff.c')
-rw-r--r--    net/core/skbuff.c    21
1 file changed, 20 insertions(+), 1 deletion(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 8a57bbaf7452..fea7c24e99d0 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1502,6 +1502,21 @@ done:
}
EXPORT_SYMBOL(___pskb_trim);
+/* Note : use pskb_trim_rcsum() instead of calling this directly
+ */
+int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
+{
+ if (skb->ip_summed == CHECKSUM_COMPLETE) {
+ int delta = skb->len - len;
+
+ skb->csum = csum_block_sub(skb->csum,
+ skb_checksum(skb, len, delta, 0),
+ len);
+ }
+ return __pskb_trim(skb, len);
+}
+EXPORT_SYMBOL(pskb_trim_rcsum_slow);
+
/**
* __pskb_pull_tail - advance tail of skb header
* @skb: buffer to reallocate
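The hunk above adds the slow path a trim must take when the device supplied a full packet checksum (CHECKSUM_COMPLETE): the checksum of the bytes being cut off is subtracted from skb->csum before __pskb_trim() shrinks the buffer. The added comment says callers should go through pskb_trim_rcsum() instead. As a rough sketch (not part of this diff, paraphrased from the "pskb_trim_rcsum() and CHECKSUM_COMPLETE are friends" change in the series), that inline wrapper in include/linux/skbuff.h only drops into the slow path when the skb actually shrinks:

/* Rough sketch of the fast-path wrapper in include/linux/skbuff.h after
 * this series; not part of the hunk above.  Trimming to a length that is
 * not smaller than the current one is a no-op, so only a real shrink pays
 * for the checksum adjustment in pskb_trim_rcsum_slow().
 */
static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (likely(len >= skb->len))
		return 0;
	return pskb_trim_rcsum_slow(skb, len);
}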
@@ -2380,23 +2395,27 @@ EXPORT_SYMBOL(skb_queue_purge);
/**
* skb_rbtree_purge - empty a skb rbtree
* @root: root of the rbtree to empty
+ * Return value: the sum of truesizes of all purged skbs.
*
* Delete all buffers on an &sk_buff rbtree. Each buffer is removed from
* the list and one reference dropped. This function does not take
* any lock. Synchronization should be handled by the caller (e.g., TCP
* out-of-order queue is protected by the socket lock).
*/
-void skb_rbtree_purge(struct rb_root *root)
+unsigned int skb_rbtree_purge(struct rb_root *root)
{
struct rb_node *p = rb_first(root);
+ unsigned int sum = 0;
while (p) {
struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
p = rb_next(p);
rb_erase(&skb->rbnode, root);
+ sum += skb->truesize;
kfree_skb(skb);
}
+ return sum;
}
/**
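The second hunk makes skb_rbtree_purge() report the total truesize of everything it freed, so a caller that charged those skbs against a memory limit can uncharge them in one step. A hypothetical caller might look like the sketch below; frag_queue_flush and the atomic_t counter are illustrative names, not the defrag code this series actually changes.

/* Hypothetical caller: drop every skb still queued in the tree and give
 * the freed truesize back to whatever accounting charged it.  Names are
 * illustrative only.
 */
static void frag_queue_flush(struct rb_root *root, atomic_t *mem_used)
{
	unsigned int freed = skb_rbtree_purge(root);

	atomic_sub(freed, mem_used);	/* uncharge exactly what was purged */
}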