path: root/include/linux
author	Eric Dumazet <edumazet@google.com>	2018-10-10 12:30:11 -0700
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2019-02-08 11:25:32 +0100
commit	7fab8b2f0c994decf580027286b97533c8b7f6fd (patch)
tree	73ddbf378159434a9e9dc34c6eaebc848c0532c3 /include/linux
parent	5f2d68b6b5a439c3223d8fa6ba20736f91fc58d8 (diff)
net: pskb_trim_rcsum() and CHECKSUM_COMPLETE are friends
commit 88078d98d1bb085d72af8437707279e203524fa5 upstream.

After working on IP defragmentation lately, I found that some large
packets defeat CHECKSUM_COMPLETE optimization because of NIC adding
zero paddings on the last (small) fragment.

While removing the padding with pskb_trim_rcsum(), we set
skb->ip_summed to CHECKSUM_NONE, forcing a full csum validation, even
if all prior fragments had CHECKSUM_COMPLETE set.

We can instead compute the checksum of the part we are trimming,
usually smaller than the part we keep.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
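Note that this diff is limited to include/linux, so only the declaration of
pskb_trim_rcsum_slow() appears below; its body lives in net/core/skbuff.c.
As a sketch of the technique the commit message describes (subtract the
checksum of the trimmed tail instead of discarding CHECKSUM_COMPLETE), the
out-of-line helper can be expected to look roughly like this; reconstructed
here for illustration, not taken from the hunks shown on this page:

/*
 * Sketch of the out-of-line helper this commit adds to
 * net/core/skbuff.c (outside this diffstat) -- reconstructed from the
 * commit message, shown for illustration only.
 */
int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		int delta = skb->len - len;

		/*
		 * The Internet checksum is a ones'-complement sum, so
		 * the sum over the tail being trimmed can simply be
		 * subtracted from the running total rather than
		 * invalidating the whole thing.
		 */
		skb->csum = csum_sub(skb->csum,
				     skb_checksum(skb, len, delta, 0));
	}
	return __pskb_trim(skb, len);
}
EXPORT_SYMBOL(pskb_trim_rcsum_slow);

Here skb_checksum(skb, len, delta, 0) sums exactly the delta padding bytes
starting at offset len, and csum_sub() folds that sum back out of skb->csum,
so the remaining data keeps a valid CHECKSUM_COMPLETE value.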
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/skbuff.h	5
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index aa4753ae1ff2..1e734e221ea3 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2796,6 +2796,7 @@ static inline unsigned char *skb_push_rcsum(struct sk_buff *skb,
 	return skb->data;
 }
 
+int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);
 /**
  *	pskb_trim_rcsum - trim received skb and update checksum
  *	@skb: buffer to trim
@@ -2810,9 +2811,7 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
 {
 	if (likely(len >= skb->len))
 		return 0;
-	if (skb->ip_summed == CHECKSUM_COMPLETE)
-		skb->ip_summed = CHECKSUM_NONE;
-	return __pskb_trim(skb, len);
+	return pskb_trim_rcsum_slow(skb, len);
 }
 
 #define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
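For context on where the fast path above matters: a typical caller trims a
received frame down to the length the IP header declares, which is exactly
where a NIC's zero padding on a short final fragment gets removed. A minimal,
hypothetical caller sketch follows (modeled loosely on the IPv4 receive path;
the function name and error handling are illustrative, not part of this patch):

#include <linux/ip.h>
#include <linux/skbuff.h>

/*
 * Hypothetical helper: trim a received IPv4 frame to the length its
 * header declares, discarding any link-layer zero padding.
 * Illustrative only; not part of this patch.
 */
static int trim_to_ip_len(struct sk_buff *skb)
{
	unsigned int len = ntohs(ip_hdr(skb)->tot_len);

	if (skb->len < len)
		return -EINVAL;		/* truncated frame */

	/*
	 * Before this patch, trimming here downgraded CHECKSUM_COMPLETE
	 * to CHECKSUM_NONE, forcing a full software checksum later; now
	 * only the padding's contribution is removed from skb->csum.
	 */
	return pskb_trim_rcsum(skb, len);
}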