| | | |
|---|---|---|
| author | Bruno Martins <bgcngm@gmail.com> | 2020-12-06 18:31:14 +0000 |
| committer | Michael Bestas <mkbestas@lineageos.org> | 2020-12-31 19:40:46 +0200 |
| commit | 141849eac5defb4bb6cf6e6f1381cb24ffcfdba5 (patch) | |
| tree | 991068d0b0e30be8ca5879ba6e2914b82eefd25b | /drivers/net/wireguard/compat |
| parent | 7d982ef6fe996f837d5c0c71feb2a3f3989deee7 (diff) | |
drivers: net: Modify WireGuard for backward compat
Change-Id: I1c8e130a514a7b0329f8df8099cc84f4cc8d5822
Diffstat (limited to 'drivers/net/wireguard/compat')
19 files changed, 4192 insertions, 0 deletions
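The diff below imports WireGuard's out-of-tree compat layer: headers and helpers that backfill kernel APIs the driver needs but that older kernels lack (dst_cache, siphash, udp_tunnel, crypto_memneq, and so on), with almost every hunk guarded by LINUX_VERSION_CODE checks. As a rough illustration of that guarding pattern only, and not part of the commit itself, here is a minimal sketch that compiles in userspace; the 4.14 target version and the stub fallback body are hypothetical, and in the real module the version macros come from <linux/version.h>.

```c
/* Minimal, self-contained sketch of the version-gating idiom used throughout
 * compat.h: pack (major, minor, patch) into one integer and compare it against
 * the kernel being built, supplying a fallback only when the symbol is missing.
 * KERNEL_VERSION/LINUX_VERSION_CODE are redefined locally so this compiles in
 * userspace; in the real module they come from <linux/version.h>.
 */
#include <stdio.h>

#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))

/* Pretend we are building against a 4.14 kernel (hypothetical example value). */
#define LINUX_VERSION_CODE KERNEL_VERSION(4, 14, 0)

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)
/* Old kernel: the symbol does not exist yet, so the compat layer provides one.
 * (The real fallback in compat.h registers a random-readiness callback; this
 * stub only illustrates how the #if selects it.)
 */
static int rng_is_initialized(void) { return 1; }
#endif

int main(void)
{
	printf("built against %d.%d.%d, rng_is_initialized() -> %d\n",
	       (LINUX_VERSION_CODE >> 16) & 0xff,
	       (LINUX_VERSION_CODE >> 8) & 0xff,
	       LINUX_VERSION_CODE & 0xff,
	       rng_is_initialized());
	return 0;
}
```

Running the sketch prints the packed version and shows the fallback being picked; on a tree at or above the gate version, the #if drops the stub and the kernel's own symbol is used instead.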
diff --git a/drivers/net/wireguard/compat/Makefile.include b/drivers/net/wireguard/compat/Makefile.include new file mode 100644 index 000000000000..513dba444a37 --- /dev/null +++ b/drivers/net/wireguard/compat/Makefile.include @@ -0,0 +1,102 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + +kbuild-dir := $(if $(filter /%,$(src)),$(src),$(srctree)/$(src)) + +ccflags-y += -include $(kbuild-dir)/compat/compat.h +asflags-y += -include $(kbuild-dir)/compat/compat-asm.h + +ifeq ($(wildcard $(srctree)/include/linux/ptr_ring.h),) +ccflags-y += -I$(kbuild-dir)/compat/ptr_ring/include +endif + +ifeq ($(wildcard $(srctree)/include/linux/siphash.h),) +ccflags-y += -I$(kbuild-dir)/compat/siphash/include +wireguard-y += compat/siphash/siphash.o +endif + +ifeq ($(wildcard $(srctree)/include/net/dst_cache.h),) +ccflags-y += -I$(kbuild-dir)/compat/dst_cache/include +wireguard-y += compat/dst_cache/dst_cache.o +endif + +ifeq ($(wildcard $(srctree)/arch/x86/include/asm/intel-family.h)$(CONFIG_X86),y) +ccflags-y += -I$(kbuild-dir)/compat/intel-family-x86/include +endif + +ifeq ($(wildcard $(srctree)/arch/x86/include/asm/fpu/api.h)$(CONFIG_X86),y) +ccflags-y += -I$(kbuild-dir)/compat/fpu-x86/include +endif + +ifeq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/simd.h)$(shell grep -s -F "generic-y += simd.h" "$(srctree)/arch/$(SRCARCH)/Makefile" "$(srctree)/arch/$(SRCARCH)/Makefile"),) +ccflags-y += -I$(kbuild-dir)/compat/simd-asm/include +endif + +ifeq ($(wildcard $(srctree)/include/linux/simd.h),) +ccflags-y += -I$(kbuild-dir)/compat/simd/include +endif + +ifeq ($(wildcard $(srctree)/include/net/udp_tunnel.h),) +ccflags-y += -I$(kbuild-dir)/compat/udp_tunnel/include +wireguard-y += compat/udp_tunnel/udp_tunnel.o +endif + +ifeq ($(shell grep -s -F "int crypto_memneq" "$(srctree)/include/crypto/algapi.h"),) +ccflags-y += -include $(kbuild-dir)/compat/memneq/include.h +wireguard-y += compat/memneq/memneq.o +endif + +ifeq ($(shell grep -s -F "addr_gen_mode" "$(srctree)/include/linux/ipv6.h"),) +ccflags-y += -DCOMPAT_CANNOT_USE_DEV_CNF +endif + +ifdef CONFIG_HZ +ifeq ($(wildcard $(CURDIR)/include/generated/timeconst.h),) +ccflags-y += $(shell bash -c '((a=$(CONFIG_HZ), b=1000000)); while ((b > 0)); do ((t=b, b=a%b, a=t)); done; echo "-DHZ_TO_USEC_NUM=$$((1000000/a)) -DHZ_TO_USEC_DEN=$$(($(CONFIG_HZ)/a))";') +endif +endif + +ifeq ($(wildcard $(srctree)/arch/arm/include/asm/neon.h)$(CONFIG_ARM),y) +ccflags-y += -I$(kbuild-dir)/compat/neon-arm/include +endif +ifeq ($(wildcard $(srctree)/arch/arm64/include/asm/neon.h)$(CONFIG_ARM64),y) +ccflags-y += -I$(kbuild-dir)/compat/neon-arm/include +endif + +ifeq ($(CONFIG_X86_64),y) + ifeq ($(ssse3_instr),) + ssse3_instr := $(call as-instr,pshufb %xmm0$(comma)%xmm0,-DCONFIG_AS_SSSE3=1) + ccflags-y += $(ssse3_instr) + asflags-y += $(ssse3_instr) + endif + ifeq ($(avx_instr),) + avx_instr := $(call as-instr,vxorps %ymm0$(comma)%ymm1$(comma)%ymm2,-DCONFIG_AS_AVX=1) + ccflags-y += $(avx_instr) + asflags-y += $(avx_instr) + endif + ifeq ($(avx2_instr),) + avx2_instr := $(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1) + ccflags-y += $(avx2_instr) + asflags-y += $(avx2_instr) + endif + ifeq ($(avx512_instr),) + avx512_instr := $(call as-instr,vpmovm2b %k1$(comma)%zmm5,-DCONFIG_AS_AVX512=1) + ccflags-y += $(avx512_instr) + asflags-y += $(avx512_instr) + endif + ifeq ($(bmi2_instr),) + bmi2_instr :=$(call as-instr,mulx %rax$(comma)%rax$(comma)%rax,-DCONFIG_AS_BMI2=1) + ccflags-y += 
$(bmi2_instr) + asflags-y += $(bmi2_instr) + endif + ifeq ($(adx_instr),) + adx_instr :=$(call as-instr,adcx %rax$(comma)%rax,-DCONFIG_AS_ADX=1) + ccflags-y += $(adx_instr) + asflags-y += $(adx_instr) + endif +endif + +ifneq ($(shell grep -s -F "\#define LINUX_PACKAGE_ID \" Debian " "$(CURDIR)/include/generated/package.h"),) +ccflags-y += -DISDEBIAN +endif diff --git a/drivers/net/wireguard/compat/checksum/checksum_partial_compat.h b/drivers/net/wireguard/compat/checksum/checksum_partial_compat.h new file mode 100644 index 000000000000..3dfe5397a94f --- /dev/null +++ b/drivers/net/wireguard/compat/checksum/checksum_partial_compat.h @@ -0,0 +1,208 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +#include <net/route.h> +#include <net/esp.h> +#include <net/ip.h> +#include <net/ipv6.h> +#include <net/ip6_checksum.h> + +#define IP6_MF 0x0001 +#define IP6_OFFSET 0xFFF8 +static inline int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len, unsigned int max) +{ + if (skb_headlen(skb) >= len) + return 0; + if (max > skb->len) + max = skb->len; + if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) + return -ENOMEM; + if (skb_headlen(skb) < len) + return -EPROTO; + return 0; +} +#define MAX_IP_HDR_LEN 128 +static inline int skb_checksum_setup_ip(struct sk_buff *skb, bool recalculate) +{ + unsigned int off; + bool fragment; + int err; + fragment = false; + err = skb_maybe_pull_tail(skb, sizeof(struct iphdr), MAX_IP_HDR_LEN); + if (err < 0) + goto out; + if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF)) + fragment = true; + off = ip_hdrlen(skb); + err = -EPROTO; + if (fragment) + goto out; + switch (ip_hdr(skb)->protocol) { + case IPPROTO_TCP: + err = skb_maybe_pull_tail(skb, + off + sizeof(struct tcphdr), + MAX_IP_HDR_LEN); + if (err < 0) + goto out; + + if (!skb_partial_csum_set(skb, off, + offsetof(struct tcphdr, check))) { + err = -EPROTO; + goto out; + } + + if (recalculate) + tcp_hdr(skb)->check = + ~csum_tcpudp_magic(ip_hdr(skb)->saddr, + ip_hdr(skb)->daddr, + skb->len - off, + IPPROTO_TCP, 0); + break; + case IPPROTO_UDP: + err = skb_maybe_pull_tail(skb, + off + sizeof(struct udphdr), + MAX_IP_HDR_LEN); + if (err < 0) + goto out; + + if (!skb_partial_csum_set(skb, off, + offsetof(struct udphdr, check))) { + err = -EPROTO; + goto out; + } + + if (recalculate) + udp_hdr(skb)->check = + ~csum_tcpudp_magic(ip_hdr(skb)->saddr, + ip_hdr(skb)->daddr, + skb->len - off, + IPPROTO_UDP, 0); + break; + default: + goto out; + } + err = 0; +out: + return err; +} +#define MAX_IPV6_HDR_LEN 256 +#define OPT_HDR(type, skb, off) \ + (type *)(skb_network_header(skb) + (off)) +static inline int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate) +{ + int err; + u8 nexthdr; + unsigned int off; + unsigned int len; + bool fragment; + bool done; + fragment = false; + done = false; + off = sizeof(struct ipv6hdr); + err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN); + if (err < 0) + goto out; + nexthdr = ipv6_hdr(skb)->nexthdr; + len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); + while (off <= len && !done) { + switch (nexthdr) { + case IPPROTO_DSTOPTS: + case IPPROTO_HOPOPTS: + case IPPROTO_ROUTING: { + struct ipv6_opt_hdr *hp; + + err = skb_maybe_pull_tail(skb, off + sizeof(struct ipv6_opt_hdr), MAX_IPV6_HDR_LEN); + if (err < 0) + goto out; + hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); + nexthdr = hp->nexthdr; + off += ipv6_optlen(hp); + break; + } + case IPPROTO_FRAGMENT: { + 
struct frag_hdr *hp; + err = skb_maybe_pull_tail(skb, off + sizeof(struct frag_hdr), MAX_IPV6_HDR_LEN); + if (err < 0) + goto out; + hp = OPT_HDR(struct frag_hdr, skb, off); + if (hp->frag_off & htons(IP6_OFFSET | IP6_MF)) + fragment = true; + nexthdr = hp->nexthdr; + off += sizeof(struct frag_hdr); + break; + } + default: + done = true; + break; + } + } + err = -EPROTO; + if (!done || fragment) + goto out; + switch (nexthdr) { + case IPPROTO_TCP: + err = skb_maybe_pull_tail(skb, + off + sizeof(struct tcphdr), + MAX_IPV6_HDR_LEN); + if (err < 0) + goto out; + + if (!skb_partial_csum_set(skb, off, + offsetof(struct tcphdr, check))) { + err = -EPROTO; + goto out; + } + + if (recalculate) + tcp_hdr(skb)->check = + ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + skb->len - off, + IPPROTO_TCP, 0); + break; + case IPPROTO_UDP: + err = skb_maybe_pull_tail(skb, + off + sizeof(struct udphdr), + MAX_IPV6_HDR_LEN); + if (err < 0) + goto out; + + if (!skb_partial_csum_set(skb, off, + offsetof(struct udphdr, check))) { + err = -EPROTO; + goto out; + } + + if (recalculate) + udp_hdr(skb)->check = + ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + skb->len - off, + IPPROTO_UDP, 0); + break; + default: + goto out; + } + err = 0; +out: + return err; +} +static inline int skb_checksum_setup(struct sk_buff *skb, bool recalculate) +{ + int err; + switch (skb->protocol) { + case htons(ETH_P_IP): + err = skb_checksum_setup_ip(skb, recalculate); + break; + + case htons(ETH_P_IPV6): + err = skb_checksum_setup_ipv6(skb, recalculate); + break; + default: + err = -EPROTO; + break; + } + return err; +} diff --git a/drivers/net/wireguard/compat/compat-asm.h b/drivers/net/wireguard/compat/compat-asm.h new file mode 100644 index 000000000000..4e427e50e9c6 --- /dev/null +++ b/drivers/net/wireguard/compat/compat-asm.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. 
+ */ + +#ifndef _WG_COMPATASM_H +#define _WG_COMPATASM_H + +#include <linux/linkage.h> +#include <linux/kconfig.h> +#include <linux/version.h> + +/* PaX compatibility */ +#if defined(RAP_PLUGIN) +#undef ENTRY +#define ENTRY RAP_ENTRY +#endif + +#if defined(__LINUX_ARM_ARCH__) && LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) + .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo + .macro ret\c, reg +#if __LINUX_ARM_ARCH__ < 6 + mov\c pc, \reg +#else + .ifeqs "\reg", "lr" + bx\c \reg + .else + mov\c pc, \reg + .endif +#endif + .endm + .endr +#endif + +#if defined(__LINUX_ARM_ARCH__) && LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0) +#include <asm/assembler.h> +#define lspush push +#define lspull pull +#undef push +#undef pull +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 76) +#define SYM_FUNC_START ENTRY +#define SYM_FUNC_END ENDPROC +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0) +#define blake2s_compress_ssse3 zinc_blake2s_compress_ssse3 +#define blake2s_compress_avx512 zinc_blake2s_compress_avx512 +#define poly1305_init_arm zinc_poly1305_init_arm +#define poly1305_blocks_arm zinc_poly1305_blocks_arm +#define poly1305_emit_arm zinc_poly1305_emit_arm +#define poly1305_blocks_neon zinc_poly1305_blocks_neon +#define poly1305_emit_neon zinc_poly1305_emit_neon +#define poly1305_init_mips zinc_poly1305_init_mips +#define poly1305_blocks_mips zinc_poly1305_blocks_mips +#define poly1305_emit_mips zinc_poly1305_emit_mips +#define poly1305_init_x86_64 zinc_poly1305_init_x86_64 +#define poly1305_blocks_x86_64 zinc_poly1305_blocks_x86_64 +#define poly1305_emit_x86_64 zinc_poly1305_emit_x86_64 +#define poly1305_emit_avx zinc_poly1305_emit_avx +#define poly1305_blocks_avx zinc_poly1305_blocks_avx +#define poly1305_blocks_avx2 zinc_poly1305_blocks_avx2 +#define poly1305_blocks_avx512 zinc_poly1305_blocks_avx512 +#define curve25519_neon zinc_curve25519_neon +#define hchacha20_ssse3 zinc_hchacha20_ssse3 +#define chacha20_ssse3 zinc_chacha20_ssse3 +#define chacha20_avx2 zinc_chacha20_avx2 +#define chacha20_avx512 zinc_chacha20_avx512 +#define chacha20_avx512vl zinc_chacha20_avx512vl +#define chacha20_mips zinc_chacha20_mips +#define chacha20_arm zinc_chacha20_arm +#define hchacha20_arm zinc_hchacha20_arm +#define chacha20_neon zinc_chacha20_neon +#endif + +#endif /* _WG_COMPATASM_H */ diff --git a/drivers/net/wireguard/compat/compat.h b/drivers/net/wireguard/compat/compat.h new file mode 100644 index 000000000000..9f6e725db531 --- /dev/null +++ b/drivers/net/wireguard/compat/compat.h @@ -0,0 +1,1118 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. 
+ */ + +#ifndef _WG_COMPAT_H +#define _WG_COMPAT_H + +#include <linux/kconfig.h> +#include <linux/version.h> +#include <linux/types.h> +#include <generated/utsrelease.h> + +#ifdef RHEL_MAJOR +#if RHEL_MAJOR == 7 +#define ISRHEL7 +#if RHEL_MINOR == 8 +#define ISCENTOS7 +#endif +#elif RHEL_MAJOR == 8 +#define ISRHEL8 +#if RHEL_MINOR == 2 +#define ISCENTOS8 +#endif +#endif +#endif +#ifdef UTS_UBUNTU_RELEASE_ABI +#if LINUX_VERSION_CODE == KERNEL_VERSION(3, 13, 11) +#define ISUBUNTU1404 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) +#define ISUBUNTU1604 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) +#define ISUBUNTU1804 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0) +#define ISUBUNTU1904 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0) +#define ISUBUNTU1910 +#endif +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0) +#error "WireGuard requires Linux >= 3.10" +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) +#error "WireGuard has been merged into Linux >= 5.6 and therefore this compatibility module is no longer required." +#endif + +#if defined(ISRHEL7) +#include <linux/skbuff.h> +#define headers_end headers_start +#elif LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0) +#define headers_start data +#define headers_end data +#endif + +#include <linux/cache.h> +#include <linux/init.h> +#ifndef __ro_after_init +#define __ro_after_init __read_mostly +#endif + +#include <linux/compiler.h> +#ifndef READ_ONCE +#define READ_ONCE ACCESS_ONCE +#endif +#ifndef WRITE_ONCE +#ifdef ACCESS_ONCE_RW +#define WRITE_ONCE(p, v) (ACCESS_ONCE_RW(p) = (v)) +#else +#define WRITE_ONCE(p, v) (ACCESS_ONCE(p) = (v)) +#endif +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0) +#include "udp_tunnel/udp_tunnel_partial_compat.h" +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0) && !defined(DEBUG) && defined(net_dbg_ratelimited) +#undef net_dbg_ratelimited +#define net_dbg_ratelimited(fmt, ...) 
do { if (0) no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); } while (0) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0) +#include <linux/rcupdate.h> +#ifndef RCU_LOCKDEP_WARN +#define RCU_LOCKDEP_WARN(cond, message) rcu_lockdep_assert(!(cond), message) +#endif +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0) && !defined(ISRHEL7) +#define ipv6_dst_lookup(a, b, c, d) ipv6_dst_lookup(b, c, d) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 83) +#define ipv6_dst_lookup_flow(a, b, c, d) ipv6_dst_lookup_flow(b, c, d) +#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 5) && LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(5, 3, 18) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) && !defined(ISUBUNTU1904)) || (!defined(ISRHEL8) && !defined(ISDEBIAN) && !defined(ISUBUNTU1804) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 119) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 181) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 224) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 224) && !defined(ISUBUNTU1604) && (!defined(ISRHEL7) || defined(ISCENTOS7))) +#define ipv6_dst_lookup_flow(a, b, c, d) ipv6_dst_lookup(a, b, &dst, c) + (void *)0 ?: dst +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0) && IS_ENABLED(CONFIG_IPV6) && !defined(ISRHEL7) +#include <net/ipv6.h> +struct ipv6_stub_type { + void *udpv6_encap_enable; + int (*ipv6_dst_lookup)(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6); +}; +static const struct ipv6_stub_type ipv6_stub_impl = { + .udpv6_encap_enable = (void *)1, + .ipv6_dst_lookup = ip6_dst_lookup +}; +static const struct ipv6_stub_type *ipv6_stub = &ipv6_stub_impl; +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) && IS_ENABLED(CONFIG_IPV6) && !defined(ISRHEL7) +#include <net/addrconf.h> +static inline bool ipv6_mod_enabled(void) +{ + return ipv6_stub != NULL && ipv6_stub->udpv6_encap_enable != NULL; +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) && !defined(ISRHEL7) +#include <linux/skbuff.h> +static inline void skb_reset_tc(struct sk_buff *skb) +{ +#ifdef CONFIG_NET_CLS_ACT + skb->tc_verd = 0; +#endif +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) +#include <linux/random.h> +#include <linux/siphash.h> +static inline u32 __compat_get_random_u32(void) +{ + static siphash_key_t key; + static u32 counter = 0; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + static bool has_seeded = false; + if (unlikely(!has_seeded)) { + get_random_bytes(&key, sizeof(key)); + has_seeded = true; + } +#else + get_random_once(&key, sizeof(key)); +#endif + return siphash_2u32(counter++, get_random_int(), &key); +} +#define get_random_u32 __compat_get_random_u32 +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0) && !defined(ISRHEL7) +static inline void netif_keep_dst(struct net_device *dev) +{ + dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; +} +#define COMPAT_CANNOT_USE_CSUM_LEVEL +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) && !defined(ISRHEL7) +#include <linux/netdevice.h> +#ifndef netdev_alloc_pcpu_stats +#define pcpu_sw_netstats pcpu_tstats +#endif +#ifndef netdev_alloc_pcpu_stats +#define netdev_alloc_pcpu_stats alloc_percpu +#endif +#elif LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0) && !defined(ISRHEL7) +#include <linux/netdevice.h> +#ifndef 
netdev_alloc_pcpu_stats +#define netdev_alloc_pcpu_stats(type) \ +({ \ + typeof(type) __percpu *pcpu_stats = alloc_percpu(type); \ + if (pcpu_stats) { \ + int __cpu; \ + for_each_possible_cpu(__cpu) { \ + typeof(type) *stat; \ + stat = per_cpu_ptr(pcpu_stats, __cpu); \ + u64_stats_init(&stat->syncp); \ + } \ + } \ + pcpu_stats; \ +}) +#endif +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) && !defined(ISRHEL7) +#include "checksum/checksum_partial_compat.h" +static inline void *__compat_pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) +{ + if (tail != skb) { + skb->data_len += len; + skb->len += len; + } + return skb_put(tail, len); +} +#define pskb_put __compat_pskb_put +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0) && !defined(ISRHEL7) +#include <net/xfrm.h> +static inline void skb_scrub_packet(struct sk_buff *skb, bool xnet) +{ +#ifdef CONFIG_CAVIUM_OCTEON_IPFWD_OFFLOAD + memset(&skb->cvm_info, 0, sizeof(skb->cvm_info)); + skb->cvm_reserved = 0; +#endif + skb->tstamp.tv64 = 0; + skb->pkt_type = PACKET_HOST; + skb->skb_iif = 0; + skb_dst_drop(skb); + secpath_reset(skb); + nf_reset(skb); + nf_reset_trace(skb); + if (!xnet) + return; + skb_orphan(skb); + skb->mark = 0; +} +#elif LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) +#define skb_scrub_packet(a, b) skb_scrub_packet(a) +#endif + +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)) || LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 63) || defined(ISUBUNTU1404)) && !defined(ISRHEL7) +#include <linux/random.h> +static inline u32 __compat_prandom_u32_max(u32 ep_ro) +{ + return (u32)(((u64)prandom_u32() * ep_ro) >> 32); +} +#define prandom_u32_max __compat_prandom_u32_max +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) +#include <linux/kernel.h> +#ifndef U8_MAX +#define U8_MAX ((u8)~0U) +#endif +#ifndef S8_MAX +#define S8_MAX ((s8)(U8_MAX >> 1)) +#endif +#ifndef S8_MIN +#define S8_MIN ((s8)(-S8_MAX - 1)) +#endif +#ifndef U16_MAX +#define U16_MAX ((u16)~0U) +#endif +#ifndef S16_MAX +#define S16_MAX ((s16)(U16_MAX >> 1)) +#endif +#ifndef S16_MIN +#define S16_MIN ((s16)(-S16_MAX - 1)) +#endif +#ifndef U32_MAX +#define U32_MAX ((u32)~0U) +#endif +#ifndef S32_MAX +#define S32_MAX ((s32)(U32_MAX >> 1)) +#endif +#ifndef S32_MIN +#define S32_MIN ((s32)(-S32_MAX - 1)) +#endif +#ifndef U64_MAX +#define U64_MAX ((u64)~0ULL) +#endif +#ifndef S64_MAX +#define S64_MAX ((s64)(U64_MAX >> 1)) +#endif +#ifndef S64_MIN +#define S64_MIN ((s64)(-S64_MAX - 1)) +#endif +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 3) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 35) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 24) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) && !defined(ISUBUNTU1404)) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 33) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 60) && !defined(ISRHEL7)) +static inline void memzero_explicit(void *s, size_t count) +{ + memset(s, 0, count); + barrier(); +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0) && !defined(ISRHEL7) +static const struct in6_addr __compat_in6addr_any = IN6ADDR_ANY_INIT; +#define in6addr_any __compat_in6addr_any +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) +#include <linux/completion.h> +#include 
<linux/random.h> +#include <linux/errno.h> +struct rng_initializer { + struct completion done; + struct random_ready_callback cb; +}; +static inline void rng_initialized_callback(struct random_ready_callback *cb) +{ + complete(&container_of(cb, struct rng_initializer, cb)->done); +} +static inline int wait_for_random_bytes(void) +{ + static bool rng_is_initialized = false; + int ret; + if (unlikely(!rng_is_initialized)) { + struct rng_initializer rng = { + .done = COMPLETION_INITIALIZER(rng.done), + .cb = { .owner = THIS_MODULE, .func = rng_initialized_callback } + }; + ret = add_random_ready_callback(&rng.cb); + if (!ret) { + ret = wait_for_completion_interruptible(&rng.done); + if (ret) { + del_random_ready_callback(&rng.cb); + return ret; + } + } else if (ret != -EALREADY) + return ret; + rng_is_initialized = true; + } + return 0; +} +#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0) +/* This is a disaster. Without this API, we really have no way of + * knowing if it's initialized. We just return that it has and hope + * for the best... */ +static inline int wait_for_random_bytes(void) +{ + return 0; +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) && !defined(ISRHEL8) +#include <linux/random.h> +#include <linux/slab.h> +struct rng_is_initialized_callback { + struct random_ready_callback cb; + atomic_t *rng_state; +}; +static inline void rng_is_initialized_callback(struct random_ready_callback *cb) +{ + struct rng_is_initialized_callback *rdy = container_of(cb, struct rng_is_initialized_callback, cb); + atomic_set(rdy->rng_state, 2); + kfree(rdy); +} +static inline bool rng_is_initialized(void) +{ + static atomic_t rng_state = ATOMIC_INIT(0); + + if (atomic_read(&rng_state) == 2) + return true; + + if (atomic_cmpxchg(&rng_state, 0, 1) == 0) { + int ret; + struct rng_is_initialized_callback *rdy = kmalloc(sizeof(*rdy), GFP_ATOMIC); + if (!rdy) { + atomic_set(&rng_state, 0); + return false; + } + rdy->cb.owner = THIS_MODULE; + rdy->cb.func = rng_is_initialized_callback; + rdy->rng_state = &rng_state; + ret = add_random_ready_callback(&rdy->cb); + if (ret) + kfree(rdy); + if (ret == -EALREADY) { + atomic_set(&rng_state, 2); + return true; + } else if (ret) + atomic_set(&rng_state, 0); + return false; + } + return false; +} +#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0) +/* This is a disaster. Without this API, we really have no way of + * knowing if it's initialized. We just return that it has and hope + * for the best... 
*/ +static inline bool rng_is_initialized(void) +{ + return true; +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0) +static inline int get_random_bytes_wait(void *buf, int nbytes) +{ + int ret = wait_for_random_bytes(); + if (unlikely(ret)) + return ret; + get_random_bytes(buf, nbytes); + return 0; +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0) && !defined(ISRHEL7) +#define system_power_efficient_wq system_unbound_wq +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 3, 0) +#include <linux/ktime.h> +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) +#include <linux/hrtimer.h> +#ifndef ktime_get_real_ts64 +#define timespec64 timespec +#define ktime_get_real_ts64 ktime_get_real_ts +#endif +#else +#include <linux/timekeeping.h> +#endif +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) +static inline u64 __compat_jiffies64_to_nsecs(u64 j) +{ +#if !(NSEC_PER_SEC % HZ) + return (NSEC_PER_SEC / HZ) * j; +#else + return div_u64(j * HZ_TO_USEC_NUM, HZ_TO_USEC_DEN) * 1000; +#endif +} +#define jiffies64_to_nsecs __compat_jiffies64_to_nsecs +#endif +static inline u64 ktime_get_coarse_boottime_ns(void) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) + return ktime_to_ns(ktime_get_boottime()); +#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 12) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)) || LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 53) + return ktime_to_ns(ktime_mono_to_any(ns_to_ktime(jiffies64_to_nsecs(get_jiffies_64())), TK_OFFS_BOOT)); +#else + return ktime_to_ns(ktime_get_coarse_boottime()); +#endif +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) +#include <linux/inetdevice.h> +static inline __be32 __compat_confirm_addr_indev(struct in_device *in_dev, __be32 dst, __be32 local, int scope) +{ + int same = 0; + __be32 addr = 0; + for_ifa(in_dev) { + if (!addr && (local == ifa->ifa_local || !local) && ifa->ifa_scope <= scope) { + addr = ifa->ifa_local; + if (same) + break; + } + if (!same) { + same = (!local || inet_ifa_match(local, ifa)) && (!dst || inet_ifa_match(dst, ifa)); + if (same && addr) { + if (local || !dst) + break; + if (inet_ifa_match(addr, ifa)) + break; + if (ifa->ifa_scope <= scope) { + addr = ifa->ifa_local; + break; + } + same = 0; + } + } + } endfor_ifa(in_dev); + return same ? 
addr : 0; +} +static inline __be32 __compat_inet_confirm_addr(struct net *net, struct in_device *in_dev, __be32 dst, __be32 local, int scope) +{ + __be32 addr = 0; + struct net_device *dev; + if (in_dev) + return __compat_confirm_addr_indev(in_dev, dst, local, scope); + rcu_read_lock(); + for_each_netdev_rcu(net, dev) { + in_dev = __in_dev_get_rcu(dev); + if (in_dev) { + addr = __compat_confirm_addr_indev(in_dev, dst, local, scope); + if (addr) + break; + } + } + rcu_read_unlock(); + return addr; +} +#define inet_confirm_addr __compat_inet_confirm_addr +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0) +#include <linux/vmalloc.h> +#include <linux/mm.h> +#include <linux/slab.h> +static inline void *__compat_kvmalloc(size_t size, gfp_t flags) +{ + gfp_t kmalloc_flags = flags; + void *ret; + if (size > PAGE_SIZE) { + kmalloc_flags |= __GFP_NOWARN; + if (!(kmalloc_flags & __GFP_REPEAT) || (size <= PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) + kmalloc_flags |= __GFP_NORETRY; + } + ret = kmalloc(size, kmalloc_flags); + if (ret || size <= PAGE_SIZE) + return ret; + return __vmalloc(size, flags, PAGE_KERNEL); +} +static inline void *__compat_kvzalloc(size_t size, gfp_t flags) +{ + return __compat_kvmalloc(size, flags | __GFP_ZERO); +} +#define kvmalloc __compat_kvmalloc +#define kvzalloc __compat_kvzalloc +#endif + +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)) || LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 41)) && !defined(ISUBUNTU1404) +#include <linux/vmalloc.h> +#include <linux/mm.h> +static inline void __compat_kvfree(const void *addr) +{ + if (is_vmalloc_addr(addr)) + vfree(addr); + else + kfree(addr); +} +#define kvfree __compat_kvfree +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 9) +#include <linux/netdevice.h> +#define priv_destructor destructor +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0) +#define wg_newlink(a,b,c,d,e) wg_newlink(a,b,c,d) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0) +#include <net/netlink.h> +#include <net/genetlink.h> +#define nlmsg_parse(a, b, c, d, e, f) nlmsg_parse(a, b, c, d, e) +#define nla_parse_nested(a, b, c, d, e) nla_parse_nested(a, b, c, d) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) && !defined(ISRHEL7) +static inline struct nlattr **genl_family_attrbuf(const struct genl_family *family) +{ + return family->attrbuf; +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0) +#define PTR_ERR_OR_ZERO(p) PTR_RET(p) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0) +#include <net/netlink.h> +#define nla_put_u64_64bit(a, b, c, d) nla_put_u64(a, b, c) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) +#include <net/genetlink.h> +#ifndef GENL_UNS_ADMIN_PERM +#define GENL_UNS_ADMIN_PERM GENL_ADMIN_PERM +#endif +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) && !defined(ISRHEL7) +#include <net/genetlink.h> +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0) +#define genl_register_family(a) genl_register_family_with_ops(a, genl_ops, ARRAY_SIZE(genl_ops)) +#define COMPAT_CANNOT_USE_CONST_GENL_OPS +#else +#define genl_register_family(a) genl_register_family_with_ops(a, genl_ops) +#endif +#define COMPAT_CANNOT_USE_GENL_NOPS +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 2) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 16) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 65) && LINUX_VERSION_CODE >= 
KERNEL_VERSION(4, 5, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 101) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) || LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 84) +#define __COMPAT_NETLINK_DUMP_BLOCK { \ + int ret; \ + skb->end -= nlmsg_total_size(sizeof(int)); \ + ret = wg_get_device_dump_real(skb, cb); \ + skb->end += nlmsg_total_size(sizeof(int)); \ + return ret; \ +} +#define __COMPAT_NETLINK_DUMP_OVERRIDE +#else +#define __COMPAT_NETLINK_DUMP_BLOCK return wg_get_device_dump_real(skb, cb); +#endif +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 8) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 25) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) || LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 87) +#define wg_get_device_dump(a, b) wg_get_device_dump_real(a, b); \ +static int wg_get_device_dump(a, b) { \ + struct wg_device *wg = (struct wg_device *)cb->args[0]; \ + if (!wg) { \ + int ret = wg_get_device_start(cb); \ + if (ret) \ + return ret; \ + } \ + __COMPAT_NETLINK_DUMP_BLOCK \ +} \ +static int wg_get_device_dump_real(a, b) +#define COMPAT_CANNOT_USE_NETLINK_START +#elif defined(__COMPAT_NETLINK_DUMP_OVERRIDE) +#define wg_get_device_dump(a, b) wg_get_device_dump_real(a, b); \ +static int wg_get_device_dump(a, b) { \ + __COMPAT_NETLINK_DUMP_BLOCK \ +} \ +static int wg_get_device_dump_real(a, b) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) +#define COMPAT_CANNOT_USE_IN6_DEV_GET +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0) +#define COMPAT_CANNOT_USE_IFF_NO_QUEUE +#endif + +#if defined(CONFIG_X86_64) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) +#include <asm/user.h> +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0) +#include <asm/xsave.h> +#include <asm/xcr.h> +static inline int cpu_has_xfeatures(u64 xfeatures_needed, const char **feature_name) +{ + return boot_cpu_has(X86_FEATURE_XSAVE) && xgetbv(XCR_XFEATURE_ENABLED_MASK) & xfeatures_needed; +} +#endif +#ifndef XFEATURE_MASK_YMM +#define XFEATURE_MASK_YMM XSTATE_YMM +#endif +#ifndef XFEATURE_MASK_SSE +#define XFEATURE_MASK_SSE XSTATE_SSE +#endif +#ifndef XSTATE_AVX512 +#define XSTATE_AVX512 (XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM) +#endif +#ifndef XFEATURE_MASK_AVX512 +#define XFEATURE_MASK_AVX512 XSTATE_AVX512 +#endif +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) && defined(CONFIG_X86_64) +/* This is incredibly dumb and reckless, but as it turns out, there's + * not really hardware Linux runs properly on that supports F but not BW + * and VL, so in practice this isn't so bad. Plus, this is compat layer, + * so the bar remains fairly low. 
+ */ +#include <asm/cpufeature.h> +#ifndef X86_FEATURE_AVX512BW +#define X86_FEATURE_AVX512BW X86_FEATURE_AVX512F +#endif +#ifndef X86_FEATURE_AVX512VL +#define X86_FEATURE_AVX512VL X86_FEATURE_AVX512F +#endif +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0) +struct __compat_dummy_container { char dev; }; +#define netdev_notifier_info net_device *)data); __attribute((unused)) char __compat_dummy_variable = ((struct __compat_dummy_container +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) +#define timer_setup(a, b, c) setup_timer(a, ((void (*)(unsigned long))b), ((unsigned long)a)) +#define from_timer(var, callback_timer, timer_fieldname) container_of(callback_timer, typeof(*var), timer_fieldname) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 3) +#define COMPAT_CANNOT_USE_AVX512 +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) +#include <net/genetlink.h> +#define genl_dump_check_consistent(a, b) genl_dump_check_consistent(a, b, &genl_family) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0) && !defined(ISRHEL7) +static inline void *skb_put_data(struct sk_buff *skb, const void *data, unsigned int len) +{ + void *tmp = skb_put(skb, len); + memcpy(tmp, data, len); + return tmp; +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0) && !defined(ISRHEL7) +#define napi_complete_done(n, work_done) napi_complete(n) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) +#include <linux/netdevice.h> +/* NAPI_STATE_SCHED gets set by netif_napi_add anyway, so this is safe. + * Also, kernels without NAPI_STATE_NO_BUSY_POLL don't have a call to + * napi_hash_add inside of netif_napi_add. + */ +#define NAPI_STATE_NO_BUSY_POLL NAPI_STATE_SCHED +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) +#include <linux/atomic.h> +#ifndef atomic_read_acquire +#define atomic_read_acquire(v) ({ int __compat_p1 = atomic_read(v); smp_rmb(); __compat_p1; }) +#endif +#ifndef atomic_set_release +#define atomic_set_release(v, i) ({ smp_wmb(); atomic_set(v, i); }) +#endif +#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0) +#include <linux/atomic.h> +#ifndef atomic_read_acquire +#define atomic_read_acquire(v) smp_load_acquire(&(v)->counter) +#endif +#ifndef atomic_set_release +#define atomic_set_release(v, i) smp_store_release(&(v)->counter, (i)) +#endif +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0) +static inline void le32_to_cpu_array(u32 *buf, unsigned int words) +{ + while (words--) { + __le32_to_cpus(buf); + buf++; + } +} +static inline void cpu_to_le32_array(u32 *buf, unsigned int words) +{ + while (words--) { + __cpu_to_le32s(buf); + buf++; + } +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) +#include <crypto/algapi.h> +static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2, + unsigned int size) +{ + if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && + __builtin_constant_p(size) && + (size % sizeof(unsigned long)) == 0) { + unsigned long *d = (unsigned long *)dst; + unsigned long *s1 = (unsigned long *)src1; + unsigned long *s2 = (unsigned long *)src2; + + while (size > 0) { + *d++ = *s1++ ^ *s2++; + size -= sizeof(unsigned long); + } + } else { + if (unlikely(dst != src1)) + memmove(dst, src1, size); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) + crypto_xor(dst, src2, size); +#else + __crypto_xor(dst, src2, size); +#endif + } +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) +#define read_cpuid_part() read_cpuid_part_number() +#endif + +#if LINUX_VERSION_CODE 
< KERNEL_VERSION(3, 17, 0) && !defined(ISRHEL7) +#define hlist_add_behind(a, b) hlist_add_after(b, a) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0) +#define totalram_pages() totalram_pages +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0) +struct __kernel_timespec { + int64_t tv_sec, tv_nsec; +}; +#elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0) +#include <linux/time64.h> +#ifdef __kernel_timespec +#undef __kernel_timespec +struct __kernel_timespec { + int64_t tv_sec, tv_nsec; +}; +#endif +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0) +#include <linux/kernel.h> +#ifndef ALIGN_DOWN +#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a)) +#endif +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0) && !defined(ISRHEL8) +#include <linux/skbuff.h> +#define skb_probe_transport_header(a) skb_probe_transport_header(a, 0) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0) && !defined(ISRHEL7) +#define ignore_df local_df +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0) && !defined(ISRHEL8) +/* Note that all intentional uses of the non-_bh variety need to explicitly + * undef these, conditionalized on COMPAT_CANNOT_DEPRECIATE_BH_RCU. + */ +#include <linux/rcupdate.h> +static __always_inline void old_synchronize_rcu(void) +{ + synchronize_rcu(); +} +static __always_inline void old_call_rcu(void *a, void *b) +{ + call_rcu(a, b); +} +static __always_inline void old_rcu_barrier(void) +{ + rcu_barrier(); +} +#ifdef synchronize_rcu +#undef synchronize_rcu +#endif +#ifdef call_rcu +#undef call_rcu +#endif +#ifdef rcu_barrier +#undef rcu_barrier +#endif +#define synchronize_rcu synchronize_rcu_bh +#define call_rcu call_rcu_bh +#define rcu_barrier rcu_barrier_bh +#define COMPAT_CANNOT_DEPRECIATE_BH_RCU +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 10) && !defined(ISRHEL8) +static inline void skb_mark_not_on_list(struct sk_buff *skb) +{ + skb->next = NULL; +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0) && !defined(ISRHEL8) +#define NLA_POLICY_EXACT_LEN(_len) { .type = NLA_UNSPEC, .len = _len } +#endif +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0) && !defined(ISRHEL8) +#define NLA_POLICY_MIN_LEN(_len) { .type = NLA_UNSPEC, .len = _len } +#define COMPAT_CANNOT_INDIVIDUAL_NETLINK_OPS_POLICY +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0) && defined(__aarch64__) +#define cpu_have_named_feature(name) (elf_hwcap & (HWCAP_ ## name)) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) +#include <linux/stddef.h> +#ifndef offsetofend +#define offsetofend(TYPE, MEMBER) (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER)) +#endif +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 5, 0) +#define genl_dumpit_info(cb) ({ \ + struct { struct nlattr **attrs; } *a = (void *)((u8 *)cb->args + offsetofend(struct dump_ctx, next_allowedip)); \ + BUILD_BUG_ON(sizeof(cb->args) < offsetofend(struct dump_ctx, next_allowedip) + sizeof(*a)); \ + a->attrs = genl_family_attrbuf(&genl_family); \ + if (nlmsg_parse(cb->nlh, GENL_HDRLEN + genl_family.hdrsize, a->attrs, genl_family.maxattr, device_policy, NULL) < 0) \ + memset(a->attrs, 0, (genl_family.maxattr + 1) * sizeof(struct nlattr *)); \ + a; \ +}) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0) +#include <linux/skbuff.h> +#ifndef skb_list_walk_safe +#define skb_list_walk_safe(first, skb, next) \ + for ((skb) = (first), (next) = (skb) ? (skb)->next : NULL; (skb); \ + (skb) = (next), (next) = (skb) ? 
(skb)->next : NULL) +#endif +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0) +#define blake2s_init zinc_blake2s_init +#define blake2s_init_key zinc_blake2s_init_key +#define blake2s_update zinc_blake2s_update +#define blake2s_final zinc_blake2s_final +#define blake2s_hmac zinc_blake2s_hmac +#define chacha20 zinc_chacha20 +#define hchacha20 zinc_hchacha20 +#define chacha20poly1305_encrypt zinc_chacha20poly1305_encrypt +#define chacha20poly1305_encrypt_sg_inplace zinc_chacha20poly1305_encrypt_sg_inplace +#define chacha20poly1305_decrypt zinc_chacha20poly1305_decrypt +#define chacha20poly1305_decrypt_sg_inplace zinc_chacha20poly1305_decrypt_sg_inplace +#define xchacha20poly1305_encrypt zinc_xchacha20poly1305_encrypt +#define xchacha20poly1305_decrypt zinc_xchacha20poly1305_decrypt +#define curve25519 zinc_curve25519 +#define curve25519_generate_secret zinc_curve25519_generate_secret +#define curve25519_generate_public zinc_curve25519_generate_public +#define poly1305_init zinc_poly1305_init +#define poly1305_update zinc_poly1305_update +#define poly1305_final zinc_poly1305_final +#define blake2s_compress_ssse3 zinc_blake2s_compress_ssse3 +#define blake2s_compress_avx512 zinc_blake2s_compress_avx512 +#define poly1305_init_arm zinc_poly1305_init_arm +#define poly1305_blocks_arm zinc_poly1305_blocks_arm +#define poly1305_emit_arm zinc_poly1305_emit_arm +#define poly1305_blocks_neon zinc_poly1305_blocks_neon +#define poly1305_emit_neon zinc_poly1305_emit_neon +#define poly1305_init_mips zinc_poly1305_init_mips +#define poly1305_blocks_mips zinc_poly1305_blocks_mips +#define poly1305_emit_mips zinc_poly1305_emit_mips +#define poly1305_init_x86_64 zinc_poly1305_init_x86_64 +#define poly1305_blocks_x86_64 zinc_poly1305_blocks_x86_64 +#define poly1305_emit_x86_64 zinc_poly1305_emit_x86_64 +#define poly1305_emit_avx zinc_poly1305_emit_avx +#define poly1305_blocks_avx zinc_poly1305_blocks_avx +#define poly1305_blocks_avx2 zinc_poly1305_blocks_avx2 +#define poly1305_blocks_avx512 zinc_poly1305_blocks_avx512 +#define curve25519_neon zinc_curve25519_neon +#define hchacha20_ssse3 zinc_hchacha20_ssse3 +#define chacha20_ssse3 zinc_chacha20_ssse3 +#define chacha20_avx2 zinc_chacha20_avx2 +#define chacha20_avx512 zinc_chacha20_avx512 +#define chacha20_avx512vl zinc_chacha20_avx512vl +#define chacha20_mips zinc_chacha20_mips +#define chacha20_arm zinc_chacha20_arm +#define hchacha20_arm zinc_hchacha20_arm +#define chacha20_neon zinc_chacha20_neon +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0) && !defined(ISRHEL7) +#include <linux/skbuff.h> +static inline int skb_ensure_writable(struct sk_buff *skb, int write_len) +{ + if (!pskb_may_pull(skb, write_len)) + return -ENOMEM; + + if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) + return 0; + + return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0) +#if IS_ENABLED(CONFIG_NF_NAT) +#include <linux/ip.h> +#include <linux/icmpv6.h> +#include <net/ipv6.h> +#include <net/icmp.h> +#include <net/netfilter/nf_conntrack.h> +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0) && !defined(ISRHEL8) +#include <net/netfilter/nf_nat_core.h> +#endif +static inline void __compat_icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info) +{ + struct sk_buff *cloned_skb = NULL; + enum ip_conntrack_info ctinfo; + struct nf_conn *ct; + __be32 orig_ip; + + ct = nf_ct_get(skb_in, &ctinfo); + if (!ct || !(ct->status & IPS_SRC_NAT)) { + icmp_send(skb_in, type, code, info); + return; + } + + 
if (skb_shared(skb_in)) + skb_in = cloned_skb = skb_clone(skb_in, GFP_ATOMIC); + + if (unlikely(!skb_in || skb_network_header(skb_in) < skb_in->head || + (skb_network_header(skb_in) + sizeof(struct iphdr)) > + skb_tail_pointer(skb_in) || skb_ensure_writable(skb_in, + skb_network_offset(skb_in) + sizeof(struct iphdr)))) + goto out; + + orig_ip = ip_hdr(skb_in)->saddr; + ip_hdr(skb_in)->saddr = ct->tuplehash[0].tuple.src.u3.ip; + icmp_send(skb_in, type, code, info); + ip_hdr(skb_in)->saddr = orig_ip; +out: + consume_skb(cloned_skb); +} +static inline void __compat_icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info) +{ + struct sk_buff *cloned_skb = NULL; + enum ip_conntrack_info ctinfo; + struct in6_addr orig_ip; + struct nf_conn *ct; + + ct = nf_ct_get(skb_in, &ctinfo); + if (!ct || !(ct->status & IPS_SRC_NAT)) { + icmpv6_send(skb_in, type, code, info); + return; + } + + if (skb_shared(skb_in)) + skb_in = cloned_skb = skb_clone(skb_in, GFP_ATOMIC); + + if (unlikely(!skb_in || skb_network_header(skb_in) < skb_in->head || + (skb_network_header(skb_in) + sizeof(struct ipv6hdr)) > + skb_tail_pointer(skb_in) || skb_ensure_writable(skb_in, + skb_network_offset(skb_in) + sizeof(struct ipv6hdr)))) + goto out; + + orig_ip = ipv6_hdr(skb_in)->saddr; + ipv6_hdr(skb_in)->saddr = ct->tuplehash[0].tuple.src.u3.in6; + icmpv6_send(skb_in, type, code, info); + ipv6_hdr(skb_in)->saddr = orig_ip; +out: + consume_skb(cloned_skb); +} +#else +#define __compat_icmp_ndo_send icmp_send +#define __compat_icmpv6_ndo_send icmpv6_send +#endif +#define icmp_ndo_send __compat_icmp_ndo_send +#define icmpv6_ndo_send __compat_icmpv6_ndo_send +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) +#define COMPAT_CANNOT_USE_MAX_MTU +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 5, 14) && LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 29) && !defined(ISUBUNTU1910) && !defined(ISUBUNTU1904) && (!defined(ISRHEL8) || defined(ISCENTOS8))) +#include <linux/skbuff.h> +#include <net/sch_generic.h> +static inline void skb_reset_redirect(struct sk_buff *skb) +{ +#ifdef CONFIG_NET_SCHED + skb_reset_tc(skb); +#endif +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) && !defined(ISRHEL7) +#define skb_get_hash skb_get_rxhash +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0) && !defined(ISRHEL7) +#define hash rxhash +#define l4_hash l4_rxhash +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) && !defined(ISRHEL7) +#define sw_hash ignore_df = 0; skb->nf_trace = skb->ooo_okay +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 3, 0) +#define pre_exit exit +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) +#include <linux/skbuff.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +static inline __be16 ip_tunnel_parse_protocol(const struct sk_buff *skb) +{ + if (skb_network_header(skb) >= skb->head && + (skb_network_header(skb) + sizeof(struct iphdr)) <= skb_tail_pointer(skb) && + ip_hdr(skb)->version == 4) + return htons(ETH_P_IP); + if (skb_network_header(skb) >= skb->head && + (skb_network_header(skb) + sizeof(struct ipv6hdr)) <= skb_tail_pointer(skb) && + ipv6_hdr(skb)->version == 6) + return htons(ETH_P_IPV6); + return 0; +} +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0) || defined(ISRHEL8) +static const struct header_ops ip_tunnel_header_ops = { .parse_protocol = ip_tunnel_parse_protocol }; +#else +#define header_ops hard_header_len +#define ip_tunnel_header_ops *(char *)0 - (char *)0 +#endif +#endif + +#if 
LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0) +#define kfree_sensitive(a) kzfree(a) +#endif + +#if defined(ISUBUNTU1604) || defined(ISRHEL7) +#include <linux/siphash.h> +#ifndef _WG_LINUX_SIPHASH_H +#define hsiphash_1u32 siphash_1u32 +#define hsiphash_2u32 siphash_2u32 +#define hsiphash_3u32 siphash_3u32 +#define hsiphash_key_t siphash_key_t +#endif +#endif + +#ifdef CONFIG_VE +#include <linux/netdev_features.h> +#ifdef NETIF_F_VIRTUAL +#undef NETIF_F_LLTX +#define NETIF_F_LLTX (__NETIF_F(LLTX) | __NETIF_F(VIRTUAL)) +#endif +#endif + +/* https://github.com/ClangBuiltLinux/linux/issues/7 */ +#if defined( __clang__) && (!defined(CONFIG_CLANG_VERSION) || CONFIG_CLANG_VERSION < 80000) +#include <linux/bug.h> +#undef BUILD_BUG_ON +#define BUILD_BUG_ON(x) +#endif + +/* PaX compatibility */ +#ifdef CONSTIFY_PLUGIN +#include <linux/cache.h> +#undef __read_mostly +#define __read_mostly +#endif +#if (defined(RAP_PLUGIN) || defined(CONFIG_CFI_CLANG)) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) +#include <linux/timer.h> +#define wg_expired_retransmit_handshake(a) wg_expired_retransmit_handshake(unsigned long timer) +#define wg_expired_send_keepalive(a) wg_expired_send_keepalive(unsigned long timer) +#define wg_expired_new_handshake(a) wg_expired_new_handshake(unsigned long timer) +#define wg_expired_zero_key_material(a) wg_expired_zero_key_material(unsigned long timer) +#define wg_expired_send_persistent_keepalive(a) wg_expired_send_persistent_keepalive(unsigned long timer) +#undef timer_setup +#define timer_setup(a, b, c) setup_timer(a, ((void (*)(unsigned long))b), ((unsigned long)a)) +#undef from_timer +#define from_timer(var, callback_timer, timer_fieldname) container_of((struct timer_list *)callback_timer, typeof(*var), timer_fieldname) +#endif + +#endif /* _WG_COMPAT_H */ diff --git a/drivers/net/wireguard/compat/dst_cache/dst_cache.c b/drivers/net/wireguard/compat/dst_cache/dst_cache.c new file mode 100644 index 000000000000..7ec22f768a8f --- /dev/null +++ b/drivers/net/wireguard/compat/dst_cache/dst_cache.c @@ -0,0 +1,175 @@ +/* + * net/core/dst_cache.c - dst entry cache + * + * Copyright (c) 2016 Paolo Abeni <pabeni@redhat.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/kernel.h> +#include <linux/percpu.h> +#include <net/dst_cache.h> +#include <net/route.h> +#if IS_ENABLED(CONFIG_IPV6) +#include <net/ip6_fib.h> +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)) || LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 50) +static inline u32 rt6_get_cookie(const struct rt6_info *rt) +{ + if ((unlikely(rt->dst.flags & DST_NOCACHE) && rt->dst.from)) + rt = (struct rt6_info *)(rt->dst.from); + + return rt->rt6i_node ? 
rt->rt6i_node->fn_sernum : 0; +} +#endif +#endif +#include <uapi/linux/in.h> + +struct dst_cache_pcpu { + unsigned long refresh_ts; + struct dst_entry *dst; + u32 cookie; + union { + struct in_addr in_saddr; + struct in6_addr in6_saddr; + }; +}; + +static void dst_cache_per_cpu_dst_set(struct dst_cache_pcpu *dst_cache, + struct dst_entry *dst, u32 cookie) +{ + dst_release(dst_cache->dst); + if (dst) + dst_hold(dst); + + dst_cache->cookie = cookie; + dst_cache->dst = dst; +} + +static struct dst_entry *dst_cache_per_cpu_get(struct dst_cache *dst_cache, + struct dst_cache_pcpu *idst) +{ + struct dst_entry *dst; + + dst = idst->dst; + if (!dst) + goto fail; + + /* the cache already hold a dst reference; it can't go away */ + dst_hold(dst); + + if (unlikely(!time_after(idst->refresh_ts, dst_cache->reset_ts) || + (dst->obsolete && !dst->ops->check(dst, idst->cookie)))) { + dst_cache_per_cpu_dst_set(idst, NULL, 0); + dst_release(dst); + goto fail; + } + return dst; + +fail: + idst->refresh_ts = jiffies; + return NULL; +} + +struct dst_entry *dst_cache_get(struct dst_cache *dst_cache) +{ + if (!dst_cache->cache) + return NULL; + + return dst_cache_per_cpu_get(dst_cache, this_cpu_ptr(dst_cache->cache)); +} + +struct rtable *dst_cache_get_ip4(struct dst_cache *dst_cache, __be32 *saddr) +{ + struct dst_cache_pcpu *idst; + struct dst_entry *dst; + + if (!dst_cache->cache) + return NULL; + + idst = this_cpu_ptr(dst_cache->cache); + dst = dst_cache_per_cpu_get(dst_cache, idst); + if (!dst) + return NULL; + + *saddr = idst->in_saddr.s_addr; + return container_of(dst, struct rtable, dst); +} + +void dst_cache_set_ip4(struct dst_cache *dst_cache, struct dst_entry *dst, + __be32 saddr) +{ + struct dst_cache_pcpu *idst; + + if (!dst_cache->cache) + return; + + idst = this_cpu_ptr(dst_cache->cache); + dst_cache_per_cpu_dst_set(idst, dst, 0); + idst->in_saddr.s_addr = saddr; +} + +#if IS_ENABLED(CONFIG_IPV6) +void dst_cache_set_ip6(struct dst_cache *dst_cache, struct dst_entry *dst, + const struct in6_addr *addr) +{ + struct dst_cache_pcpu *idst; + + if (!dst_cache->cache) + return; + + idst = this_cpu_ptr(dst_cache->cache); + dst_cache_per_cpu_dst_set(this_cpu_ptr(dst_cache->cache), dst, + rt6_get_cookie((struct rt6_info *)dst)); + idst->in6_saddr = *addr; +} + +struct dst_entry *dst_cache_get_ip6(struct dst_cache *dst_cache, + struct in6_addr *saddr) +{ + struct dst_cache_pcpu *idst; + struct dst_entry *dst; + + if (!dst_cache->cache) + return NULL; + + idst = this_cpu_ptr(dst_cache->cache); + dst = dst_cache_per_cpu_get(dst_cache, idst); + if (!dst) + return NULL; + + *saddr = idst->in6_saddr; + return dst; +} +#endif + +int dst_cache_init(struct dst_cache *dst_cache, gfp_t gfp) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0) + BUG_ON(gfp & GFP_ATOMIC); + dst_cache->cache = alloc_percpu(struct dst_cache_pcpu); +#else + dst_cache->cache = alloc_percpu_gfp(struct dst_cache_pcpu, + gfp | __GFP_ZERO); +#endif + if (!dst_cache->cache) + return -ENOMEM; + + dst_cache_reset(dst_cache); + return 0; +} + +void dst_cache_destroy(struct dst_cache *dst_cache) +{ + int i; + + if (!dst_cache->cache) + return; + + for_each_possible_cpu(i) + dst_release(per_cpu_ptr(dst_cache->cache, i)->dst); + + free_percpu(dst_cache->cache); +} diff --git a/drivers/net/wireguard/compat/dst_cache/include/net/dst_cache.h b/drivers/net/wireguard/compat/dst_cache/include/net/dst_cache.h new file mode 100644 index 000000000000..48021c0d6be1 --- /dev/null +++ b/drivers/net/wireguard/compat/dst_cache/include/net/dst_cache.h @@ -0,0 +1,97 
@@ +#ifndef _WG_NET_DST_CACHE_H +#define _WG_NET_DST_CACHE_H + +#include <linux/jiffies.h> +#include <net/dst.h> +#if IS_ENABLED(CONFIG_IPV6) +#include <net/ip6_fib.h> +#endif + +struct dst_cache { + struct dst_cache_pcpu __percpu *cache; + unsigned long reset_ts; +}; + +/** + * dst_cache_get - perform cache lookup + * @dst_cache: the cache + * + * The caller should use dst_cache_get_ip4() if it need to retrieve the + * source address to be used when xmitting to the cached dst. + * local BH must be disabled. + */ +struct dst_entry *dst_cache_get(struct dst_cache *dst_cache); + +/** + * dst_cache_get_ip4 - perform cache lookup and fetch ipv4 source address + * @dst_cache: the cache + * @saddr: return value for the retrieved source address + * + * local BH must be disabled. + */ +struct rtable *dst_cache_get_ip4(struct dst_cache *dst_cache, __be32 *saddr); + +/** + * dst_cache_set_ip4 - store the ipv4 dst into the cache + * @dst_cache: the cache + * @dst: the entry to be cached + * @saddr: the source address to be stored inside the cache + * + * local BH must be disabled. + */ +void dst_cache_set_ip4(struct dst_cache *dst_cache, struct dst_entry *dst, + __be32 saddr); + +#if IS_ENABLED(CONFIG_IPV6) + +/** + * dst_cache_set_ip6 - store the ipv6 dst into the cache + * @dst_cache: the cache + * @dst: the entry to be cached + * @saddr: the source address to be stored inside the cache + * + * local BH must be disabled. + */ +void dst_cache_set_ip6(struct dst_cache *dst_cache, struct dst_entry *dst, + const struct in6_addr *addr); + +/** + * dst_cache_get_ip6 - perform cache lookup and fetch ipv6 source address + * @dst_cache: the cache + * @saddr: return value for the retrieved source address + * + * local BH must be disabled. + */ +struct dst_entry *dst_cache_get_ip6(struct dst_cache *dst_cache, + struct in6_addr *saddr); +#endif + +/** + * dst_cache_reset - invalidate the cache contents + * @dst_cache: the cache + * + * This do not free the cached dst to avoid races and contentions. + * the dst will be freed on later cache lookup. + */ +static inline void dst_cache_reset(struct dst_cache *dst_cache) +{ + dst_cache->reset_ts = jiffies; +} + +/** + * dst_cache_init - initialize the cache, allocating the required storage + * @dst_cache: the cache + * @gfp: allocation flags + */ +int dst_cache_init(struct dst_cache *dst_cache, gfp_t gfp); + +/** + * dst_cache_destroy - empty the cache and free the allocated storage + * @dst_cache: the cache + * + * No synchronization is enforced: it must be called only when the cache + * is unused. + */ +void dst_cache_destroy(struct dst_cache *dst_cache); + +#endif /* _WG_NET_DST_CACHE_H */ diff --git a/drivers/net/wireguard/compat/fpu-x86/include/asm/fpu/api.h b/drivers/net/wireguard/compat/fpu-x86/include/asm/fpu/api.h new file mode 100644 index 000000000000..f3f9117bcb88 --- /dev/null +++ b/drivers/net/wireguard/compat/fpu-x86/include/asm/fpu/api.h @@ -0,0 +1 @@ +#include <asm/i387.h> diff --git a/drivers/net/wireguard/compat/intel-family-x86/include/asm/intel-family.h b/drivers/net/wireguard/compat/intel-family-x86/include/asm/intel-family.h new file mode 100644 index 000000000000..35a6bc4da8ad --- /dev/null +++ b/drivers/net/wireguard/compat/intel-family-x86/include/asm/intel-family.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_X86_INTEL_FAMILY_H +#define _ASM_X86_INTEL_FAMILY_H + +/* + * "Big Core" Processors (Branded as Core, Xeon, etc...) 
+ * + * The "_X" parts are generally the EP and EX Xeons, or the + * "Extreme" ones, like Broadwell-E. + * + * Things ending in "2" are usually because we have no better + * name for them. There's no processor called "SILVERMONT2". + */ + +#define INTEL_FAM6_CORE_YONAH 0x0E + +#define INTEL_FAM6_CORE2_MEROM 0x0F +#define INTEL_FAM6_CORE2_MEROM_L 0x16 +#define INTEL_FAM6_CORE2_PENRYN 0x17 +#define INTEL_FAM6_CORE2_DUNNINGTON 0x1D + +#define INTEL_FAM6_NEHALEM 0x1E +#define INTEL_FAM6_NEHALEM_G 0x1F /* Auburndale / Havendale */ +#define INTEL_FAM6_NEHALEM_EP 0x1A +#define INTEL_FAM6_NEHALEM_EX 0x2E + +#define INTEL_FAM6_WESTMERE 0x25 +#define INTEL_FAM6_WESTMERE_EP 0x2C +#define INTEL_FAM6_WESTMERE_EX 0x2F + +#define INTEL_FAM6_SANDYBRIDGE 0x2A +#define INTEL_FAM6_SANDYBRIDGE_X 0x2D +#define INTEL_FAM6_IVYBRIDGE 0x3A +#define INTEL_FAM6_IVYBRIDGE_X 0x3E + +#define INTEL_FAM6_HASWELL_CORE 0x3C +#define INTEL_FAM6_HASWELL_X 0x3F +#define INTEL_FAM6_HASWELL_ULT 0x45 +#define INTEL_FAM6_HASWELL_GT3E 0x46 + +#define INTEL_FAM6_BROADWELL_CORE 0x3D +#define INTEL_FAM6_BROADWELL_GT3E 0x47 +#define INTEL_FAM6_BROADWELL_X 0x4F +#define INTEL_FAM6_BROADWELL_XEON_D 0x56 + +#define INTEL_FAM6_SKYLAKE_MOBILE 0x4E +#define INTEL_FAM6_SKYLAKE_DESKTOP 0x5E +#define INTEL_FAM6_SKYLAKE_X 0x55 +#define INTEL_FAM6_KABYLAKE_MOBILE 0x8E +#define INTEL_FAM6_KABYLAKE_DESKTOP 0x9E + +/* "Small Core" Processors (Atom) */ + +#define INTEL_FAM6_ATOM_PINEVIEW 0x1C +#define INTEL_FAM6_ATOM_LINCROFT 0x26 +#define INTEL_FAM6_ATOM_PENWELL 0x27 +#define INTEL_FAM6_ATOM_CLOVERVIEW 0x35 +#define INTEL_FAM6_ATOM_CEDARVIEW 0x36 +#define INTEL_FAM6_ATOM_SILVERMONT1 0x37 /* BayTrail/BYT / Valleyview */ +#define INTEL_FAM6_ATOM_SILVERMONT2 0x4D /* Avaton/Rangely */ +#define INTEL_FAM6_ATOM_AIRMONT 0x4C /* CherryTrail / Braswell */ +#define INTEL_FAM6_ATOM_MERRIFIELD 0x4A /* Tangier */ +#define INTEL_FAM6_ATOM_MOOREFIELD 0x5A /* Anniedale */ +#define INTEL_FAM6_ATOM_GOLDMONT 0x5C +#define INTEL_FAM6_ATOM_DENVERTON 0x5F /* Goldmont Microserver */ +#define INTEL_FAM6_ATOM_GEMINI_LAKE 0x7A + +/* Xeon Phi */ + +#define INTEL_FAM6_XEON_PHI_KNL 0x57 /* Knights Landing */ +#define INTEL_FAM6_XEON_PHI_KNM 0x85 /* Knights Mill */ + +#endif /* _ASM_X86_INTEL_FAMILY_H */ diff --git a/drivers/net/wireguard/compat/memneq/include.h b/drivers/net/wireguard/compat/memneq/include.h new file mode 100644 index 000000000000..2d18acd9b6c8 --- /dev/null +++ b/drivers/net/wireguard/compat/memneq/include.h @@ -0,0 +1,5 @@ +extern noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size); +static inline int crypto_memneq(const void *a, const void *b, size_t size) +{ + return __crypto_memneq(a, b, size) != 0UL ? 1 : 0; +} diff --git a/drivers/net/wireguard/compat/memneq/memneq.c b/drivers/net/wireguard/compat/memneq/memneq.c new file mode 100644 index 000000000000..1c427d405537 --- /dev/null +++ b/drivers/net/wireguard/compat/memneq/memneq.c @@ -0,0 +1,170 @@ +/* + * Constant-time equality testing of memory regions. + * + * Authors: + * + * James Yonan <james@openvpn.net> + * Daniel Borkmann <dborkman@redhat.com> + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of OpenVPN Technologies nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <crypto/algapi.h> + +/* Make the optimizer believe the variable can be manipulated arbitrarily. 
*/ +#define COMPILER_OPTIMIZER_HIDE_VAR(var) asm("" : "=r" (var) : "0" (var)) + +#ifndef __HAVE_ARCH_CRYPTO_MEMNEQ + +/* Generic path for arbitrary size */ +static inline unsigned long +__crypto_memneq_generic(const void *a, const void *b, size_t size) +{ + unsigned long neq = 0; + +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + while (size >= sizeof(unsigned long)) { + neq |= *(unsigned long *)a ^ *(unsigned long *)b; + COMPILER_OPTIMIZER_HIDE_VAR(neq); + a += sizeof(unsigned long); + b += sizeof(unsigned long); + size -= sizeof(unsigned long); + } +#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ + while (size > 0) { + neq |= *(unsigned char *)a ^ *(unsigned char *)b; + COMPILER_OPTIMIZER_HIDE_VAR(neq); + a += 1; + b += 1; + size -= 1; + } + return neq; +} + +/* Loop-free fast-path for frequently used 16-byte size */ +static inline unsigned long __crypto_memneq_16(const void *a, const void *b) +{ + unsigned long neq = 0; + +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + if (sizeof(unsigned long) == 8) { + neq |= *(unsigned long *)(a) ^ *(unsigned long *)(b); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned long *)(a+8) ^ *(unsigned long *)(b+8); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + } else if (sizeof(unsigned int) == 4) { + neq |= *(unsigned int *)(a) ^ *(unsigned int *)(b); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned int *)(a+4) ^ *(unsigned int *)(b+4); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned int *)(a+8) ^ *(unsigned int *)(b+8); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned int *)(a+12) ^ *(unsigned int *)(b+12); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + } else +#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ + { + neq |= *(unsigned char *)(a) ^ *(unsigned char *)(b); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+1) ^ *(unsigned char *)(b+1); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+2) ^ *(unsigned char *)(b+2); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+3) ^ *(unsigned char *)(b+3); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+4) ^ *(unsigned char *)(b+4); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+5) ^ *(unsigned char *)(b+5); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+6) ^ *(unsigned char *)(b+6); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+7) ^ *(unsigned char *)(b+7); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+8) ^ *(unsigned char *)(b+8); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+9) ^ *(unsigned char *)(b+9); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+10) ^ *(unsigned char *)(b+10); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+11) ^ *(unsigned char *)(b+11); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+12) ^ *(unsigned char *)(b+12); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+13) ^ *(unsigned char *)(b+13); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+14) ^ *(unsigned char *)(b+14); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+15) ^ *(unsigned char *)(b+15); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + } + + return neq; +} + +/* Compare two areas of memory without leaking timing information, + * and with special optimizations for common sizes. Users should + * not call this function directly, but should instead use + * crypto_memneq defined in crypto/algapi.h. 
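To make the intended call pattern concrete, here is a minimal caller-side sketch (illustrative only, not part of this patch; the helper name and buffers are assumptions). It shows the constant-time comparison being reached through crypto_memneq(), which this compat file merely backfills when the kernel lacks it.

/* Hypothetical constant-time MAC check: reject a mismatch without leaking,
 * via timing, how many leading bytes happened to match.
 */
#include <linux/types.h>
#include <crypto/algapi.h>

static bool mac_matches(const u8 *expected, const u8 *received, size_t len)
{
	/* crypto_memneq() returns non-zero when the regions differ. */
	return crypto_memneq(expected, received, len) == 0;
}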
+ */ +noinline unsigned long __crypto_memneq(const void *a, const void *b, + size_t size) +{ + switch (size) { + case 16: + return __crypto_memneq_16(a, b); + default: + return __crypto_memneq_generic(a, b, size); + } +} + +#endif /* __HAVE_ARCH_CRYPTO_MEMNEQ */ diff --git a/drivers/net/wireguard/compat/neon-arm/include/asm/neon.h b/drivers/net/wireguard/compat/neon-arm/include/asm/neon.h new file mode 100644 index 000000000000..980d831e201a --- /dev/null +++ b/drivers/net/wireguard/compat/neon-arm/include/asm/neon.h @@ -0,0 +1,7 @@ +#ifndef _ARCH_ARM_ASM_NEON +#define _ARCH_ARM_ASM_NEON +#define kernel_neon_begin() \ + BUILD_BUG_ON_MSG(1, "This kernel does not support ARM NEON") +#define kernel_neon_end() \ + BUILD_BUG_ON_MSG(1, "This kernel does not support ARM NEON") +#endif diff --git a/drivers/net/wireguard/compat/ptr_ring/include/linux/ptr_ring.h b/drivers/net/wireguard/compat/ptr_ring/include/linux/ptr_ring.h new file mode 100644 index 000000000000..9d514bac1388 --- /dev/null +++ b/drivers/net/wireguard/compat/ptr_ring/include/linux/ptr_ring.h @@ -0,0 +1,674 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Definitions for the 'struct ptr_ring' datastructure. + * + * Author: + * Michael S. Tsirkin <mst@redhat.com> + * + * Copyright (C) 2016 Red Hat, Inc. + * + * This is a limited-size FIFO maintaining pointers in FIFO order, with + * one CPU producing entries and another consuming entries from a FIFO. + * + * This implementation tries to minimize cache-contention when there is a + * single producer and a single consumer CPU. + */ + +#ifndef _LINUX_PTR_RING_H +#define _LINUX_PTR_RING_H 1 + +#ifdef __KERNEL__ +#include <linux/spinlock.h> +#include <linux/cache.h> +#include <linux/types.h> +#include <linux/compiler.h> +#include <linux/slab.h> +#include <linux/mm.h> +#include <asm/errno.h> +#endif + +struct ptr_ring { + int producer ____cacheline_aligned_in_smp; + spinlock_t producer_lock; + int consumer_head ____cacheline_aligned_in_smp; /* next valid entry */ + int consumer_tail; /* next entry to invalidate */ + spinlock_t consumer_lock; + /* Shared consumer/producer data */ + /* Read-only by both the producer and the consumer */ + int size ____cacheline_aligned_in_smp; /* max entries in queue */ + int batch; /* number of entries to consume in a batch */ + void **queue; +}; + +/* Note: callers invoking this in a loop must use a compiler barrier, + * for example cpu_relax(). + * + * NB: this is unlike __ptr_ring_empty in that callers must hold producer_lock: + * see e.g. ptr_ring_full. 
+ */ +static inline bool __ptr_ring_full(struct ptr_ring *r) +{ + return r->queue[r->producer]; +} + +static inline bool ptr_ring_full(struct ptr_ring *r) +{ + bool ret; + + spin_lock(&r->producer_lock); + ret = __ptr_ring_full(r); + spin_unlock(&r->producer_lock); + + return ret; +} + +static inline bool ptr_ring_full_irq(struct ptr_ring *r) +{ + bool ret; + + spin_lock_irq(&r->producer_lock); + ret = __ptr_ring_full(r); + spin_unlock_irq(&r->producer_lock); + + return ret; +} + +static inline bool ptr_ring_full_any(struct ptr_ring *r) +{ + unsigned long flags; + bool ret; + + spin_lock_irqsave(&r->producer_lock, flags); + ret = __ptr_ring_full(r); + spin_unlock_irqrestore(&r->producer_lock, flags); + + return ret; +} + +static inline bool ptr_ring_full_bh(struct ptr_ring *r) +{ + bool ret; + + spin_lock_bh(&r->producer_lock); + ret = __ptr_ring_full(r); + spin_unlock_bh(&r->producer_lock); + + return ret; +} + +/* Note: callers invoking this in a loop must use a compiler barrier, + * for example cpu_relax(). Callers must hold producer_lock. + * Callers are responsible for making sure pointer that is being queued + * points to a valid data. + */ +static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr) +{ + if (unlikely(!r->size) || r->queue[r->producer]) + return -ENOSPC; + + /* Make sure the pointer we are storing points to a valid data. */ + /* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */ + smp_wmb(); + + WRITE_ONCE(r->queue[r->producer++], ptr); + if (unlikely(r->producer >= r->size)) + r->producer = 0; + return 0; +} + +/* + * Note: resize (below) nests producer lock within consumer lock, so if you + * consume in interrupt or BH context, you must disable interrupts/BH when + * calling this. + */ +static inline int ptr_ring_produce(struct ptr_ring *r, void *ptr) +{ + int ret; + + spin_lock(&r->producer_lock); + ret = __ptr_ring_produce(r, ptr); + spin_unlock(&r->producer_lock); + + return ret; +} + +static inline int ptr_ring_produce_irq(struct ptr_ring *r, void *ptr) +{ + int ret; + + spin_lock_irq(&r->producer_lock); + ret = __ptr_ring_produce(r, ptr); + spin_unlock_irq(&r->producer_lock); + + return ret; +} + +static inline int ptr_ring_produce_any(struct ptr_ring *r, void *ptr) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&r->producer_lock, flags); + ret = __ptr_ring_produce(r, ptr); + spin_unlock_irqrestore(&r->producer_lock, flags); + + return ret; +} + +static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr) +{ + int ret; + + spin_lock_bh(&r->producer_lock); + ret = __ptr_ring_produce(r, ptr); + spin_unlock_bh(&r->producer_lock); + + return ret; +} + +static inline void *__ptr_ring_peek(struct ptr_ring *r) +{ + if (likely(r->size)) + return READ_ONCE(r->queue[r->consumer_head]); + return NULL; +} + +/* + * Test ring empty status without taking any locks. + * + * NB: This is only safe to call if ring is never resized. + * + * However, if some other CPU consumes ring entries at the same time, the value + * returned is not guaranteed to be correct. + * + * In this case - to avoid incorrectly detecting the ring + * as empty - the CPU consuming the ring entries is responsible + * for either consuming all ring entries until the ring is empty, + * or synchronizing with some other CPU and causing it to + * re-test __ptr_ring_empty and/or consume the ring enteries + * after the synchronization point. + * + * Note: callers invoking this in a loop must use a compiler barrier, + * for example cpu_relax(). 
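As a hedged illustration of that note (a hypothetical single-consumer poll loop, not taken from this patch): cpu_relax() supplies the compiler barrier between lockless re-reads of the ring state.

/* Hypothetical busy-poll consumer; real consumers normally sleep or use
 * NAPI polling instead of spinning like this.
 */
#include <linux/ptr_ring.h>

static void *poll_until_nonempty(struct ptr_ring *r)
{
	while (__ptr_ring_empty(r))
		cpu_relax();	/* compiler barrier between lockless reads */
	return ptr_ring_consume(r);	/* takes consumer_lock internally */
}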
+ */ +static inline bool __ptr_ring_empty(struct ptr_ring *r) +{ + if (likely(r->size)) + return !r->queue[READ_ONCE(r->consumer_head)]; + return true; +} + +static inline bool ptr_ring_empty(struct ptr_ring *r) +{ + bool ret; + + spin_lock(&r->consumer_lock); + ret = __ptr_ring_empty(r); + spin_unlock(&r->consumer_lock); + + return ret; +} + +static inline bool ptr_ring_empty_irq(struct ptr_ring *r) +{ + bool ret; + + spin_lock_irq(&r->consumer_lock); + ret = __ptr_ring_empty(r); + spin_unlock_irq(&r->consumer_lock); + + return ret; +} + +static inline bool ptr_ring_empty_any(struct ptr_ring *r) +{ + unsigned long flags; + bool ret; + + spin_lock_irqsave(&r->consumer_lock, flags); + ret = __ptr_ring_empty(r); + spin_unlock_irqrestore(&r->consumer_lock, flags); + + return ret; +} + +static inline bool ptr_ring_empty_bh(struct ptr_ring *r) +{ + bool ret; + + spin_lock_bh(&r->consumer_lock); + ret = __ptr_ring_empty(r); + spin_unlock_bh(&r->consumer_lock); + + return ret; +} + +/* Must only be called after __ptr_ring_peek returned !NULL */ +static inline void __ptr_ring_discard_one(struct ptr_ring *r) +{ + /* Fundamentally, what we want to do is update consumer + * index and zero out the entry so producer can reuse it. + * Doing it naively at each consume would be as simple as: + * consumer = r->consumer; + * r->queue[consumer++] = NULL; + * if (unlikely(consumer >= r->size)) + * consumer = 0; + * r->consumer = consumer; + * but that is suboptimal when the ring is full as producer is writing + * out new entries in the same cache line. Defer these updates until a + * batch of entries has been consumed. + */ + /* Note: we must keep consumer_head valid at all times for __ptr_ring_empty + * to work correctly. + */ + int consumer_head = r->consumer_head; + int head = consumer_head++; + + /* Once we have processed enough entries invalidate them in + * the ring all at once so producer can reuse their space in the ring. + * We also do this when we reach end of the ring - not mandatory + * but helps keep the implementation simple. + */ + if (unlikely(consumer_head - r->consumer_tail >= r->batch || + consumer_head >= r->size)) { + /* Zero out entries in the reverse order: this way we touch the + * cache line that producer might currently be reading the last; + * producer won't make progress and touch other cache lines + * besides the first one until we write out all entries. + */ + while (likely(head >= r->consumer_tail)) + r->queue[head--] = NULL; + r->consumer_tail = consumer_head; + } + if (unlikely(consumer_head >= r->size)) { + consumer_head = 0; + r->consumer_tail = 0; + } + /* matching READ_ONCE in __ptr_ring_empty for lockless tests */ + WRITE_ONCE(r->consumer_head, consumer_head); +} + +static inline void *__ptr_ring_consume(struct ptr_ring *r) +{ + void *ptr; + + ptr = __ptr_ring_peek(r); + if (ptr) + __ptr_ring_discard_one(r); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) + /* The READ_ONCE in __ptr_ring_peek doesn't imply a barrier on old kernels. */ + smp_read_barrier_depends(); +#endif + + return ptr; +} + +static inline int __ptr_ring_consume_batched(struct ptr_ring *r, + void **array, int n) +{ + void *ptr; + int i; + + for (i = 0; i < n; i++) { + ptr = __ptr_ring_consume(r); + if (!ptr) + break; + array[i] = ptr; + } + + return i; +} + +/* + * Note: resize (below) nests producer lock within consumer lock, so if you + * call this in interrupt or BH context, you must disable interrupts/BH when + * producing. 
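To illustrate that locking note with an assumed usage pattern (not from this patch): when the ring is drained from softirq context, the producer side uses the _bh variant so the resize path's consumer-outside/producer-inside lock nesting stays safe.

/* Hypothetical pairing for a ring that is consumed in BH (softirq) context. */
#include <linux/ptr_ring.h>
#include <linux/skbuff.h>

static int queue_from_task(struct ptr_ring *r, struct sk_buff *skb)
{
	/* Disables BH around producer_lock; returns -ENOSPC when full. */
	return ptr_ring_produce_bh(r, skb);
}

static struct sk_buff *dequeue_in_softirq(struct ptr_ring *r)
{
	/* Already running with BH disabled, so the plain variant suffices. */
	return ptr_ring_consume(r);
}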
+ */ +static inline void *ptr_ring_consume(struct ptr_ring *r) +{ + void *ptr; + + spin_lock(&r->consumer_lock); + ptr = __ptr_ring_consume(r); + spin_unlock(&r->consumer_lock); + + return ptr; +} + +static inline void *ptr_ring_consume_irq(struct ptr_ring *r) +{ + void *ptr; + + spin_lock_irq(&r->consumer_lock); + ptr = __ptr_ring_consume(r); + spin_unlock_irq(&r->consumer_lock); + + return ptr; +} + +static inline void *ptr_ring_consume_any(struct ptr_ring *r) +{ + unsigned long flags; + void *ptr; + + spin_lock_irqsave(&r->consumer_lock, flags); + ptr = __ptr_ring_consume(r); + spin_unlock_irqrestore(&r->consumer_lock, flags); + + return ptr; +} + +static inline void *ptr_ring_consume_bh(struct ptr_ring *r) +{ + void *ptr; + + spin_lock_bh(&r->consumer_lock); + ptr = __ptr_ring_consume(r); + spin_unlock_bh(&r->consumer_lock); + + return ptr; +} + +static inline int ptr_ring_consume_batched(struct ptr_ring *r, + void **array, int n) +{ + int ret; + + spin_lock(&r->consumer_lock); + ret = __ptr_ring_consume_batched(r, array, n); + spin_unlock(&r->consumer_lock); + + return ret; +} + +static inline int ptr_ring_consume_batched_irq(struct ptr_ring *r, + void **array, int n) +{ + int ret; + + spin_lock_irq(&r->consumer_lock); + ret = __ptr_ring_consume_batched(r, array, n); + spin_unlock_irq(&r->consumer_lock); + + return ret; +} + +static inline int ptr_ring_consume_batched_any(struct ptr_ring *r, + void **array, int n) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&r->consumer_lock, flags); + ret = __ptr_ring_consume_batched(r, array, n); + spin_unlock_irqrestore(&r->consumer_lock, flags); + + return ret; +} + +static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r, + void **array, int n) +{ + int ret; + + spin_lock_bh(&r->consumer_lock); + ret = __ptr_ring_consume_batched(r, array, n); + spin_unlock_bh(&r->consumer_lock); + + return ret; +} + +/* Cast to structure type and call a function without discarding from FIFO. + * Function must return a value. + * Callers must take consumer_lock. + */ +#define __PTR_RING_PEEK_CALL(r, f) ((f)(__ptr_ring_peek(r))) + +#define PTR_RING_PEEK_CALL(r, f) ({ \ + typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \ + \ + spin_lock(&(r)->consumer_lock); \ + __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \ + spin_unlock(&(r)->consumer_lock); \ + __PTR_RING_PEEK_CALL_v; \ +}) + +#define PTR_RING_PEEK_CALL_IRQ(r, f) ({ \ + typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \ + \ + spin_lock_irq(&(r)->consumer_lock); \ + __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \ + spin_unlock_irq(&(r)->consumer_lock); \ + __PTR_RING_PEEK_CALL_v; \ +}) + +#define PTR_RING_PEEK_CALL_BH(r, f) ({ \ + typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \ + \ + spin_lock_bh(&(r)->consumer_lock); \ + __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \ + spin_unlock_bh(&(r)->consumer_lock); \ + __PTR_RING_PEEK_CALL_v; \ +}) + +#define PTR_RING_PEEK_CALL_ANY(r, f) ({ \ + typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \ + unsigned long __PTR_RING_PEEK_CALL_f;\ + \ + spin_lock_irqsave(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \ + __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \ + spin_unlock_irqrestore(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \ + __PTR_RING_PEEK_CALL_v; \ +}) + +/* Not all gfp_t flags (besides GFP_KERNEL) are allowed. See + * documentation for vmalloc for which of them are legal. 
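A minimal setup/teardown sketch under the stated allocation constraint (names hypothetical, not part of this patch): process context, GFP_KERNEL, and a destroy callback for anything still queued at cleanup time.

#include <linux/ptr_ring.h>
#include <linux/slab.h>

static void free_entry(void *ptr)
{
	kfree(ptr);	/* assumes entries were kmalloc'd by the producer */
}

static int my_ring_setup(struct ptr_ring *r, unsigned int size)
{
	/* GFP_KERNEL is the safe default; kvmalloc may fall back to vmalloc. */
	return ptr_ring_init(r, size, GFP_KERNEL);	/* -ENOMEM on failure */
}

static void my_ring_teardown(struct ptr_ring *r)
{
	ptr_ring_cleanup(r, free_entry);
}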
+ */ +static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp) +{ + if (size > KMALLOC_MAX_SIZE / sizeof(void *)) + return NULL; + return kvmalloc(size * sizeof(void *), gfp | __GFP_ZERO); +} + +static inline void __ptr_ring_set_size(struct ptr_ring *r, int size) +{ + r->size = size; + r->batch = SMP_CACHE_BYTES * 2 / sizeof(*(r->queue)); + /* We need to set batch at least to 1 to make logic + * in __ptr_ring_discard_one work correctly. + * Batching too much (because ring is small) would cause a lot of + * burstiness. Needs tuning, for now disable batching. + */ + if (r->batch > r->size / 2 || !r->batch) + r->batch = 1; +} + +static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp) +{ + r->queue = __ptr_ring_init_queue_alloc(size, gfp); + if (!r->queue) + return -ENOMEM; + + __ptr_ring_set_size(r, size); + r->producer = r->consumer_head = r->consumer_tail = 0; + spin_lock_init(&r->producer_lock); + spin_lock_init(&r->consumer_lock); + + return 0; +} + +/* + * Return entries into ring. Destroy entries that don't fit. + * + * Note: this is expected to be a rare slow path operation. + * + * Note: producer lock is nested within consumer lock, so if you + * resize you must make sure all uses nest correctly. + * In particular if you consume ring in interrupt or BH context, you must + * disable interrupts/BH when doing so. + */ +static inline void ptr_ring_unconsume(struct ptr_ring *r, void **batch, int n, + void (*destroy)(void *)) +{ + unsigned long flags; + int head; + + spin_lock_irqsave(&r->consumer_lock, flags); + spin_lock(&r->producer_lock); + + if (!r->size) + goto done; + + /* + * Clean out buffered entries (for simplicity). This way following code + * can test entries for NULL and if not assume they are valid. + */ + head = r->consumer_head - 1; + while (likely(head >= r->consumer_tail)) + r->queue[head--] = NULL; + r->consumer_tail = r->consumer_head; + + /* + * Go over entries in batch, start moving head back and copy entries. + * Stop when we run into previously unconsumed entries. + */ + while (n) { + head = r->consumer_head - 1; + if (head < 0) + head = r->size - 1; + if (r->queue[head]) { + /* This batch entry will have to be destroyed. */ + goto done; + } + r->queue[head] = batch[--n]; + r->consumer_tail = head; + /* matching READ_ONCE in __ptr_ring_empty for lockless tests */ + WRITE_ONCE(r->consumer_head, head); + } + +done: + /* Destroy all entries left in the batch. */ + while (n) + destroy(batch[--n]); + spin_unlock(&r->producer_lock); + spin_unlock_irqrestore(&r->consumer_lock, flags); +} + +static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue, + int size, gfp_t gfp, + void (*destroy)(void *)) +{ + int producer = 0; + void **old; + void *ptr; + + while ((ptr = __ptr_ring_consume(r))) + if (producer < size) + queue[producer++] = ptr; + else if (destroy) + destroy(ptr); + + if (producer >= size) + producer = 0; + __ptr_ring_set_size(r, size); + r->producer = producer; + r->consumer_head = 0; + r->consumer_tail = 0; + old = r->queue; + r->queue = queue; + + return old; +} + +/* + * Note: producer lock is nested within consumer lock, so if you + * resize you must make sure all uses nest correctly. + * In particular if you consume ring in interrupt or BH context, you must + * disable interrupts/BH when doing so. 
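A sketch of the resize call itself (hypothetical grow path, not from this patch); the destroy callback only runs for entries that no longer fit in the new queue.

#include <linux/ptr_ring.h>
#include <linux/slab.h>

static void drop_entry(void *ptr)
{
	kfree(ptr);	/* assumes kmalloc'd entries */
}

static int my_ring_grow(struct ptr_ring *r, int new_size)
{
	/* Internally takes consumer_lock (irqsave) then producer_lock. */
	return ptr_ring_resize(r, new_size, GFP_KERNEL, drop_entry);
}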
+ */ +static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp, + void (*destroy)(void *)) +{ + unsigned long flags; + void **queue = __ptr_ring_init_queue_alloc(size, gfp); + void **old; + + if (!queue) + return -ENOMEM; + + spin_lock_irqsave(&(r)->consumer_lock, flags); + spin_lock(&(r)->producer_lock); + + old = __ptr_ring_swap_queue(r, queue, size, gfp, destroy); + + spin_unlock(&(r)->producer_lock); + spin_unlock_irqrestore(&(r)->consumer_lock, flags); + + kvfree(old); + + return 0; +} + +/* + * Note: producer lock is nested within consumer lock, so if you + * resize you must make sure all uses nest correctly. + * In particular if you consume ring in interrupt or BH context, you must + * disable interrupts/BH when doing so. + */ +static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, + unsigned int nrings, + int size, + gfp_t gfp, void (*destroy)(void *)) +{ + unsigned long flags; + void ***queues; + int i; + + queues = kmalloc_array(nrings, sizeof(*queues), gfp); + if (!queues) + goto noqueues; + + for (i = 0; i < nrings; ++i) { + queues[i] = __ptr_ring_init_queue_alloc(size, gfp); + if (!queues[i]) + goto nomem; + } + + for (i = 0; i < nrings; ++i) { + spin_lock_irqsave(&(rings[i])->consumer_lock, flags); + spin_lock(&(rings[i])->producer_lock); + queues[i] = __ptr_ring_swap_queue(rings[i], queues[i], + size, gfp, destroy); + spin_unlock(&(rings[i])->producer_lock); + spin_unlock_irqrestore(&(rings[i])->consumer_lock, flags); + } + + for (i = 0; i < nrings; ++i) + kvfree(queues[i]); + + kfree(queues); + + return 0; + +nomem: + while (--i >= 0) + kvfree(queues[i]); + + kfree(queues); + +noqueues: + return -ENOMEM; +} + +static inline void ptr_ring_cleanup(struct ptr_ring *r, void (*destroy)(void *)) +{ + void *ptr; + + if (destroy) + while ((ptr = ptr_ring_consume(r))) + destroy(ptr); + kvfree(r->queue); +} + +#endif /* _LINUX_PTR_RING_H */ diff --git a/drivers/net/wireguard/compat/simd-asm/include/asm/simd.h b/drivers/net/wireguard/compat/simd-asm/include/asm/simd.h new file mode 100644 index 000000000000..a975b38b5578 --- /dev/null +++ b/drivers/net/wireguard/compat/simd-asm/include/asm/simd.h @@ -0,0 +1,21 @@ +#ifndef _COMPAT_ASM_SIMD_H +#define _COMPAT_ASM_SIMD_H + +#if defined(CONFIG_X86_64) +#include <asm/fpu/api.h> +#endif + +static __must_check inline bool may_use_simd(void) +{ +#if defined(CONFIG_X86_64) + return irq_fpu_usable(); +#elif defined(CONFIG_ARM64) && defined(CONFIG_KERNEL_MODE_NEON) + return true; +#elif defined(CONFIG_ARM) && defined(CONFIG_KERNEL_MODE_NEON) + return !in_nmi() && !in_irq() && !in_serving_softirq(); +#else + return false; +#endif +} + +#endif diff --git a/drivers/net/wireguard/compat/simd/include/linux/simd.h b/drivers/net/wireguard/compat/simd/include/linux/simd.h new file mode 100644 index 000000000000..c75c72471e9e --- /dev/null +++ b/drivers/net/wireguard/compat/simd/include/linux/simd.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. 
+ */ + +#ifndef _WG_SIMD_H +#define _WG_SIMD_H + +#include <linux/sched.h> +#include <asm/simd.h> +#if defined(CONFIG_X86_64) +#include <linux/version.h> +#include <asm/fpu/api.h> +#elif defined(CONFIG_KERNEL_MODE_NEON) +#include <asm/neon.h> +#endif + +typedef enum { + HAVE_NO_SIMD = 1 << 0, + HAVE_FULL_SIMD = 1 << 1, + HAVE_SIMD_IN_USE = 1 << 31 +} simd_context_t; + +#define DONT_USE_SIMD ((simd_context_t []){ HAVE_NO_SIMD }) + +static inline void simd_get(simd_context_t *ctx) +{ + *ctx = !IS_ENABLED(CONFIG_PREEMPT_RT_BASE) && may_use_simd() ? HAVE_FULL_SIMD : HAVE_NO_SIMD; +} + +static inline void simd_put(simd_context_t *ctx) +{ +#if defined(CONFIG_X86_64) + if (*ctx & HAVE_SIMD_IN_USE) + kernel_fpu_end(); +#elif defined(CONFIG_KERNEL_MODE_NEON) + if (*ctx & HAVE_SIMD_IN_USE) + kernel_neon_end(); +#endif + *ctx = HAVE_NO_SIMD; +} + +static inline bool simd_relax(simd_context_t *ctx) +{ +#ifdef CONFIG_PREEMPT + if ((*ctx & HAVE_SIMD_IN_USE) && need_resched()) { + simd_put(ctx); + simd_get(ctx); + return true; + } +#endif + return false; +} + +static __must_check inline bool simd_use(simd_context_t *ctx) +{ + if (!(*ctx & HAVE_FULL_SIMD)) + return false; + if (*ctx & HAVE_SIMD_IN_USE) + return true; +#if defined(CONFIG_X86_64) + kernel_fpu_begin(); +#elif defined(CONFIG_KERNEL_MODE_NEON) + kernel_neon_begin(); +#endif + *ctx |= HAVE_SIMD_IN_USE; + return true; +} + +#endif /* _WG_SIMD_H */ diff --git a/drivers/net/wireguard/compat/siphash/include/linux/siphash.h b/drivers/net/wireguard/compat/siphash/include/linux/siphash.h new file mode 100644 index 000000000000..1e5e337d15bf --- /dev/null +++ b/drivers/net/wireguard/compat/siphash/include/linux/siphash.h @@ -0,0 +1,140 @@ +/* Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + * + * This file is provided under a dual BSD/GPLv2 license. + * + * SipHash: a fast short-input PRF + * https://131002.net/siphash/ + * + * This implementation is specifically for SipHash2-4 for a secure PRF + * and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for + * hashtables. 
+ */ + +#ifndef _WG_LINUX_SIPHASH_H +#define _WG_LINUX_SIPHASH_H + +#include <linux/types.h> +#include <linux/kernel.h> + +#define SIPHASH_ALIGNMENT __alignof__(u64) +typedef struct { + u64 key[2]; +} siphash_key_t; + +u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key); +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key); +#endif + +u64 siphash_1u64(const u64 a, const siphash_key_t *key); +u64 siphash_2u64(const u64 a, const u64 b, const siphash_key_t *key); +u64 siphash_3u64(const u64 a, const u64 b, const u64 c, + const siphash_key_t *key); +u64 siphash_4u64(const u64 a, const u64 b, const u64 c, const u64 d, + const siphash_key_t *key); +u64 siphash_1u32(const u32 a, const siphash_key_t *key); +u64 siphash_3u32(const u32 a, const u32 b, const u32 c, + const siphash_key_t *key); + +static inline u64 siphash_2u32(const u32 a, const u32 b, + const siphash_key_t *key) +{ + return siphash_1u64((u64)b << 32 | a, key); +} +static inline u64 siphash_4u32(const u32 a, const u32 b, const u32 c, + const u32 d, const siphash_key_t *key) +{ + return siphash_2u64((u64)b << 32 | a, (u64)d << 32 | c, key); +} + + +static inline u64 ___siphash_aligned(const __le64 *data, size_t len, + const siphash_key_t *key) +{ + if (__builtin_constant_p(len) && len == 4) + return siphash_1u32(le32_to_cpup((const __le32 *)data), key); + if (__builtin_constant_p(len) && len == 8) + return siphash_1u64(le64_to_cpu(data[0]), key); + if (__builtin_constant_p(len) && len == 16) + return siphash_2u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]), + key); + if (__builtin_constant_p(len) && len == 24) + return siphash_3u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]), + le64_to_cpu(data[2]), key); + if (__builtin_constant_p(len) && len == 32) + return siphash_4u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]), + le64_to_cpu(data[2]), le64_to_cpu(data[3]), + key); + return __siphash_aligned(data, len, key); +} + +/** + * siphash - compute 64-bit siphash PRF value + * @data: buffer to hash + * @size: size of @data + * @key: the siphash key + */ +static inline u64 siphash(const void *data, size_t len, + const siphash_key_t *key) +{ +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + if (!IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT)) + return __siphash_unaligned(data, len, key); +#endif + return ___siphash_aligned(data, len, key); +} + +#define HSIPHASH_ALIGNMENT __alignof__(unsigned long) +typedef struct { + unsigned long key[2]; +} hsiphash_key_t; + +u32 __hsiphash_aligned(const void *data, size_t len, + const hsiphash_key_t *key); +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +u32 __hsiphash_unaligned(const void *data, size_t len, + const hsiphash_key_t *key); +#endif + +u32 hsiphash_1u32(const u32 a, const hsiphash_key_t *key); +u32 hsiphash_2u32(const u32 a, const u32 b, const hsiphash_key_t *key); +u32 hsiphash_3u32(const u32 a, const u32 b, const u32 c, + const hsiphash_key_t *key); +u32 hsiphash_4u32(const u32 a, const u32 b, const u32 c, const u32 d, + const hsiphash_key_t *key); + +static inline u32 ___hsiphash_aligned(const __le32 *data, size_t len, + const hsiphash_key_t *key) +{ + if (__builtin_constant_p(len) && len == 4) + return hsiphash_1u32(le32_to_cpu(data[0]), key); + if (__builtin_constant_p(len) && len == 8) + return hsiphash_2u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]), + key); + if (__builtin_constant_p(len) && len == 12) + return hsiphash_3u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]), + 
le32_to_cpu(data[2]), key); + if (__builtin_constant_p(len) && len == 16) + return hsiphash_4u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]), + le32_to_cpu(data[2]), le32_to_cpu(data[3]), + key); + return __hsiphash_aligned(data, len, key); +} + +/** + * hsiphash - compute 32-bit hsiphash PRF value + * @data: buffer to hash + * @size: size of @data + * @key: the hsiphash key + */ +static inline u32 hsiphash(const void *data, size_t len, + const hsiphash_key_t *key) +{ +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + if (!IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT)) + return __hsiphash_unaligned(data, len, key); +#endif + return ___hsiphash_aligned(data, len, key); +} + +#endif /* _WG_LINUX_SIPHASH_H */ diff --git a/drivers/net/wireguard/compat/siphash/siphash.c b/drivers/net/wireguard/compat/siphash/siphash.c new file mode 100644 index 000000000000..58855328e6e0 --- /dev/null +++ b/drivers/net/wireguard/compat/siphash/siphash.c @@ -0,0 +1,539 @@ +/* Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + * + * This file is provided under a dual BSD/GPLv2 license. + * + * SipHash: a fast short-input PRF + * https://131002.net/siphash/ + * + * This implementation is specifically for SipHash2-4 for a secure PRF + * and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for + * hashtables. + */ + +#include <linux/siphash.h> +#include <asm/unaligned.h> + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) +#ifdef __LITTLE_ENDIAN +#define bytemask_from_count(cnt) (~(~0ul << (cnt)*8)) +#else +#define bytemask_from_count(cnt) (~(~0ul >> (cnt)*8)) +#endif +#endif + +#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64 +#include <linux/dcache.h> +#include <asm/word-at-a-time.h> +#endif + +#define SIPROUND \ + do { \ + v0 += v1; v1 = rol64(v1, 13); v1 ^= v0; v0 = rol64(v0, 32); \ + v2 += v3; v3 = rol64(v3, 16); v3 ^= v2; \ + v0 += v3; v3 = rol64(v3, 21); v3 ^= v0; \ + v2 += v1; v1 = rol64(v1, 17); v1 ^= v2; v2 = rol64(v2, 32); \ + } while (0) + +#define PREAMBLE(len) \ + u64 v0 = 0x736f6d6570736575ULL; \ + u64 v1 = 0x646f72616e646f6dULL; \ + u64 v2 = 0x6c7967656e657261ULL; \ + u64 v3 = 0x7465646279746573ULL; \ + u64 b = ((u64)(len)) << 56; \ + v3 ^= key->key[1]; \ + v2 ^= key->key[0]; \ + v1 ^= key->key[1]; \ + v0 ^= key->key[0]; + +#define POSTAMBLE \ + v3 ^= b; \ + SIPROUND; \ + SIPROUND; \ + v0 ^= b; \ + v2 ^= 0xff; \ + SIPROUND; \ + SIPROUND; \ + SIPROUND; \ + SIPROUND; \ + return (v0 ^ v1) ^ (v2 ^ v3); + +u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key) +{ + const u8 *end = data + len - (len % sizeof(u64)); + const u8 left = len & (sizeof(u64) - 1); + u64 m; + PREAMBLE(len) + for (; data != end; data += sizeof(u64)) { + m = le64_to_cpup(data); + v3 ^= m; + SIPROUND; + SIPROUND; + v0 ^= m; + } +#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64 + if (left) + b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) & + bytemask_from_count(left))); +#else + switch (left) { + case 7: b |= ((u64)end[6]) << 48; + case 6: b |= ((u64)end[5]) << 40; + case 5: b |= ((u64)end[4]) << 32; + case 4: b |= le32_to_cpup(data); break; + case 3: b |= ((u64)end[2]) << 16; + case 2: b |= le16_to_cpup(data); break; + case 1: b |= end[0]; + } +#endif + POSTAMBLE +} + +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key) +{ + const u8 *end = data + len - (len % sizeof(u64)); + const u8 left = len & (sizeof(u64) - 1); + u64 m; + PREAMBLE(len) + for 
(; data != end; data += sizeof(u64)) { + m = get_unaligned_le64(data); + v3 ^= m; + SIPROUND; + SIPROUND; + v0 ^= m; + } +#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64 + if (left) + b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) & + bytemask_from_count(left))); +#else + switch (left) { + case 7: b |= ((u64)end[6]) << 48; + case 6: b |= ((u64)end[5]) << 40; + case 5: b |= ((u64)end[4]) << 32; + case 4: b |= get_unaligned_le32(end); break; + case 3: b |= ((u64)end[2]) << 16; + case 2: b |= get_unaligned_le16(end); break; + case 1: b |= end[0]; + } +#endif + POSTAMBLE +} +#endif + +/** + * siphash_1u64 - compute 64-bit siphash PRF value of a u64 + * @first: first u64 + * @key: the siphash key + */ +u64 siphash_1u64(const u64 first, const siphash_key_t *key) +{ + PREAMBLE(8) + v3 ^= first; + SIPROUND; + SIPROUND; + v0 ^= first; + POSTAMBLE +} + +/** + * siphash_2u64 - compute 64-bit siphash PRF value of 2 u64 + * @first: first u64 + * @second: second u64 + * @key: the siphash key + */ +u64 siphash_2u64(const u64 first, const u64 second, const siphash_key_t *key) +{ + PREAMBLE(16) + v3 ^= first; + SIPROUND; + SIPROUND; + v0 ^= first; + v3 ^= second; + SIPROUND; + SIPROUND; + v0 ^= second; + POSTAMBLE +} + +/** + * siphash_3u64 - compute 64-bit siphash PRF value of 3 u64 + * @first: first u64 + * @second: second u64 + * @third: third u64 + * @key: the siphash key + */ +u64 siphash_3u64(const u64 first, const u64 second, const u64 third, + const siphash_key_t *key) +{ + PREAMBLE(24) + v3 ^= first; + SIPROUND; + SIPROUND; + v0 ^= first; + v3 ^= second; + SIPROUND; + SIPROUND; + v0 ^= second; + v3 ^= third; + SIPROUND; + SIPROUND; + v0 ^= third; + POSTAMBLE +} + +/** + * siphash_4u64 - compute 64-bit siphash PRF value of 4 u64 + * @first: first u64 + * @second: second u64 + * @third: third u64 + * @forth: forth u64 + * @key: the siphash key + */ +u64 siphash_4u64(const u64 first, const u64 second, const u64 third, + const u64 forth, const siphash_key_t *key) +{ + PREAMBLE(32) + v3 ^= first; + SIPROUND; + SIPROUND; + v0 ^= first; + v3 ^= second; + SIPROUND; + SIPROUND; + v0 ^= second; + v3 ^= third; + SIPROUND; + SIPROUND; + v0 ^= third; + v3 ^= forth; + SIPROUND; + SIPROUND; + v0 ^= forth; + POSTAMBLE +} + +u64 siphash_1u32(const u32 first, const siphash_key_t *key) +{ + PREAMBLE(4) + b |= first; + POSTAMBLE +} + +u64 siphash_3u32(const u32 first, const u32 second, const u32 third, + const siphash_key_t *key) +{ + u64 combined = (u64)second << 32 | first; + PREAMBLE(12) + v3 ^= combined; + SIPROUND; + SIPROUND; + v0 ^= combined; + b |= third; + POSTAMBLE +} + +#if BITS_PER_LONG == 64 +/* Note that on 64-bit, we make HalfSipHash1-3 actually be SipHash1-3, for + * performance reasons. On 32-bit, below, we actually implement HalfSipHash1-3. 
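For orientation, a hedged example of how these PRFs are meant to be consumed (hypothetical flow-hashing helper, not part of this patch): siphash_*() for a keyed 64-bit value that must resist hash-flooding, hsiphash_*() only where a weaker 32-bit hashtable index is acceptable.

#include <linux/siphash.h>
#include <linux/random.h>

static siphash_key_t flow_key;

static void flow_hash_init(void)
{
	get_random_bytes(&flow_key, sizeof(flow_key));
}

/* Hypothetical: hash an IPv4 4-tuple into a keyed 64-bit value. */
static u64 flow_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport)
{
	return siphash_3u32((__force u32)saddr, (__force u32)daddr,
			    ((__force u32)sport << 16) | (__force u32)dport,
			    &flow_key);
}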
+ */ + +#define HSIPROUND SIPROUND +#define HPREAMBLE(len) PREAMBLE(len) +#define HPOSTAMBLE \ + v3 ^= b; \ + HSIPROUND; \ + v0 ^= b; \ + v2 ^= 0xff; \ + HSIPROUND; \ + HSIPROUND; \ + HSIPROUND; \ + return (v0 ^ v1) ^ (v2 ^ v3); + +u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key) +{ + const u8 *end = data + len - (len % sizeof(u64)); + const u8 left = len & (sizeof(u64) - 1); + u64 m; + HPREAMBLE(len) + for (; data != end; data += sizeof(u64)) { + m = le64_to_cpup(data); + v3 ^= m; + HSIPROUND; + v0 ^= m; + } +#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64 + if (left) + b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) & + bytemask_from_count(left))); +#else + switch (left) { + case 7: b |= ((u64)end[6]) << 48; + case 6: b |= ((u64)end[5]) << 40; + case 5: b |= ((u64)end[4]) << 32; + case 4: b |= le32_to_cpup(data); break; + case 3: b |= ((u64)end[2]) << 16; + case 2: b |= le16_to_cpup(data); break; + case 1: b |= end[0]; + } +#endif + HPOSTAMBLE +} + +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +u32 __hsiphash_unaligned(const void *data, size_t len, + const hsiphash_key_t *key) +{ + const u8 *end = data + len - (len % sizeof(u64)); + const u8 left = len & (sizeof(u64) - 1); + u64 m; + HPREAMBLE(len) + for (; data != end; data += sizeof(u64)) { + m = get_unaligned_le64(data); + v3 ^= m; + HSIPROUND; + v0 ^= m; + } +#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64 + if (left) + b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) & + bytemask_from_count(left))); +#else + switch (left) { + case 7: b |= ((u64)end[6]) << 48; + case 6: b |= ((u64)end[5]) << 40; + case 5: b |= ((u64)end[4]) << 32; + case 4: b |= get_unaligned_le32(end); break; + case 3: b |= ((u64)end[2]) << 16; + case 2: b |= get_unaligned_le16(end); break; + case 1: b |= end[0]; + } +#endif + HPOSTAMBLE +} +#endif + +/** + * hsiphash_1u32 - compute 64-bit hsiphash PRF value of a u32 + * @first: first u32 + * @key: the hsiphash key + */ +u32 hsiphash_1u32(const u32 first, const hsiphash_key_t *key) +{ + HPREAMBLE(4) + b |= first; + HPOSTAMBLE +} + +/** + * hsiphash_2u32 - compute 32-bit hsiphash PRF value of 2 u32 + * @first: first u32 + * @second: second u32 + * @key: the hsiphash key + */ +u32 hsiphash_2u32(const u32 first, const u32 second, const hsiphash_key_t *key) +{ + u64 combined = (u64)second << 32 | first; + HPREAMBLE(8) + v3 ^= combined; + HSIPROUND; + v0 ^= combined; + HPOSTAMBLE +} + +/** + * hsiphash_3u32 - compute 32-bit hsiphash PRF value of 3 u32 + * @first: first u32 + * @second: second u32 + * @third: third u32 + * @key: the hsiphash key + */ +u32 hsiphash_3u32(const u32 first, const u32 second, const u32 third, + const hsiphash_key_t *key) +{ + u64 combined = (u64)second << 32 | first; + HPREAMBLE(12) + v3 ^= combined; + HSIPROUND; + v0 ^= combined; + b |= third; + HPOSTAMBLE +} + +/** + * hsiphash_4u32 - compute 32-bit hsiphash PRF value of 4 u32 + * @first: first u32 + * @second: second u32 + * @third: third u32 + * @forth: forth u32 + * @key: the hsiphash key + */ +u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third, + const u32 forth, const hsiphash_key_t *key) +{ + u64 combined = (u64)second << 32 | first; + HPREAMBLE(16) + v3 ^= combined; + HSIPROUND; + v0 ^= combined; + combined = (u64)forth << 32 | third; + v3 ^= combined; + HSIPROUND; + v0 ^= combined; + HPOSTAMBLE +} +#else +#define HSIPROUND \ + do { \ + v0 += v1; v1 = rol32(v1, 5); v1 ^= v0; v0 = rol32(v0, 16); \ + v2 += v3; v3 = rol32(v3, 8); v3 
^= v2; \ + v0 += v3; v3 = rol32(v3, 7); v3 ^= v0; \ + v2 += v1; v1 = rol32(v1, 13); v1 ^= v2; v2 = rol32(v2, 16); \ + } while (0) + +#define HPREAMBLE(len) \ + u32 v0 = 0; \ + u32 v1 = 0; \ + u32 v2 = 0x6c796765U; \ + u32 v3 = 0x74656462U; \ + u32 b = ((u32)(len)) << 24; \ + v3 ^= key->key[1]; \ + v2 ^= key->key[0]; \ + v1 ^= key->key[1]; \ + v0 ^= key->key[0]; + +#define HPOSTAMBLE \ + v3 ^= b; \ + HSIPROUND; \ + v0 ^= b; \ + v2 ^= 0xff; \ + HSIPROUND; \ + HSIPROUND; \ + HSIPROUND; \ + return v1 ^ v3; + +u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key) +{ + const u8 *end = data + len - (len % sizeof(u32)); + const u8 left = len & (sizeof(u32) - 1); + u32 m; + HPREAMBLE(len) + for (; data != end; data += sizeof(u32)) { + m = le32_to_cpup(data); + v3 ^= m; + HSIPROUND; + v0 ^= m; + } + switch (left) { + case 3: b |= ((u32)end[2]) << 16; + case 2: b |= le16_to_cpup(data); break; + case 1: b |= end[0]; + } + HPOSTAMBLE +} + +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +u32 __hsiphash_unaligned(const void *data, size_t len, + const hsiphash_key_t *key) +{ + const u8 *end = data + len - (len % sizeof(u32)); + const u8 left = len & (sizeof(u32) - 1); + u32 m; + HPREAMBLE(len) + for (; data != end; data += sizeof(u32)) { + m = get_unaligned_le32(data); + v3 ^= m; + HSIPROUND; + v0 ^= m; + } + switch (left) { + case 3: b |= ((u32)end[2]) << 16; + case 2: b |= get_unaligned_le16(end); break; + case 1: b |= end[0]; + } + HPOSTAMBLE +} +#endif + +/** + * hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32 + * @first: first u32 + * @key: the hsiphash key + */ +u32 hsiphash_1u32(const u32 first, const hsiphash_key_t *key) +{ + HPREAMBLE(4) + v3 ^= first; + HSIPROUND; + v0 ^= first; + HPOSTAMBLE +} + +/** + * hsiphash_2u32 - compute 32-bit hsiphash PRF value of 2 u32 + * @first: first u32 + * @second: second u32 + * @key: the hsiphash key + */ +u32 hsiphash_2u32(const u32 first, const u32 second, const hsiphash_key_t *key) +{ + HPREAMBLE(8) + v3 ^= first; + HSIPROUND; + v0 ^= first; + v3 ^= second; + HSIPROUND; + v0 ^= second; + HPOSTAMBLE +} + +/** + * hsiphash_3u32 - compute 32-bit hsiphash PRF value of 3 u32 + * @first: first u32 + * @second: second u32 + * @third: third u32 + * @key: the hsiphash key + */ +u32 hsiphash_3u32(const u32 first, const u32 second, const u32 third, + const hsiphash_key_t *key) +{ + HPREAMBLE(12) + v3 ^= first; + HSIPROUND; + v0 ^= first; + v3 ^= second; + HSIPROUND; + v0 ^= second; + v3 ^= third; + HSIPROUND; + v0 ^= third; + HPOSTAMBLE +} + +/** + * hsiphash_4u32 - compute 32-bit hsiphash PRF value of 4 u32 + * @first: first u32 + * @second: second u32 + * @third: third u32 + * @forth: forth u32 + * @key: the hsiphash key + */ +u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third, + const u32 forth, const hsiphash_key_t *key) +{ + HPREAMBLE(16) + v3 ^= first; + HSIPROUND; + v0 ^= first; + v3 ^= second; + HSIPROUND; + v0 ^= second; + v3 ^= third; + HSIPROUND; + v0 ^= third; + v3 ^= forth; + HSIPROUND; + v0 ^= forth; + HPOSTAMBLE +} +#endif diff --git a/drivers/net/wireguard/compat/udp_tunnel/include/net/udp_tunnel.h b/drivers/net/wireguard/compat/udp_tunnel/include/net/udp_tunnel.h new file mode 100644 index 000000000000..8999527d6952 --- /dev/null +++ b/drivers/net/wireguard/compat/udp_tunnel/include/net/udp_tunnel.h @@ -0,0 +1,94 @@ +#ifndef _WG_NET_UDP_TUNNEL_H +#define _WG_NET_UDP_TUNNEL_H + +#include <net/ip_tunnels.h> +#include <net/udp.h> + +#if IS_ENABLED(CONFIG_IPV6) +#include <net/ipv6.h> +#include 
<net/addrconf.h> +#endif + +struct udp_port_cfg { + u8 family; + + /* Used only for kernel-created sockets */ + union { + struct in_addr local_ip; +#if IS_ENABLED(CONFIG_IPV6) + struct in6_addr local_ip6; +#endif + }; + + union { + struct in_addr peer_ip; +#if IS_ENABLED(CONFIG_IPV6) + struct in6_addr peer_ip6; +#endif + }; + + __be16 local_udp_port; + __be16 peer_udp_port; + unsigned int use_udp_checksums:1, + use_udp6_tx_checksums:1, + use_udp6_rx_checksums:1, + ipv6_v6only:1; +}; + +int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg, + struct socket **sockp); + +#if IS_ENABLED(CONFIG_IPV6) +int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg, + struct socket **sockp); +#else +static inline int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg, + struct socket **sockp) +{ + return 0; +} +#endif + +static inline int udp_sock_create(struct net *net, + struct udp_port_cfg *cfg, + struct socket **sockp) +{ + if (cfg->family == AF_INET) + return udp_sock_create4(net, cfg, sockp); + + if (cfg->family == AF_INET6) + return udp_sock_create6(net, cfg, sockp); + + return -EPFNOSUPPORT; +} + +typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb); + +struct udp_tunnel_sock_cfg { + void *sk_user_data; + __u8 encap_type; + udp_tunnel_encap_rcv_t encap_rcv; +}; + +/* Setup the given (UDP) sock to receive UDP encapsulated packets */ +void setup_udp_tunnel_sock(struct net *net, struct socket *sock, + struct udp_tunnel_sock_cfg *sock_cfg); + +/* Transmit the skb using UDP encapsulation. */ +void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb, + __be32 src, __be32 dst, __u8 tos, __u8 ttl, + __be16 df, __be16 src_port, __be16 dst_port, + bool xnet, bool nocheck); + +#if IS_ENABLED(CONFIG_IPV6) +int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk, + struct sk_buff *skb, + struct net_device *dev, struct in6_addr *saddr, + struct in6_addr *daddr, + __u8 prio, __u8 ttl, __be32 label, + __be16 src_port, __be16 dst_port, bool nocheck); +#endif + +void udp_tunnel_sock_release(struct socket *sock); + +#endif /* _WG_NET_UDP_TUNNEL_H */ diff --git a/drivers/net/wireguard/compat/udp_tunnel/udp_tunnel.c b/drivers/net/wireguard/compat/udp_tunnel/udp_tunnel.c new file mode 100644 index 000000000000..9b8770ae7b3f --- /dev/null +++ b/drivers/net/wireguard/compat/udp_tunnel/udp_tunnel.c @@ -0,0 +1,394 @@ +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/socket.h> +#include <linux/udp.h> +#include <linux/types.h> +#include <linux/kernel.h> +#include <net/net_namespace.h> +#include <net/inet_common.h> +#include <net/udp.h> +#include <net/udp_tunnel.h> + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0) +#define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data))) +#define rcu_dereference_sk_user_data(sk) rcu_dereference(__sk_user_data((sk))) +#define rcu_assign_sk_user_data(sk, ptr) rcu_assign_pointer(__sk_user_data((sk)), ptr) +#endif + +/* This is global so, uh, only one real call site... This is the kind of horrific hack you'd expect to see in compat code. 
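For context, a rough sketch of the call sequence this compat layer has to support (hypothetical driver-side code; the callback and helper names are assumptions, not taken from this patch): create a bound UDP socket, then attach an encapsulation receive handler.

#include <linux/socket.h>
#include <net/udp_tunnel.h>

static int my_encap_rcv(struct sock *sk, struct sk_buff *skb);	/* assumed */

static int open_tunnel_socket(struct net *net, __be16 port,
			      struct socket **sockp)
{
	struct udp_port_cfg port_cfg = {
		.family = AF_INET,
		.local_udp_port = port,
	};
	struct udp_tunnel_sock_cfg tun_cfg = {
		.encap_type = 1,	/* UDP_ENCAP_*-style value */
		.encap_rcv = my_encap_rcv,
	};
	int ret = udp_sock_create(net, &port_cfg, sockp);

	if (ret < 0)
		return ret;
	setup_udp_tunnel_sock(net, *sockp, &tun_cfg);
	return 0;
}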
*/ +static udp_tunnel_encap_rcv_t encap_rcv = NULL; +static void __compat_sk_data_ready(struct sock *sk +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0) + ,int unused_vulnerable_length_param +#endif + ) +{ + struct sk_buff *skb; + while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { + skb_orphan(skb); + sk_mem_reclaim(sk); + encap_rcv(sk, skb); + } +} + +int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg, + struct socket **sockp) +{ + int err; + struct socket *sock = NULL; + struct sockaddr_in udp_addr; + + err = __sock_create(net, AF_INET, SOCK_DGRAM, 0, &sock, 1); + if (err < 0) + goto error; + + udp_addr.sin_family = AF_INET; + udp_addr.sin_addr = cfg->local_ip; + udp_addr.sin_port = cfg->local_udp_port; + err = kernel_bind(sock, (struct sockaddr *)&udp_addr, + sizeof(udp_addr)); + if (err < 0) + goto error; + + if (cfg->peer_udp_port) { + udp_addr.sin_family = AF_INET; + udp_addr.sin_addr = cfg->peer_ip; + udp_addr.sin_port = cfg->peer_udp_port; + err = kernel_connect(sock, (struct sockaddr *)&udp_addr, + sizeof(udp_addr), 0); + if (err < 0) + goto error; + } + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0) + sock->sk->sk_no_check = !cfg->use_udp_checksums; +#else + sock->sk->sk_no_check_tx = !cfg->use_udp_checksums; +#endif + + *sockp = sock; + return 0; + +error: + if (sock) { + kernel_sock_shutdown(sock, SHUT_RDWR); + sock_release(sock); + } + *sockp = NULL; + return err; +} + +void setup_udp_tunnel_sock(struct net *net, struct socket *sock, + struct udp_tunnel_sock_cfg *cfg) +{ + inet_sk(sock->sk)->mc_loop = 0; + encap_rcv = cfg->encap_rcv; + rcu_assign_sk_user_data(sock->sk, cfg->sk_user_data); + /* We force the cast in this awful way, due to various Android kernels + * backporting things stupidly. */ + *(void **)&sock->sk->sk_data_ready = (void *)__compat_sk_data_ready; +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0) +static inline __sum16 udp_v4_check(int len, __be32 saddr, + __be32 daddr, __wsum base) +{ + return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, base); +} + +static void udp_set_csum(bool nocheck, struct sk_buff *skb, + __be32 saddr, __be32 daddr, int len) +{ + struct udphdr *uh = udp_hdr(skb); + + if (nocheck) + uh->check = 0; + else if (skb_is_gso(skb)) + uh->check = ~udp_v4_check(len, saddr, daddr, 0); + else if (skb_dst(skb) && skb_dst(skb)->dev && + (skb_dst(skb)->dev->features & NETIF_F_V4_CSUM)) { + + BUG_ON(skb->ip_summed == CHECKSUM_PARTIAL); + + skb->ip_summed = CHECKSUM_PARTIAL; + skb->csum_start = skb_transport_header(skb) - skb->head; + skb->csum_offset = offsetof(struct udphdr, check); + uh->check = ~udp_v4_check(len, saddr, daddr, 0); + } else { + __wsum csum; + + BUG_ON(skb->ip_summed == CHECKSUM_PARTIAL); + + uh->check = 0; + csum = skb_checksum(skb, 0, len, 0); + uh->check = udp_v4_check(len, saddr, daddr, csum); + if (uh->check == 0) + uh->check = CSUM_MANGLED_0; + + skb->ip_summed = CHECKSUM_UNNECESSARY; + } +} + +#endif + +static void __compat_fake_destructor(struct sk_buff *skb) +{ +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0) +static void __compat_iptunnel_xmit(struct rtable *rt, struct sk_buff *skb, + __be32 src, __be32 dst, __u8 proto, + __u8 tos, __u8 ttl, __be16 df, bool xnet) +{ + struct iphdr *iph; + struct pcpu_tstats *tstats = this_cpu_ptr(skb->dev->tstats); + + skb_scrub_packet(skb, xnet); + + skb->rxhash = 0; + skb_dst_set(skb, &rt->dst); + memset(IPCB(skb), 0, sizeof(*IPCB(skb))); + + /* Push down and install the IP header. 
*/
+	skb_push(skb, sizeof(struct iphdr));
+	skb_reset_network_header(skb);
+
+	iph = ip_hdr(skb);
+
+	iph->version = 4;
+	iph->ihl = sizeof(struct iphdr) >> 2;
+	iph->frag_off = df;
+	iph->protocol = proto;
+	iph->tos = tos;
+	iph->daddr = dst;
+	iph->saddr = src;
+	iph->ttl = ttl;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 53)
+	__ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1);
+#else
+	__ip_select_ident(iph, skb_shinfo(skb)->gso_segs ?: 1);
+#endif
+
+	iptunnel_xmit(skb, skb->dev);
+	u64_stats_update_begin(&tstats->syncp);
+	tstats->tx_bytes -= 8;
+	u64_stats_update_end(&tstats->syncp);
+}
+#define iptunnel_xmit __compat_iptunnel_xmit
+#endif
+
+void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
+			 __be32 src, __be32 dst, __u8 tos, __u8 ttl,
+			 __be16 df, __be16 src_port, __be16 dst_port,
+			 bool xnet, bool nocheck)
+{
+	struct udphdr *uh;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)
+	struct net_device *dev = skb->dev;
+	int ret;
+#endif
+
+	__skb_push(skb, sizeof(*uh));
+	skb_reset_transport_header(skb);
+	uh = udp_hdr(skb);
+
+	uh->dest = dst_port;
+	uh->source = src_port;
+	uh->len = htons(skb->len);
+
+	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+
+	udp_set_csum(nocheck, skb, src, dst, skb->len);
+
+	if (!skb->sk)
+		skb->sk = sk;
+	if (!skb->destructor)
+		skb->destructor = __compat_fake_destructor;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)
+	ret =
+#endif
+	iptunnel_xmit(
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
+		      sk,
+#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)
+		      dev_net(dev),
+#endif
+		      rt, skb, src, dst, IPPROTO_UDP, tos, ttl, df
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0) || LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0)
+		      , xnet
+#endif
+		      );
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)
+	if (ret)
+		iptunnel_xmit_stats(ret - 8, &dev->stats, dev->tstats);
+#endif
+}
+
+void udp_tunnel_sock_release(struct socket *sock)
+{
+	rcu_assign_sk_user_data(sock->sk, NULL);
+	kernel_sock_shutdown(sock, SHUT_RDWR);
+	sock_release(sock);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/socket.h>
+#include <linux/udp.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/in6.h>
+#include <net/udp.h>
+#include <net/udp_tunnel.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+#include <net/ip6_tunnel.h>
+#include <net/ip6_checksum.h>
+
+int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
+		     struct socket **sockp)
+{
+	struct sockaddr_in6 udp6_addr;
+	int err;
+	struct socket *sock = NULL;
+
+	err = __sock_create(net, AF_INET6, SOCK_DGRAM, 0, &sock, 1);
+	if (err < 0)
+		goto error;
+
+	if (cfg->ipv6_v6only) {
+		int val = 1;
+
+		err = kernel_setsockopt(sock, IPPROTO_IPV6, IPV6_V6ONLY,
+					(char *) &val, sizeof(val));
+		if (err < 0)
+			goto error;
+	}
+
+	udp6_addr.sin6_family = AF_INET6;
+	memcpy(&udp6_addr.sin6_addr, &cfg->local_ip6,
+	       sizeof(udp6_addr.sin6_addr));
+	udp6_addr.sin6_port = cfg->local_udp_port;
+	err = kernel_bind(sock, (struct sockaddr *)&udp6_addr,
+			  sizeof(udp6_addr));
+	if (err < 0)
+		goto error;
+
+	if (cfg->peer_udp_port) {
+		udp6_addr.sin6_family = AF_INET6;
+		memcpy(&udp6_addr.sin6_addr, &cfg->peer_ip6,
+		       sizeof(udp6_addr.sin6_addr));
+		udp6_addr.sin6_port = cfg->peer_udp_port;
+		err = kernel_connect(sock,
+				     (struct sockaddr *)&udp6_addr,
+				     sizeof(udp6_addr), 0);
+	}
+	if (err < 0)
+		goto error;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)
+	sock->sk->sk_no_check = !cfg->use_udp_checksums;
+#else
+	udp_set_no_check6_tx(sock->sk, !cfg->use_udp6_tx_checksums);
+	udp_set_no_check6_rx(sock->sk, !cfg->use_udp6_rx_checksums);
+#endif
+
+	*sockp = sock;
+	return 0;
+
+error:
+	if (sock) {
+		kernel_sock_shutdown(sock, SHUT_RDWR);
+		sock_release(sock);
+	}
+	*sockp = NULL;
+	return err;
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)
+static inline __sum16 udp_v6_check(int len,
+				   const struct in6_addr *saddr,
+				   const struct in6_addr *daddr,
+				   __wsum base)
+{
+	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, base);
+}
+static void udp6_set_csum(bool nocheck, struct sk_buff *skb,
+			  const struct in6_addr *saddr,
+			  const struct in6_addr *daddr, int len)
+{
+	struct udphdr *uh = udp_hdr(skb);
+
+	if (nocheck)
+		uh->check = 0;
+	else if (skb_is_gso(skb))
+		uh->check = ~udp_v6_check(len, saddr, daddr, 0);
+	else if (skb_dst(skb) && skb_dst(skb)->dev &&
+		 (skb_dst(skb)->dev->features & NETIF_F_IPV6_CSUM)) {
+
+		BUG_ON(skb->ip_summed == CHECKSUM_PARTIAL);
+
+		skb->ip_summed = CHECKSUM_PARTIAL;
+		skb->csum_start = skb_transport_header(skb) - skb->head;
+		skb->csum_offset = offsetof(struct udphdr, check);
+		uh->check = ~udp_v6_check(len, saddr, daddr, 0);
+	} else {
+		__wsum csum;
+
+		BUG_ON(skb->ip_summed == CHECKSUM_PARTIAL);
+
+		uh->check = 0;
+		csum = skb_checksum(skb, 0, len, 0);
+		uh->check = udp_v6_check(len, saddr, daddr, csum);
+		if (uh->check == 0)
+			uh->check = CSUM_MANGLED_0;
+
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	}
+}
+#endif
+
+int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
+			 struct sk_buff *skb,
+			 struct net_device *dev, struct in6_addr *saddr,
+			 struct in6_addr *daddr,
+			 __u8 prio, __u8 ttl, __be32 label,
+			 __be16 src_port, __be16 dst_port, bool nocheck)
+{
+	struct udphdr *uh;
+	struct ipv6hdr *ip6h;
+
+	__skb_push(skb, sizeof(*uh));
+	skb_reset_transport_header(skb);
+	uh = udp_hdr(skb);
+
+	uh->dest = dst_port;
+	uh->source = src_port;
+
+	uh->len = htons(skb->len);
+
+	skb_dst_set(skb, dst);
+
+	udp6_set_csum(nocheck, skb, saddr, daddr, skb->len);
+
+	__skb_push(skb, sizeof(*ip6h));
+	skb_reset_network_header(skb);
+	ip6h = ipv6_hdr(skb);
+	ip6_flow_hdr(ip6h, prio, label);
+	ip6h->payload_len = htons(skb->len);
+	ip6h->nexthdr = IPPROTO_UDP;
+	ip6h->hop_limit = ttl;
+	ip6h->daddr = *daddr;
+	ip6h->saddr = *saddr;
+
+	if (!skb->sk)
+		skb->sk = sk;
+	if (!skb->destructor)
+		skb->destructor = __compat_fake_destructor;
+
+	ip6tunnel_xmit(skb, dev);
+	return 0;
+}
+#endif
diff --git a/drivers/net/wireguard/compat/udp_tunnel/udp_tunnel_partial_compat.h b/drivers/net/wireguard/compat/udp_tunnel/udp_tunnel_partial_compat.h
new file mode 100644
index 000000000000..0605896e902f
--- /dev/null
+++ b/drivers/net/wireguard/compat/udp_tunnel/udp_tunnel_partial_compat.h
@@ -0,0 +1,226 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)
+#define udp_sock_create4 udp_sock_create
+#define udp_sock_create6 udp_sock_create
+#include <linux/socket.h>
+#include <linux/if.h>
+#include <linux/in.h>
+#include <net/ip_tunnels.h>
+#include <net/udp.h>
+#include <net/inet_common.h>
+#if IS_ENABLED(CONFIG_IPV6)
+#include <linux/in6.h>
+#include <net/ipv6.h>
+#include <net/addrconf.h>
+#include <net/ip6_checksum.h>
+#include <net/ip6_tunnel.h>
+#endif
+static inline void __compat_fake_destructor(struct sk_buff *skb)
+{
+}
+typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb);
+struct udp_tunnel_sock_cfg {
+	void *sk_user_data;
+	__u8 encap_type;
+	udp_tunnel_encap_rcv_t encap_rcv;
+};
+/* This is global so, uh, only one real call site... This is the kind of horrific hack you'd expect to see in compat code. */
+static udp_tunnel_encap_rcv_t encap_rcv = NULL;
+static void __compat_sk_data_ready(struct sock *sk)
+{
+	struct sk_buff *skb;
+	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
+		skb_orphan(skb);
+		sk_mem_reclaim(sk);
+		encap_rcv(sk, skb);
+	}
+}
+static inline void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
+					 struct udp_tunnel_sock_cfg *cfg)
+{
+	struct sock *sk = sock->sk;
+	inet_sk(sk)->mc_loop = 0;
+	encap_rcv = cfg->encap_rcv;
+	rcu_assign_sk_user_data(sk, cfg->sk_user_data);
+	sk->sk_data_ready = __compat_sk_data_ready;
+}
+static inline void udp_tunnel_sock_release(struct socket *sock)
+{
+	rcu_assign_sk_user_data(sock->sk, NULL);
+	kernel_sock_shutdown(sock, SHUT_RDWR);
+	sk_release_kernel(sock->sk);
+}
+static inline int udp_tunnel_xmit_skb(struct socket *sock, struct rtable *rt,
+				      struct sk_buff *skb, __be32 src, __be32 dst,
+				      __u8 tos, __u8 ttl, __be16 df, __be16 src_port,
+				      __be16 dst_port, bool xnet)
+{
+	struct udphdr *uh;
+	__skb_push(skb, sizeof(*uh));
+	skb_reset_transport_header(skb);
+	uh = udp_hdr(skb);
+	uh->dest = dst_port;
+	uh->source = src_port;
+	uh->len = htons(skb->len);
+	udp_set_csum(sock->sk->sk_no_check_tx, skb, src, dst, skb->len);
+	return iptunnel_xmit(sock->sk, rt, skb, src, dst, IPPROTO_UDP,
+			     tos, ttl, df, xnet);
+}
+#if IS_ENABLED(CONFIG_IPV6)
+static inline int udp_tunnel6_xmit_skb(struct socket *sock, struct dst_entry *dst,
+				       struct sk_buff *skb, struct net_device *dev,
+				       struct in6_addr *saddr, struct in6_addr *daddr,
+				       __u8 prio, __u8 ttl, __be16 src_port,
+				       __be16 dst_port)
+{
+	struct udphdr *uh;
+	struct ipv6hdr *ip6h;
+	struct sock *sk = sock->sk;
+	__skb_push(skb, sizeof(*uh));
+	skb_reset_transport_header(skb);
+	uh = udp_hdr(skb);
+	uh->dest = dst_port;
+	uh->source = src_port;
+	uh->len = htons(skb->len);
+	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED
+			      | IPSKB_REROUTED);
+	skb_dst_set(skb, dst);
+	udp6_set_csum(udp_get_no_check6_tx(sk), skb, &inet6_sk(sk)->saddr,
+		      &sk->sk_v6_daddr, skb->len);
+	__skb_push(skb, sizeof(*ip6h));
+	skb_reset_network_header(skb);
+	ip6h = ipv6_hdr(skb);
+	ip6_flow_hdr(ip6h, prio, htonl(0));
+	ip6h->payload_len = htons(skb->len);
+	ip6h->nexthdr = IPPROTO_UDP;
+	ip6h->hop_limit = ttl;
+	ip6h->daddr = *daddr;
+	ip6h->saddr = *saddr;
+	ip6tunnel_xmit(skb, dev);
+	return 0;
+}
+#endif
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/udp.h>
+#include <linux/skbuff.h>
+#include <linux/if.h>
+#include <net/udp_tunnel.h>
+#define udp_tunnel_xmit_skb(a, b, c, d, e, f, g, h, i, j, k, l) do { struct net_device *dev__ = (c)->dev; int ret__; ret__ = udp_tunnel_xmit_skb((b)->sk_socket, a, c, d, e, f, g, h, i, j, k); if (ret__) iptunnel_xmit_stats(ret__ - 8, &dev__->stats, dev__->tstats); } while (0)
+#if IS_ENABLED(CONFIG_IPV6)
+#define udp_tunnel6_xmit_skb(a, b, c, d, e, f, g, h, i, j, k, l) udp_tunnel6_xmit_skb((b)->sk_socket, a, c, d, e, f, g, h, j, k);
+#endif
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)
+#include <linux/if.h>
+#include <net/udp_tunnel.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)
+static inline void __compat_fake_destructor(struct sk_buff *skb)
+{
+}
+#endif
+#define udp_tunnel_xmit_skb(a, b, c, d, e, f, g, h, i, j, k, l) do { struct net_device *dev__ = (c)->dev; int ret__; if (!(c)->destructor) (c)->destructor = __compat_fake_destructor; if (!(c)->sk) (c)->sk = (b); ret__ = udp_tunnel_xmit_skb(a, c, d, e, f, g, h, i, j, k, l); if (ret__) iptunnel_xmit_stats(ret__ - 8, &dev__->stats, dev__->tstats); } while (0)
+#if IS_ENABLED(CONFIG_IPV6)
+#define udp_tunnel6_xmit_skb(a, b, c, d, e, f, g, h, i, j, k, l) do { if (!(c)->destructor) (c)->destructor = __compat_fake_destructor; if (!(c)->sk) (c)->sk = (b); udp_tunnel6_xmit_skb(a, c, d, e, f, g, h, j, k, l); } while(0)
+#endif
+#else
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)
+#include <linux/if.h>
+#include <net/udp_tunnel.h>
+#define udp_tunnel_xmit_skb(a, b, c, d, e, f, g, h, i, j, k, l) do { struct net_device *dev__ = (c)->dev; int ret__ = udp_tunnel_xmit_skb(a, b, c, d, e, f, g, h, i, j, k, l); if (ret__) iptunnel_xmit_stats(ret__ - 8, &dev__->stats, dev__->tstats); } while (0)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
+#include <linux/if.h>
+#include <net/udp_tunnel.h>
+#define udp_tunnel_xmit_skb(a, b, c, d, e, f, g, h, i, j, k, l) do { struct net_device *dev__ = (c)->dev; int ret__ = udp_tunnel_xmit_skb(a, b, c, d, e, f, g, h, i, j, k, l); iptunnel_xmit_stats(ret__, &dev__->stats, dev__->tstats); } while (0)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) && IS_ENABLED(CONFIG_IPV6) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)
+#include <linux/if.h>
+#include <net/udp_tunnel.h>
+#define udp_tunnel6_xmit_skb(a, b, c, d, e, f, g, h, i, j, k, l) udp_tunnel6_xmit_skb(a, b, c, d, e, f, g, h, j, k, l)
+#endif
+
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)
+#include <linux/skbuff.h>
+#include <linux/if.h>
+#include <net/udp_tunnel.h>
+struct __compat_udp_port_cfg {
+	u8 family;
+	union {
+		struct in_addr local_ip;
+#if IS_ENABLED(CONFIG_IPV6)
+		struct in6_addr local_ip6;
+#endif
+	};
+	union {
+		struct in_addr peer_ip;
+#if IS_ENABLED(CONFIG_IPV6)
+		struct in6_addr peer_ip6;
+#endif
+	};
+	__be16 local_udp_port;
+	__be16 peer_udp_port;
+	unsigned int use_udp_checksums:1, use_udp6_tx_checksums:1, use_udp6_rx_checksums:1, ipv6_v6only:1;
+};
+static inline int __maybe_unused __compat_udp_sock_create(struct net *net, struct __compat_udp_port_cfg *cfg, struct socket **sockp)
+{
+	struct udp_port_cfg old_cfg = {
+		.family = cfg->family,
+		.local_ip = cfg->local_ip,
+#if IS_ENABLED(CONFIG_IPV6)
+		.local_ip6 = cfg->local_ip6,
+#endif
+		.peer_ip = cfg->peer_ip,
+#if IS_ENABLED(CONFIG_IPV6)
+		.peer_ip6 = cfg->peer_ip6,
+#endif
+		.local_udp_port = cfg->local_udp_port,
+		.peer_udp_port = cfg->peer_udp_port,
+		.use_udp_checksums = cfg->use_udp_checksums,
+		.use_udp6_tx_checksums = cfg->use_udp6_tx_checksums,
+		.use_udp6_rx_checksums = cfg->use_udp6_rx_checksums
+	};
+	if (cfg->family == AF_INET)
+		return udp_sock_create4(net, &old_cfg, sockp);
+
+#if IS_ENABLED(CONFIG_IPV6)
+	if (cfg->family == AF_INET6) {
+		int ret;
+		int old_bindv6only;
+		struct net *nobns;
+
+		if (cfg->ipv6_v6only) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0)
+			nobns = &init_net;
+#else
+			nobns = net;
+#endif
+			/* Since udp_port_cfg only learned of ipv6_v6only in 4.3, we do this horrible
+			 * hack here and set the sysctl variable temporarily to something that will
+			 * set the right option for us in sock_create. It's super racey! */
+			old_bindv6only = nobns->ipv6.sysctl.bindv6only;
+			nobns->ipv6.sysctl.bindv6only = 1;
+		}
+		ret = udp_sock_create6(net, &old_cfg, sockp);
+		if (cfg->ipv6_v6only)
+			nobns->ipv6.sysctl.bindv6only = old_bindv6only;
+		return ret;
+	}
+#endif
+	return -EPFNOSUPPORT;
+}
+#define udp_port_cfg __compat_udp_port_cfg
+#define udp_sock_create(a, b, c) __compat_udp_sock_create(a, b, c)
+#endif
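For context, the compat layer above is consumed through the ordinary udp_tunnel API: a driver fills in a `struct udp_port_cfg`, creates the socket with `udp_sock_create()`, installs its receive callback with `setup_udp_tunnel_sock()`, and transmits with the twelve-argument `udp_tunnel_xmit_skb()`; the headers in `compat/udp_tunnel` remap those calls on older kernels. The sketch below only illustrates that calling pattern under those assumptions. It is not part of this patch, and the `example_*` names are hypothetical.

```c
/* Illustrative sketch only -- not part of this commit. The example_* names are
 * hypothetical; the udp_tunnel calls are the ones the compat layer above
 * provides or remaps on older kernels. */
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <net/udp_tunnel.h>

static int example_encap_rcv(struct sock *sk, struct sk_buff *skb)
{
	/* A real driver would hand the datagram to its receive path here. */
	kfree_skb(skb);
	return 0;
}

static int example_create_sock(struct net *net, __be16 port, struct socket **sockp)
{
	struct udp_port_cfg port_cfg = {
		.family = AF_INET,
		.local_udp_port = port,
		.use_udp_checksums = true,
	};
	struct udp_tunnel_sock_cfg tunnel_cfg = {
		.sk_user_data = NULL,
		.encap_type = 1, /* plain UDP encapsulation demux */
		.encap_rcv = example_encap_rcv,
	};
	int ret;

	/* On pre-3.18 kernels these resolve to the compat udp_sock_create()
	 * and setup_udp_tunnel_sock() defined in udp_tunnel_partial_compat.h. */
	ret = udp_sock_create(net, &port_cfg, sockp);
	if (ret < 0)
		return ret;
	setup_udp_tunnel_sock(net, *sockp, &tunnel_cfg);
	return 0;
}

static void example_xmit(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
			 __be32 src, __be32 dst, __be16 sport, __be16 dport)
{
	/* Twelve-argument form; every compat branch above normalizes to it. */
	udp_tunnel_xmit_skb(rt, sk, skb, src, dst, 0 /* tos */, 64 /* ttl */,
			    0 /* df */, sport, dport, false /* xnet */,
			    false /* nocheck */);
}
```

On kernels in the 3.17 to 4.5 range, the macros defined above rewrite that same `udp_tunnel_xmit_skb()` call site into the older signatures and fold in the `iptunnel_xmit_stats()` accounting, so callers never need version checks of their own.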
