-rw-r--r--  Documentation/ABI/testing/sysfs-class-udc | 16
-rw-r--r--  Documentation/networking/filter.txt | 4
-rw-r--r--  arch/arm/Kconfig | 2
-rw-r--r--  arch/arm64/Kconfig | 2
-rw-r--r--  arch/arm64/Makefile | 1
-rw-r--r--  arch/arm64/include/asm/insn.h | 38
-rw-r--r--  arch/arm64/kernel/insn.c | 146
-rw-r--r--  arch/arm64/kernel/vdso32/Makefile | 19
-rw-r--r--  arch/arm64/net/bpf_jit.h | 21
-rw-r--r--  arch/arm64/net/bpf_jit_comp.c | 63
-rw-r--r--  arch/mips/Kconfig | 2
-rw-r--r--  arch/powerpc/Kconfig | 2
-rw-r--r--  arch/s390/Kconfig | 2
-rw-r--r--  arch/s390/net/bpf_jit_comp.c | 10
-rw-r--r--  arch/sparc/Kconfig | 2
-rw-r--r--  arch/x86/net/bpf_jit_comp.c | 10
-rw-r--r--  drivers/char/adsprpc.c | 114
-rw-r--r--  drivers/char/diag/diag_dci.c | 2
-rw-r--r--  drivers/char/diag/diag_memorydevice.c | 4
-rw-r--r--  drivers/char/diag/diag_mux.c | 4
-rw-r--r--  drivers/char/diag/diagchar_core.c | 6
-rw-r--r--  drivers/char/diag/diagfwd_bridge.c | 4
-rw-r--r--  drivers/char/diag/diagfwd_mhi.c | 4
-rw-r--r--  drivers/gpu/msm/kgsl.c | 103
-rw-r--r--  drivers/gpu/msm/kgsl_iommu.c | 14
-rw-r--r--  drivers/gpu/msm/kgsl_mmu.c | 4
-rw-r--r--  drivers/gpu/msm/kgsl_sharedmem.c | 8
-rw-r--r--  drivers/hid/hid-core.c | 25
-rw-r--r--  drivers/input/input.c | 10
-rw-r--r--  drivers/iommu/io-pgtable-arm.c | 4
-rw-r--r--  drivers/leds/leds-qpnp.c | 2
-rw-r--r--  drivers/media/platform/msm/camera_v2/common/msm_camera_tz_util.c | 2
-rw-r--r--  drivers/media/platform/msm/vidc/msm_vidc_common.c | 2
-rw-r--r--  drivers/net/team/team.c | 1
-rw-r--r--  drivers/net/wireguard/compat/Makefile.include | 9
-rw-r--r--  drivers/net/wireguard/compat/compat-asm.h | 6
-rw-r--r--  drivers/net/wireguard/compat/compat.h | 89
-rw-r--r--  drivers/net/wireguard/compat/dst_cache/dst_cache.c | 2
-rw-r--r--  drivers/net/wireguard/compat/dstmetadata/include/net/dst_metadata.h | 3
-rw-r--r--  drivers/net/wireguard/compat/siphash/include/linux/siphash.h | 14
-rw-r--r--  drivers/net/wireguard/compat/siphash/siphash.c | 48
-rw-r--r--  drivers/net/wireguard/compat/skb_array/include/linux/skb_array.h | 11
-rw-r--r--  drivers/net/wireguard/compat/udp_tunnel/udp_tunnel.c | 12
-rw-r--r--  drivers/net/wireguard/crypto/zinc/curve25519/curve25519-x86_64.c | 797
-rw-r--r--  drivers/net/wireguard/device.c | 42
-rw-r--r--  drivers/net/wireguard/device.h | 9
-rw-r--r--  drivers/net/wireguard/main.c | 8
-rw-r--r--  drivers/net/wireguard/queueing.c | 7
-rw-r--r--  drivers/net/wireguard/queueing.h | 2
-rw-r--r--  drivers/net/wireguard/ratelimiter.c | 4
-rw-r--r--  drivers/net/wireguard/receive.c | 39
-rw-r--r--  drivers/net/wireguard/socket.c | 11
-rw-r--r--  drivers/net/wireguard/version.h | 4
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c | 4
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c | 4
-rw-r--r--  drivers/soc/qcom/msm_bus/msm_bus_rpm_smd.c | 2
-rw-r--r--  drivers/soc/qcom/qdsp6v2/voice_svc.c | 2
-rw-r--r--  drivers/soc/qcom/smem.c | 60
-rw-r--r--  drivers/thermal/msm_thermal-dev.c | 4
-rw-r--r--  drivers/tty/pty.c | 14
-rw-r--r--  drivers/tty/tty_buffer.c | 48
-rw-r--r--  drivers/usb/core/hub.c | 15
-rw-r--r--  drivers/usb/core/sysfs.c | 2
-rw-r--r--  drivers/usb/dwc3/dwc3-msm.c | 48
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp.c | 2
-rw-r--r--  drivers/video/fbdev/msm/mdss_util.c | 2
-rw-r--r--  fs/debugfs/inode.c | 2
-rw-r--r--  fs/fuse/dev.c | 2
-rw-r--r--  fs/hugetlbfs/inode.c | 2
-rw-r--r--  fs/libfs.c | 6
-rw-r--r--  fs/open.c | 6
-rw-r--r--  fs/ramfs/inode.c | 2
-rw-r--r--  fs/splice.c | 270
-rw-r--r--  include/linux/cgroup-defs.h | 2
-rw-r--r--  include/linux/cgroup.h | 16
-rw-r--r--  include/linux/filter.h | 9
-rw-r--r--  include/linux/fs.h | 3
-rw-r--r--  include/linux/hid.h | 3
-rw-r--r--  include/linux/if_vlan.h | 2
-rw-r--r--  include/linux/lsm_hooks.h | 6
-rw-r--r--  include/linux/netdevice.h | 111
-rw-r--r--  include/linux/sched.h | 1
-rw-r--r--  include/linux/security.h | 23
-rw-r--r--  include/linux/string.h | 3
-rw-r--r--  include/linux/thread_info.h | 3
-rw-r--r--  include/linux/tty_flip.h | 3
-rw-r--r--  include/linux/usb.h | 2
-rw-r--r--  include/net/cfg80211.h | 3
-rw-r--r--  include/net/flow.h | 10
-rw-r--r--  include/net/route.h | 6
-rw-r--r--  include/sound/rawmidi.h | 1
-rw-r--r--  include/trace/events/sched.h | 68
-rw-r--r--  include/trace/events/writeback.h | 6
-rw-r--r--  include/uapi/linux/bpf.h | 5
-rw-r--r--  include/uapi/linux/input.h | 1
-rw-r--r--  kernel/bpf/core.c | 74
-rw-r--r--  kernel/bpf/inode.c | 2
-rw-r--r--  kernel/bpf/verifier.c | 2
-rw-r--r--  kernel/cgroup.c | 57
-rw-r--r--  kernel/compat.c | 2
-rw-r--r--  kernel/sched/core.c | 7
-rw-r--r--  kernel/sched/cpufreq_schedutil.c | 122
-rw-r--r--  kernel/sched/deadline.c | 2
-rw-r--r--  kernel/sched/energy.c | 13
-rw-r--r--  kernel/sched/fair.c | 782
-rw-r--r--  kernel/sched/features.h | 26
-rw-r--r--  kernel/sched/rt.c | 331
-rw-r--r--  kernel/sched/sched.h | 37
-rw-r--r--  kernel/sched/tune.c | 112
-rw-r--r--  kernel/sched/walt.c | 2
-rw-r--r--  kernel/softirq.c | 30
-rw-r--r--  kernel/trace/blktrace.c | 3
-rw-r--r--  kernel/trace/trace.c | 45
-rw-r--r--  kernel/trace/trace_irqsoff.c | 3
-rw-r--r--  kernel/trace/trace_sched_wakeup.c | 2
-rw-r--r--  lib/string_helpers.c | 20
-rw-r--r--  lib/test_bpf.c | 498
-rw-r--r--  net/Kconfig | 14
-rw-r--r--  net/bluetooth/hci_sock.c | 3
-rw-r--r--  net/core/dev.c | 35
-rw-r--r--  net/core/ethtool.c | 55
-rw-r--r--  net/core/filter.c | 21
-rw-r--r--  net/core/netpoll.c | 2
-rw-r--r--  net/dccp/ipv4.c | 2
-rw-r--r--  net/dccp/ipv6.c | 6
-rw-r--r--  net/ipv4/icmp.c | 4
-rw-r--r--  net/ipv4/inet_connection_sock.c | 4
-rw-r--r--  net/ipv4/ip_output.c | 2
-rw-r--r--  net/ipv4/ping.c | 2
-rw-r--r--  net/ipv4/raw.c | 2
-rw-r--r--  net/ipv4/syncookies.c | 2
-rw-r--r--  net/ipv4/udp.c | 2
-rw-r--r--  net/ipv6/af_inet6.c | 2
-rw-r--r--  net/ipv6/datagram.c | 2
-rw-r--r--  net/ipv6/icmp.c | 6
-rw-r--r--  net/ipv6/inet6_connection_sock.c | 4
-rw-r--r--  net/ipv6/netfilter/nf_reject_ipv6.c | 2
-rw-r--r--  net/ipv6/ping.c | 2
-rw-r--r--  net/ipv6/raw.c | 2
-rw-r--r--  net/ipv6/route.c | 10
-rw-r--r--  net/ipv6/syncookies.c | 2
-rw-r--r--  net/ipv6/tcp_ipv6.c | 4
-rw-r--r--  net/ipv6/udp.c | 2
-rw-r--r--  net/l2tp/l2tp_ip6.c | 2
-rw-r--r--  net/mac80211/mesh.c | 11
-rw-r--r--  net/mac80211/mesh.h | 3
-rw-r--r--  net/mac80211/mesh_plink.c | 32
-rw-r--r--  net/wireless/ibss.c | 5
-rw-r--r--  net/wireless/nl80211.c | 26
-rw-r--r--  net/wireless/sme.c | 34
-rw-r--r--  net/wireless/wext-sme.c | 2
-rw-r--r--  net/xfrm/xfrm_state.c | 6
-rwxr-xr-x  scripts/kconfig/lxdialog/check-lxdialog.sh | 2
-rw-r--r--  scripts/kconfig/symbol.c | 8
-rw-r--r--  security/pfe/pfk.c | 10
-rw-r--r--  security/security.c | 17
-rw-r--r--  security/selinux/hooks.c | 4
-rw-r--r--  security/selinux/include/xfrm.h | 2
-rw-r--r--  security/selinux/xfrm.c | 8
-rw-r--r--  sound/core/rawmidi.c | 32
-rw-r--r--  sound/core/sound_oss.c | 13
-rw-r--r--  sound/soc/codecs/wcd-mbhc-v2.c | 3
-rw-r--r--  sound/soc/codecs/wcd9xxx-mbhc.c | 3
-rw-r--r--  sound/soc/codecs/wcd_cpe_services.c | 5
-rw-r--r--  sound/soc/msm/qdsp6v2/msm-pcm-host-voice-v2.c | 14
-rw-r--r--  sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c | 12
-rw-r--r--  sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c | 5
-rw-r--r--  sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c | 32
-rw-r--r--  sound/soc/msm/qdsp6v2/q6afe.c | 7
-rw-r--r--  sound/soc/msm/qdsp6v2/q6asm.c | 19
-rw-r--r--  sound/soc/msm/qdsp6v2/q6core.c | 3
-rw-r--r--  sound/soc/msm/qdsp6v2/q6lsm.c | 9
-rw-r--r--  sound/soc/msm/qdsp6v2/q6voice.c | 7
-rwxr-xr-x  tools/testing/selftests/wireguard/netns.sh | 30
-rw-r--r--  tools/testing/selftests/wireguard/qemu/Makefile | 11
-rw-r--r--  tools/testing/selftests/wireguard/qemu/arch/aarch64.config | 5
-rw-r--r--  tools/testing/selftests/wireguard/qemu/arch/aarch64_be.config | 5
-rw-r--r--  tools/testing/selftests/wireguard/qemu/arch/arm.config | 5
-rw-r--r--  tools/testing/selftests/wireguard/qemu/arch/armeb.config | 5
-rw-r--r--  tools/testing/selftests/wireguard/qemu/arch/i686.config | 3
-rw-r--r--  tools/testing/selftests/wireguard/qemu/arch/m68k.config | 2
-rw-r--r--  tools/testing/selftests/wireguard/qemu/arch/mips.config | 2
-rw-r--r--  tools/testing/selftests/wireguard/qemu/arch/mips64.config | 2
-rw-r--r--  tools/testing/selftests/wireguard/qemu/arch/mips64el.config | 2
-rw-r--r--  tools/testing/selftests/wireguard/qemu/arch/mipsel.config | 2
-rw-r--r--  tools/testing/selftests/wireguard/qemu/arch/powerpc.config | 2
-rw-r--r--  tools/testing/selftests/wireguard/qemu/arch/powerpc64le.config | 2
-rw-r--r--  tools/testing/selftests/wireguard/qemu/arch/x86_64.config | 3
-rw-r--r--  tools/testing/selftests/wireguard/qemu/init.c | 6
189 files changed, 3669 insertions, 1674 deletions
diff --git a/Documentation/ABI/testing/sysfs-class-udc b/Documentation/ABI/testing/sysfs-class-udc
new file mode 100644
index 000000000000..1b9c566d0552
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-udc
@@ -0,0 +1,16 @@
+What: /sys/class/udc/<udc name>/device/usb_data_enabled
+Date: December 2020
+Contact: "Ray Chi" <raychi@google.com>
+Description:
+ This attribute allows user space to check and modify the
+ value to enable or disable USB functionality. If the
+ attribute is set to 0, neither USB host mode nor USB
+ peripheral mode will work.
+
+ Example:
+ Enable USB data functionality
+ # echo 1 > /sys/class/udc/.../device/usb_data_enabled
+
+ Disable USB data functionality
+ # echo 0 > /sys/class/udc/.../device/usb_data_enabled
+
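The shell examples above cover interactive use. As a purely hypothetical userspace sketch (not part of this patch; the UDC name, path handling, and error handling are illustrative), the same attribute can be written from C:

	/* Hypothetical helper: toggle USB data for a given UDC via sysfs. */
	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>

	int set_usb_data_enabled(const char *udc, int enable)
	{
		char path[256];
		int fd, ok;

		snprintf(path, sizeof(path),
			 "/sys/class/udc/%s/device/usb_data_enabled", udc);
		fd = open(path, O_WRONLY);
		if (fd < 0)
			return -1;
		ok = (write(fd, enable ? "1" : "0", 1) == 1);
		close(fd);
		return ok ? 0 : -1;
	}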
diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt
index 96da119a47e7..4e3e22b053cc 100644
--- a/Documentation/networking/filter.txt
+++ b/Documentation/networking/filter.txt
@@ -907,6 +907,10 @@ If BPF_CLASS(code) == BPF_JMP, BPF_OP(code) is one of:
BPF_JSGE 0x70 /* eBPF only: signed '>=' */
BPF_CALL 0x80 /* eBPF only: function call */
BPF_EXIT 0x90 /* eBPF only: function return */
+ BPF_JLT 0xa0 /* eBPF only: unsigned '<' */
+ BPF_JLE 0xb0 /* eBPF only: unsigned '<=' */
+ BPF_JSLT 0xc0 /* eBPF only: signed '<' */
+ BPF_JSLE 0xd0 /* eBPF only: signed '<=' */
So BPF_ADD | BPF_X | BPF_ALU means 32-bit addition in both classic BPF
and eBPF. There are only two registers in classic BPF, so it means A += X.
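As a hypothetical illustration (not part of this patch), the new unsigned '<' comparison can be exercised with the eBPF instruction macros from include/linux/filter.h; the sketch below returns 1 when R1 < R2 (unsigned) and 0 otherwise:

	/* Sketch only: r0 = (r1 < r2) ? 1 : 0, using the new BPF_JLT */
	struct bpf_insn prog[] = {
		BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_2, 2),
		BPF_MOV64_IMM(BPF_REG_0, 0),	/* branch not taken: r0 = 0 */
		BPF_EXIT_INSN(),
		BPF_MOV64_IMM(BPF_REG_0, 1),	/* branch taken: r0 = 1 */
		BPF_EXIT_INSN(),
	};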
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 7f94cbb0c77b..1ed7dda5abde 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -40,7 +40,7 @@ config ARM
select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
select HAVE_ARCH_TRACEHOOK
select HAVE_ARM_SMCCC if CPU_V7
- select HAVE_BPF_JIT
+ select HAVE_CBPF_JIT
select HAVE_CC_STACKPROTECTOR
select HAVE_CONTEXT_TRACKING
select HAVE_C_RECORDMCOUNT
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 28f09a00143b..5ce5a0e10c8f 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -60,7 +60,7 @@ config ARM64
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
- select HAVE_BPF_JIT
+ select HAVE_EBPF_JIT
select HAVE_C_RECORDMCOUNT
select HAVE_CC_STACKPROTECTOR
select HAVE_CMPXCHG_DOUBLE
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index adc88e707c09..d4b3753862aa 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -191,6 +191,7 @@ Image-dtb Image.gz-dtb: vmlinux scripts dtbs
PHONY += vdso_install
vdso_install:
$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso $@
+ $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso32 $@
# We use MRPROPER_FILES and CLEAN_FILES now
archclean:
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index 1dbaa901d7e5..b1c4947f907c 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -80,6 +80,7 @@ enum aarch64_insn_register_type {
AARCH64_INSN_REGTYPE_RM,
AARCH64_INSN_REGTYPE_RD,
AARCH64_INSN_REGTYPE_RA,
+ AARCH64_INSN_REGTYPE_RS,
};
enum aarch64_insn_register {
@@ -188,6 +189,8 @@ enum aarch64_insn_ldst_type {
AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX,
AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX,
AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX,
+ AARCH64_INSN_LDST_LOAD_EX,
+ AARCH64_INSN_LDST_STORE_EX,
};
enum aarch64_insn_adsb_type {
@@ -240,6 +243,23 @@ enum aarch64_insn_logic_type {
AARCH64_INSN_LOGIC_BIC_SETFLAGS
};
+enum aarch64_insn_prfm_type {
+ AARCH64_INSN_PRFM_TYPE_PLD,
+ AARCH64_INSN_PRFM_TYPE_PLI,
+ AARCH64_INSN_PRFM_TYPE_PST,
+};
+
+enum aarch64_insn_prfm_target {
+ AARCH64_INSN_PRFM_TARGET_L1,
+ AARCH64_INSN_PRFM_TARGET_L2,
+ AARCH64_INSN_PRFM_TARGET_L3,
+};
+
+enum aarch64_insn_prfm_policy {
+ AARCH64_INSN_PRFM_POLICY_KEEP,
+ AARCH64_INSN_PRFM_POLICY_STRM,
+};
+
#define __AARCH64_INSN_FUNCS(abbr, mask, val) \
static __always_inline bool aarch64_insn_is_##abbr(u32 code) \
{ return (code & (mask)) == (val); } \
@@ -247,8 +267,10 @@ static __always_inline u32 aarch64_insn_get_##abbr##_value(void) \
{ return (val); }
__AARCH64_INSN_FUNCS(adr_adrp, 0x1F000000, 0x10000000)
+__AARCH64_INSN_FUNCS(prfm, 0x3FC00000, 0x39800000)
__AARCH64_INSN_FUNCS(prfm_lit, 0xFF000000, 0xD8000000)
__AARCH64_INSN_FUNCS(str_reg, 0x3FE0EC00, 0x38206800)
+__AARCH64_INSN_FUNCS(ldadd, 0x3F20FC00, 0x38200000)
__AARCH64_INSN_FUNCS(ldr_reg, 0x3FE0EC00, 0x38606800)
__AARCH64_INSN_FUNCS(ldr_lit, 0xBF000000, 0x18000000)
__AARCH64_INSN_FUNCS(ldrsw_lit, 0xFF000000, 0x98000000)
@@ -349,6 +371,18 @@ u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
int offset,
enum aarch64_insn_variant variant,
enum aarch64_insn_ldst_type type);
+u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
+ enum aarch64_insn_register base,
+ enum aarch64_insn_register state,
+ enum aarch64_insn_size_type size,
+ enum aarch64_insn_ldst_type type);
+u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
+ enum aarch64_insn_register address,
+ enum aarch64_insn_register value,
+ enum aarch64_insn_size_type size);
+u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address,
+ enum aarch64_insn_register value,
+ enum aarch64_insn_size_type size);
u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
enum aarch64_insn_register src,
int imm, enum aarch64_insn_variant variant,
@@ -389,6 +423,10 @@ u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
int shift,
enum aarch64_insn_variant variant,
enum aarch64_insn_logic_type type);
+u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base,
+ enum aarch64_insn_prfm_type type,
+ enum aarch64_insn_prfm_target target,
+ enum aarch64_insn_prfm_policy policy);
s32 aarch64_get_branch_offset(u32 insn);
u32 aarch64_set_branch_offset(u32 insn, s32 offset);
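As a hypothetical usage sketch of the helpers declared above (not taken from this patch; the register and size constants follow the existing aarch64_insn_register and aarch64_insn_size_type enums), a caller would generate encodings like:

	/* prfm pldl1keep, [x1] -- prefetch for load into L1, temporal */
	u32 prfm = aarch64_insn_gen_prefetch(AARCH64_INSN_REG_1,
					     AARCH64_INSN_PRFM_TYPE_PLD,
					     AARCH64_INSN_PRFM_TARGET_L1,
					     AARCH64_INSN_PRFM_POLICY_KEEP);

	/* stadd x2, [x0] -- LSE atomic add of x2 into the memory at [x0] */
	u32 stadd = aarch64_insn_gen_stadd(AARCH64_INSN_REG_0,
					   AARCH64_INSN_REG_2,
					   AARCH64_INSN_SIZE_64);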
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 59a4139b3294..53a7e2f97a19 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -445,6 +445,7 @@ static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
shift = 10;
break;
case AARCH64_INSN_REGTYPE_RM:
+ case AARCH64_INSN_REGTYPE_RS:
shift = 16;
break;
default:
@@ -728,6 +729,151 @@ u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
offset >> shift);
}
+u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
+ enum aarch64_insn_register base,
+ enum aarch64_insn_register state,
+ enum aarch64_insn_size_type size,
+ enum aarch64_insn_ldst_type type)
+{
+ u32 insn;
+
+ switch (type) {
+ case AARCH64_INSN_LDST_LOAD_EX:
+ insn = aarch64_insn_get_load_ex_value();
+ break;
+ case AARCH64_INSN_LDST_STORE_EX:
+ insn = aarch64_insn_get_store_ex_value();
+ break;
+ default:
+ pr_err("%s: unknown load/store exclusive encoding %d\n", __func__, type);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ insn = aarch64_insn_encode_ldst_size(size, insn);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
+ reg);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
+ base);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
+ AARCH64_INSN_REG_ZR);
+
+ return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
+ state);
+}
+
+u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
+ enum aarch64_insn_register address,
+ enum aarch64_insn_register value,
+ enum aarch64_insn_size_type size)
+{
+ u32 insn = aarch64_insn_get_ldadd_value();
+
+ switch (size) {
+ case AARCH64_INSN_SIZE_32:
+ case AARCH64_INSN_SIZE_64:
+ break;
+ default:
+ pr_err("%s: unimplemented size encoding %d\n", __func__, size);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ insn = aarch64_insn_encode_ldst_size(size, insn);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
+ result);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
+ address);
+
+ return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
+ value);
+}
+
+u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address,
+ enum aarch64_insn_register value,
+ enum aarch64_insn_size_type size)
+{
+ /*
+ * STADD is simply encoded as an alias for LDADD with XZR as
+ * the destination register.
+ */
+ return aarch64_insn_gen_ldadd(AARCH64_INSN_REG_ZR, address,
+ value, size);
+}
+
+static u32 aarch64_insn_encode_prfm_imm(enum aarch64_insn_prfm_type type,
+ enum aarch64_insn_prfm_target target,
+ enum aarch64_insn_prfm_policy policy,
+ u32 insn)
+{
+ u32 imm_type = 0, imm_target = 0, imm_policy = 0;
+
+ switch (type) {
+ case AARCH64_INSN_PRFM_TYPE_PLD:
+ break;
+ case AARCH64_INSN_PRFM_TYPE_PLI:
+ imm_type = BIT(0);
+ break;
+ case AARCH64_INSN_PRFM_TYPE_PST:
+ imm_type = BIT(1);
+ break;
+ default:
+ pr_err("%s: unknown prfm type encoding %d\n", __func__, type);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ switch (target) {
+ case AARCH64_INSN_PRFM_TARGET_L1:
+ break;
+ case AARCH64_INSN_PRFM_TARGET_L2:
+ imm_target = BIT(0);
+ break;
+ case AARCH64_INSN_PRFM_TARGET_L3:
+ imm_target = BIT(1);
+ break;
+ default:
+ pr_err("%s: unknown prfm target encoding %d\n", __func__, target);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ switch (policy) {
+ case AARCH64_INSN_PRFM_POLICY_KEEP:
+ break;
+ case AARCH64_INSN_PRFM_POLICY_STRM:
+ imm_policy = BIT(0);
+ break;
+ default:
+ pr_err("%s: unknown prfm policy encoding %d\n", __func__, policy);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ /* In this case, imm5 is encoded into Rt field. */
+ insn &= ~GENMASK(4, 0);
+ insn |= imm_policy | (imm_target << 1) | (imm_type << 3);
+
+ return insn;
+}
+
+u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base,
+ enum aarch64_insn_prfm_type type,
+ enum aarch64_insn_prfm_target target,
+ enum aarch64_insn_prfm_policy policy)
+{
+ u32 insn = aarch64_insn_get_prfm_value();
+
+ insn = aarch64_insn_encode_ldst_size(AARCH64_INSN_SIZE_64, insn);
+
+ insn = aarch64_insn_encode_prfm_imm(type, target, policy, insn);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
+ base);
+
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, 0);
+}
+
u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
enum aarch64_insn_register src,
int imm, enum aarch64_insn_variant variant,
diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
index 807d08e28c27..b8f70da2e718 100644
--- a/arch/arm64/kernel/vdso32/Makefile
+++ b/arch/arm64/kernel/vdso32/Makefile
@@ -5,7 +5,10 @@
# A mix between the arm64 and arm vDSO Makefiles.
ifeq ($(cc-name),clang)
- CC_ARM32 := $(CC) $(CLANG_TARGET_ARM32) -no-integrated-as
+ CC_ARM32 := $(CC) $(CLANG_TARGET_ARM32)
+ ifneq ($(LLVM_IAS),1)
+ CC_ARM32 += -no-integrated-as
+ endif
GCC_ARM32_TC := $(realpath $(dir $(shell which $(CROSS_COMPILE_ARM32)ld))/..)
ifneq ($(GCC_ARM32_TC),)
CC_ARM32 += --gcc-toolchain=$(GCC_ARM32_TC)
@@ -29,13 +32,10 @@ cc32-ldoption = $(call try-run,\
# arm64 one.
# As a result we set our own flags here.
-# From top-level Makefile
-# NOSTDINC_FLAGS
-VDSO_CPPFLAGS := -nostdinc -isystem $(shell $(CC_ARM32) -print-file-name=include)
+# KBUILD_CPPFLAGS and NOSTDINC_FLAGS from top-level Makefile
+VDSO_CPPFLAGS := -D__KERNEL__ -nostdinc
+VDSO_CPPFLAGS += -isystem $(shell $(CC_ARM32) -print-file-name=include 2>/dev/null)
VDSO_CPPFLAGS += $(LINUXINCLUDE)
-VDSO_CPPFLAGS += -D__KERNEL__
-VDSO_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
-VDSO_CPPFLAGS += $(ARCH_CPPFLAGS) $(KCPPFLAGS)
# Common C and assembly flags
# From top-level Makefile
@@ -44,9 +44,6 @@ VDSO_CAFLAGS += $(call cc32-option,-fno-PIE)
ifdef CONFIG_DEBUG_INFO
VDSO_CAFLAGS += -g
endif
-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC_ARM32)), y)
-VDSO_CAFLAGS += -DCC_HAVE_ASM_GOTO
-endif
# From arm Makefile
VDSO_CAFLAGS += $(call cc32-option,-fno-dwarf2-cfi-asm)
@@ -168,7 +165,7 @@ quiet_cmd_vdsosym = VDSOSYM $@
cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
# Install commands for the unstripped file
-quiet_cmd_vdso_install = INSTALL $@
+quiet_cmd_vdso_install = INSTALL32 $@
cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/vdso32.so
vdso.so: $(obj)/vdso.so.dbg
diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h
index 7c16e547ccb2..76606e87233f 100644
--- a/arch/arm64/net/bpf_jit.h
+++ b/arch/arm64/net/bpf_jit.h
@@ -44,8 +44,12 @@
#define A64_COND_NE AARCH64_INSN_COND_NE /* != */
#define A64_COND_CS AARCH64_INSN_COND_CS /* unsigned >= */
#define A64_COND_HI AARCH64_INSN_COND_HI /* unsigned > */
+#define A64_COND_LS AARCH64_INSN_COND_LS /* unsigned <= */
+#define A64_COND_CC AARCH64_INSN_COND_CC /* unsigned < */
#define A64_COND_GE AARCH64_INSN_COND_GE /* signed >= */
#define A64_COND_GT AARCH64_INSN_COND_GT /* signed > */
+#define A64_COND_LE AARCH64_INSN_COND_LE /* signed <= */
+#define A64_COND_LT AARCH64_INSN_COND_LT /* signed < */
#define A64_B_(cond, imm19) A64_COND_BRANCH(cond, (imm19) << 2)
/* Unconditional branch (immediate) */
@@ -83,6 +87,23 @@
/* Rt = Rn[0]; Rt2 = Rn[8]; Rn += 16; */
#define A64_POP(Rt, Rt2, Rn) A64_LS_PAIR(Rt, Rt2, Rn, 16, LOAD, POST_INDEX)
+/* Load/store exclusive */
+#define A64_SIZE(sf) \
+ ((sf) ? AARCH64_INSN_SIZE_64 : AARCH64_INSN_SIZE_32)
+#define A64_LSX(sf, Rt, Rn, Rs, type) \
+ aarch64_insn_gen_load_store_ex(Rt, Rn, Rs, A64_SIZE(sf), \
+ AARCH64_INSN_LDST_##type)
+/* Rt = [Rn]; (atomic) */
+#define A64_LDXR(sf, Rt, Rn) \
+ A64_LSX(sf, Rt, Rn, A64_ZR, LOAD_EX)
+/* [Rn] = Rt; (atomic) Rs = [state] */
+#define A64_STXR(sf, Rt, Rn, Rs) \
+ A64_LSX(sf, Rt, Rn, Rs, STORE_EX)
+
+/* LSE atomics */
+#define A64_STADD(sf, Rn, Rs) \
+ aarch64_insn_gen_stadd(Rn, Rs, A64_SIZE(sf))
+
/* Add/subtract (immediate) */
#define A64_ADDSUB_IMM(sf, Rd, Rn, imm12, type) \
aarch64_insn_gen_add_sub_imm(Rd, Rn, imm12, \
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 6da00264d1fb..4cb127b2f10e 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -33,6 +33,7 @@
#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
#define TCALL_CNT (MAX_BPF_JIT_REG + 2)
+#define TMP_REG_3 (MAX_BPF_JIT_REG + 3)
/* Map BPF registers to A64 registers */
static const int bpf2a64[] = {
@@ -54,6 +55,7 @@ static const int bpf2a64[] = {
/* temporary registers for internal BPF JIT */
[TMP_REG_1] = A64_R(10),
[TMP_REG_2] = A64_R(11),
+ [TMP_REG_3] = A64_R(12),
/* tail_call_cnt */
[TCALL_CNT] = A64_R(26),
/* temporary register for blinding constants */
@@ -65,7 +67,7 @@ struct jit_ctx {
int idx;
int epilogue_offset;
int *offset;
- u32 *image;
+ __le32 *image;
};
static inline void emit(const u32 insn, struct jit_ctx *ctx)
@@ -125,7 +127,7 @@ static inline int bpf2a64_offset(int bpf_to, int bpf_from,
static void jit_fill_hole(void *area, unsigned int size)
{
- u32 *ptr;
+ __le32 *ptr;
/* We are guaranteed to have aligned memory. */
for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
*ptr++ = cpu_to_le32(AARCH64_BREAK_FAULT);
@@ -317,11 +319,13 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
const u8 src = bpf2a64[insn->src_reg];
const u8 tmp = bpf2a64[TMP_REG_1];
const u8 tmp2 = bpf2a64[TMP_REG_2];
+ const u8 tmp3 = bpf2a64[TMP_REG_3];
const s16 off = insn->off;
const s32 imm = insn->imm;
const int i = insn - ctx->prog->insnsi;
const bool is64 = BPF_CLASS(code) == BPF_ALU64;
- u8 jmp_cond;
+ const bool isdw = BPF_SIZE(code) == BPF_DW;
+ u8 jmp_cond, reg;
s32 jmp_offset;
#define check_imm(bits, imm) do { \
@@ -521,10 +525,14 @@ emit_bswap_uxt:
/* IF (dst COND src) JUMP off */
case BPF_JMP | BPF_JEQ | BPF_X:
case BPF_JMP | BPF_JGT | BPF_X:
+ case BPF_JMP | BPF_JLT | BPF_X:
case BPF_JMP | BPF_JGE | BPF_X:
+ case BPF_JMP | BPF_JLE | BPF_X:
case BPF_JMP | BPF_JNE | BPF_X:
case BPF_JMP | BPF_JSGT | BPF_X:
+ case BPF_JMP | BPF_JSLT | BPF_X:
case BPF_JMP | BPF_JSGE | BPF_X:
+ case BPF_JMP | BPF_JSLE | BPF_X:
emit(A64_CMP(1, dst, src), ctx);
emit_cond_jmp:
jmp_offset = bpf2a64_offset(i + off, i, ctx);
@@ -536,9 +544,15 @@ emit_cond_jmp:
case BPF_JGT:
jmp_cond = A64_COND_HI;
break;
+ case BPF_JLT:
+ jmp_cond = A64_COND_CC;
+ break;
case BPF_JGE:
jmp_cond = A64_COND_CS;
break;
+ case BPF_JLE:
+ jmp_cond = A64_COND_LS;
+ break;
case BPF_JSET:
case BPF_JNE:
jmp_cond = A64_COND_NE;
@@ -546,9 +560,15 @@ emit_cond_jmp:
case BPF_JSGT:
jmp_cond = A64_COND_GT;
break;
+ case BPF_JSLT:
+ jmp_cond = A64_COND_LT;
+ break;
case BPF_JSGE:
jmp_cond = A64_COND_GE;
break;
+ case BPF_JSLE:
+ jmp_cond = A64_COND_LE;
+ break;
default:
return -EFAULT;
}
@@ -560,10 +580,14 @@ emit_cond_jmp:
/* IF (dst COND imm) JUMP off */
case BPF_JMP | BPF_JEQ | BPF_K:
case BPF_JMP | BPF_JGT | BPF_K:
+ case BPF_JMP | BPF_JLT | BPF_K:
case BPF_JMP | BPF_JGE | BPF_K:
+ case BPF_JMP | BPF_JLE | BPF_K:
case BPF_JMP | BPF_JNE | BPF_K:
case BPF_JMP | BPF_JSGT | BPF_K:
+ case BPF_JMP | BPF_JSLT | BPF_K:
case BPF_JMP | BPF_JSGE | BPF_K:
+ case BPF_JMP | BPF_JSLE | BPF_K:
emit_a64_mov_i(1, tmp, imm, ctx);
emit(A64_CMP(1, dst, tmp), ctx);
goto emit_cond_jmp;
@@ -583,7 +607,7 @@ emit_cond_jmp:
break;
}
/* tail call */
- case BPF_JMP | BPF_CALL | BPF_X:
+ case BPF_JMP | BPF_TAIL_CALL:
if (emit_bpf_tail_call(ctx))
return -EFAULT;
break;
@@ -686,11 +710,29 @@ emit_cond_jmp:
break;
}
break;
+
/* STX XADD: lock *(u32 *)(dst + off) += src */
case BPF_STX | BPF_XADD | BPF_W:
/* STX XADD: lock *(u64 *)(dst + off) += src */
case BPF_STX | BPF_XADD | BPF_DW:
- goto notyet;
+ if (!off) {
+ reg = dst;
+ } else {
+ emit_a64_mov_i(1, tmp, off, ctx);
+ emit(A64_ADD(1, tmp, tmp, dst), ctx);
+ reg = tmp;
+ }
+ if (cpus_have_cap(ARM64_HAS_LSE_ATOMICS)) {
+ emit(A64_STADD(isdw, reg, src), ctx);
+ } else {
+ emit(A64_LDXR(isdw, tmp2, reg), ctx);
+ emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
+ emit(A64_STXR(isdw, tmp2, reg, tmp3), ctx);
+ jmp_offset = -3;
+ check_imm19(jmp_offset);
+ emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
+ }
+ break;
/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
case BPF_LD | BPF_ABS | BPF_W:
@@ -757,10 +799,6 @@ emit_cond_jmp:
}
break;
}
-notyet:
- pr_info_once("*** NOT YET: opcode %02x ***\n", code);
- return -EFAULT;
-
default:
pr_err_once("unknown opcode %02x\n", code);
return -EINVAL;
@@ -813,11 +851,6 @@ static inline void bpf_flush_icache(void *start, void *end)
flush_icache_range((unsigned long)start, (unsigned long)end);
}
-void bpf_jit_compile(struct bpf_prog *prog)
-{
- /* Nothing to do here. We support Internal BPF. */
-}
-
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
struct bpf_prog *tmp, *orig_prog = prog;
@@ -877,7 +910,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
/* 2. Now, the actual pass. */
- ctx.image = (u32 *)image_ptr;
+ ctx.image = (__le32 *)image_ptr;
ctx.idx = 0;
build_prologue(&ctx);
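For context (a hypothetical snippet, not from this commit, using the BPF_STX_XADD and related macros from include/linux/filter.h), the kind of instruction the new XADD case JITs -- previously bounced through the removed 'notyet' path -- is built like this:

	/* lock *(u64 *)(r1 + 0) += r2
	 * Emitted as STADD when LSE atomics are available, otherwise as
	 * the LDXR/ADD/STXR/CBNZ retry loop added above.
	 */
	struct bpf_insn xadd[] = {
		BPF_MOV64_IMM(BPF_REG_2, 1),
		BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};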
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index f00329d8210d..5f3eea935689 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -17,7 +17,7 @@ config MIPS
select HAVE_ARCH_KGDB
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
- select HAVE_BPF_JIT if !CPU_MICROMIPS
+ select HAVE_CBPF_JIT if !CPU_MICROMIPS
select HAVE_FUNCTION_TRACER
select HAVE_DYNAMIC_FTRACE
select HAVE_FTRACE_MCOUNT_RECORD
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 356a48c18dbd..28db06b51fca 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -131,7 +131,7 @@ config PPC
select IRQ_FORCED_THREADING
select HAVE_RCU_TABLE_FREE if SMP
select HAVE_SYSCALL_TRACEPOINTS
- select HAVE_BPF_JIT if CPU_BIG_ENDIAN
+ select HAVE_CBPF_JIT if CPU_BIG_ENDIAN
select HAVE_ARCH_JUMP_LABEL
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_HAS_GCOV_PROFILE_ALL
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 848539d8cac1..d466e6c0c5cb 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -124,7 +124,7 @@ config S390
select HAVE_ARCH_SOFT_DIRTY
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
- select HAVE_BPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES
+ select HAVE_EBPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES
select HAVE_CMPXCHG_DOUBLE
select HAVE_CMPXCHG_LOCAL
select HAVE_DEBUG_KMEMLEAK
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 633172c5b065..992ee13ccccc 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -1046,7 +1046,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
}
break;
}
- case BPF_JMP | BPF_CALL | BPF_X:
+ case BPF_JMP | BPF_TAIL_CALL:
/*
* Implicit input:
* B1: pointer to ctx
@@ -1317,14 +1317,6 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp)
}
/*
- * Classic BPF function stub. BPF programs will be converted into
- * eBPF and then bpf_int_jit_compile() will be called.
- */
-void bpf_jit_compile(struct bpf_prog *fp)
-{
-}
-
-/*
* Compile eBPF program "fp"
*/
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 4209f72c1973..7c89bdd38da2 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -34,7 +34,7 @@ config SPARC
select ARCH_WANT_IPC_PARSE_VERSION
select GENERIC_PCI_IOMAP
select HAVE_NMI_WATCHDOG if SPARC64
- select HAVE_BPF_JIT
+ select HAVE_CBPF_JIT
select HAVE_DEBUG_BUGVERBOSE
select GENERIC_SMP_IDLE_THREAD
select GENERIC_CLOCKEVENTS
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 85a7c0b0627c..4d3b0540279f 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -855,7 +855,7 @@ xadd: if (is_imm8(insn->off))
}
break;
- case BPF_JMP | BPF_CALL | BPF_X:
+ case BPF_JMP | BPF_TAIL_CALL:
emit_bpf_tail_call(&prog);
break;
@@ -1035,7 +1035,7 @@ common_load:
ilen = prog - temp;
if (ilen > BPF_MAX_INSN_SIZE) {
- pr_err("bpf_jit_compile fatal insn size error\n");
+ pr_err("bpf_jit: fatal insn size error\n");
return -EFAULT;
}
@@ -1050,7 +1050,7 @@ common_load:
*/
if (unlikely(proglen + ilen > oldproglen ||
proglen + ilen != addrs[i])) {
- pr_err("bpf_jit_compile fatal error\n");
+ pr_err("bpf_jit: fatal error\n");
return -EFAULT;
}
memcpy(image + proglen, temp, ilen);
@@ -1062,10 +1062,6 @@ common_load:
return proglen;
}
-void bpf_jit_compile(struct bpf_prog *prog)
-{
-}
-
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
struct bpf_binary_header *header = NULL;
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 69bfaa0bc6f4..1662707f0d1a 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -298,7 +299,8 @@ struct fastrpc_mmap {
int uncached;
int secure;
uintptr_t attr;
- bool is_filemap; /*flag to indicate map used in process init*/
+ bool is_filemap; /* flag to indicate map used in process init */
+ unsigned int ctx_refs; /* Indicates reference count for context map */
};
struct fastrpc_perf {
@@ -334,6 +336,7 @@ struct fastrpc_file {
struct fastrpc_perf perf;
struct dentry *debugfs_file;
struct mutex map_mutex;
+ struct mutex internal_map_mutex;
char *debug_buf;
/* Identifies the device (MINOR_NUM_DEV / MINOR_NUM_SECURE_DEV) */
int dev_minor;
@@ -473,9 +476,7 @@ static void fastrpc_mmap_add(struct fastrpc_mmap *map)
} else {
struct fastrpc_file *fl = map->fl;
- spin_lock(&fl->hlock);
hlist_add_head(&map->hn, &fl->maps);
- spin_unlock(&fl->hlock);
}
}
@@ -506,21 +507,17 @@ static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd, uintptr_t va,
}
spin_unlock(&me->hlock);
} else {
- spin_lock(&fl->hlock);
hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
if (va >= map->va &&
va + len <= map->va + map->len &&
map->fd == fd) {
- if (map->refs + 1 == INT_MAX) {
- spin_unlock(&fl->hlock);
+ if (map->refs + 1 == INT_MAX)
return -ETOOMANYREFS;
- }
map->refs++;
match = map;
break;
}
}
- spin_unlock(&fl->hlock);
}
if (match) {
*ppmap = match;
@@ -560,7 +557,7 @@ static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
hlist_for_each_entry_safe(map, n, &me->maps, hn) {
if (map->refs == 1 && map->raddr == va &&
map->raddr + map->len == va + len &&
- /*Remove map if not used in process initialization*/
+ /* Remove map if not used in process initialization */
!map->is_filemap) {
match = map;
hlist_del_init(&map->hn);
@@ -572,18 +569,17 @@ static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
*ppmap = match;
return 0;
}
- spin_lock(&fl->hlock);
hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
- if (map->refs == 1 && map->raddr == va &&
+ /* Remove if only one reference map and no context map */
+ if (map->refs == 1 && !map->ctx_refs && map->raddr == va &&
map->raddr + map->len == va + len &&
- /*Remove map if not used in process initialization*/
+ /* Remove map if not used in process initialization */
!map->is_filemap) {
match = map;
hlist_del_init(&map->hn);
break;
}
}
- spin_unlock(&fl->hlock);
if (match) {
*ppmap = match;
return 0;
@@ -614,17 +610,13 @@ static void fastrpc_mmap_free(struct fastrpc_mmap *map)
}
if (map->flags == ADSP_MMAP_HEAP_ADDR ||
map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
- spin_lock(&me->hlock);
map->refs--;
- if (!map->refs)
+ if (!map->refs && !map->ctx_refs)
hlist_del_init(&map->hn);
- spin_unlock(&me->hlock);
} else {
- spin_lock(&fl->hlock);
map->refs--;
- if (!map->refs)
+ if (!map->refs && !map->ctx_refs)
hlist_del_init(&map->hn);
- spin_unlock(&fl->hlock);
}
if (map->refs > 0)
return;
@@ -716,6 +708,7 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, unsigned attr,
map->fd = fd;
map->attr = attr;
map->is_filemap = false;
+ map->ctx_refs = 0;
if (mflags == ADSP_MMAP_HEAP_ADDR ||
mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
DEFINE_DMA_ATTRS(rh_attrs);
@@ -1133,8 +1126,13 @@ static void context_free(struct smq_invoke_ctx *ctx)
spin_lock(&ctx->fl->hlock);
hlist_del_init(&ctx->hn);
spin_unlock(&ctx->fl->hlock);
- for (i = 0; i < nbufs; ++i)
+ mutex_lock(&ctx->fl->map_mutex);
+ for (i = 0; i < nbufs; ++i) {
+ if (ctx->maps[i] && ctx->maps[i]->ctx_refs)
+ ctx->maps[i]->ctx_refs--;
fastrpc_mmap_free(ctx->maps[i]);
+ }
+ mutex_unlock(&ctx->fl->map_mutex);
fastrpc_buf_free(ctx->buf, 1);
fastrpc_buf_free(ctx->lbuf, 1);
ctx->magic = 0;
@@ -1274,10 +1272,14 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
uintptr_t buf = (uintptr_t)lpra[i].buf.pv;
size_t len = lpra[i].buf.len;
+ mutex_lock(&ctx->fl->map_mutex);
if (ctx->fds[i] && (ctx->fds[i] != -1))
fastrpc_mmap_create(ctx->fl, ctx->fds[i],
ctx->attrs[i], buf, len,
mflags, &ctx->maps[i]);
+ if (ctx->maps[i])
+ ctx->maps[i]->ctx_refs++;
+ mutex_unlock(&ctx->fl->map_mutex);
ipage += 1;
}
metalen = copylen = (size_t)&ipage[0];
@@ -1494,7 +1496,11 @@ static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
if (err)
goto bail;
} else {
+ mutex_lock(&ctx->fl->map_mutex);
+ if (ctx->maps[i]->ctx_refs)
+ ctx->maps[i]->ctx_refs--;
fastrpc_mmap_free(ctx->maps[i]);
+ mutex_unlock(&ctx->fl->map_mutex);
ctx->maps[i] = NULL;
}
}
@@ -1903,10 +1909,12 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
init->filelen))
goto bail;
if (init->filelen) {
+ mutex_lock(&fl->map_mutex);
VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0,
init->file, init->filelen, mflags, &file));
if (file)
file->is_filemap = true;
+ mutex_unlock(&fl->map_mutex);
if (err)
goto bail;
}
@@ -1996,9 +2004,11 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
inbuf.pageslen = 0;
if (!me->staticpd_flags) {
inbuf.pageslen = 1;
+ mutex_lock(&fl->map_mutex);
VERIFY(err, !fastrpc_mmap_create(fl, -1, 0, init->mem,
init->memlen, ADSP_MMAP_REMOTE_HEAP_ADDR,
&mem));
+ mutex_unlock(&fl->map_mutex);
if (err)
goto bail;
phys = mem->phys;
@@ -2050,10 +2060,15 @@ bail:
if (mem->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
hyp_assign_phys(mem->phys, (uint64_t)mem->size,
destVM, 1, srcVM, hlosVMperm, 1);
+ mutex_lock(&fl->map_mutex);
fastrpc_mmap_free(mem);
+ mutex_unlock(&fl->map_mutex);
}
- if (file)
+ if (file) {
+ mutex_lock(&fl->map_mutex);
fastrpc_mmap_free(file);
+ mutex_unlock(&fl->map_mutex);
+ }
return err;
}
@@ -2309,7 +2324,7 @@ static int fastrpc_internal_munmap(struct fastrpc_file *fl,
struct fastrpc_buf *rbuf = NULL, *free = NULL;
struct hlist_node *n;
- mutex_lock(&fl->map_mutex);
+ mutex_lock(&fl->internal_map_mutex);
spin_lock(&fl->hlock);
hlist_for_each_entry_safe(rbuf, n, &fl->remote_bufs, hn_rem) {
if (rbuf->raddr && (rbuf->flags == ADSP_MMAP_ADD_PAGES)) {
@@ -2328,11 +2343,13 @@ static int fastrpc_internal_munmap(struct fastrpc_file *fl,
if (err)
goto bail;
fastrpc_buf_free(rbuf, 0);
- mutex_unlock(&fl->map_mutex);
+ mutex_unlock(&fl->internal_map_mutex);
return err;
}
+ mutex_lock(&fl->map_mutex);
VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
+ mutex_unlock(&fl->map_mutex);
if (err)
goto bail;
if (map) {
@@ -2340,12 +2357,17 @@ static int fastrpc_internal_munmap(struct fastrpc_file *fl,
map->phys, map->size, map->flags));
if (err)
goto bail;
+ mutex_lock(&fl->map_mutex);
fastrpc_mmap_free(map);
+ mutex_unlock(&fl->map_mutex);
}
bail:
- if (err && map)
+ if (err && map) {
+ mutex_lock(&fl->map_mutex);
fastrpc_mmap_add(map);
- mutex_unlock(&fl->map_mutex);
+ mutex_unlock(&fl->map_mutex);
+ }
+ mutex_unlock(&fl->internal_map_mutex);
return err;
}
@@ -2358,7 +2380,7 @@ static int fastrpc_internal_mmap(struct fastrpc_file *fl,
uintptr_t raddr = 0;
int err = 0;
- mutex_lock(&fl->map_mutex);
+ mutex_lock(&fl->internal_map_mutex);
if (ud->flags == ADSP_MMAP_ADD_PAGES) {
DEFINE_DMA_ATTRS(dma_attr);
@@ -2385,9 +2407,11 @@ static int fastrpc_internal_mmap(struct fastrpc_file *fl,
} else {
uintptr_t va_to_dsp;
+ mutex_lock(&fl->map_mutex);
VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
(uintptr_t)ud->vaddrin, ud->size,
ud->flags, &map));
+ mutex_unlock(&fl->map_mutex);
if (err)
goto bail;
@@ -2404,9 +2428,16 @@ static int fastrpc_internal_mmap(struct fastrpc_file *fl,
}
ud->vaddrout = raddr;
bail:
- if (err && map)
- fastrpc_mmap_free(map);
- mutex_unlock(&fl->map_mutex);
+ if (err) {
+ if (map) {
+ mutex_lock(&fl->map_mutex);
+ fastrpc_mmap_free(map);
+ mutex_unlock(&fl->map_mutex);
+ }
+ if (!IS_ERR_OR_NULL(rbuf))
+ fastrpc_buf_free(rbuf, 0);
+ }
+ mutex_unlock(&fl->internal_map_mutex);
return err;
}
@@ -2562,8 +2593,8 @@ static void fastrpc_session_free(struct fastrpc_channel_ctx *chan,
static int fastrpc_file_free(struct fastrpc_file *fl)
{
- struct hlist_node *n;
- struct fastrpc_mmap *map = NULL;
+ struct hlist_node *n = NULL;
+ struct fastrpc_mmap *map = NULL, *lmap = NULL;
int cid;
if (!fl)
@@ -2587,9 +2618,18 @@ static int fastrpc_file_free(struct fastrpc_file *fl)
fastrpc_buf_free(fl->init_mem, 0);
fastrpc_context_list_dtor(fl);
fastrpc_cached_buf_list_free(fl);
- hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
- fastrpc_mmap_free(map);
- }
+ mutex_lock(&fl->map_mutex);
+ do {
+ lmap = NULL;
+ hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
+ hlist_del_init(&map->hn);
+ lmap = map;
+ break;
+ }
+ fastrpc_mmap_free(lmap);
+ } while (lmap);
+ mutex_unlock(&fl->map_mutex);
+
if (fl->ssrcount == fl->apps->channel[cid].ssrcount)
kref_put_mutex(&fl->apps->channel[cid].kref,
fastrpc_channel_close, &fl->apps->smd_mutex);
@@ -2600,6 +2640,7 @@ static int fastrpc_file_free(struct fastrpc_file *fl)
bail:
fastrpc_remote_buf_list_free(fl);
mutex_destroy(&fl->map_mutex);
+ mutex_destroy(&fl->internal_map_mutex);
kfree(fl);
return 0;
}
@@ -2611,7 +2652,6 @@ static int fastrpc_device_release(struct inode *inode, struct file *file)
if (fl) {
if (fl->debugfs_file != NULL)
debugfs_remove(fl->debugfs_file);
-
fastrpc_file_free(fl);
file->private_data = NULL;
}
@@ -2914,6 +2954,7 @@ static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
map->secure, map->attr);
}
mutex_unlock(&fl->map_mutex);
+ spin_lock(&fl->hlock);
len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
"\n%s %s %s\n", title,
" LIST OF PENDING SMQCONTEXTS ", title);
@@ -3025,8 +3066,10 @@ static int fastrpc_channel_open(struct fastrpc_file *fl)
}
if (cid == 0 && me->channel[cid].ssrcount !=
me->channel[cid].prevssrcount) {
+ mutex_lock(&fl->map_mutex);
if (fastrpc_mmap_remove_ssr(fl))
pr_err("ADSPRPC: SSR: Failed to unmap remote heap\n");
+ mutex_unlock(&fl->map_mutex);
me->channel[cid].prevssrcount =
me->channel[cid].ssrcount;
}
@@ -3091,6 +3134,7 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
fl->debugfs_file = debugfs_file;
memset(&fl->perf, 0, sizeof(fl->perf));
filp->private_data = fl;
+ mutex_init(&fl->internal_map_mutex);
mutex_init(&fl->map_mutex);
spin_lock(&me->hlock);
hlist_add_head(&fl->hn, &me->drivers);
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
index ed52ebbb786a..9241f339716f 100644
--- a/drivers/char/diag/diag_dci.c
+++ b/drivers/char/diag/diag_dci.c
@@ -370,7 +370,7 @@ static int diag_dci_get_buffer(struct diag_dci_client_tbl *client,
return -EIO;
}
-void diag_dci_wakeup_clients()
+void diag_dci_wakeup_clients(void)
{
struct list_head *start, *temp;
struct diag_dci_client_tbl *entry = NULL;
diff --git a/drivers/char/diag/diag_memorydevice.c b/drivers/char/diag/diag_memorydevice.c
index 7cdb2e36eece..a68c0125861a 100644
--- a/drivers/char/diag/diag_memorydevice.c
+++ b/drivers/char/diag/diag_memorydevice.c
@@ -82,7 +82,7 @@ int diag_md_register(int id, int ctx, struct diag_mux_ops *ops)
return 0;
}
-void diag_md_open_all()
+void diag_md_open_all(void)
{
int i;
struct diag_md_info *ch = NULL;
@@ -98,7 +98,7 @@ void diag_md_open_all()
return;
}
-void diag_md_close_all()
+void diag_md_close_all(void)
{
int i, j;
unsigned long flags;
diff --git a/drivers/char/diag/diag_mux.c b/drivers/char/diag/diag_mux.c
index 8d766e1ae583..c93a62b397b4 100644
--- a/drivers/char/diag/diag_mux.c
+++ b/drivers/char/diag/diag_mux.c
@@ -49,7 +49,7 @@ static struct diag_logger_ops md_log_ops = {
.close_peripheral = diag_md_close_peripheral,
};
-int diag_mux_init()
+int diag_mux_init(void)
{
diag_mux = kzalloc(sizeof(struct diag_mux_state_t),
GFP_KERNEL);
@@ -76,7 +76,7 @@ int diag_mux_init()
return 0;
}
-void diag_mux_exit()
+void diag_mux_exit(void)
{
kfree(diag_mux);
}
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 47e0dab9d762..571654342764 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -3496,7 +3496,7 @@ static ssize_t diagchar_write(struct file *file, const char __user *buf,
return err;
}
-void diag_ws_init()
+void diag_ws_init(void)
{
driver->dci_ws.ref_count = 0;
driver->dci_ws.copy_count = 0;
@@ -3522,7 +3522,7 @@ static void diag_stats_init(void)
driver->event_stats.drop_count = 0;
}
-void diag_ws_on_notify()
+void diag_ws_on_notify(void)
{
/*
* Do not deal with reference count here as there can be spurious
@@ -3665,7 +3665,7 @@ void diag_ws_reset(int type)
diag_ws_release();
}
-void diag_ws_release()
+void diag_ws_release(void)
{
if (driver->dci_ws.ref_count == 0 && driver->md_ws.ref_count == 0)
pm_relax(driver->diag_dev);
diff --git a/drivers/char/diag/diagfwd_bridge.c b/drivers/char/diag/diagfwd_bridge.c
index 298813917dec..51be1a939d45 100644
--- a/drivers/char/diag/diagfwd_bridge.c
+++ b/drivers/char/diag/diagfwd_bridge.c
@@ -299,7 +299,7 @@ fail:
return err;
}
-void diagfwd_bridge_exit()
+void diagfwd_bridge_exit(void)
{
#ifdef USB_QCOM_DIAG_BRIDGE
diag_hsic_exit();
@@ -326,7 +326,7 @@ int diagfwd_bridge_write(int id, unsigned char *buf, int len)
return 0;
}
-uint16_t diag_get_remote_device_mask()
+uint16_t diag_get_remote_device_mask(void)
{
int i;
uint16_t remote_dev = 0;
diff --git a/drivers/char/diag/diagfwd_mhi.c b/drivers/char/diag/diagfwd_mhi.c
index edfba6bb09c9..75a24e27f390 100644
--- a/drivers/char/diag/diagfwd_mhi.c
+++ b/drivers/char/diag/diagfwd_mhi.c
@@ -699,7 +699,7 @@ static void diag_mhi_dev_exit(int dev)
diagmem_exit(driver, mhi_info->mempool);
}
-int diag_mhi_init()
+int diag_mhi_init(void)
{
int i;
int err = 0;
@@ -748,7 +748,7 @@ fail:
return -ENOMEM;
}
-void diag_mhi_exit()
+void diag_mhi_exit(void)
{
int i;
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 4b9f779e525e..c03345ec61f4 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -1,5 +1,5 @@
/* Copyright (c) 2008-2021, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -272,8 +272,15 @@ kgsl_mem_entry_create(void)
return entry;
}
#ifdef CONFIG_DMA_SHARED_BUFFER
-static void kgsl_destroy_ion(struct kgsl_dma_buf_meta *meta)
+static void kgsl_destroy_ion(struct kgsl_memdesc *memdesc)
{
+ struct kgsl_mem_entry *entry = container_of(memdesc,
+ struct kgsl_mem_entry, memdesc);
+ struct kgsl_dma_buf_meta *meta = entry->priv_data;
+
+ if (memdesc->priv & KGSL_MEMDESC_MAPPED)
+ return;
+
if (meta != NULL) {
dma_buf_unmap_attachment(meta->attach, meta->table,
DMA_FROM_DEVICE);
@@ -281,13 +288,47 @@ static void kgsl_destroy_ion(struct kgsl_dma_buf_meta *meta)
dma_buf_put(meta->dmabuf);
kfree(meta);
}
+
+ /*
+ * Ion takes care of freeing the sg_table for us so
+ * clear the sg table to ensure kgsl_sharedmem_free
+ * doesn't try to free it again
+ */
+ memdesc->sgt = NULL;
}
-#else
-static void kgsl_destroy_ion(struct kgsl_dma_buf_meta *meta)
+
+static struct kgsl_memdesc_ops kgsl_dmabuf_ops = {
+ .free = kgsl_destroy_ion,
+};
+#endif
+
+static void kgsl_destroy_anon(struct kgsl_memdesc *memdesc)
{
+ int i = 0, j;
+ struct scatterlist *sg;
+ struct page *page;
+ if (memdesc->priv & KGSL_MEMDESC_MAPPED)
+ return;
+
+ for_each_sg(memdesc->sgt->sgl, sg, memdesc->sgt->nents, i) {
+ page = sg_page(sg);
+ for (j = 0; j < (sg->length >> PAGE_SHIFT); j++) {
+ /*
+ * Mark the pages in the scatterlist as dirty if they
+ * were writable by the GPU.
+ */
+ if (!(memdesc->flags & KGSL_MEMFLAGS_GPUREADONLY))
+ set_page_dirty_lock(nth_page(page, j));
+
+ /*
+ * Put the page reference taken using get_user_pages
+ * during memdesc_sg_virt.
+ */
+ put_page(nth_page(page, j));
+ }
+ }
}
-#endif
void
kgsl_mem_entry_destroy(struct kref *kref)
@@ -310,41 +351,8 @@ kgsl_mem_entry_destroy(struct kref *kref)
atomic_long_sub(entry->memdesc.size,
&kgsl_driver.stats.mapped);
- /*
- * Ion takes care of freeing the sg_table for us so
- * clear the sg table before freeing the sharedmem
- * so kgsl_sharedmem_free doesn't try to free it again
- */
- if (memtype == KGSL_MEM_ENTRY_ION)
- entry->memdesc.sgt = NULL;
-
- if ((memtype == KGSL_MEM_ENTRY_USER)
- && !(entry->memdesc.flags & KGSL_MEMFLAGS_GPUREADONLY)) {
- int i = 0, j;
- struct scatterlist *sg;
- struct page *page;
- /*
- * Mark all of pages in the scatterlist as dirty since they
- * were writable by the GPU.
- */
- for_each_sg(entry->memdesc.sgt->sgl, sg,
- entry->memdesc.sgt->nents, i) {
- page = sg_page(sg);
- for (j = 0; j < (sg->length >> PAGE_SHIFT); j++)
- set_page_dirty_lock(nth_page(page, j));
- }
- }
-
kgsl_sharedmem_free(&entry->memdesc);
- switch (memtype) {
- case KGSL_MEM_ENTRY_ION:
- kgsl_destroy_ion(entry->priv_data);
- break;
- default:
- break;
- }
-
kfree(entry);
}
EXPORT_SYMBOL(kgsl_mem_entry_destroy);
@@ -2205,6 +2213,10 @@ out:
return ret;
}
+static struct kgsl_memdesc_ops kgsl_usermem_ops = {
+ .free = kgsl_destroy_anon,
+};
+
static int kgsl_setup_anon_useraddr(struct kgsl_pagetable *pagetable,
struct kgsl_mem_entry *entry, unsigned long hostptr,
size_t offset, size_t size)
@@ -2220,6 +2232,7 @@ static int kgsl_setup_anon_useraddr(struct kgsl_pagetable *pagetable,
entry->memdesc.pagetable = pagetable;
entry->memdesc.size = (uint64_t) size;
entry->memdesc.flags |= KGSL_MEMFLAGS_USERMEM_ADDR;
+ entry->memdesc.ops = &kgsl_usermem_ops;
if (kgsl_memdesc_use_cpu_map(&entry->memdesc)) {
@@ -2530,11 +2543,6 @@ long kgsl_ioctl_gpuobj_import(struct kgsl_device_private *dev_priv,
return 0;
unmap:
- if (kgsl_memdesc_usermem_type(&entry->memdesc) == KGSL_MEM_ENTRY_ION) {
- kgsl_destroy_ion(entry->priv_data);
- entry->memdesc.sgt = NULL;
- }
-
kgsl_sharedmem_free(&entry->memdesc);
out:
@@ -2631,6 +2639,7 @@ static int kgsl_setup_dma_buf(struct kgsl_device *device,
entry->priv_data = meta;
entry->memdesc.pagetable = pagetable;
entry->memdesc.size = 0;
+ entry->memdesc.ops = &kgsl_dmabuf_ops;
/* USE_CPU_MAP is not implemented for ION. */
entry->memdesc.flags &= ~((uint64_t) KGSL_MEMFLAGS_USE_CPU_MAP);
entry->memdesc.flags |= KGSL_MEMFLAGS_USERMEM_ION;
@@ -2841,14 +2850,6 @@ long kgsl_ioctl_map_user_mem(struct kgsl_device_private *dev_priv,
return result;
error_attach:
- switch (kgsl_memdesc_usermem_type(&entry->memdesc)) {
- case KGSL_MEM_ENTRY_ION:
- kgsl_destroy_ion(entry->priv_data);
- entry->memdesc.sgt = NULL;
- break;
- default:
- break;
- }
kgsl_sharedmem_free(&entry->memdesc);
error:
/* Clear gpuaddr here so userspace doesn't get any wrong ideas */
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 31e8a7ea5f65..1f10cb4c1568 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -1,5 +1,5 @@
/* Copyright (c) 2011-2021, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -2397,14 +2397,18 @@ static uint64_t kgsl_iommu_find_svm_region(struct kgsl_pagetable *pagetable,
static bool iommu_addr_in_svm_ranges(struct kgsl_iommu_pt *pt,
u64 gpuaddr, u64 size)
{
+ u64 end = gpuaddr + size;
+
+ /* Make sure size is not zero and we don't wrap around */
+ if (end <= gpuaddr)
+ return false;
+
if ((gpuaddr >= pt->compat_va_start && gpuaddr < pt->compat_va_end) &&
- ((gpuaddr + size) > pt->compat_va_start &&
- (gpuaddr + size) <= pt->compat_va_end))
+ (end > pt->compat_va_start && end <= pt->compat_va_end))
return true;
if ((gpuaddr >= pt->svm_start && gpuaddr < pt->svm_end) &&
- ((gpuaddr + size) > pt->svm_start &&
- (gpuaddr + size) <= pt->svm_end))
+ (end > pt->svm_start && end <= pt->svm_end))
return true;
return false;
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
index 228f3396ae90..7aa68abbf91c 100644
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -1,5 +1,5 @@
/* Copyright (c) 2002,2007-2017,2021, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -498,6 +498,8 @@ kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
size = kgsl_memdesc_footprint(memdesc);
ret = pagetable->pt_ops->mmu_unmap(pagetable, memdesc);
+ if (ret)
+ return ret;
atomic_dec(&pagetable->stats.entries);
atomic_long_sub(size, &pagetable->stats.mapped);
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index dfbea53c306b..39039b27d0e0 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -1,4 +1,5 @@
/* Copyright (c) 2002,2007-2017,2020-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -407,6 +408,9 @@ done:
static void kgsl_page_alloc_free(struct kgsl_memdesc *memdesc)
{
+ if (memdesc->priv & KGSL_MEMDESC_MAPPED)
+ return;
+
kgsl_page_alloc_unmap_kernel(memdesc);
/* we certainly do not expect the hostptr to still be mapped */
BUG_ON(memdesc->hostptr);
@@ -510,6 +514,9 @@ static void kgsl_cma_coherent_free(struct kgsl_memdesc *memdesc)
{
struct dma_attrs *attrs = NULL;
+ if (memdesc->priv & KGSL_MEMDESC_MAPPED)
+ return;
+
if (memdesc->hostptr) {
if (memdesc->priv & KGSL_MEMDESC_SECURE) {
atomic_long_sub(memdesc->size,
@@ -874,6 +881,7 @@ void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc)
if (memdesc->sgt) {
sg_free_table(memdesc->sgt);
kfree(memdesc->sgt);
+ memdesc->sgt = NULL;
}
if (memdesc->pages)
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 9a5be0ca4342..c0f276e27561 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -246,6 +246,7 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
unsigned usages;
unsigned offset;
unsigned i;
+ unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
report = hid_register_report(parser->device, report_type, parser->global.report_id);
if (!report) {
@@ -269,8 +270,11 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
offset = report->size;
report->size += parser->global.report_size * parser->global.report_count;
+ if (parser->device->ll_driver->max_buffer_size)
+ max_buffer_size = parser->device->ll_driver->max_buffer_size;
+
/* Total size check: Allow for possible report index byte */
- if (report->size > (HID_MAX_BUFFER_SIZE - 1) << 3) {
+ if (report->size > (max_buffer_size - 1) << 3) {
hid_err(parser->device, "report is too long\n");
return -1;
}
@@ -964,8 +968,8 @@ struct hid_report *hid_validate_values(struct hid_device *hid,
* Validating on id 0 means we should examine the first
* report in the list.
*/
- report = list_entry(
- hid->report_enum[type].report_list.next,
+ report = list_first_entry_or_null(
+ &hid->report_enum[type].report_list,
struct hid_report, list);
} else {
report = hid->report_enum[type].report_id_hash[id];
@@ -1112,6 +1116,9 @@ static s32 snto32(__u32 value, unsigned n)
if (!value || !n)
return 0;
+ if (n > 32)
+ n = 32;
+
switch (n) {
case 8: return ((__s8)value);
case 16: return ((__s16)value);
@@ -1506,6 +1513,7 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
struct hid_report_enum *report_enum = hid->report_enum + type;
struct hid_report *report;
struct hid_driver *hdrv;
+ int max_buffer_size = HID_MAX_BUFFER_SIZE;
unsigned int a;
u32 rsize, csize = size;
u8 *cdata = data;
@@ -1522,10 +1530,13 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
rsize = hid_compute_report_size(report);
- if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE)
- rsize = HID_MAX_BUFFER_SIZE - 1;
- else if (rsize > HID_MAX_BUFFER_SIZE)
- rsize = HID_MAX_BUFFER_SIZE;
+ if (hid->ll_driver->max_buffer_size)
+ max_buffer_size = hid->ll_driver->max_buffer_size;
+
+ if (report_enum->numbered && rsize >= max_buffer_size)
+ rsize = max_buffer_size - 1;
+ else if (rsize > max_buffer_size)
+ rsize = max_buffer_size;
if (csize < rsize) {
dbg_hid("report %d is too short, (%d < %d)\n", report->id,
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 857917086cb0..6d9f58a446fa 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1673,14 +1673,8 @@ void input_reset_device(struct input_dev *dev)
mutex_lock(&dev->mutex);
spin_lock_irqsave(&dev->event_lock, flags);
- /*
- * Keys that have been pressed at suspend time are unlikely
- * to be still pressed when we resume.
- */
- if (!test_bit(INPUT_PROP_NO_DUMMY_RELEASE, dev->propbit)) {
- input_dev_toggle(dev, true);
- input_dev_release_keys(dev);
- }
+ input_dev_toggle(dev, true);
+ input_dev_release_keys(dev);
spin_unlock_irqrestore(&dev->event_lock, flags);
mutex_unlock(&dev->mutex);
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 3f1617ca2fc0..137062b22ca9 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -642,9 +642,11 @@ static int arm_lpae_map_sg(struct io_pgtable_ops *ops, unsigned long iova,
arm_lpae_iopte *ptep = ms.pgtable +
ARM_LPAE_LVL_IDX(iova, MAP_STATE_LVL,
data);
- arm_lpae_init_pte(
+ ret = arm_lpae_init_pte(
data, iova, phys, prot, MAP_STATE_LVL,
ptep, ms.prev_pgtable, false);
+ if (ret)
+ goto out_err;
ms.num_pte++;
} else {
ret = __arm_lpae_map(data, iova, phys, pgsize,
diff --git a/drivers/leds/leds-qpnp.c b/drivers/leds/leds-qpnp.c
index deec2c4e246a..0eec6d0f52d4 100644
--- a/drivers/leds/leds-qpnp.c
+++ b/drivers/leds/leds-qpnp.c
@@ -2819,7 +2819,7 @@ static ssize_t rgb_blink_store(struct device *dev,
const char *buf, size_t count)
{
struct rgb_sync *rgb_sync;
- struct qpnp_led_data *led;
+ struct qpnp_led_data *led = NULL;
unsigned long blinking;
struct led_classdev *led_cdev = dev_get_drvdata(dev);
ssize_t rc = -EINVAL, i;
diff --git a/drivers/media/platform/msm/camera_v2/common/msm_camera_tz_util.c b/drivers/media/platform/msm/camera_v2/common/msm_camera_tz_util.c
index ae3f7e89a8b4..0d5a88e78cfe 100644
--- a/drivers/media/platform/msm/camera_v2/common/msm_camera_tz_util.c
+++ b/drivers/media/platform/msm/camera_v2/common/msm_camera_tz_util.c
@@ -74,7 +74,7 @@ static struct msm_camera_tz_ctrl_t msm_camera_tz_ctrl = {
static DEFINE_MUTEX(msm_camera_tz_util_lock);
-struct qseecom_handle *msm_camera_tz_get_ta_handle()
+struct qseecom_handle *msm_camera_tz_get_ta_handle(void)
{
return msm_camera_tz_ctrl.ta_qseecom_handle;
}
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index b56b48a6b7b0..b153f8fda0b6 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -2632,7 +2632,7 @@ err_sess_abort:
return;
}
-void msm_comm_handle_thermal_event()
+void msm_comm_handle_thermal_event(void)
{
struct msm_vidc_core *core;
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 2ed1453b9224..1e900e4edc8b 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -2081,6 +2081,7 @@ static void team_setup(struct net_device *dev)
dev->flags |= IFF_MULTICAST;
dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
dev->priv_flags |= IFF_NO_QUEUE;
+ dev->priv_flags |= IFF_TEAM;
/*
* Indicate we support unicast address filtering. That way core won't
diff --git a/drivers/net/wireguard/compat/Makefile.include b/drivers/net/wireguard/compat/Makefile.include
index 513dba444a37..df7670ae8d6c 100644
--- a/drivers/net/wireguard/compat/Makefile.include
+++ b/drivers/net/wireguard/compat/Makefile.include
@@ -6,11 +6,16 @@ kbuild-dir := $(if $(filter /%,$(src)),$(src),$(srctree)/$(src))
ccflags-y += -include $(kbuild-dir)/compat/compat.h
asflags-y += -include $(kbuild-dir)/compat/compat-asm.h
+LINUXINCLUDE := -DCOMPAT_VERSION=$(VERSION) -DCOMPAT_PATCHLEVEL=$(PATCHLEVEL) -DCOMPAT_SUBLEVEL=$(SUBLEVEL) -I$(kbuild-dir)/compat/version $(LINUXINCLUDE)
ifeq ($(wildcard $(srctree)/include/linux/ptr_ring.h),)
ccflags-y += -I$(kbuild-dir)/compat/ptr_ring/include
endif
+ifeq ($(wildcard $(srctree)/include/linux/skb_array.h),)
+ccflags-y += -I$(kbuild-dir)/compat/skb_array/include
+endif
+
ifeq ($(wildcard $(srctree)/include/linux/siphash.h),)
ccflags-y += -I$(kbuild-dir)/compat/siphash/include
wireguard-y += compat/siphash/siphash.o
@@ -64,6 +69,10 @@ ifeq ($(wildcard $(srctree)/arch/arm64/include/asm/neon.h)$(CONFIG_ARM64),y)
ccflags-y += -I$(kbuild-dir)/compat/neon-arm/include
endif
+ifeq ($(wildcard $(srctree)/include/net/dst_metadata.h),)
+ccflags-y += -I$(kbuild-dir)/compat/dstmetadata/include
+endif
+
ifeq ($(CONFIG_X86_64),y)
ifeq ($(ssse3_instr),)
ssse3_instr := $(call as-instr,pshufb %xmm0$(comma)%xmm0,-DCONFIG_AS_SSSE3=1)
diff --git a/drivers/net/wireguard/compat/compat-asm.h b/drivers/net/wireguard/compat/compat-asm.h
index fde21dabba4f..951fc1094470 100644
--- a/drivers/net/wireguard/compat/compat-asm.h
+++ b/drivers/net/wireguard/compat/compat-asm.h
@@ -15,14 +15,14 @@
#define ISRHEL7
#elif RHEL_MAJOR == 8
#define ISRHEL8
-#if RHEL_MINOR >= 4
+#if RHEL_MINOR >= 6
#define ISCENTOS8S
#endif
#endif
#endif
/* PaX compatibility */
-#if defined(RAP_PLUGIN)
+#if defined(RAP_PLUGIN) && defined(RAP_ENTRY)
#undef ENTRY
#define ENTRY RAP_ENTRY
#endif
@@ -51,7 +51,7 @@
#undef pull
#endif
-#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 76) && !defined(ISCENTOS8S)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 76) && !defined(ISRHEL8) && !defined(SYM_FUNC_START)
#define SYM_FUNC_START ENTRY
#define SYM_FUNC_END ENDPROC
#endif
diff --git a/drivers/net/wireguard/compat/compat.h b/drivers/net/wireguard/compat/compat.h
index 91d4388824ea..d166ac235a99 100644
--- a/drivers/net/wireguard/compat/compat.h
+++ b/drivers/net/wireguard/compat/compat.h
@@ -16,15 +16,13 @@
#define ISRHEL7
#elif RHEL_MAJOR == 8
#define ISRHEL8
-#if RHEL_MINOR >= 4
+#if RHEL_MINOR >= 6
#define ISCENTOS8S
#endif
#endif
#endif
#ifdef UTS_UBUNTU_RELEASE_ABI
-#if LINUX_VERSION_CODE == KERNEL_VERSION(3, 13, 11)
-#define ISUBUNTU1404
-#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
#define ISUBUNTU1604
#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
#define ISUBUNTU1804
@@ -219,7 +217,7 @@ static inline void skb_scrub_packet(struct sk_buff *skb, bool xnet)
#define skb_scrub_packet(a, b) skb_scrub_packet(a)
#endif
-#if ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)) || LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 63) || defined(ISUBUNTU1404)) && !defined(ISRHEL7)
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)) || LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 63)) && !defined(ISRHEL7)
#include <linux/random.h>
static inline u32 __compat_prandom_u32_max(u32 ep_ro)
{
@@ -268,7 +266,7 @@ static inline u32 __compat_prandom_u32_max(u32 ep_ro)
#endif
#endif
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 3) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 35) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 24) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) && !defined(ISUBUNTU1404)) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 33) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 60) && !defined(ISRHEL7))
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 3) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 35) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 24) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 33) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 60) && !defined(ISRHEL7))
static inline void memzero_explicit(void *s, size_t count)
{
memset(s, 0, count);
@@ -281,7 +279,7 @@ static const struct in6_addr __compat_in6addr_any = IN6ADDR_ANY_INIT;
#define in6addr_any __compat_in6addr_any
#endif
-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) || LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 320))
#include <linux/completion.h>
#include <linux/random.h>
#include <linux/errno.h>
@@ -325,7 +323,7 @@ static inline int wait_for_random_bytes(void)
}
#endif
-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) && !defined(ISRHEL8)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) || LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 285)) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) || LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 320)) && !defined(ISRHEL8)
#include <linux/random.h>
#include <linux/slab.h>
struct rng_is_initialized_callback {
@@ -377,7 +375,7 @@ static inline bool rng_is_initialized(void)
}
#endif
-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) || LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 320))
static inline int get_random_bytes_wait(void *buf, int nbytes)
{
int ret = wait_for_random_bytes();
@@ -502,7 +500,7 @@ static inline void *__compat_kvzalloc(size_t size, gfp_t flags)
#define kvzalloc __compat_kvzalloc
#endif
-#if ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)) || LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 41)) && !defined(ISUBUNTU1404)
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)) || LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 41))
#include <linux/vmalloc.h>
#include <linux/mm.h>
static inline void __compat_kvfree(const void *addr)
@@ -515,6 +513,28 @@ static inline void __compat_kvfree(const void *addr)
#define kvfree __compat_kvfree
#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+static inline void *__compat_kvmalloc_array(size_t n, size_t size, gfp_t flags)
+{
+ if (n != 0 && SIZE_MAX / n < size)
+ return NULL;
+ return kvmalloc(n * size, flags);
+}
+#define kvmalloc_array __compat_kvmalloc_array
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0)
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+static inline void *__compat_kvcalloc(size_t n, size_t size, gfp_t flags)
+{
+ return kvmalloc_array(n, size, flags | __GFP_ZERO);
+}
+#define kvcalloc __compat_kvcalloc
+#endif
+
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 9)
#include <linux/netdevice.h>
#define priv_destructor destructor
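The kvmalloc_array()/kvcalloc() shims introduced in this hunk follow the upstream pattern: guard against multiplication overflow before allocating, then compose kvcalloc() as kvmalloc_array() plus __GFP_ZERO. A hedged userspace analogue of the same overflow check:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Userspace analogue of the overflow guard in the shim: refuse the
 * allocation if n * size would wrap, then zero it like __GFP_ZERO. */
static void *kvcalloc_sketch(size_t n, size_t size)
{
	void *p;

	if (n != 0 && SIZE_MAX / n < size)
		return NULL;
	p = malloc(n * size);
	if (p)
		memset(p, 0, n * size);
	return p;
}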
@@ -704,7 +724,7 @@ static inline void *skb_put_data(struct sk_buff *skb, const void *data, unsigned
#endif
#endif
-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) || LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 285)) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) || LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 320))
static inline void le32_to_cpu_array(u32 *buf, unsigned int words)
{
while (words--) {
@@ -757,7 +777,7 @@ static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2,
#define hlist_add_behind(a, b) hlist_add_after(b, a)
#endif
-#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0) && !defined(ISCENTOS8S)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0) && !defined(ISRHEL8)
#define totalram_pages() totalram_pages
#endif
@@ -831,10 +851,16 @@ static inline void skb_mark_not_on_list(struct sk_buff *skb)
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0) && !defined(ISRHEL8)
+#include <net/netlink.h>
+#ifndef NLA_POLICY_EXACT_LEN
#define NLA_POLICY_EXACT_LEN(_len) { .type = NLA_UNSPEC, .len = _len }
#endif
+#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0) && !defined(ISRHEL8)
+#include <net/netlink.h>
+#ifndef NLA_POLICY_MIN_LEN
#define NLA_POLICY_MIN_LEN(_len) { .type = NLA_UNSPEC, .len = _len }
+#endif
#define COMPAT_CANNOT_INDIVIDUAL_NETLINK_OPS_POLICY
#endif
@@ -849,7 +875,7 @@ static inline void skb_mark_not_on_list(struct sk_buff *skb)
#endif
#endif
-#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 5, 0)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 5, 0) && !defined(ISRHEL8)
#define genl_dumpit_info(cb) ({ \
struct { struct nlattr **attrs; } *a = (void *)((u8 *)cb->args + offsetofend(struct dump_ctx, next_allowedip)); \
BUILD_BUG_ON(sizeof(cb->args) < offsetofend(struct dump_ctx, next_allowedip) + sizeof(*a)); \
@@ -869,11 +895,13 @@ static inline void skb_mark_not_on_list(struct sk_buff *skb)
#endif
#endif
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 200) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 249)) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 285)) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 320))
#define blake2s_init zinc_blake2s_init
#define blake2s_init_key zinc_blake2s_init_key
#define blake2s_update zinc_blake2s_update
#define blake2s_final zinc_blake2s_final
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)
#define blake2s_hmac zinc_blake2s_hmac
#define chacha20 zinc_chacha20
#define hchacha20 zinc_hchacha20
@@ -1096,6 +1124,37 @@ static const struct header_ops ip_tunnel_header_ops = { .parse_protocol = ip_tun
#endif
#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)
+#include <net/dst_cache.h>
+struct dst_cache_pcpu {
+ unsigned long refresh_ts;
+ struct dst_entry *dst;
+ u32 cookie;
+ union {
+ struct in_addr in_saddr;
+ struct in6_addr in6_saddr;
+ };
+};
+#define COMPAT_HAS_DEFINED_DST_CACHE_PCPU
+static inline void dst_cache_reset_now(struct dst_cache *dst_cache)
+{
+ int i;
+
+ if (!dst_cache->cache)
+ return;
+
+ dst_cache->reset_ts = jiffies;
+ for_each_possible_cpu(i) {
+ struct dst_cache_pcpu *idst = per_cpu_ptr(dst_cache->cache, i);
+ struct dst_entry *dst = idst->dst;
+
+ idst->cookie = 0;
+ idst->dst = NULL;
+ dst_release(dst);
+ }
+}
+#endif
+
#if defined(ISUBUNTU1604) || defined(ISRHEL7)
#include <linux/siphash.h>
#ifndef _WG_LINUX_SIPHASH_H
@@ -1127,7 +1186,7 @@ static const struct header_ops ip_tunnel_header_ops = { .parse_protocol = ip_tun
#undef __read_mostly
#define __read_mostly
#endif
-#if (defined(RAP_PLUGIN) || defined(CONFIG_CFI_CLANG)) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)
+#if (defined(CONFIG_PAX) || defined(CONFIG_CFI_CLANG)) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)
#include <linux/timer.h>
#define wg_expired_retransmit_handshake(a) wg_expired_retransmit_handshake(unsigned long timer)
#define wg_expired_send_keepalive(a) wg_expired_send_keepalive(unsigned long timer)
diff --git a/drivers/net/wireguard/compat/dst_cache/dst_cache.c b/drivers/net/wireguard/compat/dst_cache/dst_cache.c
index 7ec22f768a8f..f74c43c550eb 100644
--- a/drivers/net/wireguard/compat/dst_cache/dst_cache.c
+++ b/drivers/net/wireguard/compat/dst_cache/dst_cache.c
@@ -27,6 +27,7 @@ static inline u32 rt6_get_cookie(const struct rt6_info *rt)
#endif
#include <uapi/linux/in.h>
+#ifndef COMPAT_HAS_DEFINED_DST_CACHE_PCPU
struct dst_cache_pcpu {
unsigned long refresh_ts;
struct dst_entry *dst;
@@ -36,6 +37,7 @@ struct dst_cache_pcpu {
struct in6_addr in6_saddr;
};
};
+#endif
static void dst_cache_per_cpu_dst_set(struct dst_cache_pcpu *dst_cache,
struct dst_entry *dst, u32 cookie)
diff --git a/drivers/net/wireguard/compat/dstmetadata/include/net/dst_metadata.h b/drivers/net/wireguard/compat/dstmetadata/include/net/dst_metadata.h
new file mode 100644
index 000000000000..995094d4f099
--- /dev/null
+++ b/drivers/net/wireguard/compat/dstmetadata/include/net/dst_metadata.h
@@ -0,0 +1,3 @@
+#ifndef skb_valid_dst
+#define skb_valid_dst(skb) (!!skb_dst(skb))
+#endif
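This one-line compat header makes skb_valid_dst() degrade to a plain skb_dst() check on kernels that predate metadata dsts. On kernels that do have them, the real helper additionally rejects metadata-only entries; a hedged sketch of that semantic (flag name from include/net/dst.h, not from this patch):

/* Sketch of the full-featured helper: a dst only counts for routing
 * decisions (e.g. the MTU lookup in wg_xmit) if it is not a
 * metadata-only entry. */
static inline bool skb_valid_dst_sketch(const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	return dst && !(dst->flags & DST_METADATA);
}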
diff --git a/drivers/net/wireguard/compat/siphash/include/linux/siphash.h b/drivers/net/wireguard/compat/siphash/include/linux/siphash.h
index 1e5e337d15bf..3b30b3c47778 100644
--- a/drivers/net/wireguard/compat/siphash/include/linux/siphash.h
+++ b/drivers/net/wireguard/compat/siphash/include/linux/siphash.h
@@ -22,9 +22,7 @@ typedef struct {
} siphash_key_t;
u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key);
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key);
-#endif
u64 siphash_1u64(const u64 a, const siphash_key_t *key);
u64 siphash_2u64(const u64 a, const u64 b, const siphash_key_t *key);
@@ -77,10 +75,9 @@ static inline u64 ___siphash_aligned(const __le64 *data, size_t len,
static inline u64 siphash(const void *data, size_t len,
const siphash_key_t *key)
{
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
- if (!IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
+ !IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
return __siphash_unaligned(data, len, key);
-#endif
return ___siphash_aligned(data, len, key);
}
@@ -91,10 +88,8 @@ typedef struct {
u32 __hsiphash_aligned(const void *data, size_t len,
const hsiphash_key_t *key);
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u32 __hsiphash_unaligned(const void *data, size_t len,
const hsiphash_key_t *key);
-#endif
u32 hsiphash_1u32(const u32 a, const hsiphash_key_t *key);
u32 hsiphash_2u32(const u32 a, const u32 b, const hsiphash_key_t *key);
@@ -130,10 +125,9 @@ static inline u32 ___hsiphash_aligned(const __le32 *data, size_t len,
static inline u32 hsiphash(const void *data, size_t len,
const hsiphash_key_t *key)
{
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
- if (!IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
+ !IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
return __hsiphash_unaligned(data, len, key);
-#endif
return ___hsiphash_aligned(data, len, key);
}
diff --git a/drivers/net/wireguard/compat/siphash/siphash.c b/drivers/net/wireguard/compat/siphash/siphash.c
index 58855328e6e0..7dc72cb4a710 100644
--- a/drivers/net/wireguard/compat/siphash/siphash.c
+++ b/drivers/net/wireguard/compat/siphash/siphash.c
@@ -57,6 +57,7 @@
SIPROUND; \
return (v0 ^ v1) ^ (v2 ^ v3);
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key)
{
const u8 *end = data + len - (len % sizeof(u64));
@@ -76,19 +77,19 @@ u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key)
bytemask_from_count(left)));
#else
switch (left) {
- case 7: b |= ((u64)end[6]) << 48;
- case 6: b |= ((u64)end[5]) << 40;
- case 5: b |= ((u64)end[4]) << 32;
+ case 7: b |= ((u64)end[6]) << 48; fallthrough;
+ case 6: b |= ((u64)end[5]) << 40; fallthrough;
+ case 5: b |= ((u64)end[4]) << 32; fallthrough;
case 4: b |= le32_to_cpup(data); break;
- case 3: b |= ((u64)end[2]) << 16;
+ case 3: b |= ((u64)end[2]) << 16; fallthrough;
case 2: b |= le16_to_cpup(data); break;
case 1: b |= end[0];
}
#endif
POSTAMBLE
}
+#endif
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key)
{
const u8 *end = data + len - (len % sizeof(u64));
@@ -108,18 +109,17 @@ u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key)
bytemask_from_count(left)));
#else
switch (left) {
- case 7: b |= ((u64)end[6]) << 48;
- case 6: b |= ((u64)end[5]) << 40;
- case 5: b |= ((u64)end[4]) << 32;
+ case 7: b |= ((u64)end[6]) << 48; fallthrough;
+ case 6: b |= ((u64)end[5]) << 40; fallthrough;
+ case 5: b |= ((u64)end[4]) << 32; fallthrough;
case 4: b |= get_unaligned_le32(end); break;
- case 3: b |= ((u64)end[2]) << 16;
+ case 3: b |= ((u64)end[2]) << 16; fallthrough;
case 2: b |= get_unaligned_le16(end); break;
case 1: b |= end[0];
}
#endif
POSTAMBLE
}
-#endif
/**
* siphash_1u64 - compute 64-bit siphash PRF value of a u64
@@ -250,6 +250,7 @@ u64 siphash_3u32(const u32 first, const u32 second, const u32 third,
HSIPROUND; \
return (v0 ^ v1) ^ (v2 ^ v3);
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
{
const u8 *end = data + len - (len % sizeof(u64));
@@ -268,19 +269,19 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
bytemask_from_count(left)));
#else
switch (left) {
- case 7: b |= ((u64)end[6]) << 48;
- case 6: b |= ((u64)end[5]) << 40;
- case 5: b |= ((u64)end[4]) << 32;
+ case 7: b |= ((u64)end[6]) << 48; fallthrough;
+ case 6: b |= ((u64)end[5]) << 40; fallthrough;
+ case 5: b |= ((u64)end[4]) << 32; fallthrough;
case 4: b |= le32_to_cpup(data); break;
- case 3: b |= ((u64)end[2]) << 16;
+ case 3: b |= ((u64)end[2]) << 16; fallthrough;
case 2: b |= le16_to_cpup(data); break;
case 1: b |= end[0];
}
#endif
HPOSTAMBLE
}
+#endif
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u32 __hsiphash_unaligned(const void *data, size_t len,
const hsiphash_key_t *key)
{
@@ -300,18 +301,17 @@ u32 __hsiphash_unaligned(const void *data, size_t len,
bytemask_from_count(left)));
#else
switch (left) {
- case 7: b |= ((u64)end[6]) << 48;
- case 6: b |= ((u64)end[5]) << 40;
- case 5: b |= ((u64)end[4]) << 32;
+ case 7: b |= ((u64)end[6]) << 48; fallthrough;
+ case 6: b |= ((u64)end[5]) << 40; fallthrough;
+ case 5: b |= ((u64)end[4]) << 32; fallthrough;
case 4: b |= get_unaligned_le32(end); break;
- case 3: b |= ((u64)end[2]) << 16;
+ case 3: b |= ((u64)end[2]) << 16; fallthrough;
case 2: b |= get_unaligned_le16(end); break;
case 1: b |= end[0];
}
#endif
HPOSTAMBLE
}
-#endif
/**
* hsiphash_1u32 - compute 64-bit hsiphash PRF value of a u32
@@ -412,6 +412,7 @@ u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third,
HSIPROUND; \
return v1 ^ v3;
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
{
const u8 *end = data + len - (len % sizeof(u32));
@@ -425,14 +426,14 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
v0 ^= m;
}
switch (left) {
- case 3: b |= ((u32)end[2]) << 16;
+ case 3: b |= ((u32)end[2]) << 16; fallthrough;
case 2: b |= le16_to_cpup(data); break;
case 1: b |= end[0];
}
HPOSTAMBLE
}
+#endif
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u32 __hsiphash_unaligned(const void *data, size_t len,
const hsiphash_key_t *key)
{
@@ -447,13 +448,12 @@ u32 __hsiphash_unaligned(const void *data, size_t len,
v0 ^= m;
}
switch (left) {
- case 3: b |= ((u32)end[2]) << 16;
+ case 3: b |= ((u32)end[2]) << 16; fallthrough;
case 2: b |= get_unaligned_le16(end); break;
case 1: b |= end[0];
}
HPOSTAMBLE
}
-#endif
/**
* hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32
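The recurring change through this file swaps the #ifndef guards (the unaligned variants must now always be built, since the header calls them whenever unaligned access is efficient) and annotates the intentional switch fallthroughs. A self-contained illustration of the fallthrough annotation, with made-up tail-loading logic; the macro expands to the same compiler attribute the kernel uses, so it silences -Wimplicit-fallthrough without changing the generated code:

#include <stdint.h>
#include <stddef.h>

#define fallthrough __attribute__((__fallthrough__))

/* Illustration only: the missing `break` statements are deliberate and
 * declared as such to the compiler. */
static uint64_t load_tail(const uint8_t *end, size_t left)
{
	uint64_t b = 0;

	switch (left) {
	case 3: b |= (uint64_t)end[2] << 16; fallthrough;
	case 2: b |= (uint64_t)end[1] << 8;  fallthrough;
	case 1: b |= end[0];
	}
	return b;
}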
diff --git a/drivers/net/wireguard/compat/skb_array/include/linux/skb_array.h b/drivers/net/wireguard/compat/skb_array/include/linux/skb_array.h
new file mode 100644
index 000000000000..c91fedcdbfc6
--- /dev/null
+++ b/drivers/net/wireguard/compat/skb_array/include/linux/skb_array.h
@@ -0,0 +1,11 @@
+#ifndef _WG_SKB_ARRAY_H
+#define _WG_SKB_ARRAY_H
+
+#include <linux/skbuff.h>
+
+static void __skb_array_destroy_skb(void *ptr)
+{
+ kfree_skb(ptr);
+}
+
+#endif
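This tiny compat header only needs to supply __skb_array_destroy_skb(), the element destructor that ptr_ring cleanup uses to free any sk_buffs still queued at teardown. A hedged usage sketch (the surrounding queue helper is illustrative):

/* Hand the destructor to ptr_ring_cleanup() so packets left in the ring
 * when the device goes away are freed rather than leaked. */
static void example_queue_free(struct ptr_ring *ring, bool purge)
{
	ptr_ring_cleanup(ring, purge ? __skb_array_destroy_skb : NULL);
}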
diff --git a/drivers/net/wireguard/compat/udp_tunnel/udp_tunnel.c b/drivers/net/wireguard/compat/udp_tunnel/udp_tunnel.c
index 9b8770ae7b3f..d287b917be84 100644
--- a/drivers/net/wireguard/compat/udp_tunnel/udp_tunnel.c
+++ b/drivers/net/wireguard/compat/udp_tunnel/udp_tunnel.c
@@ -38,9 +38,10 @@ int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg,
struct socket *sock = NULL;
struct sockaddr_in udp_addr;
- err = __sock_create(net, AF_INET, SOCK_DGRAM, 0, &sock, 1);
+ err = sock_create_kern(AF_INET, SOCK_DGRAM, 0, &sock);
if (err < 0)
goto error;
+ sk_change_net(sock->sk, net);
udp_addr.sin_family = AF_INET;
udp_addr.sin_addr = cfg->local_ip;
@@ -72,7 +73,7 @@ int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg,
error:
if (sock) {
kernel_sock_shutdown(sock, SHUT_RDWR);
- sock_release(sock);
+ sk_release_kernel(sock->sk);
}
*sockp = NULL;
return err;
@@ -229,7 +230,7 @@ void udp_tunnel_sock_release(struct socket *sock)
{
rcu_assign_sk_user_data(sock->sk, NULL);
kernel_sock_shutdown(sock, SHUT_RDWR);
- sock_release(sock);
+ sk_release_kernel(sock->sk);
}
#if IS_ENABLED(CONFIG_IPV6)
@@ -254,9 +255,10 @@ int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
int err;
struct socket *sock = NULL;
- err = __sock_create(net, AF_INET6, SOCK_DGRAM, 0, &sock, 1);
+ err = sock_create_kern(AF_INET6, SOCK_DGRAM, 0, &sock);
if (err < 0)
goto error;
+ sk_change_net(sock->sk, net);
if (cfg->ipv6_v6only) {
int val = 1;
@@ -301,7 +303,7 @@ int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
error:
if (sock) {
kernel_sock_shutdown(sock, SHUT_RDWR);
- sock_release(sock);
+ sk_release_kernel(sock->sk);
}
*sockp = NULL;
return err;
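Both the IPv4 and IPv6 paths now follow the same old-kernel pattern: sock_create_kern() (which on these kernels takes no namespace argument) builds the socket, sk_change_net() re-homes it into the caller's netns, and teardown goes through sk_release_kernel() instead of sock_release(). Condensed into one hedged sketch, with error handling trimmed:

/* Sketch of the compat pattern above: create outside any namespace,
 * move the socket into `net`, and later release it with the
 * kernel-socket helper so namespace accounting stays balanced. */
static int make_kernel_udp_socket(struct net *net, struct socket **sockp)
{
	struct socket *sock = NULL;
	int err = sock_create_kern(AF_INET, SOCK_DGRAM, 0, &sock);

	if (err < 0)
		return err;
	sk_change_net(sock->sk, net);
	*sockp = sock;
	return 0;
}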
diff --git a/drivers/net/wireguard/crypto/zinc/curve25519/curve25519-x86_64.c b/drivers/net/wireguard/crypto/zinc/curve25519/curve25519-x86_64.c
index 79716c425b0c..8b6872a2f0d0 100644
--- a/drivers/net/wireguard/crypto/zinc/curve25519/curve25519-x86_64.c
+++ b/drivers/net/wireguard/crypto/zinc/curve25519/curve25519-x86_64.c
@@ -34,11 +34,11 @@ static inline u64 add_scalar(u64 *out, const u64 *f1, u64 f2)
asm volatile(
/* Clear registers to propagate the carry bit */
- " xor %%r8, %%r8;"
- " xor %%r9, %%r9;"
- " xor %%r10, %%r10;"
- " xor %%r11, %%r11;"
- " xor %1, %1;"
+ " xor %%r8d, %%r8d;"
+ " xor %%r9d, %%r9d;"
+ " xor %%r10d, %%r10d;"
+ " xor %%r11d, %%r11d;"
+ " xor %k1, %k1;"
/* Begin addition chain */
" addq 0(%3), %0;"
@@ -52,10 +52,9 @@ static inline u64 add_scalar(u64 *out, const u64 *f1, u64 f2)
/* Return the carry bit in a register */
" adcx %%r11, %1;"
- : "+&r" (f2), "=&r" (carry_r)
- : "r" (out), "r" (f1)
- : "%r8", "%r9", "%r10", "%r11", "memory", "cc"
- );
+ : "+&r"(f2), "=&r"(carry_r)
+ : "r"(out), "r"(f1)
+ : "%r8", "%r9", "%r10", "%r11", "memory", "cc");
return carry_r;
}
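The substitution running through this file ("xor %r8, %r8" to "xor %r8d, %r8d", "%1" to "%k1") relies on x86-64 zero-extension: writing a 32-bit register clears the upper half of the full 64-bit register, so the shorter REX-free encoding zeroes the whole register and is the form CPUs recognize as a dependency-breaking zeroing idiom. The %k operand modifier simply prints the 32-bit name of an operand. A minimal standalone illustration (GCC/Clang on x86-64 assumed):

/* Returns 0; the 32-bit xor clears all 64 bits of the chosen register.
 * %k0 asks the compiler to emit the 32-bit name of operand 0. */
static inline unsigned long zero_reg_demo(void)
{
	unsigned long v;

	asm volatile("xor %k0, %k0" : "=r"(v));
	return v;
}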
@@ -82,7 +81,7 @@ static inline void fadd(u64 *out, const u64 *f1, const u64 *f2)
" cmovc %0, %%rax;"
/* Step 2: Add carry*38 to the original sum */
- " xor %%rcx, %%rcx;"
+ " xor %%ecx, %%ecx;"
" add %%rax, %%r8;"
" adcx %%rcx, %%r9;"
" movq %%r9, 8(%1);"
@@ -96,17 +95,16 @@ static inline void fadd(u64 *out, const u64 *f1, const u64 *f2)
" cmovc %0, %%rax;"
" add %%rax, %%r8;"
" movq %%r8, 0(%1);"
- : "+&r" (f2)
- : "r" (out), "r" (f1)
- : "%rax", "%rcx", "%r8", "%r9", "%r10", "%r11", "memory", "cc"
- );
+ : "+&r"(f2)
+ : "r"(out), "r"(f1)
+ : "%rax", "%rcx", "%r8", "%r9", "%r10", "%r11", "memory", "cc");
}
-/* Computes the field substraction of two field elements */
+/* Computes the field subtraction of two field elements */
static inline void fsub(u64 *out, const u64 *f1, const u64 *f2)
{
asm volatile(
- /* Compute the raw substraction of f1-f2 */
+ /* Compute the raw subtraction of f1-f2 */
" movq 0(%1), %%r8;"
" subq 0(%2), %%r8;"
" movq 8(%1), %%r9;"
@@ -123,7 +121,7 @@ static inline void fsub(u64 *out, const u64 *f1, const u64 *f2)
" mov $38, %%rcx;"
" cmovc %%rcx, %%rax;"
- /* Step 2: Substract carry*38 from the original difference */
+ /* Step 2: Subtract carry*38 from the original difference */
" sub %%rax, %%r8;"
" sbb $0, %%r9;"
" sbb $0, %%r10;"
@@ -139,10 +137,9 @@ static inline void fsub(u64 *out, const u64 *f1, const u64 *f2)
" movq %%r9, 8(%0);"
" movq %%r10, 16(%0);"
" movq %%r11, 24(%0);"
- :
- : "r" (out), "r" (f1), "r" (f2)
- : "%rax", "%rcx", "%r8", "%r9", "%r10", "%r11", "memory", "cc"
- );
+ :
+ : "r"(out), "r"(f1), "r"(f2)
+ : "%rax", "%rcx", "%r8", "%r9", "%r10", "%r11", "memory", "cc");
}
/* Computes a field multiplication: out <- f1 * f2
@@ -150,239 +147,400 @@ static inline void fsub(u64 *out, const u64 *f1, const u64 *f2)
static inline void fmul(u64 *out, const u64 *f1, const u64 *f2, u64 *tmp)
{
asm volatile(
+
/* Compute the raw multiplication: tmp <- src1 * src2 */
/* Compute src1[0] * src2 */
- " movq 0(%1), %%rdx;"
- " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " movq %%r8, 0(%0);"
- " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " movq %%r10, 8(%0);"
- " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;"
- " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " mov $0, %%rax;"
- " adox %%rdx, %%rax;"
+ " movq 0(%0), %%rdx;"
+ " mulxq 0(%1), %%r8, %%r9;"
+ " xor %%r10d, %%r10d;"
+ " movq %%r8, 0(%2);"
+ " mulxq 8(%1), %%r10, %%r11;"
+ " adox %%r9, %%r10;"
+ " movq %%r10, 8(%2);"
+ " mulxq 16(%1), %%rbx, %%r13;"
+ " adox %%r11, %%rbx;"
+ " mulxq 24(%1), %%r14, %%rdx;"
+ " adox %%r13, %%r14;"
+ " mov $0, %%rax;"
+ " adox %%rdx, %%rax;"
+
/* Compute src1[1] * src2 */
- " movq 8(%1), %%rdx;"
- " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 8(%0), %%r8;" " movq %%r8, 8(%0);"
- " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 16(%0);"
- " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;"
- " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;"
- " adox %%rdx, %%rax;" " adcx %%r8, %%rax;"
+ " movq 8(%0), %%rdx;"
+ " mulxq 0(%1), %%r8, %%r9;"
+ " xor %%r10d, %%r10d;"
+ " adcxq 8(%2), %%r8;"
+ " movq %%r8, 8(%2);"
+ " mulxq 8(%1), %%r10, %%r11;"
+ " adox %%r9, %%r10;"
+ " adcx %%rbx, %%r10;"
+ " movq %%r10, 16(%2);"
+ " mulxq 16(%1), %%rbx, %%r13;"
+ " adox %%r11, %%rbx;"
+ " adcx %%r14, %%rbx;"
+ " mov $0, %%r8;"
+ " mulxq 24(%1), %%r14, %%rdx;"
+ " adox %%r13, %%r14;"
+ " adcx %%rax, %%r14;"
+ " mov $0, %%rax;"
+ " adox %%rdx, %%rax;"
+ " adcx %%r8, %%rax;"
+
/* Compute src1[2] * src2 */
- " movq 16(%1), %%rdx;"
- " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 16(%0), %%r8;" " movq %%r8, 16(%0);"
- " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 24(%0);"
- " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;"
- " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;"
- " adox %%rdx, %%rax;" " adcx %%r8, %%rax;"
+ " movq 16(%0), %%rdx;"
+ " mulxq 0(%1), %%r8, %%r9;"
+ " xor %%r10d, %%r10d;"
+ " adcxq 16(%2), %%r8;"
+ " movq %%r8, 16(%2);"
+ " mulxq 8(%1), %%r10, %%r11;"
+ " adox %%r9, %%r10;"
+ " adcx %%rbx, %%r10;"
+ " movq %%r10, 24(%2);"
+ " mulxq 16(%1), %%rbx, %%r13;"
+ " adox %%r11, %%rbx;"
+ " adcx %%r14, %%rbx;"
+ " mov $0, %%r8;"
+ " mulxq 24(%1), %%r14, %%rdx;"
+ " adox %%r13, %%r14;"
+ " adcx %%rax, %%r14;"
+ " mov $0, %%rax;"
+ " adox %%rdx, %%rax;"
+ " adcx %%r8, %%rax;"
+
/* Compute src1[3] * src2 */
- " movq 24(%1), %%rdx;"
- " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 24(%0), %%r8;" " movq %%r8, 24(%0);"
- " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 32(%0);"
- " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " movq %%rbx, 40(%0);" " mov $0, %%r8;"
- " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " movq %%r14, 48(%0);" " mov $0, %%rax;"
- " adox %%rdx, %%rax;" " adcx %%r8, %%rax;" " movq %%rax, 56(%0);"
+ " movq 24(%0), %%rdx;"
+ " mulxq 0(%1), %%r8, %%r9;"
+ " xor %%r10d, %%r10d;"
+ " adcxq 24(%2), %%r8;"
+ " movq %%r8, 24(%2);"
+ " mulxq 8(%1), %%r10, %%r11;"
+ " adox %%r9, %%r10;"
+ " adcx %%rbx, %%r10;"
+ " movq %%r10, 32(%2);"
+ " mulxq 16(%1), %%rbx, %%r13;"
+ " adox %%r11, %%rbx;"
+ " adcx %%r14, %%rbx;"
+ " movq %%rbx, 40(%2);"
+ " mov $0, %%r8;"
+ " mulxq 24(%1), %%r14, %%rdx;"
+ " adox %%r13, %%r14;"
+ " adcx %%rax, %%r14;"
+ " movq %%r14, 48(%2);"
+ " mov $0, %%rax;"
+ " adox %%rdx, %%rax;"
+ " adcx %%r8, %%rax;"
+ " movq %%rax, 56(%2);"
+
/* Line up pointers */
- " mov %0, %1;"
" mov %2, %0;"
+ " mov %3, %2;"
/* Wrap the result back into the field */
/* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */
" mov $38, %%rdx;"
- " mulxq 32(%1), %%r8, %%r13;"
- " xor %3, %3;"
- " adoxq 0(%1), %%r8;"
- " mulxq 40(%1), %%r9, %%rbx;"
+ " mulxq 32(%0), %%r8, %%r13;"
+ " xor %k1, %k1;"
+ " adoxq 0(%0), %%r8;"
+ " mulxq 40(%0), %%r9, %%rbx;"
" adcx %%r13, %%r9;"
- " adoxq 8(%1), %%r9;"
- " mulxq 48(%1), %%r10, %%r13;"
+ " adoxq 8(%0), %%r9;"
+ " mulxq 48(%0), %%r10, %%r13;"
" adcx %%rbx, %%r10;"
- " adoxq 16(%1), %%r10;"
- " mulxq 56(%1), %%r11, %%rax;"
+ " adoxq 16(%0), %%r10;"
+ " mulxq 56(%0), %%r11, %%rax;"
" adcx %%r13, %%r11;"
- " adoxq 24(%1), %%r11;"
- " adcx %3, %%rax;"
- " adox %3, %%rax;"
+ " adoxq 24(%0), %%r11;"
+ " adcx %1, %%rax;"
+ " adox %1, %%rax;"
" imul %%rdx, %%rax;"
/* Step 2: Fold the carry back into dst */
" add %%rax, %%r8;"
- " adcx %3, %%r9;"
- " movq %%r9, 8(%0);"
- " adcx %3, %%r10;"
- " movq %%r10, 16(%0);"
- " adcx %3, %%r11;"
- " movq %%r11, 24(%0);"
+ " adcx %1, %%r9;"
+ " movq %%r9, 8(%2);"
+ " adcx %1, %%r10;"
+ " movq %%r10, 16(%2);"
+ " adcx %1, %%r11;"
+ " movq %%r11, 24(%2);"
/* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */
" mov $0, %%rax;"
" cmovc %%rdx, %%rax;"
" add %%rax, %%r8;"
- " movq %%r8, 0(%0);"
- : "+&r" (tmp), "+&r" (f1), "+&r" (out), "+&r" (f2)
- :
- : "%rax", "%rdx", "%r8", "%r9", "%r10", "%r11", "%rbx", "%r13", "%r14", "memory", "cc"
- );
+ " movq %%r8, 0(%2);"
+ : "+&r"(f1), "+&r"(f2), "+&r"(tmp)
+ : "r"(out)
+ : "%rax", "%rbx", "%rdx", "%r8", "%r9", "%r10", "%r11", "%r13",
+ "%r14", "memory", "cc");
}
/* Computes two field multiplications:
- * out[0] <- f1[0] * f2[0]
- * out[1] <- f1[1] * f2[1]
- * Uses the 16-element buffer tmp for intermediate results. */
+ * out[0] <- f1[0] * f2[0]
+ * out[1] <- f1[1] * f2[1]
+ * Uses the 16-element buffer tmp for intermediate results: */
static inline void fmul2(u64 *out, const u64 *f1, const u64 *f2, u64 *tmp)
{
asm volatile(
+
/* Compute the raw multiplication tmp[0] <- f1[0] * f2[0] */
/* Compute src1[0] * src2 */
- " movq 0(%1), %%rdx;"
- " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " movq %%r8, 0(%0);"
- " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " movq %%r10, 8(%0);"
- " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;"
- " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " mov $0, %%rax;"
- " adox %%rdx, %%rax;"
+ " movq 0(%0), %%rdx;"
+ " mulxq 0(%1), %%r8, %%r9;"
+ " xor %%r10d, %%r10d;"
+ " movq %%r8, 0(%2);"
+ " mulxq 8(%1), %%r10, %%r11;"
+ " adox %%r9, %%r10;"
+ " movq %%r10, 8(%2);"
+ " mulxq 16(%1), %%rbx, %%r13;"
+ " adox %%r11, %%rbx;"
+ " mulxq 24(%1), %%r14, %%rdx;"
+ " adox %%r13, %%r14;"
+ " mov $0, %%rax;"
+ " adox %%rdx, %%rax;"
+
/* Compute src1[1] * src2 */
- " movq 8(%1), %%rdx;"
- " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 8(%0), %%r8;" " movq %%r8, 8(%0);"
- " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 16(%0);"
- " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;"
- " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;"
- " adox %%rdx, %%rax;" " adcx %%r8, %%rax;"
+ " movq 8(%0), %%rdx;"
+ " mulxq 0(%1), %%r8, %%r9;"
+ " xor %%r10d, %%r10d;"
+ " adcxq 8(%2), %%r8;"
+ " movq %%r8, 8(%2);"
+ " mulxq 8(%1), %%r10, %%r11;"
+ " adox %%r9, %%r10;"
+ " adcx %%rbx, %%r10;"
+ " movq %%r10, 16(%2);"
+ " mulxq 16(%1), %%rbx, %%r13;"
+ " adox %%r11, %%rbx;"
+ " adcx %%r14, %%rbx;"
+ " mov $0, %%r8;"
+ " mulxq 24(%1), %%r14, %%rdx;"
+ " adox %%r13, %%r14;"
+ " adcx %%rax, %%r14;"
+ " mov $0, %%rax;"
+ " adox %%rdx, %%rax;"
+ " adcx %%r8, %%rax;"
+
/* Compute src1[2] * src2 */
- " movq 16(%1), %%rdx;"
- " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 16(%0), %%r8;" " movq %%r8, 16(%0);"
- " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 24(%0);"
- " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;"
- " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;"
- " adox %%rdx, %%rax;" " adcx %%r8, %%rax;"
+ " movq 16(%0), %%rdx;"
+ " mulxq 0(%1), %%r8, %%r9;"
+ " xor %%r10d, %%r10d;"
+ " adcxq 16(%2), %%r8;"
+ " movq %%r8, 16(%2);"
+ " mulxq 8(%1), %%r10, %%r11;"
+ " adox %%r9, %%r10;"
+ " adcx %%rbx, %%r10;"
+ " movq %%r10, 24(%2);"
+ " mulxq 16(%1), %%rbx, %%r13;"
+ " adox %%r11, %%rbx;"
+ " adcx %%r14, %%rbx;"
+ " mov $0, %%r8;"
+ " mulxq 24(%1), %%r14, %%rdx;"
+ " adox %%r13, %%r14;"
+ " adcx %%rax, %%r14;"
+ " mov $0, %%rax;"
+ " adox %%rdx, %%rax;"
+ " adcx %%r8, %%rax;"
+
/* Compute src1[3] * src2 */
- " movq 24(%1), %%rdx;"
- " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 24(%0), %%r8;" " movq %%r8, 24(%0);"
- " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 32(%0);"
- " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " movq %%rbx, 40(%0);" " mov $0, %%r8;"
- " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " movq %%r14, 48(%0);" " mov $0, %%rax;"
- " adox %%rdx, %%rax;" " adcx %%r8, %%rax;" " movq %%rax, 56(%0);"
+ " movq 24(%0), %%rdx;"
+ " mulxq 0(%1), %%r8, %%r9;"
+ " xor %%r10d, %%r10d;"
+ " adcxq 24(%2), %%r8;"
+ " movq %%r8, 24(%2);"
+ " mulxq 8(%1), %%r10, %%r11;"
+ " adox %%r9, %%r10;"
+ " adcx %%rbx, %%r10;"
+ " movq %%r10, 32(%2);"
+ " mulxq 16(%1), %%rbx, %%r13;"
+ " adox %%r11, %%rbx;"
+ " adcx %%r14, %%rbx;"
+ " movq %%rbx, 40(%2);"
+ " mov $0, %%r8;"
+ " mulxq 24(%1), %%r14, %%rdx;"
+ " adox %%r13, %%r14;"
+ " adcx %%rax, %%r14;"
+ " movq %%r14, 48(%2);"
+ " mov $0, %%rax;"
+ " adox %%rdx, %%rax;"
+ " adcx %%r8, %%rax;"
+ " movq %%rax, 56(%2);"
/* Compute the raw multiplication tmp[1] <- f1[1] * f2[1] */
/* Compute src1[0] * src2 */
- " movq 32(%1), %%rdx;"
- " mulxq 32(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " movq %%r8, 64(%0);"
- " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " movq %%r10, 72(%0);"
- " mulxq 48(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;"
- " mulxq 56(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " mov $0, %%rax;"
- " adox %%rdx, %%rax;"
+ " movq 32(%0), %%rdx;"
+ " mulxq 32(%1), %%r8, %%r9;"
+ " xor %%r10d, %%r10d;"
+ " movq %%r8, 64(%2);"
+ " mulxq 40(%1), %%r10, %%r11;"
+ " adox %%r9, %%r10;"
+ " movq %%r10, 72(%2);"
+ " mulxq 48(%1), %%rbx, %%r13;"
+ " adox %%r11, %%rbx;"
+ " mulxq 56(%1), %%r14, %%rdx;"
+ " adox %%r13, %%r14;"
+ " mov $0, %%rax;"
+ " adox %%rdx, %%rax;"
+
/* Compute src1[1] * src2 */
- " movq 40(%1), %%rdx;"
- " mulxq 32(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 72(%0), %%r8;" " movq %%r8, 72(%0);"
- " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 80(%0);"
- " mulxq 48(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;"
- " mulxq 56(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;"
- " adox %%rdx, %%rax;" " adcx %%r8, %%rax;"
+ " movq 40(%0), %%rdx;"
+ " mulxq 32(%1), %%r8, %%r9;"
+ " xor %%r10d, %%r10d;"
+ " adcxq 72(%2), %%r8;"
+ " movq %%r8, 72(%2);"
+ " mulxq 40(%1), %%r10, %%r11;"
+ " adox %%r9, %%r10;"
+ " adcx %%rbx, %%r10;"
+ " movq %%r10, 80(%2);"
+ " mulxq 48(%1), %%rbx, %%r13;"
+ " adox %%r11, %%rbx;"
+ " adcx %%r14, %%rbx;"
+ " mov $0, %%r8;"
+ " mulxq 56(%1), %%r14, %%rdx;"
+ " adox %%r13, %%r14;"
+ " adcx %%rax, %%r14;"
+ " mov $0, %%rax;"
+ " adox %%rdx, %%rax;"
+ " adcx %%r8, %%rax;"
+
/* Compute src1[2] * src2 */
- " movq 48(%1), %%rdx;"
- " mulxq 32(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 80(%0), %%r8;" " movq %%r8, 80(%0);"
- " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 88(%0);"
- " mulxq 48(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;"
- " mulxq 56(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;"
- " adox %%rdx, %%rax;" " adcx %%r8, %%rax;"
+ " movq 48(%0), %%rdx;"
+ " mulxq 32(%1), %%r8, %%r9;"
+ " xor %%r10d, %%r10d;"
+ " adcxq 80(%2), %%r8;"
+ " movq %%r8, 80(%2);"
+ " mulxq 40(%1), %%r10, %%r11;"
+ " adox %%r9, %%r10;"
+ " adcx %%rbx, %%r10;"
+ " movq %%r10, 88(%2);"
+ " mulxq 48(%1), %%rbx, %%r13;"
+ " adox %%r11, %%rbx;"
+ " adcx %%r14, %%rbx;"
+ " mov $0, %%r8;"
+ " mulxq 56(%1), %%r14, %%rdx;"
+ " adox %%r13, %%r14;"
+ " adcx %%rax, %%r14;"
+ " mov $0, %%rax;"
+ " adox %%rdx, %%rax;"
+ " adcx %%r8, %%rax;"
+
/* Compute src1[3] * src2 */
- " movq 56(%1), %%rdx;"
- " mulxq 32(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 88(%0), %%r8;" " movq %%r8, 88(%0);"
- " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 96(%0);"
- " mulxq 48(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " movq %%rbx, 104(%0);" " mov $0, %%r8;"
- " mulxq 56(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " movq %%r14, 112(%0);" " mov $0, %%rax;"
- " adox %%rdx, %%rax;" " adcx %%r8, %%rax;" " movq %%rax, 120(%0);"
+ " movq 56(%0), %%rdx;"
+ " mulxq 32(%1), %%r8, %%r9;"
+ " xor %%r10d, %%r10d;"
+ " adcxq 88(%2), %%r8;"
+ " movq %%r8, 88(%2);"
+ " mulxq 40(%1), %%r10, %%r11;"
+ " adox %%r9, %%r10;"
+ " adcx %%rbx, %%r10;"
+ " movq %%r10, 96(%2);"
+ " mulxq 48(%1), %%rbx, %%r13;"
+ " adox %%r11, %%rbx;"
+ " adcx %%r14, %%rbx;"
+ " movq %%rbx, 104(%2);"
+ " mov $0, %%r8;"
+ " mulxq 56(%1), %%r14, %%rdx;"
+ " adox %%r13, %%r14;"
+ " adcx %%rax, %%r14;"
+ " movq %%r14, 112(%2);"
+ " mov $0, %%rax;"
+ " adox %%rdx, %%rax;"
+ " adcx %%r8, %%rax;"
+ " movq %%rax, 120(%2);"
+
/* Line up pointers */
- " mov %0, %1;"
" mov %2, %0;"
+ " mov %3, %2;"
/* Wrap the results back into the field */
/* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */
" mov $38, %%rdx;"
- " mulxq 32(%1), %%r8, %%r13;"
- " xor %3, %3;"
- " adoxq 0(%1), %%r8;"
- " mulxq 40(%1), %%r9, %%rbx;"
+ " mulxq 32(%0), %%r8, %%r13;"
+ " xor %k1, %k1;"
+ " adoxq 0(%0), %%r8;"
+ " mulxq 40(%0), %%r9, %%rbx;"
" adcx %%r13, %%r9;"
- " adoxq 8(%1), %%r9;"
- " mulxq 48(%1), %%r10, %%r13;"
+ " adoxq 8(%0), %%r9;"
+ " mulxq 48(%0), %%r10, %%r13;"
" adcx %%rbx, %%r10;"
- " adoxq 16(%1), %%r10;"
- " mulxq 56(%1), %%r11, %%rax;"
+ " adoxq 16(%0), %%r10;"
+ " mulxq 56(%0), %%r11, %%rax;"
" adcx %%r13, %%r11;"
- " adoxq 24(%1), %%r11;"
- " adcx %3, %%rax;"
- " adox %3, %%rax;"
+ " adoxq 24(%0), %%r11;"
+ " adcx %1, %%rax;"
+ " adox %1, %%rax;"
" imul %%rdx, %%rax;"
/* Step 2: Fold the carry back into dst */
" add %%rax, %%r8;"
- " adcx %3, %%r9;"
- " movq %%r9, 8(%0);"
- " adcx %3, %%r10;"
- " movq %%r10, 16(%0);"
- " adcx %3, %%r11;"
- " movq %%r11, 24(%0);"
+ " adcx %1, %%r9;"
+ " movq %%r9, 8(%2);"
+ " adcx %1, %%r10;"
+ " movq %%r10, 16(%2);"
+ " adcx %1, %%r11;"
+ " movq %%r11, 24(%2);"
/* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */
" mov $0, %%rax;"
" cmovc %%rdx, %%rax;"
" add %%rax, %%r8;"
- " movq %%r8, 0(%0);"
+ " movq %%r8, 0(%2);"
/* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */
" mov $38, %%rdx;"
- " mulxq 96(%1), %%r8, %%r13;"
- " xor %3, %3;"
- " adoxq 64(%1), %%r8;"
- " mulxq 104(%1), %%r9, %%rbx;"
+ " mulxq 96(%0), %%r8, %%r13;"
+ " xor %k1, %k1;"
+ " adoxq 64(%0), %%r8;"
+ " mulxq 104(%0), %%r9, %%rbx;"
" adcx %%r13, %%r9;"
- " adoxq 72(%1), %%r9;"
- " mulxq 112(%1), %%r10, %%r13;"
+ " adoxq 72(%0), %%r9;"
+ " mulxq 112(%0), %%r10, %%r13;"
" adcx %%rbx, %%r10;"
- " adoxq 80(%1), %%r10;"
- " mulxq 120(%1), %%r11, %%rax;"
+ " adoxq 80(%0), %%r10;"
+ " mulxq 120(%0), %%r11, %%rax;"
" adcx %%r13, %%r11;"
- " adoxq 88(%1), %%r11;"
- " adcx %3, %%rax;"
- " adox %3, %%rax;"
+ " adoxq 88(%0), %%r11;"
+ " adcx %1, %%rax;"
+ " adox %1, %%rax;"
" imul %%rdx, %%rax;"
/* Step 2: Fold the carry back into dst */
" add %%rax, %%r8;"
- " adcx %3, %%r9;"
- " movq %%r9, 40(%0);"
- " adcx %3, %%r10;"
- " movq %%r10, 48(%0);"
- " adcx %3, %%r11;"
- " movq %%r11, 56(%0);"
+ " adcx %1, %%r9;"
+ " movq %%r9, 40(%2);"
+ " adcx %1, %%r10;"
+ " movq %%r10, 48(%2);"
+ " adcx %1, %%r11;"
+ " movq %%r11, 56(%2);"
/* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */
" mov $0, %%rax;"
" cmovc %%rdx, %%rax;"
" add %%rax, %%r8;"
- " movq %%r8, 32(%0);"
- : "+&r" (tmp), "+&r" (f1), "+&r" (out), "+&r" (f2)
- :
- : "%rax", "%rdx", "%r8", "%r9", "%r10", "%r11", "%rbx", "%r13", "%r14", "memory", "cc"
- );
+ " movq %%r8, 32(%2);"
+ : "+&r"(f1), "+&r"(f2), "+&r"(tmp)
+ : "r"(out)
+ : "%rax", "%rbx", "%rdx", "%r8", "%r9", "%r10", "%r11", "%r13",
+ "%r14", "memory", "cc");
}
-/* Computes the field multiplication of four-element f1 with value in f2 */
+/* Computes the field multiplication of four-element f1 with value in f2
+ * Requires f2 to be smaller than 2^17 */
static inline void fmul_scalar(u64 *out, const u64 *f1, u64 f2)
{
register u64 f2_r asm("rdx") = f2;
asm volatile(
/* Compute the raw multiplication of f1*f2 */
- " mulxq 0(%2), %%r8, %%rcx;" /* f1[0]*f2 */
- " mulxq 8(%2), %%r9, %%rbx;" /* f1[1]*f2 */
+ " mulxq 0(%2), %%r8, %%rcx;" /* f1[0]*f2 */
+ " mulxq 8(%2), %%r9, %%rbx;" /* f1[1]*f2 */
" add %%rcx, %%r9;"
" mov $0, %%rcx;"
- " mulxq 16(%2), %%r10, %%r13;" /* f1[2]*f2 */
+ " mulxq 16(%2), %%r10, %%r13;" /* f1[2]*f2 */
" adcx %%rbx, %%r10;"
- " mulxq 24(%2), %%r11, %%rax;" /* f1[3]*f2 */
+ " mulxq 24(%2), %%r11, %%rax;" /* f1[3]*f2 */
" adcx %%r13, %%r11;"
" adcx %%rcx, %%rax;"
@@ -406,17 +564,17 @@ static inline void fmul_scalar(u64 *out, const u64 *f1, u64 f2)
" cmovc %%rdx, %%rax;"
" add %%rax, %%r8;"
" movq %%r8, 0(%1);"
- : "+&r" (f2_r)
- : "r" (out), "r" (f1)
- : "%rax", "%rcx", "%r8", "%r9", "%r10", "%r11", "%rbx", "%r13", "memory", "cc"
- );
+ : "+&r"(f2_r)
+ : "r"(out), "r"(f1)
+ : "%rax", "%rbx", "%rcx", "%r8", "%r9", "%r10", "%r11", "%r13",
+ "memory", "cc");
}
/* Computes p1 <- bit ? p2 : p1 in constant time */
static inline void cswap2(u64 bit, const u64 *p1, const u64 *p2)
{
asm volatile(
- /* Invert the polarity of bit to match cmov expectations */
+ /* Transfer bit into CF flag */
" add $18446744073709551615, %0;"
/* cswap p1[0], p2[0] */
@@ -490,10 +648,9 @@ static inline void cswap2(u64 bit, const u64 *p1, const u64 *p2)
" cmovc %%r10, %%r9;"
" movq %%r8, 56(%1);"
" movq %%r9, 56(%2);"
- : "+&r" (bit)
- : "r" (p1), "r" (p2)
- : "%r8", "%r9", "%r10", "memory", "cc"
- );
+ : "+&r"(bit)
+ : "r"(p1), "r"(p2)
+ : "%r8", "%r9", "%r10", "memory", "cc");
}
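cswap2() stays branch-free: adding ~0 to `bit` moves the bit into the carry flag (1 + 2^64-1 wraps and sets CF, 0 does not), and every limb pair is then conditionally exchanged with cmovc, so the executed instruction stream is identical whether or not the swap happens. For reference, the portable C analogue of a constant-time swap (illustrative only, not part of this patch):

#include <stdint.h>
#include <stddef.h>

/* Constant-time conditional swap: mask is all-ones when bit == 1 and
 * all-zeroes when bit == 0, so the same work is done either way. */
static void cswap_sketch(uint64_t bit, uint64_t *a, uint64_t *b, size_t n)
{
	uint64_t mask = 0 - bit;
	size_t i;

	for (i = 0; i < n; i++) {
		uint64_t t = mask & (a[i] ^ b[i]);
		a[i] ^= t;
		b[i] ^= t;
	}
}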
/* Computes the square of a field element: out <- f * f
@@ -504,18 +661,25 @@ static inline void fsqr(u64 *out, const u64 *f, u64 *tmp)
/* Compute the raw multiplication: tmp <- f * f */
/* Step 1: Compute all partial products */
- " movq 0(%1), %%rdx;" /* f[0] */
- " mulxq 8(%1), %%r8, %%r14;" " xor %%r15, %%r15;" /* f[1]*f[0] */
- " mulxq 16(%1), %%r9, %%r10;" " adcx %%r14, %%r9;" /* f[2]*f[0] */
- " mulxq 24(%1), %%rax, %%rcx;" " adcx %%rax, %%r10;" /* f[3]*f[0] */
- " movq 24(%1), %%rdx;" /* f[3] */
- " mulxq 8(%1), %%r11, %%rbx;" " adcx %%rcx, %%r11;" /* f[1]*f[3] */
- " mulxq 16(%1), %%rax, %%r13;" " adcx %%rax, %%rbx;" /* f[2]*f[3] */
- " movq 8(%1), %%rdx;" " adcx %%r15, %%r13;" /* f1 */
- " mulxq 16(%1), %%rax, %%rcx;" " mov $0, %%r14;" /* f[2]*f[1] */
+ " movq 0(%0), %%rdx;" /* f[0] */
+ " mulxq 8(%0), %%r8, %%r14;"
+ " xor %%r15d, %%r15d;" /* f[1]*f[0] */
+ " mulxq 16(%0), %%r9, %%r10;"
+ " adcx %%r14, %%r9;" /* f[2]*f[0] */
+ " mulxq 24(%0), %%rax, %%rcx;"
+ " adcx %%rax, %%r10;" /* f[3]*f[0] */
+ " movq 24(%0), %%rdx;" /* f[3] */
+ " mulxq 8(%0), %%r11, %%rbx;"
+ " adcx %%rcx, %%r11;" /* f[1]*f[3] */
+ " mulxq 16(%0), %%rax, %%r13;"
+ " adcx %%rax, %%rbx;" /* f[2]*f[3] */
+ " movq 8(%0), %%rdx;"
+ " adcx %%r15, %%r13;" /* f1 */
+ " mulxq 16(%0), %%rax, %%rcx;"
+ " mov $0, %%r14;" /* f[2]*f[1] */
/* Step 2: Compute two parallel carry chains */
- " xor %%r15, %%r15;"
+ " xor %%r15d, %%r15d;"
" adox %%rax, %%r10;"
" adcx %%r8, %%r8;"
" adox %%rcx, %%r11;"
@@ -530,39 +694,50 @@ static inline void fsqr(u64 *out, const u64 *f, u64 *tmp)
" adcx %%r14, %%r14;"
/* Step 3: Compute intermediate squares */
- " movq 0(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[0]^2 */
- " movq %%rax, 0(%0);"
- " add %%rcx, %%r8;" " movq %%r8, 8(%0);"
- " movq 8(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[1]^2 */
- " adcx %%rax, %%r9;" " movq %%r9, 16(%0);"
- " adcx %%rcx, %%r10;" " movq %%r10, 24(%0);"
- " movq 16(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[2]^2 */
- " adcx %%rax, %%r11;" " movq %%r11, 32(%0);"
- " adcx %%rcx, %%rbx;" " movq %%rbx, 40(%0);"
- " movq 24(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[3]^2 */
- " adcx %%rax, %%r13;" " movq %%r13, 48(%0);"
- " adcx %%rcx, %%r14;" " movq %%r14, 56(%0);"
+ " movq 0(%0), %%rdx;"
+ " mulx %%rdx, %%rax, %%rcx;" /* f[0]^2 */
+ " movq %%rax, 0(%1);"
+ " add %%rcx, %%r8;"
+ " movq %%r8, 8(%1);"
+ " movq 8(%0), %%rdx;"
+ " mulx %%rdx, %%rax, %%rcx;" /* f[1]^2 */
+ " adcx %%rax, %%r9;"
+ " movq %%r9, 16(%1);"
+ " adcx %%rcx, %%r10;"
+ " movq %%r10, 24(%1);"
+ " movq 16(%0), %%rdx;"
+ " mulx %%rdx, %%rax, %%rcx;" /* f[2]^2 */
+ " adcx %%rax, %%r11;"
+ " movq %%r11, 32(%1);"
+ " adcx %%rcx, %%rbx;"
+ " movq %%rbx, 40(%1);"
+ " movq 24(%0), %%rdx;"
+ " mulx %%rdx, %%rax, %%rcx;" /* f[3]^2 */
+ " adcx %%rax, %%r13;"
+ " movq %%r13, 48(%1);"
+ " adcx %%rcx, %%r14;"
+ " movq %%r14, 56(%1);"
/* Line up pointers */
- " mov %0, %1;"
- " mov %2, %0;"
+ " mov %1, %0;"
+ " mov %2, %1;"
/* Wrap the result back into the field */
/* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */
" mov $38, %%rdx;"
- " mulxq 32(%1), %%r8, %%r13;"
- " xor %%rcx, %%rcx;"
- " adoxq 0(%1), %%r8;"
- " mulxq 40(%1), %%r9, %%rbx;"
+ " mulxq 32(%0), %%r8, %%r13;"
+ " xor %%ecx, %%ecx;"
+ " adoxq 0(%0), %%r8;"
+ " mulxq 40(%0), %%r9, %%rbx;"
" adcx %%r13, %%r9;"
- " adoxq 8(%1), %%r9;"
- " mulxq 48(%1), %%r10, %%r13;"
+ " adoxq 8(%0), %%r9;"
+ " mulxq 48(%0), %%r10, %%r13;"
" adcx %%rbx, %%r10;"
- " adoxq 16(%1), %%r10;"
- " mulxq 56(%1), %%r11, %%rax;"
+ " adoxq 16(%0), %%r10;"
+ " mulxq 56(%0), %%r11, %%rax;"
" adcx %%r13, %%r11;"
- " adoxq 24(%1), %%r11;"
+ " adoxq 24(%0), %%r11;"
" adcx %%rcx, %%rax;"
" adox %%rcx, %%rax;"
" imul %%rdx, %%rax;"
@@ -570,43 +745,50 @@ static inline void fsqr(u64 *out, const u64 *f, u64 *tmp)
/* Step 2: Fold the carry back into dst */
" add %%rax, %%r8;"
" adcx %%rcx, %%r9;"
- " movq %%r9, 8(%0);"
+ " movq %%r9, 8(%1);"
" adcx %%rcx, %%r10;"
- " movq %%r10, 16(%0);"
+ " movq %%r10, 16(%1);"
" adcx %%rcx, %%r11;"
- " movq %%r11, 24(%0);"
+ " movq %%r11, 24(%1);"
/* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */
" mov $0, %%rax;"
" cmovc %%rdx, %%rax;"
" add %%rax, %%r8;"
- " movq %%r8, 0(%0);"
- : "+&r" (tmp), "+&r" (f), "+&r" (out)
- :
- : "%rax", "%rcx", "%rdx", "%r8", "%r9", "%r10", "%r11", "%rbx", "%r13", "%r14", "%r15", "memory", "cc"
- );
+ " movq %%r8, 0(%1);"
+ : "+&r,&r"(f), "+&r,&r"(tmp)
+ : "r,m"(out)
+ : "%rax", "%rbx", "%rcx", "%rdx", "%r8", "%r9", "%r10", "%r11",
+ "%r13", "%r14", "%r15", "memory", "cc");
}
/* Computes two field squarings:
- * out[0] <- f[0] * f[0]
- * out[1] <- f[1] * f[1]
+ * out[0] <- f[0] * f[0]
+ * out[1] <- f[1] * f[1]
* Uses the 16-element buffer tmp for intermediate results */
static inline void fsqr2(u64 *out, const u64 *f, u64 *tmp)
{
asm volatile(
/* Step 1: Compute all partial products */
- " movq 0(%1), %%rdx;" /* f[0] */
- " mulxq 8(%1), %%r8, %%r14;" " xor %%r15, %%r15;" /* f[1]*f[0] */
- " mulxq 16(%1), %%r9, %%r10;" " adcx %%r14, %%r9;" /* f[2]*f[0] */
- " mulxq 24(%1), %%rax, %%rcx;" " adcx %%rax, %%r10;" /* f[3]*f[0] */
- " movq 24(%1), %%rdx;" /* f[3] */
- " mulxq 8(%1), %%r11, %%rbx;" " adcx %%rcx, %%r11;" /* f[1]*f[3] */
- " mulxq 16(%1), %%rax, %%r13;" " adcx %%rax, %%rbx;" /* f[2]*f[3] */
- " movq 8(%1), %%rdx;" " adcx %%r15, %%r13;" /* f1 */
- " mulxq 16(%1), %%rax, %%rcx;" " mov $0, %%r14;" /* f[2]*f[1] */
+ " movq 0(%0), %%rdx;" /* f[0] */
+ " mulxq 8(%0), %%r8, %%r14;"
+ " xor %%r15d, %%r15d;" /* f[1]*f[0] */
+ " mulxq 16(%0), %%r9, %%r10;"
+ " adcx %%r14, %%r9;" /* f[2]*f[0] */
+ " mulxq 24(%0), %%rax, %%rcx;"
+ " adcx %%rax, %%r10;" /* f[3]*f[0] */
+ " movq 24(%0), %%rdx;" /* f[3] */
+ " mulxq 8(%0), %%r11, %%rbx;"
+ " adcx %%rcx, %%r11;" /* f[1]*f[3] */
+ " mulxq 16(%0), %%rax, %%r13;"
+ " adcx %%rax, %%rbx;" /* f[2]*f[3] */
+ " movq 8(%0), %%rdx;"
+ " adcx %%r15, %%r13;" /* f1 */
+ " mulxq 16(%0), %%rax, %%rcx;"
+ " mov $0, %%r14;" /* f[2]*f[1] */
/* Step 2: Compute two parallel carry chains */
- " xor %%r15, %%r15;"
+ " xor %%r15d, %%r15d;"
" adox %%rax, %%r10;"
" adcx %%r8, %%r8;"
" adox %%rcx, %%r11;"
@@ -621,32 +803,50 @@ static inline void fsqr2(u64 *out, const u64 *f, u64 *tmp)
" adcx %%r14, %%r14;"
/* Step 3: Compute intermediate squares */
- " movq 0(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[0]^2 */
- " movq %%rax, 0(%0);"
- " add %%rcx, %%r8;" " movq %%r8, 8(%0);"
- " movq 8(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[1]^2 */
- " adcx %%rax, %%r9;" " movq %%r9, 16(%0);"
- " adcx %%rcx, %%r10;" " movq %%r10, 24(%0);"
- " movq 16(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[2]^2 */
- " adcx %%rax, %%r11;" " movq %%r11, 32(%0);"
- " adcx %%rcx, %%rbx;" " movq %%rbx, 40(%0);"
- " movq 24(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[3]^2 */
- " adcx %%rax, %%r13;" " movq %%r13, 48(%0);"
- " adcx %%rcx, %%r14;" " movq %%r14, 56(%0);"
+ " movq 0(%0), %%rdx;"
+ " mulx %%rdx, %%rax, %%rcx;" /* f[0]^2 */
+ " movq %%rax, 0(%1);"
+ " add %%rcx, %%r8;"
+ " movq %%r8, 8(%1);"
+ " movq 8(%0), %%rdx;"
+ " mulx %%rdx, %%rax, %%rcx;" /* f[1]^2 */
+ " adcx %%rax, %%r9;"
+ " movq %%r9, 16(%1);"
+ " adcx %%rcx, %%r10;"
+ " movq %%r10, 24(%1);"
+ " movq 16(%0), %%rdx;"
+ " mulx %%rdx, %%rax, %%rcx;" /* f[2]^2 */
+ " adcx %%rax, %%r11;"
+ " movq %%r11, 32(%1);"
+ " adcx %%rcx, %%rbx;"
+ " movq %%rbx, 40(%1);"
+ " movq 24(%0), %%rdx;"
+ " mulx %%rdx, %%rax, %%rcx;" /* f[3]^2 */
+ " adcx %%rax, %%r13;"
+ " movq %%r13, 48(%1);"
+ " adcx %%rcx, %%r14;"
+ " movq %%r14, 56(%1);"
/* Step 1: Compute all partial products */
- " movq 32(%1), %%rdx;" /* f[0] */
- " mulxq 40(%1), %%r8, %%r14;" " xor %%r15, %%r15;" /* f[1]*f[0] */
- " mulxq 48(%1), %%r9, %%r10;" " adcx %%r14, %%r9;" /* f[2]*f[0] */
- " mulxq 56(%1), %%rax, %%rcx;" " adcx %%rax, %%r10;" /* f[3]*f[0] */
- " movq 56(%1), %%rdx;" /* f[3] */
- " mulxq 40(%1), %%r11, %%rbx;" " adcx %%rcx, %%r11;" /* f[1]*f[3] */
- " mulxq 48(%1), %%rax, %%r13;" " adcx %%rax, %%rbx;" /* f[2]*f[3] */
- " movq 40(%1), %%rdx;" " adcx %%r15, %%r13;" /* f1 */
- " mulxq 48(%1), %%rax, %%rcx;" " mov $0, %%r14;" /* f[2]*f[1] */
+ " movq 32(%0), %%rdx;" /* f[0] */
+ " mulxq 40(%0), %%r8, %%r14;"
+ " xor %%r15d, %%r15d;" /* f[1]*f[0] */
+ " mulxq 48(%0), %%r9, %%r10;"
+ " adcx %%r14, %%r9;" /* f[2]*f[0] */
+ " mulxq 56(%0), %%rax, %%rcx;"
+ " adcx %%rax, %%r10;" /* f[3]*f[0] */
+ " movq 56(%0), %%rdx;" /* f[3] */
+ " mulxq 40(%0), %%r11, %%rbx;"
+ " adcx %%rcx, %%r11;" /* f[1]*f[3] */
+ " mulxq 48(%0), %%rax, %%r13;"
+ " adcx %%rax, %%rbx;" /* f[2]*f[3] */
+ " movq 40(%0), %%rdx;"
+ " adcx %%r15, %%r13;" /* f1 */
+ " mulxq 48(%0), %%rax, %%rcx;"
+ " mov $0, %%r14;" /* f[2]*f[1] */
/* Step 2: Compute two parallel carry chains */
- " xor %%r15, %%r15;"
+ " xor %%r15d, %%r15d;"
" adox %%rax, %%r10;"
" adcx %%r8, %%r8;"
" adox %%rcx, %%r11;"
@@ -661,37 +861,48 @@ static inline void fsqr2(u64 *out, const u64 *f, u64 *tmp)
" adcx %%r14, %%r14;"
/* Step 3: Compute intermediate squares */
- " movq 32(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[0]^2 */
- " movq %%rax, 64(%0);"
- " add %%rcx, %%r8;" " movq %%r8, 72(%0);"
- " movq 40(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[1]^2 */
- " adcx %%rax, %%r9;" " movq %%r9, 80(%0);"
- " adcx %%rcx, %%r10;" " movq %%r10, 88(%0);"
- " movq 48(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[2]^2 */
- " adcx %%rax, %%r11;" " movq %%r11, 96(%0);"
- " adcx %%rcx, %%rbx;" " movq %%rbx, 104(%0);"
- " movq 56(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[3]^2 */
- " adcx %%rax, %%r13;" " movq %%r13, 112(%0);"
- " adcx %%rcx, %%r14;" " movq %%r14, 120(%0);"
+ " movq 32(%0), %%rdx;"
+ " mulx %%rdx, %%rax, %%rcx;" /* f[0]^2 */
+ " movq %%rax, 64(%1);"
+ " add %%rcx, %%r8;"
+ " movq %%r8, 72(%1);"
+ " movq 40(%0), %%rdx;"
+ " mulx %%rdx, %%rax, %%rcx;" /* f[1]^2 */
+ " adcx %%rax, %%r9;"
+ " movq %%r9, 80(%1);"
+ " adcx %%rcx, %%r10;"
+ " movq %%r10, 88(%1);"
+ " movq 48(%0), %%rdx;"
+ " mulx %%rdx, %%rax, %%rcx;" /* f[2]^2 */
+ " adcx %%rax, %%r11;"
+ " movq %%r11, 96(%1);"
+ " adcx %%rcx, %%rbx;"
+ " movq %%rbx, 104(%1);"
+ " movq 56(%0), %%rdx;"
+ " mulx %%rdx, %%rax, %%rcx;" /* f[3]^2 */
+ " adcx %%rax, %%r13;"
+ " movq %%r13, 112(%1);"
+ " adcx %%rcx, %%r14;"
+ " movq %%r14, 120(%1);"
/* Line up pointers */
- " mov %0, %1;"
- " mov %2, %0;"
+ " mov %1, %0;"
+ " mov %2, %1;"
/* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */
" mov $38, %%rdx;"
- " mulxq 32(%1), %%r8, %%r13;"
- " xor %%rcx, %%rcx;"
- " adoxq 0(%1), %%r8;"
- " mulxq 40(%1), %%r9, %%rbx;"
+ " mulxq 32(%0), %%r8, %%r13;"
+ " xor %%ecx, %%ecx;"
+ " adoxq 0(%0), %%r8;"
+ " mulxq 40(%0), %%r9, %%rbx;"
" adcx %%r13, %%r9;"
- " adoxq 8(%1), %%r9;"
- " mulxq 48(%1), %%r10, %%r13;"
+ " adoxq 8(%0), %%r9;"
+ " mulxq 48(%0), %%r10, %%r13;"
" adcx %%rbx, %%r10;"
- " adoxq 16(%1), %%r10;"
- " mulxq 56(%1), %%r11, %%rax;"
+ " adoxq 16(%0), %%r10;"
+ " mulxq 56(%0), %%r11, %%rax;"
" adcx %%r13, %%r11;"
- " adoxq 24(%1), %%r11;"
+ " adoxq 24(%0), %%r11;"
" adcx %%rcx, %%rax;"
" adox %%rcx, %%rax;"
" imul %%rdx, %%rax;"
@@ -699,32 +910,32 @@ static inline void fsqr2(u64 *out, const u64 *f, u64 *tmp)
/* Step 2: Fold the carry back into dst */
" add %%rax, %%r8;"
" adcx %%rcx, %%r9;"
- " movq %%r9, 8(%0);"
+ " movq %%r9, 8(%1);"
" adcx %%rcx, %%r10;"
- " movq %%r10, 16(%0);"
+ " movq %%r10, 16(%1);"
" adcx %%rcx, %%r11;"
- " movq %%r11, 24(%0);"
+ " movq %%r11, 24(%1);"
/* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */
" mov $0, %%rax;"
" cmovc %%rdx, %%rax;"
" add %%rax, %%r8;"
- " movq %%r8, 0(%0);"
+ " movq %%r8, 0(%1);"
/* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */
" mov $38, %%rdx;"
- " mulxq 96(%1), %%r8, %%r13;"
- " xor %%rcx, %%rcx;"
- " adoxq 64(%1), %%r8;"
- " mulxq 104(%1), %%r9, %%rbx;"
+ " mulxq 96(%0), %%r8, %%r13;"
+ " xor %%ecx, %%ecx;"
+ " adoxq 64(%0), %%r8;"
+ " mulxq 104(%0), %%r9, %%rbx;"
" adcx %%r13, %%r9;"
- " adoxq 72(%1), %%r9;"
- " mulxq 112(%1), %%r10, %%r13;"
+ " adoxq 72(%0), %%r9;"
+ " mulxq 112(%0), %%r10, %%r13;"
" adcx %%rbx, %%r10;"
- " adoxq 80(%1), %%r10;"
- " mulxq 120(%1), %%r11, %%rax;"
+ " adoxq 80(%0), %%r10;"
+ " mulxq 120(%0), %%r11, %%rax;"
" adcx %%r13, %%r11;"
- " adoxq 88(%1), %%r11;"
+ " adoxq 88(%0), %%r11;"
" adcx %%rcx, %%rax;"
" adox %%rcx, %%rax;"
" imul %%rdx, %%rax;"
@@ -732,21 +943,21 @@ static inline void fsqr2(u64 *out, const u64 *f, u64 *tmp)
/* Step 2: Fold the carry back into dst */
" add %%rax, %%r8;"
" adcx %%rcx, %%r9;"
- " movq %%r9, 40(%0);"
+ " movq %%r9, 40(%1);"
" adcx %%rcx, %%r10;"
- " movq %%r10, 48(%0);"
+ " movq %%r10, 48(%1);"
" adcx %%rcx, %%r11;"
- " movq %%r11, 56(%0);"
+ " movq %%r11, 56(%1);"
/* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */
" mov $0, %%rax;"
" cmovc %%rdx, %%rax;"
" add %%rax, %%r8;"
- " movq %%r8, 32(%0);"
- : "+&r" (tmp), "+&r" (f), "+&r" (out)
- :
- : "%rax", "%rcx", "%rdx", "%r8", "%r9", "%r10", "%r11", "%rbx", "%r13", "%r14", "%r15", "memory", "cc"
- );
+ " movq %%r8, 32(%1);"
+ : "+&r,&r"(f), "+&r,&r"(tmp)
+ : "r,m"(out)
+ : "%rax", "%rbx", "%rcx", "%rdx", "%r8", "%r9", "%r10", "%r11",
+ "%r13", "%r14", "%r15", "memory", "cc");
}
static void point_add_and_double(u64 *q, u64 *p01_tmp1, u64 *tmp2)
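The Step 1/2/3 comments in the inline asm above are the standard 2^256 == 38 (mod 2^255 - 19) folding: the 512-bit intermediate X_hi*2^256 + X_lo is reduced by adding 38*X_hi into X_lo, then the leftover carry word, and finally the last carry bit, are folded the same way. A plain-C sketch of that reduction, using __int128 carries instead of mulx/adcx/adox; the names are illustrative and, exactly like the asm, the result is only congruent mod p, not canonically reduced:

#include <stdint.h>

typedef uint64_t u64;
typedef unsigned __int128 u128;

/* Fold an 8-limb (512-bit) value into 4 limbs congruent mod 2^255 - 19,
 * mirroring Steps 1-3 of the asm above. */
static void fold_mod_25519(u64 out[4], const u64 wide[8])
{
	u128 acc;
	u64 carry = 0, top;
	int i;

	/* Step 1: out + carry*2^256 = wide_hi * 38 + wide_lo */
	for (i = 0; i < 4; i++) {
		acc = (u128)wide[4 + i] * 38 + wide[i] + carry;
		out[i] = (u64)acc;
		carry = (u64)(acc >> 64);
	}

	/* Step 2: fold the carry word back in as carry * 38 */
	acc = (u128)carry * 38 + out[0];
	out[0] = (u64)acc;
	top = (u64)(acc >> 64);
	for (i = 1; i < 4 && top; i++) {
		acc = (u128)out[i] + top;
		out[i] = (u64)acc;
		top = (u64)(acc >> 64);
	}

	/* Step 3: at most one carry bit can remain; fold it as +38 (the asm
	 * does this with cmovc and notes it cannot carry again). */
	if (top)
		out[0] += 38;
}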
diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
index b8c2390b0a35..062490f1b8a7 100644
--- a/drivers/net/wireguard/device.c
+++ b/drivers/net/wireguard/device.c
@@ -19,6 +19,7 @@
#include <linux/if_arp.h>
#include <linux/icmp.h>
#include <linux/suspend.h>
+#include <net/dst_metadata.h>
#include <net/icmp.h>
#include <net/rtnetlink.h>
#include <net/ip_tunnels.h>
@@ -106,6 +107,7 @@ static int wg_stop(struct net_device *dev)
{
struct wg_device *wg = netdev_priv(dev);
struct wg_peer *peer;
+ struct sk_buff *skb;
mutex_lock(&wg->device_update_lock);
list_for_each_entry(peer, &wg->peer_list, peer_list) {
@@ -116,7 +118,9 @@ static int wg_stop(struct net_device *dev)
wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake);
}
mutex_unlock(&wg->device_update_lock);
- skb_queue_purge(&wg->incoming_handshakes);
+ while ((skb = ptr_ring_consume(&wg->handshake_queue.ring)) != NULL)
+ kfree_skb(skb);
+ atomic_set(&wg->handshake_queue_len, 0);
wg_socket_reinit(wg, NULL, NULL);
return 0;
}
@@ -157,7 +161,7 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
goto err_peer;
}
- mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
+ mtu = skb_valid_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
__skb_queue_head_init(&packets);
if (!skb_is_gso(skb)) {
@@ -243,14 +247,13 @@ static void wg_destruct(struct net_device *dev)
destroy_workqueue(wg->handshake_receive_wq);
destroy_workqueue(wg->handshake_send_wq);
destroy_workqueue(wg->packet_crypt_wq);
- wg_packet_queue_free(&wg->decrypt_queue);
- wg_packet_queue_free(&wg->encrypt_queue);
+ wg_packet_queue_free(&wg->handshake_queue, true);
+ wg_packet_queue_free(&wg->decrypt_queue, false);
+ wg_packet_queue_free(&wg->encrypt_queue, false);
rcu_barrier(); /* Wait for all the peers to be actually freed. */
wg_ratelimiter_uninit();
memzero_explicit(&wg->static_identity, sizeof(wg->static_identity));
- skb_queue_purge(&wg->incoming_handshakes);
free_percpu(dev->tstats);
- free_percpu(wg->incoming_handshakes_worker);
kvfree(wg->index_hashtable);
kvfree(wg->peer_hashtable);
mutex_unlock(&wg->device_update_lock);
@@ -312,7 +315,6 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
init_rwsem(&wg->static_identity.lock);
mutex_init(&wg->socket_update_lock);
mutex_init(&wg->device_update_lock);
- skb_queue_head_init(&wg->incoming_handshakes);
wg_allowedips_init(&wg->peer_allowedips);
wg_cookie_checker_init(&wg->cookie_checker, wg);
INIT_LIST_HEAD(&wg->peer_list);
@@ -330,16 +332,10 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
if (!dev->tstats)
goto err_free_index_hashtable;
- wg->incoming_handshakes_worker =
- wg_packet_percpu_multicore_worker_alloc(
- wg_packet_handshake_receive_worker, wg);
- if (!wg->incoming_handshakes_worker)
- goto err_free_tstats;
-
wg->handshake_receive_wq = alloc_workqueue("wg-kex-%s",
WQ_CPU_INTENSIVE | WQ_FREEZABLE, 0, dev->name);
if (!wg->handshake_receive_wq)
- goto err_free_incoming_handshakes;
+ goto err_free_tstats;
wg->handshake_send_wq = alloc_workqueue("wg-kex-%s",
WQ_UNBOUND | WQ_FREEZABLE, 0, dev->name);
@@ -361,10 +357,15 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
if (ret < 0)
goto err_free_encrypt_queue;
- ret = wg_ratelimiter_init();
+ ret = wg_packet_queue_init(&wg->handshake_queue, wg_packet_handshake_receive_worker,
+ MAX_QUEUED_INCOMING_HANDSHAKES);
if (ret < 0)
goto err_free_decrypt_queue;
+ ret = wg_ratelimiter_init();
+ if (ret < 0)
+ goto err_free_handshake_queue;
+
ret = register_netdevice(dev);
if (ret < 0)
goto err_uninit_ratelimiter;
@@ -381,18 +382,18 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
err_uninit_ratelimiter:
wg_ratelimiter_uninit();
+err_free_handshake_queue:
+ wg_packet_queue_free(&wg->handshake_queue, false);
err_free_decrypt_queue:
- wg_packet_queue_free(&wg->decrypt_queue);
+ wg_packet_queue_free(&wg->decrypt_queue, false);
err_free_encrypt_queue:
- wg_packet_queue_free(&wg->encrypt_queue);
+ wg_packet_queue_free(&wg->encrypt_queue, false);
err_destroy_packet_crypt:
destroy_workqueue(wg->packet_crypt_wq);
err_destroy_handshake_send:
destroy_workqueue(wg->handshake_send_wq);
err_destroy_handshake_receive:
destroy_workqueue(wg->handshake_receive_wq);
-err_free_incoming_handshakes:
- free_percpu(wg->incoming_handshakes_worker);
err_free_tstats:
free_percpu(dev->tstats);
err_free_index_hashtable:
@@ -412,6 +413,7 @@ static struct rtnl_link_ops link_ops __read_mostly = {
static void wg_netns_pre_exit(struct net *net)
{
struct wg_device *wg;
+ struct wg_peer *peer;
rtnl_lock();
list_for_each_entry(wg, &device_list, device_list) {
@@ -421,6 +423,8 @@ static void wg_netns_pre_exit(struct net *net)
mutex_lock(&wg->device_update_lock);
rcu_assign_pointer(wg->creating_net, NULL);
wg_socket_reinit(wg, NULL, NULL);
+ list_for_each_entry(peer, &wg->peer_list, peer_list)
+ wg_socket_clear_peer_endpoint_src(peer);
mutex_unlock(&wg->device_update_lock);
}
}
diff --git a/drivers/net/wireguard/device.h b/drivers/net/wireguard/device.h
index 854bc3d97150..43c7cebbf50b 100644
--- a/drivers/net/wireguard/device.h
+++ b/drivers/net/wireguard/device.h
@@ -39,21 +39,18 @@ struct prev_queue {
struct wg_device {
struct net_device *dev;
- struct crypt_queue encrypt_queue, decrypt_queue;
+ struct crypt_queue encrypt_queue, decrypt_queue, handshake_queue;
struct sock __rcu *sock4, *sock6;
struct net __rcu *creating_net;
struct noise_static_identity static_identity;
- struct workqueue_struct *handshake_receive_wq, *handshake_send_wq;
- struct workqueue_struct *packet_crypt_wq;
- struct sk_buff_head incoming_handshakes;
- int incoming_handshake_cpu;
- struct multicore_worker __percpu *incoming_handshakes_worker;
+ struct workqueue_struct *packet_crypt_wq,*handshake_receive_wq, *handshake_send_wq;
struct cookie_checker cookie_checker;
struct pubkey_hashtable *peer_hashtable;
struct index_hashtable *index_hashtable;
struct allowedips peer_allowedips;
struct mutex device_update_lock, socket_update_lock;
struct list_head device_list, peer_list;
+ atomic_t handshake_queue_len;
unsigned int num_peers, device_update_gen;
u32 fwmark;
u16 incoming_port;
diff --git a/drivers/net/wireguard/main.c b/drivers/net/wireguard/main.c
index 9b8bbe27999e..a6714ce13a65 100644
--- a/drivers/net/wireguard/main.c
+++ b/drivers/net/wireguard/main.c
@@ -18,7 +18,7 @@
#include <linux/genetlink.h>
#include <net/rtnetlink.h>
-static int __init mod_init(void)
+static int __init wg_mod_init(void)
{
int ret;
@@ -66,7 +66,7 @@ err_allowedips:
return ret;
}
-static void __exit mod_exit(void)
+static void __exit wg_mod_exit(void)
{
wg_genetlink_uninit();
wg_device_uninit();
@@ -74,8 +74,8 @@ static void __exit mod_exit(void)
wg_allowedips_slab_uninit();
}
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(wg_mod_init);
+module_exit(wg_mod_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("WireGuard secure network tunnel");
MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
diff --git a/drivers/net/wireguard/queueing.c b/drivers/net/wireguard/queueing.c
index 48e7b982a307..8084e7408c0a 100644
--- a/drivers/net/wireguard/queueing.c
+++ b/drivers/net/wireguard/queueing.c
@@ -4,6 +4,7 @@
*/
#include "queueing.h"
+#include <linux/skb_array.h>
struct multicore_worker __percpu *
wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr)
@@ -38,11 +39,11 @@ int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
return 0;
}
-void wg_packet_queue_free(struct crypt_queue *queue)
+void wg_packet_queue_free(struct crypt_queue *queue, bool purge)
{
free_percpu(queue->worker);
- WARN_ON(!__ptr_ring_empty(&queue->ring));
- ptr_ring_cleanup(&queue->ring, NULL);
+ WARN_ON(!purge && !__ptr_ring_empty(&queue->ring));
+ ptr_ring_cleanup(&queue->ring, purge ? __skb_array_destroy_skb : NULL);
}
#define NEXT(skb) ((skb)->prev)
diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h
index b6ccf650c738..03850c43ebaf 100644
--- a/drivers/net/wireguard/queueing.h
+++ b/drivers/net/wireguard/queueing.h
@@ -23,7 +23,7 @@ struct sk_buff;
/* queueing.c APIs: */
int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
unsigned int len);
-void wg_packet_queue_free(struct crypt_queue *queue);
+void wg_packet_queue_free(struct crypt_queue *queue, bool purge);
struct multicore_worker __percpu *
wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr);
diff --git a/drivers/net/wireguard/ratelimiter.c b/drivers/net/wireguard/ratelimiter.c
index e33ec72a9642..ecee41f528a5 100644
--- a/drivers/net/wireguard/ratelimiter.c
+++ b/drivers/net/wireguard/ratelimiter.c
@@ -188,12 +188,12 @@ int wg_ratelimiter_init(void)
(1U << 14) / sizeof(struct hlist_head)));
max_entries = table_size * 8;
- table_v4 = kvzalloc(table_size * sizeof(*table_v4), GFP_KERNEL);
+ table_v4 = kvcalloc(table_size, sizeof(*table_v4), GFP_KERNEL);
if (unlikely(!table_v4))
goto err_kmemcache;
#if IS_ENABLED(CONFIG_IPV6)
- table_v6 = kvzalloc(table_size * sizeof(*table_v6), GFP_KERNEL);
+ table_v6 = kvcalloc(table_size, sizeof(*table_v6), GFP_KERNEL);
if (unlikely(!table_v6)) {
kvfree(table_v4);
goto err_kmemcache;
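The kvzalloc(table_size * sizeof(...)) to kvcalloc() conversions above are not cosmetic: kvcalloc() checks the count-times-size multiplication for overflow before allocating. Roughly the following sketch (assuming the check_mul_overflow() helper from linux/overflow.h; this is not the actual mm implementation):

#include <linux/mm.h>
#include <linux/overflow.h>

/* Sketch of what kvcalloc(n, size, flags) adds over kvzalloc(n * size, flags). */
static inline void *kvcalloc_sketch(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;		/* n * size would wrap around */

	return kvzalloc(bytes, flags);
}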
diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c
index 07147ff0522d..214889edb48e 100644
--- a/drivers/net/wireguard/receive.c
+++ b/drivers/net/wireguard/receive.c
@@ -117,8 +117,8 @@ static void wg_receive_handshake_packet(struct wg_device *wg,
return;
}
- under_load = skb_queue_len(&wg->incoming_handshakes) >=
- MAX_QUEUED_INCOMING_HANDSHAKES / 8;
+ under_load = atomic_read(&wg->handshake_queue_len) >=
+ MAX_QUEUED_INCOMING_HANDSHAKES / 8;
if (under_load) {
last_under_load = ktime_get_coarse_boottime_ns();
} else if (last_under_load) {
@@ -213,13 +213,14 @@ static void wg_receive_handshake_packet(struct wg_device *wg,
void wg_packet_handshake_receive_worker(struct work_struct *work)
{
- struct wg_device *wg = container_of(work, struct multicore_worker,
- work)->ptr;
+ struct crypt_queue *queue = container_of(work, struct multicore_worker, work)->ptr;
+ struct wg_device *wg = container_of(queue, struct wg_device, handshake_queue);
struct sk_buff *skb;
- while ((skb = skb_dequeue(&wg->incoming_handshakes)) != NULL) {
+ while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) {
wg_receive_handshake_packet(wg, skb);
dev_kfree_skb(skb);
+ atomic_dec(&wg->handshake_queue_len);
cond_resched();
}
}
@@ -562,22 +563,28 @@ void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb)
case cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION):
case cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE):
case cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE): {
- int cpu;
-
- if (skb_queue_len(&wg->incoming_handshakes) >
- MAX_QUEUED_INCOMING_HANDSHAKES ||
- unlikely(!rng_is_initialized())) {
+ int cpu, ret = -EBUSY;
+
+ if (unlikely(!rng_is_initialized()))
+ goto drop;
+ if (atomic_read(&wg->handshake_queue_len) > MAX_QUEUED_INCOMING_HANDSHAKES / 2) {
+ if (spin_trylock_bh(&wg->handshake_queue.ring.producer_lock)) {
+ ret = __ptr_ring_produce(&wg->handshake_queue.ring, skb);
+ spin_unlock_bh(&wg->handshake_queue.ring.producer_lock);
+ }
+ } else
+ ret = ptr_ring_produce_bh(&wg->handshake_queue.ring, skb);
+ if (ret) {
+ drop:
net_dbg_skb_ratelimited("%s: Dropping handshake packet from %pISpfsc\n",
wg->dev->name, skb);
goto err;
}
- skb_queue_tail(&wg->incoming_handshakes, skb);
- /* Queues up a call to packet_process_queued_handshake_
- * packets(skb):
- */
- cpu = wg_cpumask_next_online(&wg->incoming_handshake_cpu);
+ atomic_inc(&wg->handshake_queue_len);
+ cpu = wg_cpumask_next_online(&wg->handshake_queue.last_cpu);
+ /* Queues up a call to packet_process_queued_handshake_packets(skb): */
queue_work_on(cpu, wg->handshake_receive_wq,
- &per_cpu_ptr(wg->incoming_handshakes_worker, cpu)->work);
+ &per_cpu_ptr(wg->handshake_queue.worker, cpu)->work);
break;
}
case cpu_to_le32(MESSAGE_DATA):
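The enqueue logic added above is easier to follow pulled out as one helper: below half capacity a handshake is queued through the normal producer lock, above half capacity only a trylock is attempted so a handshake flood cannot pile every CPU onto the same lock, and a failed attempt counts as a drop. A hedged restatement (the helper name is illustrative; the fields are the ones introduced by this patch):

static int wg_handshake_enqueue_sketch(struct wg_device *wg, struct sk_buff *skb)
{
	struct ptr_ring *ring = &wg->handshake_queue.ring;
	int ret = -EBUSY;

	if (atomic_read(&wg->handshake_queue_len) > MAX_QUEUED_INCOMING_HANDSHAKES / 2) {
		/* Contended path: never spin, just try once. */
		if (spin_trylock_bh(&ring->producer_lock)) {
			ret = __ptr_ring_produce(ring, skb);
			spin_unlock_bh(&ring->producer_lock);
		}
	} else {
		ret = ptr_ring_produce_bh(ring, skb);
	}

	if (!ret)
		atomic_inc(&wg->handshake_queue_len);
	return ret;	/* 0 on success; -EBUSY or -ENOSPC means the caller drops skb */
}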
diff --git a/drivers/net/wireguard/socket.c b/drivers/net/wireguard/socket.c
index 04739763e303..0414d7a6ce74 100644
--- a/drivers/net/wireguard/socket.c
+++ b/drivers/net/wireguard/socket.c
@@ -49,7 +49,7 @@ static int send4(struct wg_device *wg, struct sk_buff *skb,
rt = dst_cache_get_ip4(cache, &fl.saddr);
if (!rt) {
- security_sk_classify_flow(sock, flowi4_to_flowi(&fl));
+ security_sk_classify_flow(sock, flowi4_to_flowi_common(&fl));
if (unlikely(!inet_confirm_addr(sock_net(sock), NULL, 0,
fl.saddr, RT_SCOPE_HOST))) {
endpoint->src4.s_addr = 0;
@@ -129,7 +129,7 @@ static int send6(struct wg_device *wg, struct sk_buff *skb,
dst = dst_cache_get_ip6(cache, &fl.saddr);
if (!dst) {
- security_sk_classify_flow(sock, flowi6_to_flowi(&fl));
+ security_sk_classify_flow(sock, flowi6_to_flowi_common(&fl));
if (unlikely(!ipv6_addr_any(&fl.saddr) &&
!ipv6_chk_addr(sock_net(sock), &fl.saddr, NULL, 0))) {
endpoint->src6 = fl.saddr = in6addr_any;
@@ -160,6 +160,7 @@ out:
rcu_read_unlock_bh();
return ret;
#else
+ kfree_skb(skb);
return -EAFNOSUPPORT;
#endif
}
@@ -241,7 +242,7 @@ int wg_socket_endpoint_from_skb(struct endpoint *endpoint,
endpoint->addr4.sin_addr.s_addr = ip_hdr(skb)->saddr;
endpoint->src4.s_addr = ip_hdr(skb)->daddr;
endpoint->src_if4 = skb->skb_iif;
- } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ } else if (IS_ENABLED(CONFIG_IPV6) && skb->protocol == htons(ETH_P_IPV6)) {
endpoint->addr6.sin6_family = AF_INET6;
endpoint->addr6.sin6_port = udp_hdr(skb)->source;
endpoint->addr6.sin6_addr = ipv6_hdr(skb)->saddr;
@@ -284,7 +285,7 @@ void wg_socket_set_peer_endpoint(struct wg_peer *peer,
peer->endpoint.addr4 = endpoint->addr4;
peer->endpoint.src4 = endpoint->src4;
peer->endpoint.src_if4 = endpoint->src_if4;
- } else if (endpoint->addr.sa_family == AF_INET6) {
+ } else if (IS_ENABLED(CONFIG_IPV6) && endpoint->addr.sa_family == AF_INET6) {
peer->endpoint.addr6 = endpoint->addr6;
peer->endpoint.src6 = endpoint->src6;
} else {
@@ -308,7 +309,7 @@ void wg_socket_clear_peer_endpoint_src(struct wg_peer *peer)
{
write_lock_bh(&peer->endpoint_lock);
memset(&peer->endpoint.src6, 0, sizeof(peer->endpoint.src6));
- dst_cache_reset(&peer->endpoint_cache);
+ dst_cache_reset_now(&peer->endpoint_cache);
write_unlock_bh(&peer->endpoint_lock);
}
diff --git a/drivers/net/wireguard/version.h b/drivers/net/wireguard/version.h
index 35ef576765c9..c7f9028f0177 100644
--- a/drivers/net/wireguard/version.h
+++ b/drivers/net/wireguard/version.h
@@ -1 +1,3 @@
-#define WIREGUARD_VERSION "1.0.20210606"
+#ifndef WIREGUARD_VERSION
+#define WIREGUARD_VERSION "1.0.20220627"
+#endif
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index 79515ba637ee..fefb69a84eb9 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -449,9 +449,9 @@ int copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01
UL_FILTER_RULE_HANDLE_START + i;
rule_hdl[i] = ipa_qmi_ctx->q6_ul_filter_rule[i].filter_hdl;
ipa_qmi_ctx->q6_ul_filter_rule[i].ip =
- rule_req->filter_spec_list[i].ip_type;
+ (enum ipa_ip_type)rule_req->filter_spec_list[i].ip_type;
ipa_qmi_ctx->q6_ul_filter_rule[i].action =
- rule_req->filter_spec_list[i].filter_action;
+ (enum ipa_flt_action)rule_req->filter_spec_list[i].filter_action;
if (rule_req->filter_spec_list[i].is_routing_table_index_valid
== true)
ipa_qmi_ctx->q6_ul_filter_rule[i].rt_tbl_idx =
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index 3392cd0413a9..c3720888d753 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -454,9 +454,9 @@ int ipa3_copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01
goto failure;
}
ipa3_qmi_ctx->q6_ul_filter_rule[i].ip =
- rule_req->filter_spec_ex_list[i].ip_type;
+ (enum ipa_ip_type)rule_req->filter_spec_ex_list[i].ip_type;
ipa3_qmi_ctx->q6_ul_filter_rule[i].action =
- rule_req->filter_spec_ex_list[i].filter_action;
+ (enum ipa_flt_action)rule_req->filter_spec_ex_list[i].filter_action;
if (rule_req->filter_spec_ex_list[i].
is_routing_table_index_valid == true)
ipa3_qmi_ctx->q6_ul_filter_rule[i].rt_tbl_idx =
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_rpm_smd.c b/drivers/soc/qcom/msm_bus/msm_bus_rpm_smd.c
index 964f2c1e2e75..03fd89c15553 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_rpm_smd.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_rpm_smd.c
@@ -19,7 +19,7 @@
#include <soc/qcom/rpm-smd.h>
/* Stubs for backward compatibility */
-void msm_bus_rpm_set_mt_mask()
+void msm_bus_rpm_set_mt_mask(void)
{
}
diff --git a/drivers/soc/qcom/qdsp6v2/voice_svc.c b/drivers/soc/qcom/qdsp6v2/voice_svc.c
index 0a49a322c9da..0c11f16f50e1 100644
--- a/drivers/soc/qcom/qdsp6v2/voice_svc.c
+++ b/drivers/soc/qcom/qdsp6v2/voice_svc.c
@@ -597,7 +597,7 @@ done:
return ret;
}
-static int voice_svc_dummy_reg()
+static int voice_svc_dummy_reg(void)
{
uint32_t src_port = APR_MAX_PORTS - 1;
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index 6ab1d41b90ee..03e26c4417bf 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -1,6 +1,7 @@
/*
* Copyright (c) 2015, Sony Mobile Communications AB.
* Copyright (c) 2012-2013, 2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -85,6 +86,17 @@
/* Max number of processors/hosts in a system */
#define SMEM_HOST_COUNT 9
+/* Entry range check
+ * ptr >= start : checks that ptr does not start before the access region
+ * ptr + size >= ptr: checks for integer overflow (on a 32-bit system where ptr
+ * and size are 32 bits, ptr + size can wrap around to a small integer)
+ * ptr + size <= end: checks that ptr + size does not extend past the end of
+ * the access region
+ */
+#define IN_PARTITION_RANGE(ptr, size, start, end) \
+ (((void *)(ptr) >= (void *)(start)) && \
+ (((void *)(ptr) + (size)) >= (void *)(ptr)) && \
+ (((void *)(ptr) + (size)) <= (void *)(end)))
+
/**
* struct smem_proc_comm - proc_comm communication struct (legacy)
* @command: current command to be executed
@@ -302,6 +314,7 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem,
{
struct smem_partition_header *phdr;
struct smem_private_entry *hdr, *end;
+ struct smem_private_entry *next_hdr;
struct smem_partition_header *phdr;
size_t alloc_size;
void *cached;
@@ -314,10 +327,11 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem,
end = phdr_to_last_private_entry(phdr);
cached = phdr_to_first_cached_entry(phdr);
- if (WARN_ON((void *)end > p_end || (void *)cached > p_end))
+ if (WARN_ON(!IN_PARTITION_RANGE(end, 0, phdr, cached) ||
+ cached > p_end))
return -EINVAL;
- while (hdr < end) {
+ while ((hdr < end) && ((hdr + 1) < end)) {
if (hdr->canary != SMEM_PRIVATE_CANARY) {
dev_err(smem->dev,
"Found invalid canary in host %d:%d partition\n",
@@ -328,9 +342,15 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem,
if (le16_to_cpu(hdr->item) == item)
return -EEXIST;
- hdr = private_entry_next(hdr);
+ next_hdr = private_entry_next(hdr);
+
+ if (WARN_ON(next_hdr <= hdr))
+ return -EINVAL;
+
+ hdr = next_hdr;
}
- if (WARN_ON((void *)hdr > p_end))
+
+ if (WARN_ON((void *)hdr > (void *)end))
return -EINVAL;
/* Check that we don't grow into the cached region */
@@ -485,7 +505,9 @@ static void *qcom_smem_get_private(struct qcom_smem *smem,
{
struct smem_partition_header *phdr;
struct smem_private_entry *e, *end;
+ struct smem_private_entry *next_e;
void *item_ptr, *p_end;
+ size_t entry_size = 0;
u32 partition_size;
u32 padding_data;
u32 e_size;
@@ -500,7 +522,7 @@ static void *qcom_smem_get_private(struct qcom_smem *smem,
if (WARN_ON((void *)end > p_end))
return ERR_PTR(-EINVAL);
- while (e < end) {
+ while ((e < end) && ((e + 1) < end)) {
if (e->canary != SMEM_PRIVATE_CANARY) {
dev_err(smem->dev,
"Found invalid canary in host %d:%d partition\n",
@@ -509,25 +531,31 @@ static void *qcom_smem_get_private(struct qcom_smem *smem,
}
if (le16_to_cpu(e->item) == item) {
- if (size != NULL) {
- e_size = le32_to_cpu(e->size);
- padding_data = le16_to_cpu(e->padding_data);
+ e_size = le32_to_cpu(e->size);
+ padding_data = le16_to_cpu(e->padding_data);
- if (e_size < partition_size
- && padding_data < e_size)
- *size = e_size - padding_data;
- else
- return ERR_PTR(-EINVAL);
- }
+ if (e_size < partition_size && padding_data < e_size)
+ entry_size = e_size - padding_data;
+ else
+ return ERR_PTR(-EINVAL);
item_ptr = entry_to_item(e);
- if (WARN_ON(item_ptr > p_end))
+
+ if (WARN_ON(!IN_PARTITION_RANGE(item_ptr, entry_size,
+ e, end)))
return ERR_PTR(-EINVAL);
+ if (size != NULL)
+ *size = entry_size;
+
return item_ptr;
}
- e = private_entry_next(e);
+ next_e = private_entry_next(e);
+ if (WARN_ON(next_e <= e))
+ return ERR_PTR(-EINVAL);
+
+ e = next_e;
}
if (WARN_ON((void *)e > p_end))
return ERR_PTR(-EINVAL);
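A self-contained illustration of why IN_PARTITION_RANGE() needs its middle check: with 32-bit pointer arithmetic, ptr + size can wrap around and still compare as below the end of the region. The addresses below are made up.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool in_range32(uint32_t ptr, uint32_t size, uint32_t start, uint32_t end)
{
	return ptr >= start &&
	       ptr + size >= ptr &&	/* rejects the wrap-around case */
	       ptr + size <= end;
}

int main(void)
{
	uint32_t start = 0xFFFF0000u, end = 0xFFFFF000u;

	/* Genuinely inside the region. */
	printf("%d\n", in_range32(0xFFFF0100u, 0x0E00u, start, end));	/* 1 */

	/* 0xFFFFE000 + 0x4000 wraps to 0x2000, which is <= end; only the
	 * middle check catches it. */
	printf("%d\n", in_range32(0xFFFFE000u, 0x4000u, start, end));	/* 0 */
	return 0;
}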
diff --git a/drivers/thermal/msm_thermal-dev.c b/drivers/thermal/msm_thermal-dev.c
index ead9765666c8..632bfba0c655 100644
--- a/drivers/thermal/msm_thermal-dev.c
+++ b/drivers/thermal/msm_thermal-dev.c
@@ -346,7 +346,7 @@ static const struct file_operations msm_thermal_fops = {
.release = msm_thermal_ioctl_release,
};
-int msm_thermal_ioctl_init()
+int msm_thermal_ioctl_init(void)
{
int ret = 0;
dev_t thermal_dev;
@@ -409,7 +409,7 @@ ioctl_init_exit:
return ret;
}
-void msm_thermal_ioctl_cleanup()
+void msm_thermal_ioctl_cleanup(void)
{
uint32_t idx = 0;
dev_t thermal_dev = MKDEV(msm_thermal_major, 0);
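The () to (void) conversions in this series (msm_bus_rpm_smd.c, voice_svc.c, msm_thermal-dev.c, mdss_mdp.c, mdss_util.c) all fix the same thing: in C an empty parameter list is an old-style declaration with unspecified parameters, so the compiler silently accepts bogus arguments, while (void) makes it a real prototype. A two-line illustration (function names made up):

int old_style_init();		/* unprototyped: old_style_init(42) still compiles */
int new_style_init(void);	/* prototyped:   new_style_init(42) is a compile error */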
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index 8ee146b14aae..6a024b5cfb0f 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -106,21 +106,11 @@ static void pty_unthrottle(struct tty_struct *tty)
static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c)
{
struct tty_struct *to = tty->link;
- unsigned long flags;
- if (tty->stopped)
+ if (tty->stopped || !c)
return 0;
- if (c > 0) {
- spin_lock_irqsave(&to->port->lock, flags);
- /* Stuff the data into the input queue of the other end */
- c = tty_insert_flip_string(to->port, buf, c);
- spin_unlock_irqrestore(&to->port->lock, flags);
- /* And shovel */
- if (c)
- tty_flip_buffer_push(to->port);
- }
- return c;
+ return tty_insert_flip_string_and_push_buffer(to->port, buf, c);
}
/**
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 8f1f668c4532..49c6cbab984f 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -168,7 +168,8 @@ static struct tty_buffer *tty_buffer_alloc(struct tty_port *port, size_t size)
have queued and recycle that ? */
if (atomic_read(&port->buf.mem_used) > port->buf.mem_limit)
return NULL;
- p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC);
+ p = kmalloc(sizeof(struct tty_buffer) + 2 * size,
+ GFP_ATOMIC | __GFP_NOWARN);
if (p == NULL)
return NULL;
@@ -389,6 +390,15 @@ int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag)
}
EXPORT_SYMBOL(__tty_insert_flip_char);
+static inline void tty_flip_buffer_commit(struct tty_buffer *tail)
+{
+ /*
+ * Paired w/ acquire in flush_to_ldisc(); ensures flush_to_ldisc() sees
+ * buffer data.
+ */
+ smp_store_release(&tail->commit, tail->used);
+}
+
/**
* tty_schedule_flip - push characters to ldisc
* @port: tty port to push from
@@ -402,10 +412,7 @@ void tty_schedule_flip(struct tty_port *port)
{
struct tty_bufhead *buf = &port->buf;
- /* paired w/ acquire in flush_to_ldisc(); ensures
- * flush_to_ldisc() sees buffer data.
- */
- smp_store_release(&buf->tail->commit, buf->tail->used);
+ tty_flip_buffer_commit(buf->tail);
queue_kthread_work(&port->worker, &buf->work);
}
EXPORT_SYMBOL(tty_schedule_flip);
@@ -549,6 +556,37 @@ void tty_flip_buffer_push(struct tty_port *port)
EXPORT_SYMBOL(tty_flip_buffer_push);
/**
+ * tty_insert_flip_string_and_push_buffer - add characters to the tty buffer and
+ * push
+ * @port: tty port
+ * @chars: characters
+ * @size: size
+ *
+ * The function combines tty_insert_flip_string() and tty_flip_buffer_push()
+ * with the exception of properly holding the @port->lock.
+ *
+ * To be used only internally (by pty currently).
+ *
+ * Returns: the number added.
+ */
+int tty_insert_flip_string_and_push_buffer(struct tty_port *port,
+ const unsigned char *chars, size_t size)
+{
+ struct tty_bufhead *buf = &port->buf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ size = tty_insert_flip_string(port, chars, size);
+ if (size)
+ tty_flip_buffer_commit(buf->tail);
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ queue_kthread_work(&port->worker, &buf->work);
+
+ return size;
+}
+
+/**
* tty_buffer_init - prepare a tty buffer structure
* @tty: tty to initialise
*
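The release/acquire pairing that the new tty_flip_buffer_commit() helper preserves, restated as a minimal sketch: the producer publishes ->used with a release store, and the ldisc side reads ->commit with an acquire load, so any buffer bytes written before the commit are guaranteed visible. Field names mirror struct tty_buffer; the two functions are illustrative, not the driver code.

#include <linux/tty.h>
#include <linux/tty_flip.h>

static void producer_commit_sketch(struct tty_buffer *tail)
{
	/* Data bytes were copied into tail->data before this point. */
	smp_store_release(&tail->commit, tail->used);
}

static int consumer_ready_sketch(struct tty_buffer *head)
{
	/* Pairs with the release above; everything up to 'commit' is visible. */
	return smp_load_acquire(&head->commit) - head->read;
}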
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index afd509564e2e..143c33cbc3d8 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2257,9 +2257,8 @@ static int usb_enumerate_device_otg(struct usb_device *udev)
* usb_enumerate_device - Read device configs/intfs/otg (usbcore-internal)
* @udev: newly addressed device (in ADDRESS state)
*
- * This is only called by usb_new_device() and usb_authorize_device()
- * and FIXME -- all comments that apply to them apply here wrt to
- * environment.
+ * This is only called by usb_new_device() -- all comments that apply there
+ * apply here with regard to environment.
*
* If the device is WUSB and not authorized, we don't attempt to read
* the string descriptors, as they will be errored out by the device
@@ -5621,6 +5620,11 @@ re_enumerate_no_bos:
* the reset is over (using their post_reset method).
*
* Return: The same as for usb_reset_and_verify_device().
+ * However, if a reset is already in progress (for instance, if a
+ * driver doesn't have pre_ or post_reset() callbacks, and while
+ * being unbound or re-bound during the ongoing reset its disconnect()
+ * or probe() routine tries to perform a second, nested reset), the
+ * routine returns -EINPROGRESS.
*
* Note:
* The caller must own the device lock. For example, it's safe to use
@@ -5654,6 +5658,10 @@ int usb_reset_device(struct usb_device *udev)
return -EISDIR;
}
+ if (udev->reset_in_progress)
+ return -EINPROGRESS;
+ udev->reset_in_progress = 1;
+
port_dev = hub->ports[udev->portnum - 1];
/*
@@ -5718,6 +5726,7 @@ int usb_reset_device(struct usb_device *udev)
usb_autosuspend_device(udev);
memalloc_noio_restore(noio_flag);
+ udev->reset_in_progress = 0;
return ret;
}
EXPORT_SYMBOL_GPL(usb_reset_device);
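The -EINPROGRESS documentation above describes the nested case the new reset_in_progress flag guards against: a driver with no pre_reset()/post_reset() callbacks is unbound while usb_reset_device() is running, and its disconnect() (or a subsequent probe()) attempts another reset of the same device. With this change the inner call fails fast instead of re-entering the reset path. A hedged sketch from the driver side (the driver and function names are made up):

#include <linux/usb.h>

static void example_disconnect(struct usb_interface *intf)
{
	struct usb_device *udev = interface_to_usbdev(intf);
	int ret;

	/* The device lock is already held when disconnect() runs. */
	ret = usb_reset_device(udev);
	if (ret == -EINPROGRESS)
		dev_dbg(&intf->dev, "outer reset already running, not nesting\n");
}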
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 6dc0f4e25cf3..c7c32326d50c 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -825,7 +825,6 @@ read_descriptors(struct file *filp, struct kobject *kobj,
* Following that are the raw descriptor entries for all the
* configurations (config plus subsidiary descriptors).
*/
- usb_lock_device(udev);
for (cfgno = -1; cfgno < udev->descriptor.bNumConfigurations &&
nleft > 0; ++cfgno) {
if (cfgno < 0) {
@@ -846,7 +845,6 @@ read_descriptors(struct file *filp, struct kobject *kobj,
off -= srclen;
}
}
- usb_unlock_device(udev);
return count - nleft;
}
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index b6b25c75b80c..c40c15a22abc 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -278,6 +278,7 @@ struct dwc3_msm {
enum usb_device_speed override_usb_speed;
bool core_init_failed;
+ bool usb_data_enabled;
};
#define USB_HSPHY_3P3_VOL_MIN 3050000 /* uV */
@@ -2692,6 +2693,9 @@ static int dwc3_msm_id_notifier(struct notifier_block *nb,
goto done;
}
+ if (!mdwc->usb_data_enabled)
+ return NOTIFY_DONE;
+
id = event ? DWC3_ID_GROUND : DWC3_ID_FLOAT;
dev_dbg(mdwc->dev, "host:%ld (id:%d) event received\n", event, id);
@@ -2769,6 +2773,14 @@ static int dwc3_msm_vbus_notifier(struct notifier_block *nb,
goto done;
}
+ if (!mdwc->usb_data_enabled) {
+ if (event)
+ dwc3_msm_gadget_vbus_draw(mdwc, 500);
+ else
+ dwc3_msm_gadget_vbus_draw(mdwc, 0);
+ return NOTIFY_DONE;
+ }
+
dev_dbg(mdwc->dev, "vbus:%ld event received\n", event);
if (mdwc->vbus_active == event)
@@ -2977,7 +2989,6 @@ static ssize_t xhci_link_compliance_store(struct device *dev,
return ret;
}
-
static DEVICE_ATTR_RW(xhci_link_compliance);
static ssize_t usb_compliance_mode_show(struct device *dev,
@@ -3004,6 +3015,33 @@ static ssize_t usb_compliance_mode_store(struct device *dev,
}
static DEVICE_ATTR_RW(usb_compliance_mode);
+static ssize_t usb_data_enabled_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%s\n",
+ mdwc->usb_data_enabled ? "enabled" : "disabled");
+}
+
+static ssize_t usb_data_enabled_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+
+ if (kstrtobool(buf, &mdwc->usb_data_enabled))
+ return -EINVAL;
+
+ if (!mdwc->usb_data_enabled) {
+ mdwc->vbus_active = false;
+ mdwc->id_state = DWC3_ID_FLOAT;
+ dwc3_ext_event_notify(mdwc);
+ }
+
+ return count;
+}
+static DEVICE_ATTR_RW(usb_data_enabled);
+
static int dwc3_msm_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node, *dwc3_node;
@@ -3336,6 +3374,9 @@ static int dwc3_msm_probe(struct platform_device *pdev)
mdwc->pm_qos_latency = 0;
}
+ /* set the initial value */
+ mdwc->usb_data_enabled = true;
+
mdwc->usb_psy = power_supply_get_by_name("usb");
if (!mdwc->usb_psy) {
dev_warn(mdwc->dev, "Could not get usb power_supply\n");
@@ -3365,6 +3406,7 @@ static int dwc3_msm_probe(struct platform_device *pdev)
device_create_file(&pdev->dev, &dev_attr_speed);
device_create_file(&pdev->dev, &dev_attr_xhci_link_compliance);
device_create_file(&pdev->dev, &dev_attr_usb_compliance_mode);
+ device_create_file(&pdev->dev, &dev_attr_usb_data_enabled);
host_mode = usb_get_dr_mode(&mdwc->dwc3->dev) == USB_DR_MODE_HOST;
if (host_mode ||
@@ -3400,6 +3442,7 @@ static int dwc3_msm_remove(struct platform_device *pdev)
device_remove_file(&pdev->dev, &dev_attr_mode);
device_remove_file(&pdev->dev, &dev_attr_xhci_link_compliance);
+	device_remove_file(&pdev->dev, &dev_attr_usb_data_enabled);
if (cpu_to_affin)
unregister_cpu_notifier(&mdwc->dwc3_cpu_notifier);
@@ -4009,7 +4052,8 @@ static void dwc3_otg_sm_work(struct work_struct *w)
work = 1;
break;
} else {
- dwc3_msm_gadget_vbus_draw(mdwc, 0);
+ if (mdwc->usb_data_enabled)
+ dwc3_msm_gadget_vbus_draw(mdwc, 0);
pm_relax(mdwc->dev);
dev_dbg(mdwc->dev, "Cable disconnected\n");
}
diff --git a/drivers/video/fbdev/msm/mdss_mdp.c b/drivers/video/fbdev/msm/mdss_mdp.c
index 98172233b5aa..84be290a0369 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp.c
@@ -4854,7 +4854,7 @@ struct mdss_panel_cfg *mdss_panel_intf_type(int intf_val)
}
EXPORT_SYMBOL(mdss_panel_intf_type);
-struct irq_info *mdss_intr_line()
+struct irq_info *mdss_intr_line(void)
{
return mdss_mdp_hw.irq_info;
}
diff --git a/drivers/video/fbdev/msm/mdss_util.c b/drivers/video/fbdev/msm/mdss_util.c
index 65941601cfdc..d6c462e04dfd 100644
--- a/drivers/video/fbdev/msm/mdss_util.c
+++ b/drivers/video/fbdev/msm/mdss_util.c
@@ -232,7 +232,7 @@ struct mdss_util_intf mdss_util = {
.mdp_probe_done = false
};
-struct mdss_util_intf *mdss_get_util_intf()
+struct mdss_util_intf *mdss_get_util_intf(void)
{
return &mdss_util;
}
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index d7111b8ce36a..3590c5c5eb6a 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -699,7 +699,7 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
take_dentry_name_snapshot(&old_name, old_dentry);
error = simple_rename(d_inode(old_dir), old_dentry, d_inode(new_dir),
- dentry);
+ dentry, 0);
if (error) {
release_dentry_name_snapshot(&old_name);
goto exit;
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index fc55909ce515..a5c3bc632a21 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -1441,7 +1441,6 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
goto out;
ret = 0;
- pipe_lock(pipe);
if (!pipe->readers) {
send_sig(SIGPIPE, current, 0);
@@ -1477,7 +1476,6 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
}
out_unlock:
- pipe_unlock(pipe);
if (do_wakeup) {
smp_mb();
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 1d5e3b0a3b1a..00ab6084dcc6 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -1022,7 +1022,7 @@ static const struct inode_operations hugetlbfs_dir_inode_operations = {
.mkdir = hugetlbfs_mkdir,
.rmdir = simple_rmdir,
.mknod = hugetlbfs_mknod,
- .rename = simple_rename,
+ .rename2 = simple_rename,
.setattr = hugetlbfs_setattr,
};
diff --git a/fs/libfs.c b/fs/libfs.c
index 01e9cae5b160..883cdd45a08c 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -321,11 +321,15 @@ int simple_rmdir(struct inode *dir, struct dentry *dentry)
EXPORT_SYMBOL(simple_rmdir);
int simple_rename(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry)
+ struct inode *new_dir, struct dentry *new_dentry,
+ unsigned int flags)
{
struct inode *inode = d_inode(old_dentry);
int they_are_dirs = d_is_dir(old_dentry);
+ if (flags & ~RENAME_NOREPLACE)
+ return -EINVAL;
+
if (!simple_empty(new_dentry))
return -ENOTEMPTY;
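With the signature change above, simple_rename() now takes the rename flags word and rejects anything other than RENAME_NOREPLACE (which the VFS has already enforced by the time the method is called), so in-kernel filesystems using it are switched to the flag-aware .rename2 hook, as the hugetlbfs and ramfs hunks in this patch do. A sketch of the wiring (the ops table is illustrative):

static const struct inode_operations example_dir_iops = {
	.lookup		= simple_lookup,
	.unlink		= simple_unlink,
	.rmdir		= simple_rmdir,
	.rename2	= simple_rename,	/* now: (..., unsigned int flags) */
};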
diff --git a/fs/open.c b/fs/open.c
index b7e2889a710c..c39c1d1fa082 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -732,6 +732,12 @@ static int do_dentry_open(struct file *f,
return 0;
}
+ /* Any file opened for execve()/uselib() has to be a regular file. */
+ if (unlikely(f->f_flags & FMODE_EXEC && !S_ISREG(inode->i_mode))) {
+ error = -EACCES;
+ goto cleanup_file;
+ }
+
if (f->f_mode & FMODE_WRITE && !special_file(inode->i_mode)) {
error = get_write_access(inode);
if (unlikely(error))
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c
index 889d558b4e05..37fcd10866c3 100644
--- a/fs/ramfs/inode.c
+++ b/fs/ramfs/inode.c
@@ -145,7 +145,7 @@ static const struct inode_operations ramfs_dir_inode_operations = {
.mkdir = ramfs_mkdir,
.rmdir = simple_rmdir,
.mknod = ramfs_mknod,
- .rename = simple_rename,
+ .rename2 = simple_rename,
};
static const struct super_operations ramfs_ops = {
diff --git a/fs/splice.c b/fs/splice.c
index 57ccc583a172..0562b990d64e 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -183,83 +183,42 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
struct splice_pipe_desc *spd)
{
unsigned int spd_pages = spd->nr_pages;
- int ret, do_wakeup, page_nr;
+ int ret = 0, page_nr = 0;
if (!spd_pages)
return 0;
- ret = 0;
- do_wakeup = 0;
- page_nr = 0;
-
- pipe_lock(pipe);
-
- for (;;) {
- if (!pipe->readers) {
- send_sig(SIGPIPE, current, 0);
- if (!ret)
- ret = -EPIPE;
- break;
- }
-
- if (pipe->nrbufs < pipe->buffers) {
- int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
- struct pipe_buffer *buf = pipe->bufs + newbuf;
-
- buf->page = spd->pages[page_nr];
- buf->offset = spd->partial[page_nr].offset;
- buf->len = spd->partial[page_nr].len;
- buf->private = spd->partial[page_nr].private;
- buf->ops = spd->ops;
- buf->flags = 0;
- if (spd->flags & SPLICE_F_GIFT)
- buf->flags |= PIPE_BUF_FLAG_GIFT;
-
- pipe->nrbufs++;
- page_nr++;
- ret += buf->len;
+ if (unlikely(!pipe->readers)) {
+ send_sig(SIGPIPE, current, 0);
+ ret = -EPIPE;
+ goto out;
+ }
- if (pipe->files)
- do_wakeup = 1;
+ while (pipe->nrbufs < pipe->buffers) {
+ int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
+ struct pipe_buffer *buf = pipe->bufs + newbuf;
- if (!--spd->nr_pages)
- break;
- if (pipe->nrbufs < pipe->buffers)
- continue;
+ buf->page = spd->pages[page_nr];
+ buf->offset = spd->partial[page_nr].offset;
+ buf->len = spd->partial[page_nr].len;
+ buf->private = spd->partial[page_nr].private;
+ buf->ops = spd->ops;
+ buf->flags = 0;
+ if (spd->flags & SPLICE_F_GIFT)
+ buf->flags |= PIPE_BUF_FLAG_GIFT;
- break;
- }
+ pipe->nrbufs++;
+ page_nr++;
+ ret += buf->len;
- if (spd->flags & SPLICE_F_NONBLOCK) {
- if (!ret)
- ret = -EAGAIN;
+ if (!--spd->nr_pages)
break;
- }
-
- if (signal_pending(current)) {
- if (!ret)
- ret = -ERESTARTSYS;
- break;
- }
-
- if (do_wakeup) {
- smp_mb();
- if (waitqueue_active(&pipe->wait))
- wake_up_interruptible_sync(&pipe->wait);
- kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
- do_wakeup = 0;
- }
-
- pipe->waiting_writers++;
- pipe_wait(pipe);
- pipe->waiting_writers--;
}
- pipe_unlock(pipe);
-
- if (do_wakeup)
- wakeup_pipe_readers(pipe);
+ if (!ret)
+ ret = -EAGAIN;
+out:
while (page_nr < spd_pages)
spd->spd_release(spd, page_nr++);
@@ -1342,6 +1301,25 @@ long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
}
EXPORT_SYMBOL(do_splice_direct);
+static int wait_for_space(struct pipe_inode_info *pipe, unsigned flags)
+{
+ for (;;) {
+ if (unlikely(!pipe->readers)) {
+ send_sig(SIGPIPE, current, 0);
+ return -EPIPE;
+ }
+ if (pipe->nrbufs != pipe->buffers)
+ return 0;
+ if (flags & SPLICE_F_NONBLOCK)
+ return -EAGAIN;
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+ pipe->waiting_writers++;
+ pipe_wait(pipe);
+ pipe->waiting_writers--;
+ }
+}
+
static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
struct pipe_inode_info *opipe,
size_t len, unsigned int flags);
@@ -1424,8 +1402,13 @@ static long do_splice(struct file *in, loff_t __user *off_in,
offset = in->f_pos;
}
- ret = do_splice_to(in, &offset, opipe, len, flags);
-
+ pipe_lock(opipe);
+ ret = wait_for_space(opipe, flags);
+ if (!ret)
+ ret = do_splice_to(in, &offset, opipe, len, flags);
+ pipe_unlock(opipe);
+ if (ret > 0)
+ wakeup_pipe_readers(opipe);
if (!off_in)
in->f_pos = offset;
else if (copy_to_user(off_in, &offset, sizeof(loff_t)))
@@ -1437,106 +1420,32 @@ static long do_splice(struct file *in, loff_t __user *off_in,
return -EINVAL;
}
-/*
- * Map an iov into an array of pages and offset/length tupples. With the
- * partial_page structure, we can map several non-contiguous ranges into
- * our ones pages[] map instead of splitting that operation into pieces.
- * Could easily be exported as a generic helper for other users, in which
- * case one would probably want to add a 'max_nr_pages' parameter as well.
- */
-static int get_iovec_page_array(const struct iovec __user *iov,
- unsigned int nr_vecs, struct page **pages,
- struct partial_page *partial, bool aligned,
+static int get_iovec_page_array(struct iov_iter *from,
+ struct page **pages,
+ struct partial_page *partial,
unsigned int pipe_buffers)
{
- int buffers = 0, error = 0;
-
- while (nr_vecs) {
- unsigned long off, npages;
- struct iovec entry;
- void __user *base;
- size_t len;
- int i;
-
- error = -EFAULT;
- if (copy_from_user(&entry, iov, sizeof(entry)))
- break;
-
- base = entry.iov_base;
- len = entry.iov_len;
-
- /*
- * Sanity check this iovec. 0 read succeeds.
- */
- error = 0;
- if (unlikely(!len))
- break;
- error = -EFAULT;
- if (!access_ok(VERIFY_READ, base, len))
- break;
-
- /*
- * Get this base offset and number of pages, then map
- * in the user pages.
- */
- off = (unsigned long) base & ~PAGE_MASK;
-
- /*
- * If asked for alignment, the offset must be zero and the
- * length a multiple of the PAGE_SIZE.
- */
- error = -EINVAL;
- if (aligned && (off || len & ~PAGE_MASK))
- break;
-
- npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
- if (npages > pipe_buffers - buffers)
- npages = pipe_buffers - buffers;
-
- error = get_user_pages_fast((unsigned long)base, npages,
- 0, &pages[buffers]);
-
- if (unlikely(error <= 0))
- break;
-
- /*
- * Fill this contiguous range into the partial page map.
- */
- for (i = 0; i < error; i++) {
- const int plen = min_t(size_t, len, PAGE_SIZE - off);
-
- partial[buffers].offset = off;
- partial[buffers].len = plen;
-
- off = 0;
- len -= plen;
+ int buffers = 0;
+ while (iov_iter_count(from)) {
+ ssize_t copied;
+ size_t start;
+
+ copied = iov_iter_get_pages(from, pages + buffers, ~0UL,
+ pipe_buffers - buffers, &start);
+ if (copied <= 0)
+ return buffers ? buffers : copied;
+
+ iov_iter_advance(from, copied);
+ while (copied) {
+ int size = min_t(int, copied, PAGE_SIZE - start);
+ partial[buffers].offset = start;
+ partial[buffers].len = size;
+ copied -= size;
+ start = 0;
buffers++;
}
-
- /*
- * We didn't complete this iov, stop here since it probably
- * means we have to move some of this into a pipe to
- * be able to continue.
- */
- if (len)
- break;
-
- /*
- * Don't continue if we mapped fewer pages than we asked for,
- * or if we mapped the max number of pages that we have
- * room for.
- */
- if (error < npages || buffers == pipe_buffers)
- break;
-
- nr_vecs--;
- iov++;
}
-
- if (buffers)
- return buffers;
-
- return error;
+ return buffers;
}
static int pipe_to_user(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
@@ -1590,10 +1499,13 @@ static long vmsplice_to_user(struct file *file, const struct iovec __user *uiov,
* as splice-from-memory, where the regular splice is splice-from-file (or
* to file). In both cases the output is a pipe, naturally.
*/
-static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
+static long vmsplice_to_pipe(struct file *file, const struct iovec __user *uiov,
unsigned long nr_segs, unsigned int flags)
{
struct pipe_inode_info *pipe;
+ struct iovec iovstack[UIO_FASTIOV];
+ struct iovec *iov = iovstack;
+ struct iov_iter from;
struct page *pages[PIPE_DEF_BUFFERS];
struct partial_page partial[PIPE_DEF_BUFFERS];
struct splice_pipe_desc spd = {
@@ -1610,18 +1522,32 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
if (!pipe)
return -EBADF;
- if (splice_grow_spd(pipe, &spd))
- return -ENOMEM;
+ ret = import_iovec(WRITE, uiov, nr_segs,
+ ARRAY_SIZE(iovstack), &iov, &from);
+ if (ret < 0)
+ return ret;
- spd.nr_pages = get_iovec_page_array(iov, nr_segs, spd.pages,
- spd.partial, false,
- spd.nr_pages_max);
- if (spd.nr_pages <= 0)
- ret = spd.nr_pages;
- else
- ret = splice_to_pipe(pipe, &spd);
+ if (splice_grow_spd(pipe, &spd)) {
+ kfree(iov);
+ return -ENOMEM;
+ }
+ pipe_lock(pipe);
+ ret = wait_for_space(pipe, flags);
+ if (!ret) {
+ spd.nr_pages = get_iovec_page_array(&from, spd.pages,
+ spd.partial,
+ spd.nr_pages_max);
+ if (spd.nr_pages <= 0)
+ ret = spd.nr_pages;
+ else
+ ret = splice_to_pipe(pipe, &spd);
+ }
+ pipe_unlock(pipe);
+ if (ret > 0)
+ wakeup_pipe_readers(pipe);
splice_shrink_spd(&spd);
+ kfree(iov);
return ret;
}
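After this refactor splice_to_pipe() neither takes the pipe lock nor sleeps waiting for room, so every caller follows the same sequence that do_splice() and vmsplice_to_pipe() use above: lock the pipe, call wait_for_space(), fill, unlock, then wake readers. A condensed sketch of that calling convention (the helper name is illustrative; wait_for_space() and wakeup_pipe_readers() are the splice.c internals shown in this diff):

static long fill_pipe_sketch(struct pipe_inode_info *pipe,
			     struct splice_pipe_desc *spd, unsigned int flags)
{
	long ret;

	pipe_lock(pipe);
	ret = wait_for_space(pipe, flags);	/* -EPIPE, -EAGAIN, -ERESTARTSYS or 0 */
	if (!ret)
		ret = splice_to_pipe(pipe, spd);	/* now fails fast when full */
	pipe_unlock(pipe);

	if (ret > 0)
		wakeup_pipe_readers(pipe);
	return ret;
}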
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index d3036be98027..8a590cd40be0 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -452,8 +452,6 @@ struct cgroup_subsys {
void (*css_free)(struct cgroup_subsys_state *css);
void (*css_reset)(struct cgroup_subsys_state *css);
- int (*allow_attach)(struct cgroup_subsys_state *css,
- struct cgroup_taskset *tset);
int (*can_attach)(struct cgroup_taskset *tset);
void (*cancel_attach)(struct cgroup_taskset *tset);
void (*attach)(struct cgroup_taskset *tset);
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index f3b356ee66d6..ca47b5d42764 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -640,17 +640,6 @@ static inline void cgroup_kthread_ready(void)
current->no_cgroup_migration = 0;
}
-/*
- * Default Android check for whether the current process is allowed to move a
- * task across cgroups, either because CAP_SYS_NICE is set or because the uid
- * of the calling process is the same as the moved task or because we are
- * running as root.
- * Returns 0 if this is allowed, or -EACCES otherwise.
- */
-int subsys_cgroup_allow_attach(struct cgroup_subsys_state *css,
- struct cgroup_taskset *tset);
-
-
#else /* !CONFIG_CGROUPS */
struct cgroup_subsys_state;
@@ -681,11 +670,6 @@ static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
static inline void cgroup_init_kthreadd(void) {}
static inline void cgroup_kthread_ready(void) {}
-static inline int subsys_cgroup_allow_attach(struct cgroup_subsys_state *css,
- struct cgroup_taskset *tset)
-{
- return 0;
-}
#endif /* !CONFIG_CGROUPS */
/*
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 1efce43b713b..22e9bd1cf047 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -53,6 +53,9 @@ struct bpf_prog_aux;
#define BPF_REG_AX MAX_BPF_REG
#define MAX_BPF_JIT_REG (MAX_BPF_REG + 1)
+/* unused opcode to mark special call to bpf_tail_call() helper */
+#define BPF_TAIL_CALL 0xf0
+
/* BPF program can access up to 512 bytes of stack space. */
#define MAX_BPF_STACK 512
@@ -648,6 +651,7 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
+void bpf_jit_compile(struct bpf_prog *prog);
bool bpf_helper_changes_skb_data(void *func);
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
@@ -668,7 +672,6 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
bpf_jit_fill_hole_t bpf_fill_ill_insns);
void bpf_jit_binary_free(struct bpf_binary_header *hdr);
-void bpf_jit_compile(struct bpf_prog *fp);
void bpf_jit_free(struct bpf_prog *fp);
struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
@@ -712,10 +715,6 @@ static inline bool bpf_jit_blinding_enabled(void)
return true;
}
#else
-static inline void bpf_jit_compile(struct bpf_prog *fp)
-{
-}
-
static inline void bpf_jit_free(struct bpf_prog *fp)
{
bpf_prog_unlock_free(fp);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 42ac99e898a4..5105b5be5e68 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2930,7 +2930,8 @@ extern int simple_open(struct inode *inode, struct file *file);
extern int simple_link(struct dentry *, struct inode *, struct dentry *);
extern int simple_unlink(struct inode *, struct dentry *);
extern int simple_rmdir(struct inode *, struct dentry *);
-extern int simple_rename(struct inode *, struct dentry *, struct inode *, struct dentry *);
+extern int simple_rename(struct inode *, struct dentry *,
+ struct inode *, struct dentry *, unsigned int);
extern int noop_fsync(struct file *, loff_t, loff_t, int);
extern int simple_empty(struct dentry *);
extern int simple_readpage(struct file *file, struct page *page);
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 3b0ad4479725..e709a4cf8999 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -737,6 +737,7 @@ struct hid_driver {
* @raw_request: send raw report request to device (e.g. feature report)
* @output_report: send output report to device
* @idle: send idle request to device
+ * @max_buffer_size: over-ride maximum data buffer size (default: HID_MAX_BUFFER_SIZE)
*/
struct hid_ll_driver {
int (*start)(struct hid_device *hdev);
@@ -761,6 +762,8 @@ struct hid_ll_driver {
int (*output_report) (struct hid_device *hdev, __u8 *buf, size_t len);
int (*idle)(struct hid_device *hdev, int report, int idle, int reqtype);
+
+ unsigned int max_buffer_size;
};
extern struct hid_ll_driver i2c_hid_ll_driver;
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 40429b818b45..6c4f2bd8aaa7 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -75,7 +75,7 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
/* found in socket.c */
extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
-static inline bool is_vlan_dev(struct net_device *dev)
+static inline bool is_vlan_dev(const struct net_device *dev)
{
return dev->priv_flags & IFF_802_1Q_VLAN;
}
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index e6530c32f211..a46f138a8a8d 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -939,7 +939,7 @@
* @xfrm_state_pol_flow_match:
* @x contains the state to match.
* @xp contains the policy to check for a match.
- * @fl contains the flow to check for a match.
+ * @flic contains the flowi_common struct to check for a match.
* Return 1 if there is a match.
* @xfrm_decode_session:
* @skb points to skb to decode.
@@ -1604,7 +1604,7 @@ union security_list_options {
void (*secmark_refcount_inc)(void);
void (*secmark_refcount_dec)(void);
void (*req_classify_flow)(const struct request_sock *req,
- struct flowi *fl);
+ struct flowi_common *flic);
int (*tun_dev_alloc_security)(void **security);
void (*tun_dev_free_security)(void *security);
int (*tun_dev_create)(void);
@@ -1632,7 +1632,7 @@ union security_list_options {
u8 dir);
int (*xfrm_state_pol_flow_match)(struct xfrm_state *x,
struct xfrm_policy *xp,
- const struct flowi *fl);
+					const struct flowi_common *flic);
int (*xfrm_decode_session)(struct sk_buff *skb, u32 *secid, int ckall);
#endif /* CONFIG_SECURITY_NETWORK_XFRM */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 6fdf0f9e1254..18d0a5beac0c 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -60,6 +60,7 @@ struct wireless_dev;
/* 802.15.4 specific */
struct wpan_dev;
struct mpls_dev;
+struct bpf_prog;
void netdev_set_default_ethtool_ops(struct net_device *dev,
const struct ethtool_ops *ops);
@@ -777,6 +778,34 @@ static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
struct sk_buff *skb);
+/* These structures hold the attributes of xdp state that are being passed
+ * to the netdevice through the xdp op.
+ */
+enum xdp_netdev_command {
+ /* Set or clear a bpf program used in the earliest stages of packet
+ * rx. The prog will have been loaded as BPF_PROG_TYPE_XDP. The callee
+ * is responsible for calling bpf_prog_put on any old progs that are
+ * stored. In case of error, the callee need not release the new prog
+ * reference, but on success it takes ownership and must bpf_prog_put
+ * when it is no longer used.
+ */
+ XDP_SETUP_PROG,
+ /* Check if a bpf program is set on the device. The callee should
+ * return true if a program is currently attached and running.
+ */
+ XDP_QUERY_PROG,
+};
+
+struct netdev_xdp {
+ enum xdp_netdev_command command;
+ union {
+ /* XDP_SETUP_PROG */
+ struct bpf_prog *prog;
+ /* XDP_QUERY_PROG */
+ bool prog_attached;
+ };
+};
+
/*
* This structure defines the management hooks for network devices.
* The following hooks can be defined; unless noted otherwise, they are
@@ -1059,6 +1088,15 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* This function is used to get egress tunnel information for given skb.
* This is useful for retrieving outer tunnel header parameters while
* sampling packet.
+ * void (*ndo_set_rx_headroom)(struct net_device *dev, int needed_headroom);
+ * This function is used to specify the headroom that the skb must
+ * consider when allocating an skb during packet reception. Setting an
+ * appropriate rx headroom value allows avoiding skb head copy on
+ * forward. Setting a negative value resets the rx headroom to the
+ * default value.
+ * int (*ndo_xdp)(struct net_device *dev, struct netdev_xdp *xdp);
+ * This function is used to set or query state related to XDP on the
+ * netdevice. See definition of enum xdp_netdev_command for details.
*
*/
struct net_device_ops {
@@ -1236,6 +1274,10 @@ struct net_device_ops {
bool proto_down);
int (*ndo_fill_metadata_dst)(struct net_device *dev,
struct sk_buff *skb);
+ void (*ndo_set_rx_headroom)(struct net_device *dev,
+ int needed_headroom);
+ int (*ndo_xdp)(struct net_device *dev,
+ struct netdev_xdp *xdp);
};
/**
@@ -1271,6 +1313,10 @@ struct net_device_ops {
* @IFF_NO_QUEUE: device can run without qdisc attached
* @IFF_OPENVSWITCH: device is a Open vSwitch master
* @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device
+ * @IFF_TEAM: device is a team device
+ * @IFF_RXFH_CONFIGURED: device has had Rx Flow indirection table configured
+ * @IFF_PHONY_HEADROOM: the headroom value is controlled by an external
+ * entity (i.e. the master device for bridged veth)
*/
enum netdev_priv_flags {
IFF_802_1Q_VLAN = 1<<0,
@@ -1297,6 +1343,9 @@ enum netdev_priv_flags {
IFF_NO_QUEUE = 1<<21,
IFF_OPENVSWITCH = 1<<22,
IFF_L3MDEV_SLAVE = 1<<23,
+ IFF_TEAM = 1<<24,
+ IFF_RXFH_CONFIGURED = 1<<25,
+ IFF_PHONY_HEADROOM = 1<<26,
};
#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
@@ -1323,6 +1372,8 @@ enum netdev_priv_flags {
#define IFF_NO_QUEUE IFF_NO_QUEUE
#define IFF_OPENVSWITCH IFF_OPENVSWITCH
#define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE
+#define IFF_TEAM IFF_TEAM
+#define IFF_RXFH_CONFIGURED IFF_RXFH_CONFIGURED
/**
* struct net_device - The DEVICE structure.
@@ -1890,6 +1941,26 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
struct sk_buff *skb,
void *accel_priv);
+/* returns the headroom that the master device needs to take into account
+ * when forwarding to this dev
+ */
+static inline unsigned netdev_get_fwd_headroom(struct net_device *dev)
+{
+ return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom;
+}
+
+static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr)
+{
+ if (dev->netdev_ops->ndo_set_rx_headroom)
+ dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr);
+}
+
+/* set the device rx headroom to the dev's default */
+static inline void netdev_reset_rx_headroom(struct net_device *dev)
+{
+ netdev_set_rx_headroom(dev, -1);
+}
+
/*
* Net namespace inlines
*/
@@ -3131,6 +3202,7 @@ int dev_get_phys_port_id(struct net_device *dev,
int dev_get_phys_port_name(struct net_device *dev,
char *name, size_t len);
int dev_change_proto_down(struct net_device *dev, bool proto_down);
+int dev_change_xdp_fd(struct net_device *dev, int fd);
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, int *ret);
@@ -3717,7 +3789,7 @@ extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN];
void netdev_rss_key_fill(void *buffer, size_t len);
int dev_get_nest_level(struct net_device *dev,
- bool (*type_check)(struct net_device *dev));
+ bool (*type_check)(const struct net_device *dev));
int skb_checksum_help(struct sk_buff *skb);
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
netdev_features_t features, bool tx_path);
@@ -3914,32 +3986,32 @@ static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
skb->mac_len = mac_len;
}
-static inline bool netif_is_macvlan(struct net_device *dev)
+static inline bool netif_is_macvlan(const struct net_device *dev)
{
return dev->priv_flags & IFF_MACVLAN;
}
-static inline bool netif_is_macvlan_port(struct net_device *dev)
+static inline bool netif_is_macvlan_port(const struct net_device *dev)
{
return dev->priv_flags & IFF_MACVLAN_PORT;
}
-static inline bool netif_is_ipvlan(struct net_device *dev)
+static inline bool netif_is_ipvlan(const struct net_device *dev)
{
return dev->priv_flags & IFF_IPVLAN_SLAVE;
}
-static inline bool netif_is_ipvlan_port(struct net_device *dev)
+static inline bool netif_is_ipvlan_port(const struct net_device *dev)
{
return dev->priv_flags & IFF_IPVLAN_MASTER;
}
-static inline bool netif_is_bond_master(struct net_device *dev)
+static inline bool netif_is_bond_master(const struct net_device *dev)
{
return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
}
-static inline bool netif_is_bond_slave(struct net_device *dev)
+static inline bool netif_is_bond_slave(const struct net_device *dev)
{
return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
}
@@ -3974,6 +4046,31 @@ static inline bool netif_is_ovs_master(const struct net_device *dev)
return dev->priv_flags & IFF_OPENVSWITCH;
}
+static inline bool netif_is_team_master(const struct net_device *dev)
+{
+ return dev->priv_flags & IFF_TEAM;
+}
+
+static inline bool netif_is_team_port(const struct net_device *dev)
+{
+ return dev->priv_flags & IFF_TEAM_PORT;
+}
+
+static inline bool netif_is_lag_master(const struct net_device *dev)
+{
+ return netif_is_bond_master(dev) || netif_is_team_master(dev);
+}
+
+static inline bool netif_is_lag_port(const struct net_device *dev)
+{
+ return netif_is_bond_slave(dev) || netif_is_team_port(dev);
+}
+
+static inline bool netif_is_rxfh_configured(const struct net_device *dev)
+{
+ return dev->priv_flags & IFF_RXFH_CONFIGURED;
+}
+
/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
static inline void netif_keep_dst(struct net_device *dev)
{
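For the new ndo_xdp hook and netdev_xdp command structure declared above, a driver-side handler would dispatch on the command and honour the ownership rules spelled out in the XDP_SETUP_PROG comment (drop any old prog, keep the new reference on success). A hedged sketch; the private structure and names are made up, and a real driver would typically publish the program with RCU:

struct example_priv {
	struct bpf_prog *xdp_prog;
};

static int example_ndo_xdp(struct net_device *dev, struct netdev_xdp *xdp)
{
	struct example_priv *priv = netdev_priv(dev);
	struct bpf_prog *old;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		old = priv->xdp_prog;
		priv->xdp_prog = xdp->prog;	/* take ownership of the new prog */
		if (old)
			bpf_prog_put(old);	/* callee must release any old prog */
		return 0;
	case XDP_QUERY_PROG:
		xdp->prog_attached = !!priv->xdp_prog;
		return 0;
	default:
		return -EINVAL;
	}
}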
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4e212132a274..50f7ad30d9c7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -170,6 +170,7 @@ extern int nr_threads;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
+extern bool cpu_has_rt_task(int cpu);
extern bool single_task_running(void);
extern unsigned long nr_iowait(void);
extern unsigned long nr_iowait_cpu(int cpu);
diff --git a/include/linux/security.h b/include/linux/security.h
index 773b33fec14c..2800ef53c424 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -102,7 +102,7 @@ struct sk_buff;
struct sock;
struct sockaddr;
struct socket;
-struct flowi;
+struct flowi_common;
struct dst_entry;
struct xfrm_selector;
struct xfrm_policy;
@@ -1148,8 +1148,9 @@ int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u
int security_sk_alloc(struct sock *sk, int family, gfp_t priority);
void security_sk_free(struct sock *sk);
void security_sk_clone(const struct sock *sk, struct sock *newsk);
-void security_sk_classify_flow(struct sock *sk, struct flowi *fl);
-void security_req_classify_flow(const struct request_sock *req, struct flowi *fl);
+void security_sk_classify_flow(struct sock *sk, struct flowi_common *flic);
+void security_req_classify_flow(const struct request_sock *req,
+ struct flowi_common *flic);
void security_sock_graft(struct sock*sk, struct socket *parent);
int security_inet_conn_request(struct sock *sk,
struct sk_buff *skb, struct request_sock *req);
@@ -1289,11 +1290,13 @@ static inline void security_sk_clone(const struct sock *sk, struct sock *newsk)
{
}
-static inline void security_sk_classify_flow(struct sock *sk, struct flowi *fl)
+static inline void security_sk_classify_flow(struct sock *sk,
+ struct flowi_common *flic)
{
}
-static inline void security_req_classify_flow(const struct request_sock *req, struct flowi *fl)
+static inline void security_req_classify_flow(const struct request_sock *req,
+ struct flowi_common *flic)
{
}
@@ -1375,9 +1378,9 @@ void security_xfrm_state_free(struct xfrm_state *x);
int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir);
int security_xfrm_state_pol_flow_match(struct xfrm_state *x,
struct xfrm_policy *xp,
- const struct flowi *fl);
+ const struct flowi_common *flic);
int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid);
-void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl);
+void security_skb_classify_flow(struct sk_buff *skb, struct flowi_common *flic);
#else /* CONFIG_SECURITY_NETWORK_XFRM */
@@ -1429,7 +1432,8 @@ static inline int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_s
}
static inline int security_xfrm_state_pol_flow_match(struct xfrm_state *x,
- struct xfrm_policy *xp, const struct flowi *fl)
+ struct xfrm_policy *xp,
+ const struct flowi_common *flic)
{
return 1;
}
@@ -1439,7 +1443,8 @@ static inline int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid)
return 0;
}
-static inline void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl)
+static inline void security_skb_classify_flow(struct sk_buff *skb,
+ struct flowi_common *flic)
{
}
diff --git a/include/linux/string.h b/include/linux/string.h
index 1a9589a5ace6..9f745d7e9f3f 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -204,4 +204,7 @@ static inline const char *kbasename(const char *path)
return tail ? tail + 1 : path;
}
+void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
+ int pad);
+
#endif /* _LINUX_STRING_H_ */
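memcpy_and_pad() is only declared here; the behaviour its callers expect is to copy at most count bytes from src into a dest buffer of dest_len bytes and fill any remaining space with the pad byte. The function below is a hedged userspace re-statement of those semantics for reference, not the kernel's own implementation.

#include <string.h>

/* Reference sketch of the expected semantics. */
static void memcpy_and_pad_sketch(void *dest, size_t dest_len,
				  const void *src, size_t count, int pad)
{
	if (dest_len > count) {
		memcpy(dest, src, count);
		memset((char *)dest + count, pad, dest_len - count);
	} else {
		memcpy(dest, src, dest_len);
	}
}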
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index ecbd6ad90e2e..25f861ecb195 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -11,9 +11,6 @@
#include <linux/bug.h>
#include <linux/restart_block.h>
-struct timespec;
-struct compat_timespec;
-
#ifdef CONFIG_THREAD_INFO_IN_TASK
/*
* For CONFIG_THREAD_INFO_IN_TASK kernels we need <asm/current.h> for the
diff --git a/include/linux/tty_flip.h b/include/linux/tty_flip.h
index d43837f2ce3a..7a97606ddb36 100644
--- a/include/linux/tty_flip.h
+++ b/include/linux/tty_flip.h
@@ -39,4 +39,7 @@ static inline int tty_insert_flip_string(struct tty_port *port,
extern void tty_buffer_lock_exclusive(struct tty_port *port);
extern void tty_buffer_unlock_exclusive(struct tty_port *port);
+int tty_insert_flip_string_and_push_buffer(struct tty_port *port,
+ const unsigned char *chars, size_t cnt);
+
#endif /* _LINUX_TTY_FLIP_H */
diff --git a/include/linux/usb.h b/include/linux/usb.h
index f404d0f28a1c..8ccb8ebb16aa 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -541,6 +541,7 @@ struct usb3_lpm_parameters {
* @level: number of USB hub ancestors
* @can_submit: URBs may be submitted
* @persist_enabled: USB_PERSIST enabled for this device
+ * @reset_in_progress: the device is being reset
* @have_langid: whether string_langid is valid
* @authorized: policy has said we can use it;
* (user space) policy determines if we authorize this device to be
@@ -620,6 +621,7 @@ struct usb_device {
unsigned can_submit:1;
unsigned persist_enabled:1;
+ unsigned reset_in_progress:1;
unsigned have_langid:1;
unsigned authorized:1;
unsigned authenticated:1;
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 5c0e1954647e..2e11af8cdb02 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -4732,7 +4732,8 @@ void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
* cfg80211 then sends a notification to userspace.
*/
void cfg80211_notify_new_peer_candidate(struct net_device *dev,
- const u8 *macaddr, const u8 *ie, u8 ie_len, gfp_t gfp);
+ const u8 *macaddr, const u8 *ie, u8 ie_len,
+ int sig_dbm, gfp_t gfp);
/**
* DOC: RFkill integration
diff --git a/include/net/flow.h b/include/net/flow.h
index 833080732dec..2dae98019fb2 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -191,11 +191,21 @@ static inline struct flowi *flowi4_to_flowi(struct flowi4 *fl4)
return container_of(fl4, struct flowi, u.ip4);
}
+static inline struct flowi_common *flowi4_to_flowi_common(struct flowi4 *fl4)
+{
+ return &(fl4->__fl_common);
+}
+
static inline struct flowi *flowi6_to_flowi(struct flowi6 *fl6)
{
return container_of(fl6, struct flowi, u.ip6);
}
+static inline struct flowi_common *flowi6_to_flowi_common(struct flowi6 *fl6)
+{
+ return &(fl6->__fl_common);
+}
+
static inline struct flowi *flowidn_to_flowi(struct flowidn *fldn)
{
return container_of(fldn, struct flowi, u.dn);
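The two accessors added above return the flowi_common embedded in an IPv4/IPv6 flow key, which is the type the reworked security_*_classify_flow() hooks now take (see the security.h and route.h hunks). A minimal hedged sketch of the IPv6 counterpart of the route.h usage (the wrapper function name is illustrative):

/* Illustrative sketch: label an IPv6 flow via the common flow key. */
static void example_classify_v6(struct sock *sk, struct flowi6 *fl6)
{
	security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6));
}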
diff --git a/include/net/route.h b/include/net/route.h
index f828a9d83012..0672dac4042c 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -157,7 +157,7 @@ static inline struct rtable *ip_route_output_ports(struct net *net, struct flowi
sk ? inet_sk_flowi_flags(sk) : 0,
daddr, saddr, dport, sport, sock_net_uid(net, sk));
if (sk)
- security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
+ security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4));
return ip_route_output_flow(net, fl4, sk);
}
@@ -303,7 +303,7 @@ static inline struct rtable *ip_route_connect(struct flowi4 *fl4,
ip_rt_put(rt);
flowi4_update_output(fl4, oif, tos, fl4->daddr, fl4->saddr);
}
- security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
+ security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4));
return ip_route_output_flow(net, fl4, sk);
}
@@ -319,7 +319,7 @@ static inline struct rtable *ip_route_newports(struct flowi4 *fl4, struct rtable
flowi4_update_output(fl4, sk->sk_bound_dev_if,
RT_CONN_FLAGS(sk), fl4->daddr,
fl4->saddr);
- security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
+ security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4));
return ip_route_output_flow(sock_net(sk), fl4, sk);
}
return rt;
diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h
index afffa756357a..3e0171ea47f3 100644
--- a/include/sound/rawmidi.h
+++ b/include/sound/rawmidi.h
@@ -76,7 +76,6 @@ struct snd_rawmidi_runtime {
size_t avail_min; /* min avail for wakeup */
size_t avail; /* max used buffer for wakeup */
size_t xruns; /* over/underruns counter */
- int buffer_ref; /* buffer reference count */
/* misc */
spinlock_t lock;
struct mutex realloc_mutex;
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 70d6012c89aa..f72832a749d6 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -1649,9 +1649,9 @@ TRACE_EVENT(sched_boost_cpu,
TRACE_EVENT(sched_tune_tasks_update,
TP_PROTO(struct task_struct *tsk, int cpu, int tasks, int idx,
- int boost, int max_boost),
+ int boost, int max_boost, u64 group_ts),
- TP_ARGS(tsk, cpu, tasks, idx, boost, max_boost),
+ TP_ARGS(tsk, cpu, tasks, idx, boost, max_boost, group_ts),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
@@ -1661,6 +1661,7 @@ TRACE_EVENT(sched_tune_tasks_update,
__field( int, idx )
__field( int, boost )
__field( int, max_boost )
+ __field( u64, group_ts )
),
TP_fast_assign(
@@ -1671,13 +1672,15 @@ TRACE_EVENT(sched_tune_tasks_update,
__entry->idx = idx;
__entry->boost = boost;
__entry->max_boost = max_boost;
+ __entry->group_ts = group_ts;
),
TP_printk("pid=%d comm=%s "
- "cpu=%d tasks=%d idx=%d boost=%d max_boost=%d",
+ "cpu=%d tasks=%d idx=%d boost=%d max_boost=%d timeout=%llu",
__entry->pid, __entry->comm,
__entry->cpu, __entry->tasks, __entry->idx,
- __entry->boost, __entry->max_boost)
+ __entry->boost, __entry->max_boost,
+ __entry->group_ts)
);
/*
@@ -1778,63 +1781,6 @@ TRACE_EVENT(sched_find_best_target,
);
/*
- * Tracepoint for accounting sched group energy
- */
-TRACE_EVENT(sched_energy_diff,
-
- TP_PROTO(struct task_struct *tsk, int scpu, int dcpu, int udelta,
- int nrgb, int nrga, int nrgd, int capb, int capa, int capd,
- int nrgn, int nrgp),
-
- TP_ARGS(tsk, scpu, dcpu, udelta,
- nrgb, nrga, nrgd, capb, capa, capd,
- nrgn, nrgp),
-
- TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
- __field( pid_t, pid )
- __field( int, scpu )
- __field( int, dcpu )
- __field( int, udelta )
- __field( int, nrgb )
- __field( int, nrga )
- __field( int, nrgd )
- __field( int, capb )
- __field( int, capa )
- __field( int, capd )
- __field( int, nrgn )
- __field( int, nrgp )
- ),
-
- TP_fast_assign(
- memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
- __entry->pid = tsk->pid;
- __entry->scpu = scpu;
- __entry->dcpu = dcpu;
- __entry->udelta = udelta;
- __entry->nrgb = nrgb;
- __entry->nrga = nrga;
- __entry->nrgd = nrgd;
- __entry->capb = capb;
- __entry->capa = capa;
- __entry->capd = capd;
- __entry->nrgn = nrgn;
- __entry->nrgp = nrgp;
- ),
-
- TP_printk("pid=%d comm=%s "
- "src_cpu=%d dst_cpu=%d usage_delta=%d "
- "nrg_before=%d nrg_after=%d nrg_diff=%d "
- "cap_before=%d cap_after=%d cap_delta=%d "
- "nrg_delta=%d nrg_payoff=%d",
- __entry->pid, __entry->comm,
- __entry->scpu, __entry->dcpu, __entry->udelta,
- __entry->nrgb, __entry->nrga, __entry->nrgd,
- __entry->capb, __entry->capa, __entry->capd,
- __entry->nrgn, __entry->nrgp)
-);
-
-/*
* Tracepoint for schedtune_tasks_update
*/
TRACE_EVENT(sched_tune_filter,
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index d01217407d6d..6d9443d45fa2 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -142,10 +142,10 @@ static inline size_t __trace_wb_cgroup_size(struct bdi_writeback *wb)
static inline void __trace_wb_assign_cgroup(char *buf, struct bdi_writeback *wb)
{
struct cgroup *cgrp = wb->memcg_css->cgroup;
- char *path;
+ int len;
- path = cgroup_path(cgrp, buf, kernfs_path_len(cgrp->kn) + 1);
- WARN_ON_ONCE(path != buf);
+ len = cgroup_path(cgrp, buf, kernfs_path_len(cgrp->kn) + 1);
+ WARN_ON_ONCE(len < 0);
}
static inline size_t __trace_wbc_cgroup_size(struct writeback_control *wbc)
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index bbbf36a963e6..4f415fcb2006 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -30,9 +30,14 @@
#define BPF_FROM_LE BPF_TO_LE
#define BPF_FROM_BE BPF_TO_BE
+/* jmp encodings */
#define BPF_JNE 0x50 /* jump != */
+#define BPF_JLT 0xa0 /* LT is unsigned, '<' */
+#define BPF_JLE 0xb0 /* LE is unsigned, '<=' */
#define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */
#define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */
+#define BPF_JSLT 0xc0 /* SLT is signed, '<' */
+#define BPF_JSLE 0xd0 /* SLE is signed, '<=' */
#define BPF_CALL 0x80 /* function call */
#define BPF_EXIT 0x90 /* function return */
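The opcode byte of an eBPF instruction is the instruction class ORed with the operation and the source selector, so the new lower-than jumps compose exactly like the existing ones. A small userspace sketch (assuming installed UAPI headers that already carry the new defines):

#include <linux/bpf.h>
#include <stdio.h>

int main(void)
{
	/* class BPF_JMP (0x05) | op BPF_JLT (0xa0) | src BPF_K (0x00) */
	unsigned int code = BPF_JMP | BPF_JLT | BPF_K;

	printf("BPF_JMP|BPF_JLT|BPF_K = 0x%02x\n", code);	/* 0xa5 */
	return 0;
}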
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
index 7a89b7b62ab8..f6a44010e84a 100644
--- a/include/uapi/linux/input.h
+++ b/include/uapi/linux/input.h
@@ -181,7 +181,6 @@ struct input_mask {
#define INPUT_PROP_SEMI_MT 0x03 /* touch rectangle only */
#define INPUT_PROP_TOPBUTTONPAD 0x04 /* softbuttons at top of pad */
#define INPUT_PROP_POINTING_STICK 0x05 /* is a pointing stick */
-#define INPUT_PROP_NO_DUMMY_RELEASE 0x06 /* no dummy event */
#define INPUT_PROP_MAX 0x1f
#define INPUT_PROP_CNT (INPUT_PROP_MAX + 1)
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 2b1a925489cf..d5c9dcfe9324 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -373,9 +373,13 @@ static int bpf_jit_blind_insn(const struct bpf_insn *from,
case BPF_JMP | BPF_JEQ | BPF_K:
case BPF_JMP | BPF_JNE | BPF_K:
case BPF_JMP | BPF_JGT | BPF_K:
+ case BPF_JMP | BPF_JLT | BPF_K:
case BPF_JMP | BPF_JGE | BPF_K:
+ case BPF_JMP | BPF_JLE | BPF_K:
case BPF_JMP | BPF_JSGT | BPF_K:
+ case BPF_JMP | BPF_JSLT | BPF_K:
case BPF_JMP | BPF_JSGE | BPF_K:
+ case BPF_JMP | BPF_JSLE | BPF_K:
case BPF_JMP | BPF_JSET | BPF_K:
/* Accommodate for extra offset in case of a backjump. */
off = from->off;
@@ -604,7 +608,7 @@ static unsigned int __bpf_prog_run(const struct sk_buff *ctx, const struct bpf_i
[BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
/* Call instruction */
[BPF_JMP | BPF_CALL] = &&JMP_CALL,
- [BPF_JMP | BPF_CALL | BPF_X] = &&JMP_TAIL_CALL,
+ [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
/* Jumps */
[BPF_JMP | BPF_JA] = &&JMP_JA,
[BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
@@ -613,12 +617,20 @@ static unsigned int __bpf_prog_run(const struct sk_buff *ctx, const struct bpf_i
[BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
[BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
[BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
+ [BPF_JMP | BPF_JLT | BPF_X] = &&JMP_JLT_X,
+ [BPF_JMP | BPF_JLT | BPF_K] = &&JMP_JLT_K,
[BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
[BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
+ [BPF_JMP | BPF_JLE | BPF_X] = &&JMP_JLE_X,
+ [BPF_JMP | BPF_JLE | BPF_K] = &&JMP_JLE_K,
[BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
[BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
+ [BPF_JMP | BPF_JSLT | BPF_X] = &&JMP_JSLT_X,
+ [BPF_JMP | BPF_JSLT | BPF_K] = &&JMP_JSLT_K,
[BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
[BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
+ [BPF_JMP | BPF_JSLE | BPF_X] = &&JMP_JSLE_X,
+ [BPF_JMP | BPF_JSLE | BPF_K] = &&JMP_JSLE_K,
[BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
[BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
/* Program return */
@@ -856,6 +868,18 @@ out:
CONT_JMP;
}
CONT;
+ JMP_JLT_X:
+ if (DST < SRC) {
+ insn += insn->off;
+ CONT_JMP;
+ }
+ CONT;
+ JMP_JLT_K:
+ if (DST < IMM) {
+ insn += insn->off;
+ CONT_JMP;
+ }
+ CONT;
JMP_JGE_X:
if (DST >= SRC) {
insn += insn->off;
@@ -868,6 +892,18 @@ out:
CONT_JMP;
}
CONT;
+ JMP_JLE_X:
+ if (DST <= SRC) {
+ insn += insn->off;
+ CONT_JMP;
+ }
+ CONT;
+ JMP_JLE_K:
+ if (DST <= IMM) {
+ insn += insn->off;
+ CONT_JMP;
+ }
+ CONT;
JMP_JSGT_X:
if (((s64) DST) > ((s64) SRC)) {
insn += insn->off;
@@ -880,6 +916,18 @@ out:
CONT_JMP;
}
CONT;
+ JMP_JSLT_X:
+ if (((s64) DST) < ((s64) SRC)) {
+ insn += insn->off;
+ CONT_JMP;
+ }
+ CONT;
+ JMP_JSLT_K:
+ if (((s64) DST) < ((s64) IMM)) {
+ insn += insn->off;
+ CONT_JMP;
+ }
+ CONT;
JMP_JSGE_X:
if (((s64) DST) >= ((s64) SRC)) {
insn += insn->off;
@@ -892,6 +940,18 @@ out:
CONT_JMP;
}
CONT;
+ JMP_JSLE_X:
+ if (((s64) DST) <= ((s64) SRC)) {
+ insn += insn->off;
+ CONT_JMP;
+ }
+ CONT;
+ JMP_JSLE_K:
+ if (((s64) DST) <= ((s64) IMM)) {
+ insn += insn->off;
+ CONT_JMP;
+ }
+ CONT;
JMP_JSET_X:
if (DST & SRC) {
insn += insn->off;
@@ -1283,12 +1343,22 @@ const struct bpf_func_proto bpf_tail_call_proto = {
.arg3_type = ARG_ANYTHING,
};
-/* For classic BPF JITs that don't implement bpf_int_jit_compile(). */
+/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
+ * It is encouraged to implement bpf_int_jit_compile() instead, so that
+ * eBPF and implicitly also cBPF can get JITed!
+ */
struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
{
return prog;
}
+/* Stub for JITs that support eBPF. All cBPF code gets transformed into
+ * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
+ */
+void __weak bpf_jit_compile(struct bpf_prog *prog)
+{
+}
+
bool __weak bpf_helper_changes_skb_data(void *func)
{
return false;
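In the interpreter hunks above, JLT/JLE mirror JGT/JGE with the comparison direction flipped, while JSLT/JSLE reinterpret the 64-bit registers as signed before comparing. The standalone check below (plain C, not interpreter code) shows why the signed and unsigned flavours can disagree:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dst = (uint64_t)-1;	/* 0xffffffffffffffff */
	uint64_t imm = 5;

	/* BPF_JLT semantics: unsigned, so ~0 is the largest value */
	printf("unsigned dst < imm: %d\n", dst < imm);			/* 0 */
	/* BPF_JSLT semantics: signed, so the same bits mean -1 */
	printf("signed   dst < imm: %d\n", (int64_t)dst < (int64_t)imm);	/* 1 */
	return 0;
}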
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index 5a52c25ba18a..62c4ab07a7ac 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -189,7 +189,7 @@ static const struct inode_operations bpf_dir_iops = {
.mknod = bpf_mkobj,
.mkdir = bpf_mkdir,
.rmdir = simple_rmdir,
- .rename = simple_rename,
+ .rename2 = simple_rename,
.link = simple_link,
.unlink = simple_unlink,
};
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 78bdfbefd996..0bdb7c1b558d 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -3496,7 +3496,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
* that doesn't support bpf_tail_call yet
*/
insn->imm = 0;
- insn->code |= BPF_X;
+ insn->code = BPF_JMP | BPF_TAIL_CALL;
/* instead of changing every JIT dealing with tail_call
* emit two extra insns:
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index c2508ca442b7..64a3b3db2484 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2848,44 +2848,6 @@ static int cgroup_attach_task(struct cgroup *dst_cgrp,
return ret;
}
-int subsys_cgroup_allow_attach(struct cgroup_subsys_state *css, struct cgroup_taskset *tset)
-{
- const struct cred *cred = current_cred(), *tcred;
- struct task_struct *task;
-
- if (capable(CAP_SYS_NICE))
- return 0;
-
- cgroup_taskset_for_each(task, css, tset) {
- tcred = __task_cred(task);
-
- if (current != task && !uid_eq(cred->euid, tcred->uid) &&
- !uid_eq(cred->euid, tcred->suid))
- return -EACCES;
- }
-
- return 0;
-}
-
-static int cgroup_allow_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
-{
- struct cgroup_subsys_state *css;
- int i;
- int ret;
-
- for_each_css(css, i, cgrp) {
- if (css->ss->allow_attach) {
- ret = css->ss->allow_attach(css, tset);
- if (ret)
- return ret;
- } else {
- return -EACCES;
- }
- }
-
- return 0;
-}
-
static int cgroup_procs_write_permission(struct task_struct *task,
struct cgroup *dst_cgrp,
struct kernfs_open_file *of)
@@ -2901,23 +2863,8 @@ static int cgroup_procs_write_permission(struct task_struct *task,
if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
!uid_eq(cred->euid, tcred->uid) &&
!uid_eq(cred->euid, tcred->suid) &&
- !ns_capable(tcred->user_ns, CAP_SYS_NICE)) {
- /*
- * if the default permission check fails, give each
- * cgroup a chance to extend the permission check
- */
- struct cgroup_taskset tset = {
- .src_csets = LIST_HEAD_INIT(tset.src_csets),
- .dst_csets = LIST_HEAD_INIT(tset.dst_csets),
- .csets = &tset.src_csets,
- };
- struct css_set *cset;
- cset = task_css_set(task);
- list_add(&cset->mg_node, &tset.src_csets);
- ret = cgroup_allow_attach(dst_cgrp, &tset);
- if (ret)
- ret = -EACCES;
- }
+ !ns_capable(tcred->user_ns, CAP_SYS_NICE))
+ ret = -EACCES;
if (!ret && cgroup_on_dfl(dst_cgrp)) {
struct super_block *sb = of->file->f_path.dentry->d_sb;
diff --git a/kernel/compat.c b/kernel/compat.c
index 333d364be29d..fd31231bab25 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -644,7 +644,7 @@ COMPAT_SYSCALL_DEFINE3(sched_getaffinity, compat_pid_t, pid, unsigned int, len,
if (len & (sizeof(compat_ulong_t)-1))
return -EINVAL;
- if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
return -ENOMEM;
ret = sched_getaffinity(pid, mask);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a8d2c50737ee..f17cfe5a313f 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1170,13 +1170,10 @@ struct migration_arg {
*/
static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int dest_cpu)
{
- int src_cpu;
-
/* Affinity changed (again). */
if (!is_cpu_allowed(p, dest_cpu))
return rq;
- src_cpu = cpu_of(rq);
rq = move_queued_task(rq, p, dest_cpu);
return rq;
@@ -5064,14 +5061,14 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
if (len & (sizeof(unsigned long)-1))
return -EINVAL;
- if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
return -ENOMEM;
ret = sched_getaffinity(pid, mask);
if (ret == 0) {
size_t retlen = min_t(size_t, len, cpumask_size());
- if (copy_to_user(user_mask_ptr, mask, retlen))
+ if (copy_to_user(user_mask_ptr, cpumask_bits(mask), retlen))
ret = -EFAULT;
else
ret = retlen;
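The sched_getaffinity() hunk above pairs zalloc_cpumask_var() with copying from cpumask_bits(mask): the zeroed allocation guarantees that bytes beyond the bits actually written by sched_getaffinity() are 0 rather than stale allocator contents, and cpumask_bits() copies from the underlying longs array instead of the struct wrapper. A small userspace analogy of the zero-fill point (all sizes here are made up):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t mask_bytes = 1024 / 8;			/* e.g. NR_CPUS=1024 */
	unsigned char *mask = calloc(1, mask_bytes);	/* zalloc analogue */

	if (!mask)
		return 1;
	mask[0] = 0x0f;		/* only CPUs 0-3 are set by the "kernel" */
	/* Every byte copied out beyond that is guaranteed to be zero. */
	printf("last byte: %u\n", mask[mask_bytes - 1]);
	free(mask);
	return 0;
}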
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 6effb44aeb30..869a125ebb87 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -82,6 +82,7 @@ struct sugov_cpu {
};
static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);
+static DEFINE_PER_CPU(struct sugov_tunables *, cached_tunables);
/************************ Governor internals ***********************/
@@ -89,16 +90,7 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
{
s64 delta_ns;
- if (sg_policy->work_in_progress)
- return false;
-
if (unlikely(sg_policy->need_freq_update)) {
- sg_policy->need_freq_update = false;
- /*
- * This happens when limits change, so forget the previous
- * next_freq value and force an update.
- */
- sg_policy->next_freq = UINT_MAX;
return true;
}
@@ -150,7 +142,7 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
policy->cur = next_freq;
trace_cpu_frequency(next_freq, smp_processor_id());
- } else {
+ } else if (!sg_policy->work_in_progress) {
sg_policy->work_in_progress = true;
irq_work_queue(&sg_policy->irq_work);
}
@@ -187,8 +179,10 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
freq = (freq + (freq >> 2)) * util / max;
- if (freq == sg_policy->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
+ if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
return sg_policy->next_freq;
+
+ sg_policy->need_freq_update = false;
sg_policy->cached_raw_freq = freq;
return cpufreq_driver_resolve_freq(policy, freq);
}
@@ -234,6 +228,15 @@ static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
if (!sg_policy->tunables->iowait_boost_enable)
return;
+ if (sg_cpu->iowait_boost) {
+ s64 delta_ns = time - sg_cpu->last_update;
+
+ /* Clear iowait_boost if the CPU appears to have been idle. */
+ if (delta_ns > TICK_NSEC) {
+ sg_cpu->iowait_boost = 0;
+ sg_cpu->iowait_boost_pending = false;
+ }
+ }
if (flags & SCHED_CPUFREQ_IOWAIT) {
if (sg_cpu->iowait_boost_pending)
return;
@@ -247,14 +250,6 @@ static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
} else {
sg_cpu->iowait_boost = sg_cpu->sg_policy->policy->min;
}
- } else if (sg_cpu->iowait_boost) {
- s64 delta_ns = time - sg_cpu->last_update;
-
- /* Clear iowait_boost if the CPU apprears to have been idle. */
- if (delta_ns > TICK_NSEC) {
- sg_cpu->iowait_boost = 0;
- sg_cpu->iowait_boost_pending = false;
- }
}
}
@@ -311,6 +306,13 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
sugov_set_iowait_boost(sg_cpu, time, flags);
sg_cpu->last_update = time;
+ /*
+ * For slow-switch systems, single policy requests can't run at the
+ * moment if update is in progress, unless we acquire update_lock.
+ */
+ if (sg_policy->work_in_progress)
+ return;
+
if (!sugov_should_update_freq(sg_policy, time))
return;
@@ -326,7 +328,8 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
* Do not reduce the frequency if the CPU has not been idle
* recently, as the reduction is likely to be premature then.
*/
- if (busy && next_f < sg_policy->next_freq) {
+ if (busy && next_f < sg_policy->next_freq &&
+ sg_policy->next_freq != UINT_MAX) {
next_f = sg_policy->next_freq;
/* Reset cached freq as next_freq has changed */
@@ -366,7 +369,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
j_util = j_sg_cpu->util;
j_max = j_sg_cpu->max;
- if (j_util * max > j_max * util) {
+ if (j_util * max >= j_max * util) {
util = j_util;
max = j_max;
}
@@ -411,13 +414,27 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
static void sugov_work(struct kthread_work *work)
{
struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
+ unsigned int freq;
+ unsigned long flags;
+
+ /*
+ * Hold sg_policy->update_lock briefly to handle the case where
+ * sg_policy->next_freq is read here and then updated by
+ * sugov_update_shared() just before work_in_progress is set to false
+ * here; without the lock we could miss queueing the new update.
+ *
+ * Note: If a work was queued after the update_lock is released,
+ * sugov_work() will just be called again by the kthread_work code; the
+ * request will be processed before the sugov thread sleeps.
+ */
+ raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
+ freq = sg_policy->next_freq;
+ sg_policy->work_in_progress = false;
+ raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
mutex_lock(&sg_policy->work_lock);
- __cpufreq_driver_target(sg_policy->policy, sg_policy->next_freq,
- CPUFREQ_RELATION_L);
+ __cpufreq_driver_target(sg_policy->policy, freq, CPUFREQ_RELATION_L);
mutex_unlock(&sg_policy->work_lock);
-
- sg_policy->work_in_progress = false;
}
static void sugov_irq_work(struct irq_work *irq_work)
@@ -640,6 +657,29 @@ static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_polic
return tunables;
}
+static void sugov_tunables_save(struct cpufreq_policy *policy,
+ struct sugov_tunables *tunables)
+{
+ int cpu;
+ struct sugov_tunables *cached = per_cpu(cached_tunables, policy->cpu);
+
+ if (!have_governor_per_policy())
+ return;
+
+ if (!cached) {
+ cached = kzalloc(sizeof(*tunables), GFP_KERNEL);
+ if (!cached) {
+ pr_warn("Couldn't allocate tunables for caching\n");
+ return;
+ }
+ for_each_cpu(cpu, policy->related_cpus)
+ per_cpu(cached_tunables, cpu) = cached;
+ }
+
+ cached->up_rate_limit_us = tunables->up_rate_limit_us;
+ cached->down_rate_limit_us = tunables->down_rate_limit_us;
+}
+
static void sugov_tunables_free(struct sugov_tunables *tunables)
{
if (!have_governor_per_policy())
@@ -648,6 +688,25 @@ static void sugov_tunables_free(struct sugov_tunables *tunables)
kfree(tunables);
}
+static void sugov_tunables_restore(struct cpufreq_policy *policy)
+{
+ struct sugov_policy *sg_policy = policy->governor_data;
+ struct sugov_tunables *tunables = sg_policy->tunables;
+ struct sugov_tunables *cached = per_cpu(cached_tunables, policy->cpu);
+
+ if (!cached)
+ return;
+
+ tunables->up_rate_limit_us = cached->up_rate_limit_us;
+ tunables->down_rate_limit_us = cached->down_rate_limit_us;
+ sg_policy->up_rate_delay_ns =
+ tunables->up_rate_limit_us * NSEC_PER_USEC;
+ sg_policy->down_rate_delay_ns =
+ tunables->down_rate_limit_us * NSEC_PER_USEC;
+ sg_policy->min_rate_limit_ns = min(sg_policy->up_rate_delay_ns,
+ sg_policy->down_rate_delay_ns);
+}
+
static int sugov_init(struct cpufreq_policy *policy)
{
struct sugov_policy *sg_policy;
@@ -710,6 +769,8 @@ static int sugov_init(struct cpufreq_policy *policy)
policy->governor_data = sg_policy;
sg_policy->tunables = tunables;
+ sugov_tunables_restore(policy);
+
ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
get_governor_parent_kobj(policy), "%s",
cpufreq_gov_schedutil.name);
@@ -749,8 +810,10 @@ static int sugov_exit(struct cpufreq_policy *policy)
count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
policy->governor_data = NULL;
- if (!count)
+ if (!count) {
+ sugov_tunables_save(policy, tunables);
sugov_tunables_free(tunables);
+ }
mutex_unlock(&global_tunables_lock);
@@ -772,7 +835,7 @@ static int sugov_start(struct cpufreq_policy *policy)
sg_policy->tunables->down_rate_limit_us * NSEC_PER_USEC;
update_min_rate_limit_us(sg_policy);
sg_policy->last_freq_update_time = 0;
- sg_policy->next_freq = UINT_MAX;
+ sg_policy->next_freq = 0;
sg_policy->work_in_progress = false;
sg_policy->need_freq_update = false;
sg_policy->cached_raw_freq = 0;
@@ -784,6 +847,11 @@ static int sugov_start(struct cpufreq_policy *policy)
sg_cpu->sg_policy = sg_policy;
sg_cpu->flags = SCHED_CPUFREQ_DL;
sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
+ }
+
+ for_each_cpu(cpu, policy->cpus) {
+ struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
+
cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
policy_is_shared(policy) ?
sugov_update_shared :
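One context line in the get_next_freq() hunk above, freq = (freq + (freq >> 2)) * util / max, scales the base frequency by 1.25 * util / max, so a CPU at 80% utilisation is asked for its full base frequency. A hedged worked example with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned long freq = 2000000;		/* base frequency, kHz (made up) */
	unsigned long util = 819, max = 1024;	/* ~80% utilisation */

	freq = (freq + (freq >> 2)) * util / max;
	printf("raw target: %lu kHz\n", freq);	/* ~2 GHz, before resolve */
	return 0;
}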
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 188c8388a63f..d40995e9cf5f 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1038,6 +1038,7 @@ void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
WARN_ON(!dl_prio(prio));
dl_rq->dl_nr_running++;
add_nr_running(rq_of_dl_rq(dl_rq), 1);
+ walt_inc_cumulative_runnable_avg(rq_of_dl_rq(dl_rq), dl_task_of(dl_se));
inc_hmp_sched_stats_dl(rq_of_dl_rq(dl_rq), dl_task_of(dl_se));
inc_dl_deadline(dl_rq, deadline);
@@ -1053,6 +1054,7 @@ void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
WARN_ON(!dl_rq->dl_nr_running);
dl_rq->dl_nr_running--;
sub_nr_running(rq_of_dl_rq(dl_rq), 1);
+ walt_dec_cumulative_runnable_avg(rq_of_dl_rq(dl_rq), dl_task_of(dl_se));
dec_hmp_sched_stats_dl(rq_of_dl_rq(dl_rq), dl_task_of(dl_se));
dec_dl_deadline(dl_rq, dl_se->deadline);
diff --git a/kernel/sched/energy.c b/kernel/sched/energy.c
index 50d183b1e156..770624996f9f 100644
--- a/kernel/sched/energy.c
+++ b/kernel/sched/energy.c
@@ -91,11 +91,17 @@ void init_sched_energy_costs(void)
sge = kcalloc(1, sizeof(struct sched_group_energy),
GFP_NOWAIT);
+ if (!sge)
+ goto out;
nstates = (prop->length / sizeof(u32)) / 2;
cap_states = kcalloc(nstates,
sizeof(struct capacity_state),
GFP_NOWAIT);
+ if (!cap_states) {
+ kfree(sge);
+ goto out;
+ }
for (i = 0, val = prop->value; i < nstates; i++) {
cap_states[i].cap = be32_to_cpup(val++);
@@ -108,6 +114,8 @@ void init_sched_energy_costs(void)
prop = of_find_property(cp, "idle-cost-data", NULL);
if (!prop || !prop->value) {
pr_warn("No idle-cost data, skipping sched_energy init\n");
+ kfree(sge);
+ kfree(cap_states);
goto out;
}
@@ -115,6 +123,11 @@ void init_sched_energy_costs(void)
idle_states = kcalloc(nstates,
sizeof(struct idle_state),
GFP_NOWAIT);
+ if (!idle_states) {
+ kfree(sge);
+ kfree(cap_states);
+ goto out;
+ }
for (i = 0, val = prop->value; i < nstates; i++)
idle_states[i].power = be32_to_cpup(val++);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 266fc95f6c0f..43c3d2684f64 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3690,68 +3690,6 @@ static inline int migration_needed(struct task_struct *p, int cpu)
return 0;
}
-static inline int
-kick_active_balance(struct rq *rq, struct task_struct *p, int new_cpu)
-{
- unsigned long flags;
- int rc = 0;
-
- /* Invoke active balance to force migrate currently running task */
- raw_spin_lock_irqsave(&rq->lock, flags);
- if (!rq->active_balance) {
- rq->active_balance = 1;
- rq->push_cpu = new_cpu;
- get_task_struct(p);
- rq->push_task = p;
- rc = 1;
- }
- raw_spin_unlock_irqrestore(&rq->lock, flags);
-
- return rc;
-}
-
-static DEFINE_RAW_SPINLOCK(migration_lock);
-
-static bool do_migration(int reason, int new_cpu, int cpu)
-{
- if ((reason == UP_MIGRATION || reason == DOWN_MIGRATION)
- && same_cluster(new_cpu, cpu))
- return false;
-
- /* Inter cluster high irqload migrations are OK */
- return new_cpu != cpu;
-}
-
-/*
- * Check if currently running task should be migrated to a better cpu.
- *
- * Todo: Effect this via changes to nohz_balancer_kick() and load balance?
- */
-void check_for_migration(struct rq *rq, struct task_struct *p)
-{
- int cpu = cpu_of(rq), new_cpu;
- int active_balance = 0, reason;
-
- reason = migration_needed(p, cpu);
- if (!reason)
- return;
-
- raw_spin_lock(&migration_lock);
- new_cpu = select_best_cpu(p, cpu, reason, 0);
-
- if (do_migration(reason, new_cpu, cpu)) {
- active_balance = kick_active_balance(rq, p, new_cpu);
- if (active_balance)
- mark_reserved(new_cpu);
- }
-
- raw_spin_unlock(&migration_lock);
-
- if (active_balance)
- stop_one_cpu_nowait(cpu, active_load_balance_cpu_stop, rq,
- &rq->active_balance_work);
-}
-
#ifdef CONFIG_CFS_BANDWIDTH
static void init_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq)
@@ -5060,12 +4998,9 @@ static inline u64 sched_cfs_bandwidth_slice(void)
*/
void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
{
- u64 now;
-
if (cfs_b->quota == RUNTIME_INF)
return;
- now = sched_clock_cpu(smp_processor_id());
cfs_b->runtime = cfs_b->quota;
}
@@ -6062,6 +5997,11 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
dec_rq_hmp_stats(rq, p, 1);
}
+#ifdef CONFIG_SMP
+ if (energy_aware() && !se)
+ walt_dec_cumulative_runnable_avg(rq, p);
+#endif /* CONFIG_SMP */
+
hrtick_update(rq);
}
@@ -6472,28 +6412,79 @@ unsigned long capacity_curr_of(int cpu)
>> SCHED_CAPACITY_SHIFT;
}
+/*
+ * CPU candidates.
+ *
+ * These are labels to reference CPU candidates for an energy_diff.
+ * Currently we support only two possible candidates: the task's previous CPU
+ * and another candidate CPU.
+ * More advanced/aggressive EAS selection policies can consider more
+ * candidates.
+ */
+#define EAS_CPU_PRV 0
+#define EAS_CPU_NXT 1
+#define EAS_CPU_BKP 2
+#define EAS_CPU_CNT 3
+
+/*
+ * Returns the current capacity of cpu after applying both
+ * cpu and min freq scaling.
+ */
+unsigned long capacity_min_of(int cpu)
+{
+ if (!sched_feat(MIN_CAPACITY_CAPPING))
+ return 0;
+ return arch_scale_cpu_capacity(NULL, cpu) *
+ arch_scale_min_freq_capacity(NULL, cpu)
+ >> SCHED_CAPACITY_SHIFT;
+}
+
+/*
+ * energy_diff - supports the computation of the estimated energy impact in
+ * moving a "task"'s "util_delta" between different CPU candidates.
+ */
struct energy_env {
- struct sched_group *sg_top;
- struct sched_group *sg_cap;
- int cap_idx;
+ /* Utilization to move */
+ struct task_struct *p;
int util_delta;
- int src_cpu;
- int dst_cpu;
- int trg_cpu;
- int energy;
- int payoff;
- struct task_struct *task;
- struct {
- int before;
- int after;
- int delta;
- int diff;
- } nrg;
+
+ /* Mask of CPUs candidates to evaluate */
+ cpumask_t cpus_mask;
+
+ /* CPU candidates to evaluate */
struct {
- int before;
- int after;
- int delta;
- } cap;
+
+ /* CPU ID, must be in cpus_mask */
+ int cpu_id;
+
+ /*
+ * Index (into sched_group_energy::cap_states) of the OPP the
+ * CPU needs to run at if the task is placed on it.
+ * This includes both the active and blocked load, due to
+ * other tasks on this CPU, as well as the task's own
+ * utilization.
+ */
+ int cap_idx;
+ int cap;
+
+ /* Estimated system energy */
+ unsigned int energy;
+
+ /* Estimated energy variation wrt EAS_CPU_PRV */
+ int nrg_delta;
+
+ } cpu[EAS_CPU_CNT];
+
+ /*
+ * Index (into energy_env::cpu) of the most energy efficient CPU for
+ * the specified energy_env::task
+ */
+ int next_idx;
+
+ /* Support data */
+ struct sched_group *sg_top;
+ struct sched_group *sg_cap;
+ struct sched_group *sg;
};
static int cpu_util_wake(int cpu, struct task_struct *p);
@@ -6521,24 +6512,33 @@ static unsigned long __cpu_norm_util(unsigned long util, unsigned long capacity)
return (util << SCHED_CAPACITY_SHIFT)/capacity;
}
-static unsigned long group_max_util(struct energy_env *eenv)
+static unsigned long group_max_util(struct energy_env *eenv, int cpu_idx)
{
unsigned long max_util = 0;
unsigned long util;
int cpu;
for_each_cpu(cpu, sched_group_cpus(eenv->sg_cap)) {
- util = cpu_util_wake(cpu, eenv->task);
+ util = cpu_util_wake(cpu, eenv->p);
/*
* If we are looking at the target CPU specified by the eenv,
* then we should add the (estimated) utilization of the task
* assuming we will wake it up on that CPU.
*/
- if (unlikely(cpu == eenv->trg_cpu))
+ if (unlikely(cpu == eenv->cpu[cpu_idx].cpu_id))
util += eenv->util_delta;
max_util = max(max_util, util);
+
+ /*
+ * Take into account any minimum frequency imposed
+ * elsewhere which limits the energy states available.
+ * If the MIN_CAPACITY_CAPPING feature is not enabled,
+ * capacity_min_of() will return 0 (not capped).
+ */
+ max_util = max(max_util, capacity_min_of(cpu));
+
}
return max_util;
@@ -6556,21 +6556,21 @@ static unsigned long group_max_util(struct energy_env *eenv)
* estimate (more busy).
*/
static unsigned
-long group_norm_util(struct energy_env *eenv, struct sched_group *sg)
+long group_norm_util(struct energy_env *eenv, int cpu_idx)
{
- unsigned long capacity = sg->sge->cap_states[eenv->cap_idx].cap;
+ unsigned long capacity = eenv->cpu[cpu_idx].cap;
unsigned long util, util_sum = 0;
int cpu;
- for_each_cpu(cpu, sched_group_cpus(sg)) {
- util = cpu_util_wake(cpu, eenv->task);
+ for_each_cpu(cpu, sched_group_cpus(eenv->sg)) {
+ util = cpu_util_wake(cpu, eenv->p);
/*
* If we are looking at the target CPU specified by the eenv,
* then we should add the (estimated) utilization of the task
* assuming we will wake it up on that CPU.
*/
- if (unlikely(cpu == eenv->trg_cpu))
+ if (unlikely(cpu == eenv->cpu[cpu_idx].cpu_id))
util += eenv->util_delta;
util_sum += __cpu_norm_util(util, capacity);
@@ -6579,27 +6579,31 @@ long group_norm_util(struct energy_env *eenv, struct sched_group *sg)
return min_t(unsigned long, util_sum, SCHED_CAPACITY_SCALE);
}
-static int find_new_capacity(struct energy_env *eenv,
- const struct sched_group_energy * const sge)
+static int find_new_capacity(struct energy_env *eenv, int cpu_idx)
{
+ const struct sched_group_energy *sge = eenv->sg->sge;
int idx, max_idx = sge->nr_cap_states - 1;
- unsigned long util = group_max_util(eenv);
+ unsigned long util = group_max_util(eenv, cpu_idx);
/* default is max_cap if we don't find a match */
- eenv->cap_idx = max_idx;
+ eenv->cpu[cpu_idx].cap_idx = max_idx;
+ eenv->cpu[cpu_idx].cap = sge->cap_states[max_idx].cap;
for (idx = 0; idx < sge->nr_cap_states; idx++) {
if (sge->cap_states[idx].cap >= util) {
- eenv->cap_idx = idx;
+ /* Keep track of SG's capacity */
+ eenv->cpu[cpu_idx].cap_idx = idx;
+ eenv->cpu[cpu_idx].cap = sge->cap_states[idx].cap;
break;
}
}
- return eenv->cap_idx;
+ return eenv->cpu[cpu_idx].cap_idx;
}
-static int group_idle_state(struct energy_env *eenv, struct sched_group *sg)
+static int group_idle_state(struct energy_env *eenv, int cpu_idx)
{
+ struct sched_group *sg = eenv->sg;
int i, state = INT_MAX;
int src_in_grp, dst_in_grp;
long grp_util = 0;
@@ -6611,8 +6615,10 @@ static int group_idle_state(struct energy_env *eenv, struct sched_group *sg)
/* Take non-cpuidle idling into account (active idle/arch_cpu_idle()) */
state++;
- src_in_grp = cpumask_test_cpu(eenv->src_cpu, sched_group_cpus(sg));
- dst_in_grp = cpumask_test_cpu(eenv->dst_cpu, sched_group_cpus(sg));
+ src_in_grp = cpumask_test_cpu(eenv->cpu[EAS_CPU_PRV].cpu_id,
+ sched_group_cpus(sg));
+ dst_in_grp = cpumask_test_cpu(eenv->cpu[cpu_idx].cpu_id,
+ sched_group_cpus(sg));
if (src_in_grp == dst_in_grp) {
/* both CPUs under consideration are in the same group or not in
* either group, migration should leave idle state the same.
@@ -6625,8 +6631,8 @@ static int group_idle_state(struct energy_env *eenv, struct sched_group *sg)
* achievable when we move the task.
*/
for_each_cpu(i, sched_group_cpus(sg)) {
- grp_util += cpu_util_wake(i, eenv->task);
- if (unlikely(i == eenv->trg_cpu))
+ grp_util += cpu_util_wake(i, eenv->p);
+ if (unlikely(i == eenv->cpu[cpu_idx].cpu_id))
grp_util += eenv->util_delta;
}
@@ -6662,19 +6668,65 @@ end:
}
/*
- * sched_group_energy(): Computes the absolute energy consumption of cpus
- * belonging to the sched_group including shared resources shared only by
- * members of the group. Iterates over all cpus in the hierarchy below the
- * sched_group starting from the bottom working it's way up before going to
- * the next cpu until all cpus are covered at all levels. The current
- * implementation is likely to gather the same util statistics multiple times.
- * This can probably be done in a faster but more complex way.
- * Note: sched_group_energy() may fail when racing with sched_domain updates.
+ * calc_sg_energy: compute energy for the eenv's SG (i.e. eenv->sg).
+ *
+ * This works in iterations to compute the SG's energy for each CPU
+ * candidate defined by the energy_env's cpu array.
+ *
+ * NOTE: in the following computations for busy_energy and idle_energy we do
+ * not shift by SCHED_CAPACITY_SHIFT in order to reduce rounding errors.
+ * The required scaling will be performed just one time, by the calling
+ * functions, once we have accumulated the contributions for all the SGs.
*/
-static int sched_group_energy(struct energy_env *eenv)
+static void calc_sg_energy(struct energy_env *eenv)
+{
+ struct sched_group *sg = eenv->sg;
+ int busy_energy, idle_energy;
+ unsigned int busy_power;
+ unsigned int idle_power;
+ unsigned long sg_util;
+ int cap_idx, idle_idx;
+ int total_energy = 0;
+ int cpu_idx;
+
+ for (cpu_idx = EAS_CPU_PRV; cpu_idx < EAS_CPU_CNT; ++cpu_idx) {
+
+ if (eenv->cpu[cpu_idx].cpu_id == -1)
+ continue;
+ /* Compute ACTIVE energy */
+ cap_idx = find_new_capacity(eenv, cpu_idx);
+ busy_power = sg->sge->cap_states[cap_idx].power;
+ /*
+ * in order to calculate cpu_norm_util, we need to know which
+ * capacity level the group will be at, so calculate that first
+ */
+ sg_util = group_norm_util(eenv, cpu_idx);
+
+ busy_energy = sg_util * busy_power;
+
+ /* Compute IDLE energy */
+ idle_idx = group_idle_state(eenv, cpu_idx);
+ idle_power = sg->sge->idle_states[idle_idx].power;
+
+ idle_energy = SCHED_CAPACITY_SCALE - sg_util;
+ idle_energy *= idle_power;
+
+ total_energy = busy_energy + idle_energy;
+ eenv->cpu[cpu_idx].energy += total_energy;
+ }
+}
+
+/*
+ * compute_energy() computes the absolute variation in energy consumption by
+ * moving eenv.util_delta from EAS_CPU_PRV to EAS_CPU_NXT.
+ *
+ * NOTE: compute_energy() may fail when racing with sched_domain updates, in
+ * which case we abort by returning -EINVAL.
+ */
+static int compute_energy(struct energy_env *eenv)
{
struct cpumask visit_cpus;
- u64 total_energy = 0;
int cpu_count;
WARN_ON(!eenv->sg_top->sge);
@@ -6716,41 +6768,18 @@ static int sched_group_energy(struct energy_env *eenv)
break;
do {
- unsigned long group_util;
- int sg_busy_energy, sg_idle_energy;
- int cap_idx, idle_idx;
-
+ eenv->sg_cap = sg;
if (sg_shared_cap && sg_shared_cap->group_weight >= sg->group_weight)
eenv->sg_cap = sg_shared_cap;
- else
- eenv->sg_cap = sg;
- cap_idx = find_new_capacity(eenv, sg->sge);
-
- if (sg->group_weight == 1) {
- /* Remove capacity of src CPU (before task move) */
- if (eenv->trg_cpu == eenv->src_cpu &&
- cpumask_test_cpu(eenv->src_cpu, sched_group_cpus(sg))) {
- eenv->cap.before = sg->sge->cap_states[cap_idx].cap;
- eenv->cap.delta -= eenv->cap.before;
- }
- /* Add capacity of dst CPU (after task move) */
- if (eenv->trg_cpu == eenv->dst_cpu &&
- cpumask_test_cpu(eenv->dst_cpu, sched_group_cpus(sg))) {
- eenv->cap.after = sg->sge->cap_states[cap_idx].cap;
- eenv->cap.delta += eenv->cap.after;
- }
- }
-
- idle_idx = group_idle_state(eenv, sg);
- group_util = group_norm_util(eenv, sg);
-
- sg_busy_energy = (group_util * sg->sge->cap_states[cap_idx].power);
- sg_idle_energy = ((SCHED_LOAD_SCALE-group_util)
- * sg->sge->idle_states[idle_idx].power);
-
- total_energy += sg_busy_energy + sg_idle_energy;
+ /*
+ * Compute the energy for all the candidate
+ * CPUs in the current visited SG.
+ */
+ eenv->sg = sg;
+ calc_sg_energy(eenv);
+ /* remove CPUs we have just visited */
if (!sd->child) {
/*
* cpu_count here is the number of
@@ -6791,7 +6820,6 @@ next_cpu:
continue;
}
- eenv->energy = total_energy >> SCHED_CAPACITY_SHIFT;
return 0;
}
@@ -6800,181 +6828,101 @@ static inline bool cpu_in_sg(struct sched_group *sg, int cpu)
return cpu != -1 && cpumask_test_cpu(cpu, sched_group_cpus(sg));
}
-static inline unsigned long task_util(struct task_struct *p);
-
/*
- * energy_diff(): Estimate the energy impact of changing the utilization
- * distribution. eenv specifies the change: utilisation amount, source, and
- * destination cpu. Source or destination cpu may be -1 in which case the
- * utilization is removed from or added to the system (e.g. task wake-up). If
- * both are specified, the utilization is migrated.
+ * select_energy_cpu_idx(): estimate the energy impact of changing the
+ * utilization distribution.
+ *
+ * The eenv parameter specifies the changes: utilisation amount and a pair of
+ * possible CPU candidates (the previous CPU and a different target CPU).
+ *
+ * This function returns the index of a CPU candidate specified by the
+ * energy_env which corresponds to the first CPU saving energy.
+ * Thus, 0 (EAS_CPU_PRV) means that none of the CPU candidates is more energy
+ * efficient than running on prev_cpu. This is also the value returned in case
+ * of abort due to error conditions during the computations.
+ * A value greater than zero means that the first energy-efficient CPU is the
+ * one represented by eenv->cpu[eenv->next_idx].cpu_id.
*/
-static inline int __energy_diff(struct energy_env *eenv)
+static inline int select_energy_cpu_idx(struct energy_env *eenv)
{
struct sched_domain *sd;
struct sched_group *sg;
- int sd_cpu = -1, energy_before = 0, energy_after = 0;
- int diff, margin;
-
- struct energy_env eenv_before = {
- .util_delta = task_util(eenv->task),
- .src_cpu = eenv->src_cpu,
- .dst_cpu = eenv->dst_cpu,
- .trg_cpu = eenv->src_cpu,
- .nrg = { 0, 0, 0, 0},
- .cap = { 0, 0, 0 },
- .task = eenv->task,
- };
+ int sd_cpu = -1;
+ int cpu_idx;
+ int margin;
- if (eenv->src_cpu == eenv->dst_cpu)
- return 0;
-
- sd_cpu = (eenv->src_cpu != -1) ? eenv->src_cpu : eenv->dst_cpu;
+ sd_cpu = eenv->cpu[EAS_CPU_PRV].cpu_id;
sd = rcu_dereference(per_cpu(sd_ea, sd_cpu));
-
if (!sd)
- return 0; /* Error */
+ return EAS_CPU_PRV;
- sg = sd->groups;
+ cpumask_clear(&eenv->cpus_mask);
+ for (cpu_idx = EAS_CPU_PRV; cpu_idx < EAS_CPU_CNT; ++cpu_idx) {
+ int cpu = eenv->cpu[cpu_idx].cpu_id;
- do {
- if (cpu_in_sg(sg, eenv->src_cpu) || cpu_in_sg(sg, eenv->dst_cpu)) {
- eenv_before.sg_top = eenv->sg_top = sg;
+ if (cpu < 0)
+ continue;
+ cpumask_set_cpu(cpu, &eenv->cpus_mask);
+ }
- if (sched_group_energy(&eenv_before))
- return 0; /* Invalid result abort */
- energy_before += eenv_before.energy;
+ sg = sd->groups;
+ do {
+ /* Skip SGs which do not contain a candidate CPU */
+ if (!cpumask_intersects(&eenv->cpus_mask, sched_group_cpus(sg)))
+ continue;
- /* Keep track of SRC cpu (before) capacity */
- eenv->cap.before = eenv_before.cap.before;
- eenv->cap.delta = eenv_before.cap.delta;
+ eenv->sg_top = sg;
+ /* energy is unscaled to reduce rounding errors */
+ if (compute_energy(eenv) == -EINVAL)
+ return EAS_CPU_PRV;
- if (sched_group_energy(eenv))
- return 0; /* Invalid result abort */
- energy_after += eenv->energy;
- }
} while (sg = sg->next, sg != sd->groups);
- eenv->nrg.before = energy_before;
- eenv->nrg.after = energy_after;
- eenv->nrg.diff = eenv->nrg.after - eenv->nrg.before;
- eenv->payoff = 0;
-#ifndef CONFIG_SCHED_TUNE
- trace_sched_energy_diff(eenv->task,
- eenv->src_cpu, eenv->dst_cpu, eenv->util_delta,
- eenv->nrg.before, eenv->nrg.after, eenv->nrg.diff,
- eenv->cap.before, eenv->cap.after, eenv->cap.delta,
- eenv->nrg.delta, eenv->payoff);
-#endif
+ /* Scale energy before comparisons */
+ for (cpu_idx = EAS_CPU_PRV; cpu_idx < EAS_CPU_CNT; ++cpu_idx)
+ eenv->cpu[cpu_idx].energy >>= SCHED_CAPACITY_SHIFT;
+
/*
- * Dead-zone margin preventing too many migrations.
+ * Compute the dead-zone margin used to prevent too many task
+ * migrations with negligible energy savings.
+ * An energy saving is considered meaningful if it reduces the energy
+ * consumption of the EAS_CPU_PRV CPU candidate by at least ~1.56%.
*/
+ margin = eenv->cpu[EAS_CPU_PRV].energy >> 6;
- margin = eenv->nrg.before >> 6; /* ~1.56% */
-
- diff = eenv->nrg.after - eenv->nrg.before;
-
- eenv->nrg.diff = (abs(diff) < margin) ? 0 : eenv->nrg.diff;
-
- return eenv->nrg.diff;
-}
-
-#ifdef CONFIG_SCHED_TUNE
-
-struct target_nrg schedtune_target_nrg;
-
-#ifdef CONFIG_CGROUP_SCHEDTUNE
-extern bool schedtune_initialized;
-#endif /* CONFIG_CGROUP_SCHEDTUNE */
-
-/*
- * System energy normalization
- * Returns the normalized value, in the range [0..SCHED_CAPACITY_SCALE],
- * corresponding to the specified energy variation.
- */
-static inline int
-normalize_energy(int energy_diff)
-{
- u32 normalized_nrg;
-
-#ifdef CONFIG_CGROUP_SCHEDTUNE
- /* during early setup, we don't know the extents */
- if (unlikely(!schedtune_initialized))
- return energy_diff < 0 ? -1 : 1 ;
-#endif /* CONFIG_CGROUP_SCHEDTUNE */
-
-#ifdef CONFIG_SCHED_DEBUG
- {
- int max_delta;
+ /*
+ * By default the EAS_CPU_PRV CPU is considered the most energy
+ * efficient, with a 0 energy variation.
+ */
+ eenv->next_idx = EAS_CPU_PRV;
- /* Check for boundaries */
- max_delta = schedtune_target_nrg.max_power;
- max_delta -= schedtune_target_nrg.min_power;
- WARN_ON(abs(energy_diff) >= max_delta);
+ /*
+ * Compare the other CPU candidates to find a CPU which can be
+ * more energy efficient than EAS_CPU_PRV.
+ */
+ for (cpu_idx = EAS_CPU_NXT; cpu_idx < EAS_CPU_CNT; ++cpu_idx) {
+ /* Skip invalid CPU candidates */
+ if (eenv->cpu[cpu_idx].cpu_id < 0)
+ continue;
+ /* Compute energy delta wrt EAS_CPU_PRV */
+ eenv->cpu[cpu_idx].nrg_delta =
+ eenv->cpu[cpu_idx].energy -
+ eenv->cpu[EAS_CPU_PRV].energy;
+ /* filter energy variations within the dead-zone margin */
+ if (abs(eenv->cpu[cpu_idx].nrg_delta) < margin)
+ eenv->cpu[cpu_idx].nrg_delta = 0;
+ /* update the scheduling candidate with min(nrg_delta) */
+ if (eenv->cpu[cpu_idx].nrg_delta <
+ eenv->cpu[eenv->next_idx].nrg_delta) {
+ eenv->next_idx = cpu_idx;
+ if (sched_feat(FBT_STRICT_ORDER))
+ break;
+ }
}
-#endif
-
- /* Do scaling using positive numbers to increase the range */
- normalized_nrg = (energy_diff < 0) ? -energy_diff : energy_diff;
-
- /* Scale by energy magnitude */
- normalized_nrg <<= SCHED_CAPACITY_SHIFT;
-
- /* Normalize on max energy for target platform */
- normalized_nrg = reciprocal_divide(
- normalized_nrg, schedtune_target_nrg.rdiv);
- return (energy_diff < 0) ? -normalized_nrg : normalized_nrg;
+ return eenv->next_idx;
}
-static inline int
-energy_diff(struct energy_env *eenv)
-{
- int boost = schedtune_task_boost(eenv->task);
- int nrg_delta;
-
- /* Conpute "absolute" energy diff */
- __energy_diff(eenv);
-
- /* Return energy diff when boost margin is 0 */
- if (boost == 0) {
- trace_sched_energy_diff(eenv->task,
- eenv->src_cpu, eenv->dst_cpu, eenv->util_delta,
- eenv->nrg.before, eenv->nrg.after, eenv->nrg.diff,
- eenv->cap.before, eenv->cap.after, eenv->cap.delta,
- 0, -eenv->nrg.diff);
- return eenv->nrg.diff;
- }
-
- /* Compute normalized energy diff */
- nrg_delta = normalize_energy(eenv->nrg.diff);
- eenv->nrg.delta = nrg_delta;
-
- eenv->payoff = schedtune_accept_deltas(
- eenv->nrg.delta,
- eenv->cap.delta,
- eenv->task);
-
- trace_sched_energy_diff(eenv->task,
- eenv->src_cpu, eenv->dst_cpu, eenv->util_delta,
- eenv->nrg.before, eenv->nrg.after, eenv->nrg.diff,
- eenv->cap.before, eenv->cap.after, eenv->cap.delta,
- eenv->nrg.delta, eenv->payoff);
-
- /*
- * When SchedTune is enabled, the energy_diff() function will return
- * the computed energy payoff value. Since the energy_diff() return
- * value is expected to be negative by its callers, this evaluation
- * function return a negative value each time the evaluation return a
- * positive payoff, which is the condition for the acceptance of
- * a scheduling decision
- */
- return -eenv->payoff;
-}
-#else /* CONFIG_SCHED_TUNE */
-#define energy_diff(eenv) __energy_diff(eenv)
-#endif
-
/*
* Detect M:N waker/wakee relationships via a switching-frequency heuristic.
* A waker of many should wake a different task than the one last awakened
@@ -7069,18 +7017,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p,
return 1;
}
-static inline unsigned long task_util(struct task_struct *p)
-{
-#ifdef CONFIG_SCHED_WALT
- if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
- unsigned long demand = p->ravg.demand;
- return (demand << 10) / walt_ravg_window;
- }
-#endif
- return p->se.avg.util_avg;
-}
-
-static inline unsigned long boosted_task_util(struct task_struct *task);
+static inline unsigned long boosted_task_util(struct task_struct *p);
static inline bool __task_fits(struct task_struct *p, int cpu, int util)
{
@@ -7157,16 +7094,16 @@ schedtune_cpu_margin(unsigned long util, int cpu)
}
static inline long
-schedtune_task_margin(struct task_struct *task)
+schedtune_task_margin(struct task_struct *p)
{
- int boost = schedtune_task_boost(task);
+ int boost = schedtune_task_boost(p);
unsigned long util;
long margin;
if (boost == 0)
return 0;
- util = task_util(task);
+ util = task_util(p);
margin = schedtune_margin(util, boost);
return margin;
@@ -7181,7 +7118,7 @@ schedtune_cpu_margin(unsigned long util, int cpu)
}
static inline int
-schedtune_task_margin(struct task_struct *task)
+schedtune_task_margin(struct task_struct *p)
{
return 0;
}
@@ -7200,12 +7137,12 @@ boosted_cpu_util(int cpu)
}
static inline unsigned long
-boosted_task_util(struct task_struct *task)
+boosted_task_util(struct task_struct *p)
{
- unsigned long util = task_util(task);
- long margin = schedtune_task_margin(task);
+ unsigned long util = task_util(p);
+ long margin = schedtune_task_margin(p);
- trace_sched_boost_task(task, util, margin);
+ trace_sched_boost_task(p, util, margin);
return util + margin;
}
@@ -7575,6 +7512,7 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
unsigned long min_wake_util = ULONG_MAX;
unsigned long target_max_spare_cap = 0;
unsigned long best_active_util = ULONG_MAX;
+ unsigned long target_idle_max_spare_cap = 0;
int best_idle_cstate = INT_MAX;
struct sched_domain *sd;
struct sched_group *sg;
@@ -7610,7 +7548,7 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
for_each_cpu_and(i, tsk_cpus_allowed(p), sched_group_cpus(sg)) {
unsigned long capacity_curr = capacity_curr_of(i);
unsigned long capacity_orig = capacity_orig_of(i);
- unsigned long wake_util, new_util;
+ unsigned long wake_util, new_util, min_capped_util;
if (!cpu_online(i))
continue;
@@ -7632,13 +7570,18 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
* than the one required to boost the task.
*/
new_util = max(min_util, new_util);
- if (new_util > capacity_orig)
- continue;
-#ifdef CONFIG_SCHED_WALT
- if (walt_cpu_high_irqload(i))
+ /*
+ * Include minimum capacity constraint:
+ * new_util contains the required utilization including
+ * boost. min_capped_util also takes into account a
+ * minimum capacity cap imposed on the CPU by external
+ * actors.
+ */
+ min_capped_util = max(new_util, capacity_min_of(i));
+
+ if (new_util > capacity_orig)
continue;
-#endif
/*
* Case A) Latency sensitive tasks
@@ -7759,6 +7702,12 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
/* Select idle CPU with lower cap_orig */
if (capacity_orig > best_idle_min_cap_orig)
continue;
+ /* Favor CPUs that won't end up running at a
+ * high OPP.
+ */
+ if ((capacity_orig - min_capped_util) <
+ target_idle_max_spare_cap)
+ continue;
/*
* Skip CPUs in deeper idle state, but only
@@ -7772,6 +7721,8 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
/* Keep track of best idle CPU */
best_idle_min_cap_orig = capacity_orig;
+ target_idle_max_spare_cap = capacity_orig -
+ min_capped_util;
best_idle_cstate = idle_idx;
best_idle_cpu = i;
continue;
@@ -7802,10 +7753,11 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
continue;
/* Favor CPUs with maximum spare capacity */
- if ((capacity_orig - new_util) < target_max_spare_cap)
+ if ((capacity_orig - min_capped_util) <
+ target_max_spare_cap)
continue;
- target_max_spare_cap = capacity_orig - new_util;
+ target_max_spare_cap = capacity_orig - min_capped_util;
target_capacity = capacity_orig;
target_cpu = i;
}
@@ -7877,9 +7829,11 @@ static int wake_cap(struct task_struct *p, int cpu, int prev_cpu)
static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync)
{
- struct sched_domain *sd;
- int target_cpu = prev_cpu, tmp_target, tmp_backup;
bool boosted, prefer_idle;
+ struct sched_domain *sd;
+ int target_cpu;
+ int backup_cpu;
+ int next_cpu;
schedstat_inc(p, se.statistics.nr_wakeups_secb_attempts);
schedstat_inc(this_rq(), eas_stats.secb_attempts);
@@ -7894,7 +7848,6 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
}
}
- rcu_read_lock();
#ifdef CONFIG_CGROUP_SCHEDTUNE
boosted = schedtune_task_boost(p) > 0;
prefer_idle = schedtune_prefer_idle(p) > 0;
@@ -7903,31 +7856,49 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
prefer_idle = 0;
#endif
- sync_entity_load_avg(&p->se);
+ rcu_read_lock();
sd = rcu_dereference(per_cpu(sd_ea, prev_cpu));
+ if (!sd) {
+ target_cpu = prev_cpu;
+ goto unlock;
+ }
+
+ sync_entity_load_avg(&p->se);
+
/* Find a cpu with sufficient capacity */
- tmp_target = find_best_target(p, &tmp_backup, boosted, prefer_idle);
+ next_cpu = find_best_target(p, &backup_cpu, boosted, prefer_idle);
+ if (next_cpu == -1) {
+ target_cpu = prev_cpu;
+ goto unlock;
+ }
- if (!sd)
+ /* Unconditionally prefer IDLE CPUs for boosted/prefer_idle tasks */
+ if ((boosted || prefer_idle) && idle_cpu(next_cpu)) {
+ schedstat_inc(p, se.statistics.nr_wakeups_secb_idle_bt);
+ schedstat_inc(this_rq(), eas_stats.secb_idle_bt);
+ target_cpu = next_cpu;
goto unlock;
- if (tmp_target >= 0) {
- target_cpu = tmp_target;
- if ((boosted || prefer_idle) && idle_cpu(target_cpu)) {
- schedstat_inc(p, se.statistics.nr_wakeups_secb_idle_bt);
- schedstat_inc(this_rq(), eas_stats.secb_idle_bt);
- goto unlock;
- }
}
- if (target_cpu != prev_cpu) {
+ target_cpu = prev_cpu;
+ if (next_cpu != prev_cpu) {
int delta = 0;
struct energy_env eenv = {
+ .p = p,
.util_delta = task_util(p),
- .src_cpu = prev_cpu,
- .dst_cpu = target_cpu,
- .task = p,
- .trg_cpu = target_cpu,
+ /* Task's previous CPU candidate */
+ .cpu[EAS_CPU_PRV] = {
+ .cpu_id = prev_cpu,
+ },
+ /* Main alternative CPU candidate */
+ .cpu[EAS_CPU_NXT] = {
+ .cpu_id = next_cpu,
+ },
+ /* Backup alternative CPU candidate */
+ .cpu[EAS_CPU_BKP] = {
+ .cpu_id = backup_cpu,
+ },
};
@@ -7940,26 +7911,21 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
if (__cpu_overutilized(prev_cpu, delta)) {
schedstat_inc(p, se.statistics.nr_wakeups_secb_insuff_cap);
schedstat_inc(this_rq(), eas_stats.secb_insuff_cap);
+ target_cpu = next_cpu;
goto unlock;
}
- if (energy_diff(&eenv) >= 0) {
- /* No energy saving for target_cpu, try backup */
- target_cpu = tmp_backup;
- eenv.dst_cpu = target_cpu;
- eenv.trg_cpu = target_cpu;
- if (tmp_backup < 0 ||
- tmp_backup == prev_cpu ||
- energy_diff(&eenv) >= 0) {
- schedstat_inc(p, se.statistics.nr_wakeups_secb_no_nrg_sav);
- schedstat_inc(this_rq(), eas_stats.secb_no_nrg_sav);
- target_cpu = prev_cpu;
- goto unlock;
- }
+ /* Check if EAS_CPU_NXT is a more energy efficient CPU */
+ if (select_energy_cpu_idx(&eenv) != EAS_CPU_PRV) {
+ schedstat_inc(p, se.statistics.nr_wakeups_secb_nrg_sav);
+ schedstat_inc(this_rq(), eas_stats.secb_nrg_sav);
+ target_cpu = eenv.cpu[eenv.next_idx].cpu_id;
+ goto unlock;
}
- schedstat_inc(p, se.statistics.nr_wakeups_secb_nrg_sav);
- schedstat_inc(this_rq(), eas_stats.secb_nrg_sav);
+ schedstat_inc(p, se.statistics.nr_wakeups_secb_no_nrg_sav);
+ schedstat_inc(this_rq(), eas_stats.secb_no_nrg_sav);
+ target_cpu = prev_cpu;
goto unlock;
}
@@ -9339,6 +9305,9 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
cpu_rq(cpu)->cpu_capacity_orig = capacity;
+ capacity *= arch_scale_max_freq_capacity(sd, cpu);
+ capacity >>= SCHED_CAPACITY_SHIFT;
+
mcc = &cpu_rq(cpu)->rd->max_cpu_capacity;
raw_spin_lock_irqsave(&mcc->lock, flags);
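The two added lines above apply the max-frequency cap as a fixed-point multiply-and-shift; arch_scale_max_freq_capacity() defaults to SCHED_CAPACITY_SCALE (see the sched.h hunk below), so capacity is unchanged unless the architecture reports a cap. A minimal standalone sketch of the same arithmetic, with made-up numbers:

#include <stdio.h>

#define SCHED_CAPACITY_SHIFT 10

int main(void)
{
	unsigned long capacity = 1024;       /* capacity_orig of a big CPU */
	unsigned long max_freq_scale = 819;  /* hypothetical: ~80% of fmax allowed */

	/* same scaling as update_cpu_capacity(): capacity * scale / 1024 */
	capacity = (capacity * max_freq_scale) >> SCHED_CAPACITY_SHIFT;
	printf("capped capacity = %lu\n", capacity);   /* prints 819 */
	return 0;
}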
@@ -10373,6 +10342,17 @@ static struct rq *find_busiest_queue(struct lb_env *env,
capacity = capacity_of(i);
+ /*
+ * For ASYM_CPUCAPACITY domains, don't pick a cpu that could
+ * eventually lead to active_balancing high->low capacity.
+ * Higher per-cpu capacity is considered better than balancing
+ * average load.
+ */
+ if (env->sd->flags & SD_ASYM_CPUCAPACITY &&
+ capacity_of(env->dst_cpu) < capacity &&
+ rq->nr_running == 1)
+ continue;
+
wl = weighted_cpuload(i);
/*
@@ -11677,6 +11657,92 @@ static void rq_offline_fair(struct rq *rq)
unthrottle_offline_cfs_rqs(rq);
}
+static inline int
+kick_active_balance(struct rq *rq, struct task_struct *p, int new_cpu)
+{
+ unsigned long flags;
+ int rc = 0;
+
+ /* Invoke active balance to force migrate currently running task */
+ raw_spin_lock_irqsave(&rq->lock, flags);
+ if (!rq->active_balance) {
+ rq->active_balance = 1;
+ rq->push_cpu = new_cpu;
+ get_task_struct(p);
+ rq->push_task = p;
+ rc = 1;
+ }
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+
+ return rc;
+}
+
+#ifdef CONFIG_SCHED_HMP
+static DEFINE_RAW_SPINLOCK(migration_lock);
+
+static bool do_migration(int reason, int new_cpu, int cpu)
+{
+ if ((reason == UP_MIGRATION || reason == DOWN_MIGRATION)
+ && same_cluster(new_cpu, cpu))
+ return false;
+
+ /* Inter cluster high irqload migrations are OK */
+ return new_cpu != cpu;
+}
+
+/*
+ * Check if currently running task should be migrated to a better cpu.
+ *
+ * Todo: Effect this via changes to nohz_balancer_kick() and load balance?
+ */
+void check_for_migration(struct rq *rq, struct task_struct *p)
+{
+ int cpu = cpu_of(rq), new_cpu;
+ int active_balance = 0, reason;
+
+ reason = migration_needed(p, cpu);
+ if (!reason)
+ return;
+
+ raw_spin_lock(&migration_lock);
+ new_cpu = select_best_cpu(p, cpu, reason, 0);
+
+ if (do_migration(reason, new_cpu, cpu)) {
+ active_balance = kick_active_balance(rq, p, new_cpu);
+ if (active_balance)
+ mark_reserved(new_cpu);
+ }
+
+ raw_spin_unlock(&migration_lock);
+
+ if (active_balance)
+ stop_one_cpu_nowait(cpu, active_load_balance_cpu_stop, rq,
+ &rq->active_balance_work);
+}
+#else
+void check_for_migration(struct rq *rq, struct task_struct *p)
+{
+ int new_cpu;
+ int active_balance;
+ int cpu = task_cpu(p);
+
+ if (rq->misfit_task) {
+ if (rq->curr->state != TASK_RUNNING ||
+ rq->curr->nr_cpus_allowed == 1)
+ return;
+
+ new_cpu = select_energy_cpu_brute(p, cpu, 0);
+ if (capacity_orig_of(new_cpu) > capacity_orig_of(cpu)) {
+ active_balance = kick_active_balance(rq, p, new_cpu);
+ if (active_balance)
+ stop_one_cpu_nowait(cpu,
+ active_load_balance_cpu_stop,
+ rq, &rq->active_balance_work);
+ }
+ }
+}
+#endif
+
#endif /* CONFIG_SMP */
/*
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index c30c48fde7e6..c3e301589515 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -78,3 +78,29 @@ SCHED_FEAT(ENERGY_AWARE, true)
#else
SCHED_FEAT(ENERGY_AWARE, false)
#endif
+
+/*
+ * Minimum capacity capping. Keep track of minimum capacity factor when
+ * minimum frequency available to a policy is modified.
+ * If enabled, this can be used to inform the scheduler about capacity
+ * restrictions.
+ */
+SCHED_FEAT(MIN_CAPACITY_CAPPING, true)
+
+/*
+ * Enforce the priority of candidates selected by find_best_target()
+ * ON: If the target CPU saves any energy, use that.
+ * OFF: Use whichever of target or backup saves most.
+ */
+SCHED_FEAT(FBT_STRICT_ORDER, true)
+
+/*
+ * Apply schedtune boost hold to tasks of all sched classes.
+ * If enabled, schedtune will hold the boost applied to a CPU
+ * for 50ms regardless of task activation - if the task is
+ * still running 50ms later, the hold expires and the boost
+ * is removed as soon as the task stops running.
+ * If disabled, this behaviour will only apply to tasks of the
+ * RT class.
+ */
+SCHED_FEAT(SCHEDTUNE_BOOST_HOLD_ALL, false)
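For context, a flag declared with SCHED_FEAT() is normally read with the sched_feat() macro elsewhere in fair.c. The standalone model below (not kernel code; names and numbers are illustrative) shows the policy difference FBT_STRICT_ORDER describes: strict ordering accepts the first candidate that saves any energy, otherwise the largest saving wins.

#include <stdio.h>
#include <stdbool.h>

static int pick_candidate(const int saving[], int n, bool strict_order)
{
	int best = -1, best_saving = 0;

	for (int i = 0; i < n; i++) {
		if (saving[i] <= 0)
			continue;            /* candidate saves no energy */
		if (strict_order)
			return i;            /* first saver wins, in priority order */
		if (saving[i] > best_saving) {
			best_saving = saving[i];
			best = i;
		}
	}
	return best;                         /* -1: stay on the previous cpu */
}

int main(void)
{
	int saving[] = { 10, 40 };           /* hypothetical savings for NXT, BKP */

	printf("strict=%d best=%d\n",
	       pick_candidate(saving, 2, true),     /* 0 */
	       pick_candidate(saving, 2, false));   /* 1 */
	return 0;
}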
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index ac81704e14d9..9d7f6998edd5 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1439,6 +1439,25 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
}
/*
+ * Keep track of whether each cpu has an RT task that will
+ * soon schedule on that core. The problem this is intended
+ * to address is that we want to avoid entering a non-preemptible
+ * softirq handler if we are about to schedule a real-time
+ * task on that core. Ideally, we could just check whether
+ * the RT runqueue on that core had a runnable task, but the
+ * window between choosing to schedule a real-time task
+ * on a core and actually enqueueing it on that run-queue
+ * is large enough to lose races at an unacceptably high rate.
+ *
+ * This variable attempts to reduce that window by indicating
+ * when we have decided to schedule an RT task on a core
+ * but not yet enqueued it.
+ * This variable is a heuristic only: it is not guaranteed
+ * to be correct and may be updated without synchronization.
+ */
+DEFINE_PER_CPU(bool, incoming_rt_task);
+
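The places that touch this flag are spread over later hunks of this patch; as a reading aid, its life cycle (kernel context assumed) is:

/*
 * select_task_rq_rt()  - a target cpu was chosen but the task is not
 *                        enqueued yet:
 *                            *per_cpu_ptr(&incoming_rt_task, cpu) = true;
 *
 * enqueue_task_rt()    - the task is now on the runqueue, clear the hint:
 *                            *per_cpu_ptr(&incoming_rt_task, cpu_of(rq)) = false;
 *
 * cpu_has_rt_task()    - read by __do_softirq() to decide whether the
 *                        LONG_SOFTIRQ_MASK handlers should be deferred
 *                        to ksoftirqd on this cpu.
 */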
+/*
* Adding/removing a task to/from a priority array:
*/
static void
@@ -1459,6 +1478,7 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
enqueue_pushable_task(rq, p);
+ *per_cpu_ptr(&incoming_rt_task, cpu_of(rq)) = false;
if (!schedtune_task_boost(p))
return;
@@ -1551,8 +1571,19 @@ static void yield_task_rt(struct rq *rq)
requeue_task_rt(rq, rq->curr, 0);
}
+/*
+ * Return whether the given cpu has (or will shortly have) an RT task
+ * ready to run. NB: This is a heuristic and is subject to races.
+ */
+bool
+cpu_has_rt_task(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+ return rq->rt.rt_nr_running > 0 || per_cpu(incoming_rt_task, cpu);
+}
+
#ifdef CONFIG_SMP
-static int find_lowest_rq(struct task_struct *task);
+static int find_lowest_rq(struct task_struct *task, int sync);
#ifdef CONFIG_SCHED_HMP
static int
@@ -1561,7 +1592,7 @@ select_task_rq_rt_hmp(struct task_struct *p, int cpu, int sd_flag, int flags)
int target;
rcu_read_lock();
- target = find_lowest_rq(p);
+ target = find_lowest_rq(p, 0);
if (target != -1)
cpu = target;
rcu_read_unlock();
@@ -1573,8 +1604,10 @@ select_task_rq_rt_hmp(struct task_struct *p, int cpu, int sd_flag, int flags)
/*
* Return whether the task on the given cpu is currently non-preemptible
* while handling a potentially long softint, or if the task is likely
- * to block preemptions soon because it is a ksoftirq thread that is
- * handling slow softints.
+ * to block preemptions soon because (a) it is a ksoftirq thread that is
+ * handling slow softints, (b) it is idle and therefore likely to start
+ * processing the irqs immediately, or (c) the cpu is currently handling
+ * hard irqs and will soon move on to the softirq handler.
*/
bool
task_may_not_preempt(struct task_struct *task, int cpu)
@@ -1584,8 +1617,9 @@ task_may_not_preempt(struct task_struct *task, int cpu)
struct task_struct *cpu_ksoftirqd = per_cpu(ksoftirqd, cpu);
return ((softirqs & LONG_SOFTIRQ_MASK) &&
- (task == cpu_ksoftirqd ||
- task_thread_info(task)->preempt_count & SOFTIRQ_MASK));
+ (task == cpu_ksoftirqd || is_idle_task(task) ||
+ (task_thread_info(task)->preempt_count
+ & (HARDIRQ_MASK | SOFTIRQ_MASK))));
}
/*
@@ -1618,9 +1652,11 @@ static int
select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags,
int sibling_count_hint)
{
- struct task_struct *curr;
+ struct task_struct *curr, *tgt_task;
struct rq *rq;
bool may_not_preempt;
+ int target;
+ int sync = flags & WF_SYNC;
#ifdef CONFIG_SCHED_HMP
return select_task_rq_rt_hmp(p, cpu, sd_flag, flags);
@@ -1635,58 +1671,28 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags,
rcu_read_lock();
curr = READ_ONCE(rq->curr); /* unlocked access */
- /*
- * If the current task on @p's runqueue is a softirq task,
- * it may run without preemption for a time that is
- * ill-suited for a waiting RT task. Therefore, try to
- * wake this RT task on another runqueue.
- *
- * Also, if the current task on @p's runqueue is an RT task, then
- * it may run without preemption for a time that is
- * ill-suited for a waiting RT task. Therefore, try to
- * wake this RT task on another runqueue.
- *
- * Also, if the current task on @p's runqueue is an RT task, then
- * try to see if we can wake this RT task up on another
- * runqueue. Otherwise simply start this RT task
- * on its current runqueue.
- *
- * We want to avoid overloading runqueues. If the woken
- * task is a higher priority, then it will stay on this CPU
- * and the lower prio task should be moved to another CPU.
- * Even though this will probably make the lower prio task
- * lose its cache, we do not want to bounce a higher task
- * around just because it gave up its CPU, perhaps for a
- * lock?
- *
- * For equal prio tasks, we just let the scheduler sort it out.
- *
- * Otherwise, just let it ride on the affined RQ and the
- * post-schedule router will push the preempted task away
- *
- * This test is optimistic, if we get it wrong the load-balancer
- * will have to sort it out.
- */
may_not_preempt = task_may_not_preempt(curr, cpu);
- if (may_not_preempt ||
- (unlikely(rt_task(curr)) &&
- (curr->nr_cpus_allowed < 2 ||
- curr->prio <= p->prio))) {
- int target = find_lowest_rq(p);
+ target = find_lowest_rq(p, sync);
- /*
- * If cpu is non-preemptible, prefer remote cpu
- * even if it's running a higher-prio task.
- * Otherwise: Don't bother moving it if the
- * destination CPU is not running a lower priority task.
- */
- if (target != -1 &&
- (may_not_preempt ||
- p->prio < cpu_rq(target)->rt.highest_prio.curr))
- cpu = target;
+ /*
+ * Check once for losing a race with the other core's irq handler.
+ * This does not happen frequently, but it can avoid delaying
+ * the execution of the RT task in those cases.
+ */
+ if (target != -1) {
+ tgt_task = READ_ONCE(cpu_rq(target)->curr);
+ if (task_may_not_preempt(tgt_task, target))
+ target = find_lowest_rq(p, sync);
}
+ /*
+ * Possible race. Don't bother moving it if the
+ * destination CPU is not running a lower priority task.
+ */
+ if (target != -1 &&
+ (may_not_preempt || p->prio < cpu_rq(target)->rt.highest_prio.curr))
+ cpu = target;
+ *per_cpu_ptr(&incoming_rt_task, cpu) = true;
rcu_read_unlock();
-
out:
/*
* If previous CPU was different, make sure to cancel any active
@@ -1730,7 +1736,6 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
requeue_task_rt(rq, p, 1);
resched_curr(rq);
}
-
#endif /* CONFIG_SMP */
/*
@@ -1994,12 +1999,108 @@ retry:
}
#endif /* CONFIG_SCHED_HMP */
-static int find_lowest_rq(struct task_struct *task)
+static int find_best_rt_target(struct task_struct *task, int cpu,
+ struct cpumask *lowest_mask,
+ bool boosted, bool prefer_idle)
+{
+ int iter_cpu;
+ int target_cpu = -1;
+ int boosted_cpu = -1;
+ int backup_cpu = -1;
+ int boosted_orig_capacity = capacity_orig_of(0);
+ int backup_capacity = 0;
+ int best_idle_cpu = -1;
+ unsigned long target_util = 0;
+ unsigned long new_util;
+ /* We want to elect the best one based on task class,
+ * idleness, and utilization.
+ */
+ for (iter_cpu = 0; iter_cpu < NR_CPUS; iter_cpu++) {
+ int cur_capacity;
+ /*
+ * Iterate from higher cpus for boosted tasks.
+ */
+ int i = boosted ? NR_CPUS - iter_cpu - 1 : iter_cpu;
+ if (!cpu_online(i) || !cpumask_test_cpu(i, tsk_cpus_allowed(task)))
+ continue;
+
+ new_util = cpu_util(i) + task_util(task);
+
+ if (new_util > capacity_orig_of(i))
+ continue;
+
+ /*
+ * Unconditionally favor idle cpus for tasks that prefer idle,
+ * to improve latency.
+ */
+ if (idle_cpu(i) && prefer_idle
+ && cpumask_test_cpu(i, lowest_mask) && best_idle_cpu < 0) {
+ best_idle_cpu = i;
+ continue;
+ }
+
+ if (cpumask_test_cpu(i, lowest_mask)) {
+ /* Bias cpu selection towards cpu with higher original
+ * capacity if task is boosted.
+ * Assumption: Higher cpus are exclusively allotted for
+ * boosted tasks.
+ */
+ if (boosted && boosted_cpu < 0
+ && boosted_orig_capacity < capacity_orig_of(i)) {
+ boosted_cpu = i;
+ boosted_orig_capacity = capacity_orig_of(i);
+ }
+ cur_capacity = capacity_curr_of(i);
+ if (new_util < cur_capacity && cpu_rq(i)->nr_running) {
+ if (!boosted) {
+ /* Find a target cpu with highest utilization. */
+ if (target_util < new_util) {
+ target_cpu = i;
+ target_util = new_util;
+ }
+ } else {
+ if (target_util == 0 || target_util > new_util) {
+ /* Find a target cpu with lowest utilization. */
+ target_cpu = i;
+ target_util = new_util;
+ }
+ }
+ } else if (backup_capacity == 0 || backup_capacity < cur_capacity) {
+ /* Select a backup CPU with highest capacity. */
+ backup_capacity = cur_capacity;
+ backup_cpu = i;
+ }
+ }
+ }
+
+ if (boosted && boosted_cpu >= 0 && boosted_cpu > best_idle_cpu)
+ target_cpu = boosted_cpu;
+ else if (prefer_idle && best_idle_cpu >= 0)
+ target_cpu = best_idle_cpu;
+
+ if (target_cpu < 0) {
+ if (backup_cpu >= 0)
+ return backup_cpu;
+
+ /* Select current cpu if it is present in the mask. */
+ if (cpumask_test_cpu(cpu, lowest_mask))
+ return cpu;
+
+ /* Pick a random cpu from lowest_mask */
+ target_cpu = cpumask_any(lowest_mask);
+ if (target_cpu < nr_cpu_ids)
+ return target_cpu;
+ return -1;
+ }
+ return target_cpu;
+}
+
+static int find_lowest_rq(struct task_struct *task, int sync)
{
struct sched_domain *sd;
struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
int this_cpu = smp_processor_id();
int cpu = task_cpu(task);
+ bool boosted, prefer_idle;
#ifdef CONFIG_SCHED_HMP
return find_lowest_rq_hmp(task);
@@ -2012,64 +2113,88 @@ static int find_lowest_rq(struct task_struct *task)
if (task->nr_cpus_allowed == 1)
return -1; /* No other targets possible */
+ /* Constructing cpumask of lowest priorities */
if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
return -1; /* No targets found */
- /*
- * At this point we have built a mask of cpus representing the
- * lowest priority tasks in the system. Now we want to elect
- * the best one based on our affinity and topology.
- *
- * We prioritize the last cpu that the task executed on since
- * it is most likely cache-hot in that location.
+ /* Return current cpu if WF_SYNC hint is set and present in
+ * lowest_mask. Improves data locality.
*/
- if (cpumask_test_cpu(cpu, lowest_mask))
- return cpu;
+ if (sysctl_sched_sync_hint_enable && sync) {
+ cpumask_t search_cpus;
+ cpumask_and(&search_cpus, tsk_cpus_allowed(task), lowest_mask);
+ if (cpumask_test_cpu(cpu, &search_cpus))
+ return cpu;
+ }
/*
- * Otherwise, we consult the sched_domains span maps to figure
- * out which cpu is logically closest to our hot cache data.
+ * At this point we have built a mask of cpus representing the
+ * lowest priority tasks in the system.
*/
- if (!cpumask_test_cpu(this_cpu, lowest_mask))
- this_cpu = -1; /* Skip this_cpu opt if not among lowest */
- rcu_read_lock();
- for_each_domain(cpu, sd) {
- if (sd->flags & SD_WAKE_AFFINE) {
- int best_cpu;
-
- /*
- * "this_cpu" is cheaper to preempt than a
- * remote processor.
- */
- if (this_cpu != -1 &&
- cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
- rcu_read_unlock();
- return this_cpu;
- }
+ boosted = schedtune_task_boost(task) > 0;
+ prefer_idle = schedtune_prefer_idle(task) > 0;
+ if (boosted || prefer_idle) {
+ return find_best_rt_target(task, cpu, lowest_mask, boosted, prefer_idle);
+ } else {
+ /* Now we want to elect the best one based on on our affinity
+ * and topology.
+ * We prioritize the last cpu that the task executed on since
+ * it is most likely cache-hot in that location.
+ */
+ struct task_struct *curr;
+ if (!cpumask_test_cpu(this_cpu, lowest_mask))
+ this_cpu = -1; /* Skip this_cpu opt if not among lowest */
+ rcu_read_lock();
+ for_each_domain(cpu, sd) {
+ if (sd->flags & SD_WAKE_AFFINE) {
+ int best_cpu;
+ /*
+ * "this_cpu" is cheaper to preempt than a
+ * remote processor.
+ */
+ if (this_cpu != -1 &&
+ cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
+ curr = cpu_rq(this_cpu)->curr;
+ /* Ensure that boosted/prefer_idle
+ * tasks are not preempted even if
+ * they have lower priority */
+ if (!curr || (schedtune_task_boost(curr) == 0
+ && schedtune_prefer_idle(curr) == 0)) {
+ rcu_read_unlock();
+ return this_cpu;
+ }
+ }
- best_cpu = cpumask_first_and(lowest_mask,
- sched_domain_span(sd));
- if (best_cpu < nr_cpu_ids) {
- rcu_read_unlock();
- return best_cpu;
+ best_cpu = cpumask_first_and(lowest_mask,
+ sched_domain_span(sd));
+ if (best_cpu < nr_cpu_ids) {
+ curr = cpu_rq(best_cpu)->curr;
+ /* Ensure that boosted/prefer_idle
+ * tasks are not preempted even if
+ * they have lower priority */
+ if (!curr || (schedtune_task_boost(curr) == 0
+ && schedtune_prefer_idle(curr) == 0)) {
+ rcu_read_unlock();
+ return best_cpu;
+ }
+ }
}
}
- }
- rcu_read_unlock();
+ rcu_read_unlock();
- /*
- * And finally, if there were no matches within the domains
- * just give the caller *something* to work with from the compatible
- * locations.
- */
- if (this_cpu != -1)
- return this_cpu;
+ /* And finally, if there were no matches within the domains just
+ * give the caller *something* to work with from the compatible
+ * locations.
+ */
+ if (this_cpu != -1)
+ return this_cpu;
- cpu = cpumask_any(lowest_mask);
- if (cpu < nr_cpu_ids)
- return cpu;
- return -1;
+ cpu = cpumask_any(lowest_mask);
+ if (cpu < nr_cpu_ids)
+ return cpu;
+ return -1;
+ }
}
/* Will lock the rq it finds */
@@ -2080,7 +2205,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
int cpu;
for (tries = 0; tries < RT_MAX_TRIES; tries++) {
- cpu = find_lowest_rq(task);
+ cpu = find_lowest_rq(task, 0);
if ((cpu == -1) || (cpu == rq->cpu))
break;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 78ba150f2016..fa4d0ab014b1 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -33,8 +33,10 @@ extern long calc_load_fold_active(struct rq *this_rq);
#ifdef CONFIG_SMP
extern void update_cpu_load_active(struct rq *this_rq);
+extern void check_for_migration(struct rq *rq, struct task_struct *p);
#else
static inline void update_cpu_load_active(struct rq *this_rq) { }
+static inline void check_for_migration(struct rq *rq, struct task_struct *p) { }
#endif
/*
@@ -1470,7 +1472,6 @@ static inline bool is_short_burst_task(struct task_struct *p)
p->ravg.avg_sleep_time > sysctl_sched_short_sleep;
}
-extern void check_for_migration(struct rq *rq, struct task_struct *p);
extern void pre_big_task_count_change(const struct cpumask *cpus);
extern void post_big_task_count_change(const struct cpumask *cpus);
extern void set_hmp_defaults(void);
@@ -1730,7 +1731,6 @@ static inline int same_freq_domain(int src_cpu, int dst_cpu)
return 1;
}
-static inline void check_for_migration(struct rq *rq, struct task_struct *p) { }
static inline void pre_big_task_count_change(void) { }
static inline void post_big_task_count_change(void) { }
static inline void set_hmp_defaults(void) { }
@@ -2357,6 +2357,26 @@ unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
}
#endif
+#ifndef arch_scale_max_freq_capacity
+static __always_inline
+unsigned long arch_scale_max_freq_capacity(struct sched_domain *sd, int cpu)
+{
+ return SCHED_CAPACITY_SCALE;
+}
+#endif
+
+#ifndef arch_scale_min_freq_capacity
+static __always_inline
+unsigned long arch_scale_min_freq_capacity(struct sched_domain *sd, int cpu)
+{
+ /*
+ * Multiplied with any capacity value, this scale factor will return
+ * 0, which represents an un-capped state
+ */
+ return 0;
+}
+#endif
+
#ifndef arch_scale_cpu_capacity
static __always_inline
unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
@@ -2383,6 +2403,19 @@ extern unsigned int sysctl_sched_use_walt_cpu_util;
extern unsigned int walt_ravg_window;
extern bool walt_disabled;
+static inline unsigned long task_util(struct task_struct *p)
+{
+#ifdef CONFIG_SCHED_WALT
+ if (!walt_disabled && sysctl_sched_use_walt_task_util) {
+ unsigned long demand = p->ravg.demand;
+ return (demand << 10) / walt_ravg_window;
+ }
+#endif
+ return p->se.avg.util_avg;
+}
+
+
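With WALT enabled, the value is the task's demand scaled into the usual 0..1024 capacity range. A standalone example of the arithmetic (hypothetical numbers, assumes 64-bit unsigned long):

#include <stdio.h>

int main(void)
{
	unsigned long demand = 10000000;            /* 10 ms of demand, in ns */
	unsigned long walt_ravg_window = 20000000;  /* 20 ms window, in ns */

	/* same scaling as task_util(): demand * 1024 / window */
	unsigned long util = (demand << 10) / walt_ravg_window;
	printf("util = %lu\n", util);               /* 512, i.e. half of capacity */
	return 0;
}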
/*
* cpu_util returns the amount of capacity of a CPU that is used by CFS
* tasks. The unit of the return value must be the one of capacity so we can
diff --git a/kernel/sched/tune.c b/kernel/sched/tune.c
index d0ef97f484b1..728553403c2b 100644
--- a/kernel/sched/tune.c
+++ b/kernel/sched/tune.c
@@ -17,8 +17,11 @@ bool schedtune_initialized = false;
unsigned int sysctl_sched_cfs_boost __read_mostly;
+/* We hold schedtune boost in effect for at least this long */
+#define SCHEDTUNE_BOOST_HOLD_NS 50000000ULL
+
extern struct reciprocal_value schedtune_spc_rdiv;
-extern struct target_nrg schedtune_target_nrg;
+struct target_nrg schedtune_target_nrg;
/* Performance Boost region (B) threshold params */
static int perf_boost_idx;
@@ -260,11 +263,14 @@ struct boost_groups {
/* Maximum boost value for all RUNNABLE tasks on a CPU */
bool idle;
int boost_max;
+ u64 boost_ts;
struct {
/* The boost for tasks on that boost group */
int boost;
/* Count of RUNNABLE tasks on that boost group */
unsigned tasks;
+ /* Timestamp of boost activation */
+ u64 ts;
} group[BOOSTGROUPS_COUNT];
/* CPU's boost group locking */
raw_spinlock_t lock;
@@ -388,32 +394,52 @@ static inline void init_sched_boost(struct schedtune *st) { }
#endif /* CONFIG_SCHED_HMP */
+static inline bool schedtune_boost_timeout(u64 now, u64 ts)
+{
+ return ((now - ts) > SCHEDTUNE_BOOST_HOLD_NS);
+}
+
+static inline bool
+schedtune_boost_group_active(int idx, struct boost_groups* bg, u64 now)
+{
+ if (bg->group[idx].tasks)
+ return true;
+
+ return !schedtune_boost_timeout(now, bg->group[idx].ts);
+}
+
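The hold means a boost group stays active for SCHEDTUNE_BOOST_HOLD_NS after its last relevant enqueue even if it has no runnable tasks. A small standalone check of the timeout helper with made-up timestamps:

#include <stdio.h>
#include <stdbool.h>

#define SCHEDTUNE_BOOST_HOLD_NS 50000000ULL     /* 50 ms */

static bool boost_timeout(unsigned long long now, unsigned long long ts)
{
	return (now - ts) > SCHEDTUNE_BOOST_HOLD_NS;
}

int main(void)
{
	unsigned long long ts = 1000000000ULL;      /* boost stamped at t = 1 s */

	printf("%d\n", boost_timeout(ts + 30000000ULL, ts));   /* 0: hold still active */
	printf("%d\n", boost_timeout(ts + 60000000ULL, ts));   /* 1: hold expired */
	return 0;
}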
static void
-schedtune_cpu_update(int cpu)
+schedtune_cpu_update(int cpu, u64 now)
{
struct boost_groups *bg;
- int boost_max;
+ u64 boost_ts = now;
+ int boost_max = INT_MIN;
int idx;
bg = &per_cpu(cpu_boost_groups, cpu);
- /* The root boost group is always active */
- boost_max = bg->group[0].boost;
- for (idx = 1; idx < BOOSTGROUPS_COUNT; ++idx) {
+ for (idx = 0; idx < BOOSTGROUPS_COUNT; ++idx) {
/*
* A boost group affects a CPU only if it has
- * RUNNABLE tasks on that CPU
+ * RUNNABLE tasks on that CPU or a hold is still
+ * in effect from a previous task.
*/
- if (bg->group[idx].tasks == 0)
+ if (!schedtune_boost_group_active(idx, bg, now))
+ continue;
+
+ /* this boost group is active */
+ if (boost_max > bg->group[idx].boost)
continue;
- boost_max = max(boost_max, bg->group[idx].boost);
+ boost_max = bg->group[idx].boost;
+ boost_ts = bg->group[idx].ts;
}
- /* Ensures boost_max is non-negative when all cgroup boost values
- * are neagtive. Avoids under-accounting of cpu capacity which may cause
- * task stacking and frequency spikes.*/
- boost_max = max(boost_max, 0);
+
+ /* If there are no active boost groups on the CPU, set no boost */
+ if (boost_max == INT_MIN)
+ boost_max = 0;
bg->boost_max = boost_max;
+ bg->boost_ts = boost_ts;
}
static int
@@ -423,6 +449,7 @@ schedtune_boostgroup_update(int idx, int boost)
int cur_boost_max;
int old_boost;
int cpu;
+ u64 now;
/* Update per CPU boost groups */
for_each_possible_cpu(cpu) {
@@ -439,16 +466,22 @@ schedtune_boostgroup_update(int idx, int boost)
/* Update the boost value of this boost group */
bg->group[idx].boost = boost;
- /* Check if this update increase current max */
- if (boost > cur_boost_max && bg->group[idx].tasks) {
+ now = sched_clock_cpu(cpu);
+ /*
+ * Check if this update increases the current max.
+ */
+ if (boost > cur_boost_max &&
+ schedtune_boost_group_active(idx, bg, now)) {
bg->boost_max = boost;
+ bg->boost_ts = bg->group[idx].ts;
+
trace_sched_tune_boostgroup_update(cpu, 1, bg->boost_max);
continue;
}
/* Check if this update has decreased current max */
if (cur_boost_max == old_boost && old_boost > boost) {
- schedtune_cpu_update(cpu);
+ schedtune_cpu_update(cpu, now);
trace_sched_tune_boostgroup_update(cpu, -1, bg->boost_max);
continue;
}
@@ -462,21 +495,38 @@ schedtune_boostgroup_update(int idx, int boost)
#define ENQUEUE_TASK 1
#define DEQUEUE_TASK -1
+static inline bool
+schedtune_update_timestamp(struct task_struct *p)
+{
+ if (sched_feat(SCHEDTUNE_BOOST_HOLD_ALL))
+ return true;
+
+ return task_has_rt_policy(p);
+}
+
static inline void
schedtune_tasks_update(struct task_struct *p, int cpu, int idx, int task_count)
{
struct boost_groups *bg = &per_cpu(cpu_boost_groups, cpu);
int tasks = bg->group[idx].tasks + task_count;
+ u64 now;
/* Update boosted tasks count while avoiding to make it negative */
bg->group[idx].tasks = max(0, tasks);
+ /* Update timeout on enqueue */
+ if (task_count > 0) {
+ now = sched_clock_cpu(cpu);
+ if (schedtune_update_timestamp(p))
+ bg->group[idx].ts = now;
+
+ /* Boost group activation or deactivation on that RQ */
+ if (bg->group[idx].tasks == 1)
+ schedtune_cpu_update(cpu, now);
+ }
trace_sched_tune_tasks_update(p, cpu, tasks, idx,
- bg->group[idx].boost, bg->boost_max);
-
- /* Boost group activation or deactivation on that RQ */
- if (tasks == 1 || tasks == 0)
- schedtune_cpu_update(cpu);
+ bg->group[idx].boost, bg->boost_max,
+ bg->group[idx].ts);
}
/*
@@ -529,6 +579,7 @@ int schedtune_can_attach(struct cgroup_taskset *tset)
int src_bg; /* Source boost group index */
int dst_bg; /* Destination boost group index */
int tasks;
+ u64 now;
if (!unlikely(schedtune_initialized))
return 0;
@@ -574,18 +625,19 @@ int schedtune_can_attach(struct cgroup_taskset *tset)
* current boost group.
*/
+ now = sched_clock_cpu(cpu);
+
/* Move task from src to dst boost group */
tasks = bg->group[src_bg].tasks - 1;
bg->group[src_bg].tasks = max(0, tasks);
bg->group[dst_bg].tasks += 1;
+ bg->group[dst_bg].ts = now;
+
+ /* Expire the hold so boost_max is recomputed on the next query */
+ bg->boost_ts = now - SCHEDTUNE_BOOST_HOLD_NS;
raw_spin_unlock(&bg->lock);
unlock_rq_of(rq, task, &irq_flags);
-
- /* Update CPU boost group */
- if (bg->group[src_bg].tasks == 0 || bg->group[dst_bg].tasks == 1)
- schedtune_cpu_update(task_cpu(task));
-
}
return 0;
@@ -666,8 +718,15 @@ void schedtune_exit_task(struct task_struct *tsk)
int schedtune_cpu_boost(int cpu)
{
struct boost_groups *bg;
+ u64 now;
bg = &per_cpu(cpu_boost_groups, cpu);
+ now = sched_clock_cpu(cpu);
+
+ /* check to see if we have a hold in effect */
+ if (schedtune_boost_timeout(now, bg->boost_ts))
+ schedtune_cpu_update(cpu, now);
+
return bg->boost_max;
}
@@ -831,6 +890,7 @@ schedtune_boostgroup_init(struct schedtune *st)
bg = &per_cpu(cpu_boost_groups, cpu);
bg->group[st->idx].boost = 0;
bg->group[st->idx].tasks = 0;
+ bg->group[st->idx].ts = 0;
}
return 0;
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index 911606537808..0162ff4647b6 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -55,7 +55,7 @@ __read_mostly unsigned int walt_ravg_window =
static unsigned int sync_cpu;
static ktime_t ktime_last;
-static bool walt_ktime_suspended;
+static __read_mostly bool walt_ktime_suspended;
static inline void fixup_cum_window_demand(struct rq *rq, s64 delta)
{
diff --git a/kernel/softirq.c b/kernel/softirq.c
index d69b77fc7cc1..a4aefd8152b9 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -252,7 +252,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
struct softirq_action *h;
bool in_hardirq;
__u32 deferred;
- __u32 pending;
+ __u32 pending, pending_now, pending_delay, pending_mask;
int softirq_bit;
/*
@@ -262,7 +262,21 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
*/
current->flags &= ~PF_MEMALLOC;
+ /*
+ * If this is not the ksoftirqd thread,
+ * and there is an RT task that is running or is waiting to run,
+ * delay handling the long-running softirq handlers by leaving
+ * them for the ksoftirqd thread.
+ */
+ if (current != __this_cpu_read(ksoftirqd) &&
+ cpu_has_rt_task(smp_processor_id()))
+ pending_mask = LONG_SOFTIRQ_MASK;
+ else
+ pending_mask = 0;
+
pending = local_softirq_pending();
+ pending_delay = pending & pending_mask;
+ pending_now = pending & ~pending_mask;
deferred = softirq_deferred_for_rt(pending);
account_irq_enter_time(current);
__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
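The split is a plain bitmask partition: long-running vectors go into pending_delay (left for ksoftirqd) and everything else stays in pending_now. A standalone illustration; the bit numbers and the contents of LONG_SOFTIRQ_MASK below are placeholders, the real mask is defined elsewhere in this tree:

#include <stdio.h>

#define NET_RX_BIT   3
#define TASKLET_BIT  6
#define LONG_MASK    (1u << NET_RX_BIT)   /* pretend only NET_RX is "long" */

int main(void)
{
	unsigned int pending = (1u << NET_RX_BIT) | (1u << TASKLET_BIT);
	unsigned int mask = LONG_MASK;        /* non-zero only when an RT task is due */

	unsigned int pending_delay = pending & mask;    /* deferred to ksoftirqd */
	unsigned int pending_now = pending & ~mask;     /* handled inline */

	printf("pending=%#x delay=%#x now=%#x\n", pending, pending_delay, pending_now);
	return 0;
}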
@@ -270,14 +284,14 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
restart:
/* Reset the pending bitmask before enabling irqs */
- set_softirq_pending(deferred);
- __this_cpu_write(active_softirqs, pending);
+ __this_cpu_write(active_softirqs, pending_now);
+ set_softirq_pending(pending_delay);
local_irq_enable();
h = softirq_vec;
- while ((softirq_bit = ffs(pending))) {
+ while ((softirq_bit = ffs(pending_now))) {
unsigned int vec_nr;
int prev_count;
@@ -298,7 +312,7 @@ restart:
preempt_count_set(prev_count);
}
h++;
- pending >>= softirq_bit;
+ pending_now >>= softirq_bit;
}
__this_cpu_write(active_softirqs, 0);
@@ -307,10 +321,12 @@ restart:
pending = local_softirq_pending();
deferred = softirq_deferred_for_rt(pending);
+ pending_delay = pending & pending_mask;
+ pending_now = pending & ~pending_mask;
if (pending) {
- if (time_before(jiffies, end) && !need_resched() &&
- --max_restart)
+ if (pending_now && time_before(jiffies, end) &&
+ !need_resched() && --max_restart && !deferred)
goto restart;
}
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index baa82e3ab2c0..8a9ff85df78f 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -1566,7 +1566,8 @@ blk_trace_event_print_binary(struct trace_iterator *iter, int flags,
static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
{
- if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
+ if ((iter->ent->type != TRACE_BLK) ||
+ !(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
return TRACE_TYPE_UNHANDLED;
return print_one_line(iter, true);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 17996354c745..a0c54d118c4e 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -235,6 +235,10 @@ __setup("trace_clock=", set_trace_boot_clock);
static int __init set_tracepoint_printk(char *str)
{
+ /* Ignore the "tp_printk_stop_on_boot" param */
+ if (*str == '_')
+ return 0;
+
if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
tracepoint_printk = 1;
return 1;
@@ -841,10 +845,12 @@ static int __init set_buf_size(char *str)
if (!str)
return 0;
buf_size = memparse(str, &str);
- /* nr_entries can not be zero */
- if (buf_size == 0)
- return 0;
- trace_buf_size = buf_size;
+ /*
+ * nr_entries can not be zero and the startup
+ * tests require some buffer space. Therefore
+ * ensure we have at least 4096 bytes of buffer.
+ */
+ trace_buf_size = max(4096UL, buf_size);
return 1;
}
__setup("trace_buf_size=", set_buf_size);
@@ -2537,8 +2543,15 @@ static void *s_start(struct seq_file *m, loff_t *pos)
* will point to the same string as current_trace->name.
*/
mutex_lock(&trace_types_lock);
- if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
+ if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name)) {
+ /* Close iter->trace before switching to the new current tracer */
+ if (iter->trace->close)
+ iter->trace->close(iter);
*iter->trace = *tr->current_trace;
+ /* Reopen the new current tracer */
+ if (iter->trace->open)
+ iter->trace->open(iter);
+ }
mutex_unlock(&trace_types_lock);
#ifdef CONFIG_TRACER_MAX_TRACE
@@ -4537,12 +4550,18 @@ static void tracing_set_nop(struct trace_array *tr)
tr->current_trace = &nop_trace;
}
+static bool tracer_options_updated;
+
static void add_tracer_options(struct trace_array *tr, struct tracer *t)
{
/* Only enable if the directory has been created already. */
if (!tr->dir)
return;
+ /* Only create trace option files after update_tracer_options finish */
+ if (!tracer_options_updated)
+ return;
+
create_trace_option_files(tr, t);
}
@@ -4959,7 +4978,20 @@ waitagain:
ret = print_trace_line(iter);
if (ret == TRACE_TYPE_PARTIAL_LINE) {
- /* don't print partial lines */
+ /*
+ * If one print_trace_line() fills the entire trace_seq in one shot,
+ * trace_seq_to_user() will return -EBUSY because save_len == 0.
+ * In this case, we need to consume it, otherwise the loop will peek
+ * at this event next time, resulting in an infinite loop.
+ */
+ if (save_len == 0) {
+ iter->seq.full = 0;
+ trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
+ trace_consume(iter);
+ break;
+ }
+
+ /* In other cases, don't print partial lines */
iter->seq.seq.len = save_len;
break;
}
@@ -6827,6 +6859,7 @@ static void __update_tracer_options(struct trace_array *tr)
static void update_tracer_options(struct trace_array *tr)
{
mutex_lock(&trace_types_lock);
+ tracer_options_updated = true;
__update_tracer_options(tr);
mutex_unlock(&trace_types_lock);
}
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index c00137ea939e..48c446d602f6 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -215,7 +215,8 @@ static void irqsoff_trace_open(struct trace_iterator *iter)
{
if (is_graph(iter->tr))
graph_trace_open(iter);
-
+ else
+ iter->private = NULL;
}
static void irqsoff_trace_close(struct trace_iterator *iter)
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 927fd4ad5846..528dc408dcf9 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -272,6 +272,8 @@ static void wakeup_trace_open(struct trace_iterator *iter)
{
if (is_graph(iter->tr))
graph_trace_open(iter);
+ else
+ iter->private = NULL;
}
static void wakeup_trace_close(struct trace_iterator *iter)
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index 5c88204b6f1f..f46075b3d9e4 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -534,3 +534,23 @@ int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz,
return p - dst;
}
EXPORT_SYMBOL(string_escape_mem);
+
+/**
+ * memcpy_and_pad - Copy one buffer to another with padding
+ * @dest: Where to copy to
+ * @dest_len: The destination buffer size
+ * @src: Where to copy from
+ * @count: The number of bytes to copy
+ * @pad: Character to use for padding if space is left in destination.
+ */
+void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
+ int pad)
+{
+ if (dest_len > count) {
+ memcpy(dest, src, count);
+ memset(dest + count, pad, dest_len - count);
+ } else {
+ memcpy(dest, src, dest_len);
+ }
+}
+EXPORT_SYMBOL(memcpy_and_pad);
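The hci_sock.c hunk further down is the in-tree user of this helper. A userspace re-implementation plus a short usage check, showing that the destination is always fully initialised so no stale bytes leak:

#include <stdio.h>
#include <string.h>

static void memcpy_and_pad(void *dest, size_t dest_len, const void *src,
			   size_t count, int pad)
{
	if (dest_len > count) {
		memcpy(dest, src, count);
		memset((char *)dest + count, pad, dest_len - count);
	} else {
		memcpy(dest, src, dest_len);
	}
}

int main(void)
{
	char name[8];

	/* copy a short device name into a fixed-size field, NUL-padded */
	memcpy_and_pad(name, sizeof(name), "hci0", 4, '\0');
	printf("%.8s\n", name);   /* "hci0", remaining 4 bytes are zero */
	return 0;
}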
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index c74e8744722b..e1b4844b5424 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -435,6 +435,41 @@ loop:
return 0;
}
+static int __bpf_fill_stxdw(struct bpf_test *self, int size)
+{
+ unsigned int len = BPF_MAXINSNS;
+ struct bpf_insn *insn;
+ int i;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ insn[0] = BPF_ALU32_IMM(BPF_MOV, R0, 1);
+ insn[1] = BPF_ST_MEM(size, R10, -40, 42);
+
+ for (i = 2; i < len - 2; i++)
+ insn[i] = BPF_STX_XADD(size, R10, R0, -40);
+
+ insn[len - 2] = BPF_LDX_MEM(size, R0, R10, -40);
+ insn[len - 1] = BPF_EXIT_INSN();
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+
+ return 0;
+}
+
+static int bpf_fill_stxw(struct bpf_test *self)
+{
+ return __bpf_fill_stxdw(self, BPF_W);
+}
+
+static int bpf_fill_stxdw(struct bpf_test *self)
+{
+ return __bpf_fill_stxdw(self, BPF_DW);
+}
+
static struct bpf_test tests[] = {
{
"TAX",
@@ -892,6 +927,32 @@ static struct bpf_test tests[] = {
{ { 2, 0 }, { 3, 1 }, { 4, MAX_K } },
},
{
+ "JGE (jt 0), test 1",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_LEN, 0),
+ BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
+ BPF_JUMP(BPF_JMP | BPF_JGE | BPF_X, 0, 0, 1),
+ BPF_STMT(BPF_RET | BPF_K, 1),
+ BPF_STMT(BPF_RET | BPF_K, MAX_K)
+ },
+ CLASSIC,
+ { 4, 4, 4, 3, 3 },
+ { { 2, 0 }, { 3, 1 }, { 4, 1 } },
+ },
+ {
+ "JGE (jt 0), test 2",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_LEN, 0),
+ BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
+ BPF_JUMP(BPF_JMP | BPF_JGE | BPF_X, 0, 0, 1),
+ BPF_STMT(BPF_RET | BPF_K, 1),
+ BPF_STMT(BPF_RET | BPF_K, MAX_K)
+ },
+ CLASSIC,
+ { 4, 4, 5, 3, 3 },
+ { { 4, 1 }, { 5, 1 }, { 6, MAX_K } },
+ },
+ {
"JGE",
.u.insns = {
BPF_STMT(BPF_LDX | BPF_LEN, 0),
@@ -4006,6 +4067,41 @@ static struct bpf_test tests[] = {
{ { 0, 0x22 } },
},
{
+ "STX_XADD_W: Test side-effects, r10: 0x12 + 0x10 = 0x22",
+ .u.insns_int = {
+ BPF_ALU64_REG(BPF_MOV, R1, R10),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
+ BPF_ST_MEM(BPF_W, R10, -40, 0x10),
+ BPF_STX_XADD(BPF_W, R10, R0, -40),
+ BPF_ALU64_REG(BPF_MOV, R0, R10),
+ BPF_ALU64_REG(BPF_SUB, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "STX_XADD_W: Test side-effects, r0: 0x12 + 0x10 = 0x22",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
+ BPF_ST_MEM(BPF_W, R10, -40, 0x10),
+ BPF_STX_XADD(BPF_W, R10, R0, -40),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x12 } },
+ },
+ {
+ "STX_XADD_W: X + 1 + 1 + 1 + ...",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 4134 } },
+ .fill_helper = bpf_fill_stxw,
+ },
+ {
"STX_XADD_DW: Test: 0x12 + 0x10 = 0x22",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
@@ -4018,6 +4114,41 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 0x22 } },
},
+ {
+ "STX_XADD_DW: Test side-effects, r10: 0x12 + 0x10 = 0x22",
+ .u.insns_int = {
+ BPF_ALU64_REG(BPF_MOV, R1, R10),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
+ BPF_ST_MEM(BPF_DW, R10, -40, 0x10),
+ BPF_STX_XADD(BPF_DW, R10, R0, -40),
+ BPF_ALU64_REG(BPF_MOV, R0, R10),
+ BPF_ALU64_REG(BPF_SUB, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "STX_XADD_DW: Test side-effects, r0: 0x12 + 0x10 = 0x22",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
+ BPF_ST_MEM(BPF_DW, R10, -40, 0x10),
+ BPF_STX_XADD(BPF_DW, R10, R0, -40),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x12 } },
+ },
+ {
+ "STX_XADD_DW: X + 1 + 1 + 1 + ...",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 4134 } },
+ .fill_helper = bpf_fill_stxdw,
+ },
/* BPF_JMP | BPF_EXIT */
{
"JMP_EXIT",
@@ -4044,6 +4175,35 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
+ /* BPF_JMP | BPF_JSLT | BPF_K */
+ {
+ "JMP_JSLT_K: Signed jump: if (-2 < -1) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 0xfffffffffffffffeLL),
+ BPF_JMP_IMM(BPF_JSLT, R1, -1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JSLT_K: Signed jump: if (-1 < -1) return 0",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
+ BPF_JMP_IMM(BPF_JSLT, R1, -1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
/* BPF_JMP | BPF_JSGT | BPF_K */
{
"JMP_JSGT_K: Signed jump: if (-1 > -2) return 1",
@@ -4073,6 +4233,73 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
+ /* BPF_JMP | BPF_JSLE | BPF_K */
+ {
+ "JMP_JSLE_K: Signed jump: if (-2 <= -1) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 0xfffffffffffffffeLL),
+ BPF_JMP_IMM(BPF_JSLE, R1, -1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JSLE_K: Signed jump: if (-1 <= -1) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
+ BPF_JMP_IMM(BPF_JSLE, R1, -1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JSLE_K: Signed jump: value walk 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_JMP_IMM(BPF_JSLE, R1, 0, 6),
+ BPF_ALU64_IMM(BPF_SUB, R1, 1),
+ BPF_JMP_IMM(BPF_JSLE, R1, 0, 4),
+ BPF_ALU64_IMM(BPF_SUB, R1, 1),
+ BPF_JMP_IMM(BPF_JSLE, R1, 0, 2),
+ BPF_ALU64_IMM(BPF_SUB, R1, 1),
+ BPF_JMP_IMM(BPF_JSLE, R1, 0, 1),
+ BPF_EXIT_INSN(), /* bad exit */
+ BPF_ALU32_IMM(BPF_MOV, R0, 1), /* good exit */
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JSLE_K: Signed jump: value walk 2",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_JMP_IMM(BPF_JSLE, R1, 0, 4),
+ BPF_ALU64_IMM(BPF_SUB, R1, 2),
+ BPF_JMP_IMM(BPF_JSLE, R1, 0, 2),
+ BPF_ALU64_IMM(BPF_SUB, R1, 2),
+ BPF_JMP_IMM(BPF_JSLE, R1, 0, 1),
+ BPF_EXIT_INSN(), /* bad exit */
+ BPF_ALU32_IMM(BPF_MOV, R0, 1), /* good exit */
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
/* BPF_JMP | BPF_JSGE | BPF_K */
{
"JMP_JSGE_K: Signed jump: if (-1 >= -2) return 1",
@@ -4117,6 +4344,49 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
+ {
+ "JMP_JGT_K: Unsigned jump: if (-1 > 1) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, -1),
+ BPF_JMP_IMM(BPF_JGT, R1, 1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ /* BPF_JMP | BPF_JLT | BPF_K */
+ {
+ "JMP_JLT_K: if (2 < 3) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 2),
+ BPF_JMP_IMM(BPF_JLT, R1, 3, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JGT_K: Unsigned jump: if (1 < -1) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 1),
+ BPF_JMP_IMM(BPF_JLT, R1, -1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
/* BPF_JMP | BPF_JGE | BPF_K */
{
"JMP_JGE_K: if (3 >= 2) return 1",
@@ -4132,6 +4402,21 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
+ /* BPF_JMP | BPF_JLE | BPF_K */
+ {
+ "JMP_JLE_K: if (2 <= 3) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 2),
+ BPF_JMP_IMM(BPF_JLE, R1, 3, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
/* BPF_JMP | BPF_JGT | BPF_K jump backwards */
{
"JMP_JGT_K: if (3 > 2) return 1 (jump backwards)",
@@ -4162,6 +4447,36 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
+ /* BPF_JMP | BPF_JLT | BPF_K jump backwards */
+ {
+ "JMP_JGT_K: if (2 < 3) return 1 (jump backwards)",
+ .u.insns_int = {
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2), /* goto start */
+ BPF_ALU32_IMM(BPF_MOV, R0, 1), /* out: */
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0), /* start: */
+ BPF_LD_IMM64(R1, 2), /* note: this takes 2 insns */
+ BPF_JMP_IMM(BPF_JLT, R1, 3, -6), /* goto out */
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JLE_K: if (3 <= 3) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_JMP_IMM(BPF_JLE, R1, 3, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
/* BPF_JMP | BPF_JNE | BPF_K */
{
"JMP_JNE_K: if (3 != 2) return 1",
@@ -4252,6 +4567,37 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
+ /* BPF_JMP | BPF_JSLT | BPF_X */
+ {
+ "JMP_JSLT_X: Signed jump: if (-2 < -1) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, -1),
+ BPF_LD_IMM64(R2, -2),
+ BPF_JMP_REG(BPF_JSLT, R2, R1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JSLT_X: Signed jump: if (-1 < -1) return 0",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_LD_IMM64(R1, -1),
+ BPF_LD_IMM64(R2, -1),
+ BPF_JMP_REG(BPF_JSLT, R1, R2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
/* BPF_JMP | BPF_JSGE | BPF_X */
{
"JMP_JSGE_X: Signed jump: if (-1 >= -2) return 1",
@@ -4283,6 +4629,37 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
+ /* BPF_JMP | BPF_JSLE | BPF_X */
+ {
+ "JMP_JSLE_X: Signed jump: if (-2 <= -1) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, -1),
+ BPF_LD_IMM64(R2, -2),
+ BPF_JMP_REG(BPF_JSLE, R2, R1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JSLE_X: Signed jump: if (-1 <= -1) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, -1),
+ BPF_LD_IMM64(R2, -1),
+ BPF_JMP_REG(BPF_JSLE, R1, R2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
/* BPF_JMP | BPF_JGT | BPF_X */
{
"JMP_JGT_X: if (3 > 2) return 1",
@@ -4299,6 +4676,52 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
+ {
+ "JMP_JGT_X: Unsigned jump: if (-1 > 1) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, -1),
+ BPF_LD_IMM64(R2, 1),
+ BPF_JMP_REG(BPF_JGT, R1, R2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ /* BPF_JMP | BPF_JLT | BPF_X */
+ {
+ "JMP_JLT_X: if (2 < 3) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 2),
+ BPF_JMP_REG(BPF_JLT, R2, R1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JLT_X: Unsigned jump: if (1 < -1) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, -1),
+ BPF_LD_IMM64(R2, 1),
+ BPF_JMP_REG(BPF_JLT, R2, R1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
/* BPF_JMP | BPF_JGE | BPF_X */
{
"JMP_JGE_X: if (3 >= 2) return 1",
@@ -4330,6 +4753,37 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
+ /* BPF_JMP | BPF_JLE | BPF_X */
+ {
+ "JMP_JLE_X: if (2 <= 3) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 2),
+ BPF_JMP_REG(BPF_JLE, R2, R1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JLE_X: if (3 <= 3) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 3),
+ BPF_JMP_REG(BPF_JLE, R1, R2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
{
/* Mainly testing JIT + imm64 here. */
"JMP_JGE_X: ldimm64 test 1",
@@ -4375,6 +4829,50 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
+ {
+ "JMP_JLE_X: ldimm64 test 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 2),
+ BPF_JMP_REG(BPF_JLE, R2, R1, 2),
+ BPF_LD_IMM64(R0, 0xffffffffffffffffULL),
+ BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeULL),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xeeeeeeeeU } },
+ },
+ {
+ "JMP_JLE_X: ldimm64 test 2",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 2),
+ BPF_JMP_REG(BPF_JLE, R2, R1, 0),
+ BPF_LD_IMM64(R0, 0xffffffffffffffffULL),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xffffffffU } },
+ },
+ {
+ "JMP_JLE_X: ldimm64 test 3",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 2),
+ BPF_JMP_REG(BPF_JLE, R2, R1, 4),
+ BPF_LD_IMM64(R0, 0xffffffffffffffffULL),
+ BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeULL),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
/* BPF_JMP | BPF_JNE | BPF_X */
{
"JMP_JNE_X: if (3 != 2) return 1",
diff --git a/net/Kconfig b/net/Kconfig
index 22fe472017fb..539a7a01383f 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -295,7 +295,7 @@ config BQL
config BPF_JIT
bool "enable BPF Just In Time compiler"
- depends on HAVE_BPF_JIT
+ depends on HAVE_CBPF_JIT || HAVE_EBPF_JIT
depends on MODULES
---help---
Berkeley Packet Filter filtering capabilities are normally handled
@@ -422,7 +422,15 @@ config DST_CACHE
endif # if NET
-# Used by archs to tell that they support BPF_JIT
-config HAVE_BPF_JIT
+# Used by archs to tell that they support BPF JIT compiler plus which flavour.
+# Only one of the two can be selected for a specific arch since eBPF JIT supersedes
+# the cBPF JIT.
+
+# Classic BPF JIT (cBPF)
+config HAVE_CBPF_JIT
+ bool
+
+# Extended BPF JIT (eBPF)
+config HAVE_EBPF_JIT
bool
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index f84cfd0d4c65..6ab9dda306b9 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -333,7 +333,8 @@ static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
ni->type = hdev->dev_type;
ni->bus = hdev->bus;
bacpy(&ni->bdaddr, &hdev->bdaddr);
- memcpy(ni->name, hdev->name, 8);
+ memcpy_and_pad(ni->name, sizeof(ni->name), hdev->name,
+ strnlen(hdev->name, sizeof(ni->name)), '\0');
opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
break;
diff --git a/net/core/dev.c b/net/core/dev.c
index 3d26340ef52b..79f7b6fe9ce1 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -95,6 +95,7 @@
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
+#include <linux/bpf.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
@@ -5819,7 +5820,7 @@ EXPORT_SYMBOL(netdev_lower_dev_get_private);
int dev_get_nest_level(struct net_device *dev,
- bool (*type_check)(struct net_device *dev))
+ bool (*type_check)(const struct net_device *dev))
{
struct net_device *lower = NULL;
struct list_head *iter;
@@ -6327,6 +6328,38 @@ int dev_change_proto_down(struct net_device *dev, bool proto_down)
EXPORT_SYMBOL(dev_change_proto_down);
/**
+ * dev_change_xdp_fd - set or clear a bpf program for a device rx path
+ * @dev: device
+ * @fd: new program fd or negative value to clear
+ *
+ * Set or clear a bpf program for a device
+ */
+int dev_change_xdp_fd(struct net_device *dev, int fd)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+ struct bpf_prog *prog = NULL;
+ struct netdev_xdp xdp = {};
+ int err;
+
+ if (!ops->ndo_xdp)
+ return -EOPNOTSUPP;
+ if (fd >= 0) {
+ prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP);
+ if (IS_ERR(prog))
+ return PTR_ERR(prog);
+ }
+
+ xdp.command = XDP_SETUP_PROG;
+ xdp.prog = prog;
+ err = ops->ndo_xdp(dev, &xdp);
+ if (err < 0 && prog)
+ bpf_prog_put(prog);
+
+ return err;
+}
+EXPORT_SYMBOL(dev_change_xdp_fd);
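On the driver side, ops->ndo_xdp receives the XDP_SETUP_PROG command built above. A hedged sketch of a minimal handler; struct my_priv and its xdp_prog member are hypothetical, and a real driver also has to synchronise with its datapath before dropping the old program:

static int my_ndo_xdp(struct net_device *dev, struct netdev_xdp *xdp)
{
	struct my_priv *priv = netdev_priv(dev);
	struct bpf_prog *old;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		old = priv->xdp_prog;          /* may be NULL */
		priv->xdp_prog = xdp->prog;    /* NULL clears the program */
		if (old)
			bpf_prog_put(old);
		return 0;
	default:
		return -EINVAL;
	}
}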
+
+/**
* dev_new_index - allocate an ifindex
* @net: the applicable net namespace
*
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 7e4e7deb2542..b0c4440e8514 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -613,6 +613,37 @@ void netdev_rss_key_fill(void *buffer, size_t len)
}
EXPORT_SYMBOL(netdev_rss_key_fill);
+static int ethtool_get_max_rxfh_channel(struct net_device *dev, u32 *max)
+{
+ u32 dev_size, current_max = 0;
+ u32 *indir;
+ int ret;
+
+ if (!dev->ethtool_ops->get_rxfh_indir_size ||
+ !dev->ethtool_ops->get_rxfh)
+ return -EOPNOTSUPP;
+ dev_size = dev->ethtool_ops->get_rxfh_indir_size(dev);
+ if (dev_size == 0)
+ return -EOPNOTSUPP;
+
+ indir = kcalloc(dev_size, sizeof(indir[0]), GFP_USER);
+ if (!indir)
+ return -ENOMEM;
+
+ ret = dev->ethtool_ops->get_rxfh(dev, indir, NULL, NULL);
+ if (ret)
+ goto out;
+
+ while (dev_size--)
+ current_max = max(current_max, indir[dev_size]);
+
+ *max = current_max;
+
+out:
+ kfree(indir);
+ return ret;
+}
+
static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev,
void __user *useraddr)
{
@@ -709,6 +740,14 @@ static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev,
}
ret = ops->set_rxfh(dev, indir, NULL, ETH_RSS_HASH_NO_CHANGE);
+ if (ret)
+ goto out;
+
+ /* indicate whether rxfh was set to default */
+ if (user_size == 0)
+ dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
+ else
+ dev->priv_flags |= IFF_RXFH_CONFIGURED;
out:
kfree(indir);
@@ -868,6 +907,14 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
}
ret = ops->set_rxfh(dev, indir, hkey, rxfh.hfunc);
+ if (ret)
+ goto out;
+
+ /* indicate whether rxfh was set to default */
+ if (rxfh.indir_size == 0)
+ dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
+ else if (rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE)
+ dev->priv_flags |= IFF_RXFH_CONFIGURED;
out:
kfree(rss_config);
@@ -1210,6 +1257,7 @@ static noinline_for_stack int ethtool_set_channels(struct net_device *dev,
void __user *useraddr)
{
struct ethtool_channels channels;
+ u32 max_rx_in_use = 0;
if (!dev->ethtool_ops->set_channels)
return -EOPNOTSUPP;
@@ -1217,6 +1265,13 @@ static noinline_for_stack int ethtool_set_channels(struct net_device *dev,
if (copy_from_user(&channels, useraddr, sizeof(channels)))
return -EFAULT;
+ /* ensure the new Rx count fits within the configured Rx flow
+ * indirection table settings */
+ if (netif_is_rxfh_configured(dev) &&
+ !ethtool_get_max_rxfh_channel(dev, &max_rx_in_use) &&
+ (channels.combined_count + channels.rx_count) <= max_rx_in_use)
+ return -EINVAL;
+
return dev->ethtool_ops->set_channels(dev, &channels);
}
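Worked example of the new check: if the RSS indirection table currently references rings 0..7, max_rx_in_use comes back as 7 and any request with rx_count + combined_count <= 7 is rejected, since it would leave referenced rings without a queue. A standalone version of the max-index scan (table contents are hypothetical):

#include <stdio.h>

int main(void)
{
	unsigned int indir[] = { 0, 1, 2, 3, 4, 5, 6, 7 };   /* hypothetical table */
	unsigned int i, current_max = 0;

	for (i = 0; i < sizeof(indir) / sizeof(indir[0]); i++)
		if (indir[i] > current_max)
			current_max = indir[i];

	/* the request must keep more than current_max rx/combined rings */
	printf("max ring referenced: %u\n", current_max);    /* 7 */
	return 0;
}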
diff --git a/net/core/filter.c b/net/core/filter.c
index ebf7754810f0..573af321ee93 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -512,14 +512,27 @@ do_pass:
break;
}
- /* Convert JEQ into JNE when 'jump_true' is next insn. */
- if (fp->jt == 0 && BPF_OP(fp->code) == BPF_JEQ) {
- insn->code = BPF_JMP | BPF_JNE | bpf_src;
+ /* Convert some jumps when 'jump_true' is next insn. */
+ if (fp->jt == 0) {
+ switch (BPF_OP(fp->code)) {
+ case BPF_JEQ:
+ insn->code = BPF_JMP | BPF_JNE | bpf_src;
+ break;
+ case BPF_JGT:
+ insn->code = BPF_JMP | BPF_JLE | bpf_src;
+ break;
+ case BPF_JGE:
+ insn->code = BPF_JMP | BPF_JLT | bpf_src;
+ break;
+ default:
+ goto jmp_rest;
+ }
+
target = i + fp->jf + 1;
BPF_EMIT_JMP;
break;
}
-
+jmp_rest:
/* Other jumps are mapped into two insns: Jxx and JA. */
target = i + fp->jt + 1;
insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
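Worked example of the new jt == 0 cases (offsets made up):

/*
 * Classic filter insn:  BPF_JUMP(BPF_JMP | BPF_JGT | BPF_K, 42, 0, 3)
 *   - falls through (jt = 0) when A > 42, otherwise skips jf = 3 insns.
 *
 * With this change it converts to one inverted eBPF jump,
 *   BPF_JMP | BPF_JLE | BPF_K  with target = i + fp->jf + 1,
 * rather than the two-instruction Jxx + JA pair emitted under jmp_rest.
 */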
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 2a64de757be9..8d612cefb5f3 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -668,7 +668,7 @@ int netpoll_setup(struct netpoll *np)
int err;
rtnl_lock();
- if (np->dev_name)
+ if (np->dev_name[0])
ndev = __dev_get_by_name(net, np->dev_name);
if (!ndev) {
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index ac0e8152c0f1..11d79958a767 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -465,7 +465,7 @@ static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk,
.fl4_dport = dccp_hdr(skb)->dccph_sport,
};
- security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
+ security_skb_classify_flow(skb, flowi4_to_flowi_common(&fl4));
rt = ip_route_output_flow(net, &fl4, sk);
if (IS_ERR(rt)) {
IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
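
Editor's note: this flowi4_to_flowi_common()/flowi6_to_flowi_common() substitution repeats mechanically through the networking files below. The security_*_classify_flow() and xfrm hooks are switched to take the address-family-independent struct flowi_common (see the security/ hunks near the end of this diff), so every caller now passes the common header embedded in its flowi4 or flowi6 instead of casting to the full struct flowi. A heavily simplified sketch of that relationship, with the structures cut down to the single secid field the hooks touch; the real kernel definitions carry many more members:

```c
#include <stdio.h>

/* Trimmed stand-ins for the kernel structures: only the secid field the
 * security hooks actually touch is kept. */
struct flowi_common {
	unsigned int flowic_secid;
};

struct flowi4 {
	struct flowi_common __fl_common;	/* embedded common header */
	unsigned int daddr, saddr;		/* v4-only selectors */
};

/* Analogue of flowi4_to_flowi_common(): hand out the embedded header. */
static struct flowi_common *flowi4_to_flowi_common(struct flowi4 *fl4)
{
	return &fl4->__fl_common;
}

/* Analogue of the reworked security_skb_classify_flow(): it only ever
 * needs the family-independent part. */
static void classify_flow(struct flowi_common *flic, unsigned int secid)
{
	flic->flowic_secid = secid;
}

int main(void)
{
	struct flowi4 fl4 = { .daddr = 0x0a000001, .saddr = 0x0a000002 };

	classify_flow(flowi4_to_flowi_common(&fl4), 42);
	printf("secid stored via the common header: %u\n",
	       fl4.__fl_common.flowic_secid);
	return 0;
}
```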
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 0958721e4d3d..64f0e88fe0e8 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -202,7 +202,7 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req
fl6.flowi6_oif = ireq->ir_iif;
fl6.fl6_dport = ireq->ir_rmt_port;
fl6.fl6_sport = htons(ireq->ir_num);
- security_req_classify_flow(req, flowi6_to_flowi(&fl6));
+ security_req_classify_flow(req, flowi6_to_flowi_common(&fl6));
rcu_read_lock();
@@ -273,7 +273,7 @@ static void dccp_v6_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
fl6.flowi6_oif = inet6_iif(rxskb);
fl6.fl6_dport = dccp_hdr(skb)->dccph_dport;
fl6.fl6_sport = dccp_hdr(skb)->dccph_sport;
- security_skb_classify_flow(rxskb, flowi6_to_flowi(&fl6));
+ security_skb_classify_flow(rxskb, flowi6_to_flowi_common(&fl6));
/* sk = NULL, but it is safe for now. RST socket required. */
dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL);
@@ -879,7 +879,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
fl6.flowi6_oif = sk->sk_bound_dev_if;
fl6.fl6_dport = usin->sin6_port;
fl6.fl6_sport = inet->inet_sport;
- security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+ security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));
opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
final_p = fl6_update_dst(&fl6, opt, &final);
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 358d9dabda1d..2c14d607a683 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -433,7 +433,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
fl4.flowi4_proto = IPPROTO_ICMP;
fl4.flowi4_oif = l3mdev_master_ifindex(skb->dev);
- security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
+ security_skb_classify_flow(skb, flowi4_to_flowi_common(&fl4));
rt = ip_route_output_key(net, &fl4);
if (IS_ERR(rt))
goto out_unlock;
@@ -504,7 +504,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
route_lookup_dev = icmp_get_route_lookup_dev(skb_in);
fl4->flowi4_oif = l3mdev_master_ifindex(route_lookup_dev);
- security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4));
+ security_skb_classify_flow(skb_in, flowi4_to_flowi_common(fl4));
rt = __ip_route_output_key_hash(net, fl4,
icmp_multipath_hash_skb(skb_in));
if (IS_ERR(rt))
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index f6f3ee843d12..fa9df2e6d330 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -446,7 +446,7 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
(opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
ireq->ir_loc_addr, ireq->ir_rmt_port,
htons(ireq->ir_num), sk->sk_uid);
- security_req_classify_flow(req, flowi4_to_flowi(fl4));
+ security_req_classify_flow(req, flowi4_to_flowi_common(fl4));
rt = ip_route_output_flow(net, fl4, sk);
if (IS_ERR(rt))
goto no_route;
@@ -482,7 +482,7 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
(opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
ireq->ir_loc_addr, ireq->ir_rmt_port,
htons(ireq->ir_num), sk->sk_uid);
- security_req_classify_flow(req, flowi4_to_flowi(fl4));
+ security_req_classify_flow(req, flowi4_to_flowi_common(fl4));
rt = ip_route_output_flow(net, fl4, sk);
if (IS_ERR(rt))
goto no_route;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 0e464fae6434..a0739ef47227 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1624,7 +1624,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
daddr, saddr,
tcp_hdr(skb)->source, tcp_hdr(skb)->dest,
arg->uid);
- security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
+ security_skb_classify_flow(skb, flowi4_to_flowi_common(&fl4));
rt = ip_route_output_key(net, &fl4);
if (IS_ERR(rt))
return;
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index f56e803c07f9..cc04672d4d8c 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -808,7 +808,7 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
fl4.fl4_icmp_type = user_icmph.type;
fl4.fl4_icmp_code = user_icmph.code;
- security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
+ security_sk_classify_flow(sk, flowi4_to_flowi_common(&fl4));
rt = ip_route_output_flow(net, &fl4, sk);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index b75a24adb580..fc2fb170171b 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -630,7 +630,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
goto done;
}
- security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
+ security_sk_classify_flow(sk, flowi4_to_flowi_common(&fl4));
rt = ip_route_output_flow(net, &fl4, sk);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index ba0301860985..6acc73d3c83d 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -382,7 +382,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
inet_sk_flowi_flags(sk),
opt->srr ? opt->faddr : ireq->ir_rmt_addr,
ireq->ir_loc_addr, th->source, th->dest, sk->sk_uid);
- security_req_classify_flow(req, flowi4_to_flowi(&fl4));
+ security_req_classify_flow(req, flowi4_to_flowi_common(&fl4));
rt = ip_route_output_key(sock_net(sk), &fl4);
if (IS_ERR(rt)) {
reqsk_free(req);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 4976a1c9835f..1cf00a9d23f5 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1076,7 +1076,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
goto out;
}
- security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
+ security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4));
rt = ip_route_output_flow(net, fl4, sk);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 3cd9ad455910..091a93e30e2b 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -708,7 +708,7 @@ int inet6_sk_rebuild_header(struct sock *sk)
fl6.fl6_dport = inet->inet_dport;
fl6.fl6_sport = inet->inet_sport;
fl6.flowi6_uid = sk->sk_uid;
- security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+ security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));
rcu_read_lock();
final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt),
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index c41b717b0ef2..7e966fe1ffd6 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -174,7 +174,7 @@ ipv4_connected:
if (!fl6.flowi6_oif && (addr_type&IPV6_ADDR_MULTICAST))
fl6.flowi6_oif = np->mcast_oif;
- security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+ security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));
rcu_read_lock();
opt = flowlabel ? flowlabel->opt : rcu_dereference(np->opt);
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 463f533bd289..4e6755e99318 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -484,7 +484,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
fl6.fl6_icmp_type = type;
fl6.fl6_icmp_code = code;
fl6.flowi6_uid = sock_net_uid(net, NULL);
- security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
+ security_skb_classify_flow(skb, flowi6_to_flowi_common(&fl6));
sk = icmpv6_xmit_lock(net);
if (!sk)
@@ -589,7 +589,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
fl6.fl6_icmp_type = ICMPV6_ECHO_REPLY;
fl6.flowi6_mark = mark;
fl6.flowi6_uid = sock_net_uid(net, NULL);
- security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
+ security_skb_classify_flow(skb, flowi6_to_flowi_common(&fl6));
sk = icmpv6_xmit_lock(net);
if (!sk)
@@ -835,7 +835,7 @@ void icmpv6_flow_init(struct sock *sk, struct flowi6 *fl6,
fl6->fl6_icmp_type = type;
fl6->fl6_icmp_code = 0;
fl6->flowi6_oif = oif;
- security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
+ security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6));
}
static int __net_init icmpv6_sk_init(struct net *net)
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index c7ce585ac17e..ceeb3d221db5 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -89,7 +89,7 @@ struct dst_entry *inet6_csk_route_req(const struct sock *sk,
fl6->fl6_dport = ireq->ir_rmt_port;
fl6->fl6_sport = htons(ireq->ir_num);
fl6->flowi6_uid = sk->sk_uid;
- security_req_classify_flow(req, flowi6_to_flowi(fl6));
+ security_req_classify_flow(req, flowi6_to_flowi_common(fl6));
dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p);
if (IS_ERR(dst))
@@ -138,7 +138,7 @@ static struct dst_entry *inet6_csk_route_socket(struct sock *sk,
fl6->fl6_sport = inet->inet_sport;
fl6->fl6_dport = inet->inet_dport;
fl6->flowi6_uid = sk->sk_uid;
- security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
+ security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6));
rcu_read_lock();
final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
index 7117e5bef412..96e91bbc9329 100644
--- a/net/ipv6/netfilter/nf_reject_ipv6.c
+++ b/net/ipv6/netfilter/nf_reject_ipv6.c
@@ -158,7 +158,7 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
fl6.fl6_sport = otcph->dest;
fl6.fl6_dport = otcph->source;
fl6.flowi6_mark = IP6_REPLY_MARK(net, oldskb->mark);
- security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
+ security_skb_classify_flow(oldskb, flowi6_to_flowi_common(&fl6));
dst = ip6_route_output(net, NULL, &fl6);
if (dst == NULL || dst->error) {
dst_release(dst);
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 40b835720722..9fced723d7e6 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -144,7 +144,7 @@ int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
fl6.flowi6_uid = sk->sk_uid;
fl6.fl6_icmp_type = user_icmph.icmp6_type;
fl6.fl6_icmp_code = user_icmph.icmp6_code;
- security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+ security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));
dst = ip6_sk_dst_lookup_flow(sk, &fl6, daddr);
if (IS_ERR(dst))
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 8614321b4c54..ebd64b99dc98 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -885,7 +885,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
fl6.flowi6_oif = np->mcast_oif;
else if (!fl6.flowi6_oif)
fl6.flowi6_oif = np->ucast_oif;
- security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+ security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));
if (hdrincl)
fl6.flowi6_flags |= FLOWI_FLAG_KNOWN_NH;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index fd58f7feec8a..decb94219a70 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2310,12 +2310,12 @@ static struct rt6_info *rt6_get_route_info(struct net_device *dev,
const struct in6_addr *prefix, int prefixlen,
const struct in6_addr *gwaddr)
{
+ u32 tb_id = l3mdev_fib_table(dev) ? : addrconf_rt_table(dev, RT6_TABLE_INFO);
struct fib6_node *fn;
struct rt6_info *rt = NULL;
struct fib6_table *table;
- table = fib6_get_table(dev_net(dev),
- addrconf_rt_table(dev, RT6_TABLE_INFO));
+ table = fib6_get_table(dev_net(dev), tb_id);
if (!table)
return NULL;
@@ -2354,7 +2354,7 @@ static struct rt6_info *rt6_add_route_info(struct net_device *dev,
.fc_nlinfo.nl_net = dev_net(dev),
};
- cfg.fc_table = l3mdev_fib_table_by_index(dev_net(dev), dev->ifindex) ? : addrconf_rt_table(dev, RT6_TABLE_INFO);
+ cfg.fc_table = l3mdev_fib_table(dev) ? : addrconf_rt_table(dev, RT6_TABLE_INFO);
cfg.fc_dst = *prefix;
cfg.fc_gateway = *gwaddr;
@@ -2370,11 +2370,11 @@ static struct rt6_info *rt6_add_route_info(struct net_device *dev,
struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev)
{
+ u32 tb_id = l3mdev_fib_table(dev) ? : addrconf_rt_table(dev, RT6_TABLE_DFLT);
struct rt6_info *rt;
struct fib6_table *table;
- table = fib6_get_table(dev_net(dev),
- addrconf_rt_table(dev, RT6_TABLE_MAIN));
+ table = fib6_get_table(dev_net(dev), tb_id);
if (!table)
return NULL;
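
Editor's note: the new tb_id initializers above rely on GCC's binary "?:" extension. l3mdev_fib_table(dev) ?: addrconf_rt_table(dev, ...) evaluates the l3mdev lookup once and falls back to the per-device table only when the result is zero, i.e. when the device is not enslaved to an L3 master device. A tiny illustration of the operator (GNU C extension; the functions and table ids here are stand-ins):

```c
#include <stdio.h>

/* Stand-in for l3mdev_fib_table(): returns a table id, or 0 when the
 * device has no L3 master. */
static unsigned int l3mdev_table(int has_master)
{
	return has_master ? 100 : 0;
}

int main(void)
{
	unsigned int fallback = 254;	/* e.g. the main routing table */

	/* a ?: b is equivalent to a ? a : b, but evaluates a only once */
	unsigned int with_master = l3mdev_table(1) ?: fallback;
	unsigned int without_master = l3mdev_table(0) ?: fallback;

	printf("with master: %u, without: %u\n", with_master, without_master);
	return 0;
}
```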
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 2133cc5e6a74..bc4f37ef9e1d 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -230,7 +230,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
fl6.fl6_dport = ireq->ir_rmt_port;
fl6.fl6_sport = inet_sk(sk)->inet_sport;
fl6.flowi6_uid = sk->sk_uid;
- security_req_classify_flow(req, flowi6_to_flowi(&fl6));
+ security_req_classify_flow(req, flowi6_to_flowi_common(&fl6));
dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
if (IS_ERR(dst))
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 50091116fb55..9de573f89ea7 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -244,7 +244,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
final_p = fl6_update_dst(&fl6, opt, &final);
- security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+ security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));
dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
if (IS_ERR(dst)) {
@@ -842,7 +842,7 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
fl6.fl6_dport = t1->dest;
fl6.fl6_sport = t1->source;
fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
- security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
+ security_skb_classify_flow(skb, flowi6_to_flowi_common(&fl6));
/* Pass a socket to ip6_dst_lookup either it is for RST
* Underlying function will use this to retrieve the network
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index ce3a100cbf68..e670c45829bd 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1295,7 +1295,7 @@ do_udp_sendmsg:
} else if (!fl6.flowi6_oif)
fl6.flowi6_oif = np->ucast_oif;
- security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+ security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));
dst = ip6_sk_dst_lookup_flow(sk, &fl6, final_p);
if (IS_ERR(dst)) {
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 8cc6a554b6f8..1a8a4e451a5f 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -629,7 +629,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
else if (!fl6.flowi6_oif)
fl6.flowi6_oif = np->ucast_oif;
- security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+ security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));
dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
if (IS_ERR(dst)) {
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 0226ccb0e5cd..8d33125518de 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -1114,8 +1114,15 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
if (!channel || channel->flags & IEEE80211_CHAN_DISABLED)
return;
- if (mesh_matches_local(sdata, &elems))
- mesh_neighbour_update(sdata, mgmt->sa, &elems);
+ if (mesh_matches_local(sdata, &elems)) {
+ mpl_dbg(sdata, "rssi_threshold=%d,rx_status->signal=%d\n",
+ sdata->u.mesh.mshcfg.rssi_threshold, rx_status->signal);
+ if (!sdata->u.mesh.user_mpm ||
+ sdata->u.mesh.mshcfg.rssi_threshold == 0 ||
+ sdata->u.mesh.mshcfg.rssi_threshold < rx_status->signal)
+ mesh_neighbour_update(sdata, mgmt->sa, &elems,
+ rx_status);
+ }
if (ifmsh->sync_ops)
ifmsh->sync_ops->rx_bcn_presp(sdata,
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 4a8019f79fb2..7274e6719e8b 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -289,7 +289,8 @@ int mesh_gate_num(struct ieee80211_sub_if_data *sdata);
/* Mesh plinks */
void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata,
- u8 *hw_addr, struct ieee802_11_elems *ie);
+ u8 *hw_addr, struct ieee802_11_elems *ie,
+ struct ieee80211_rx_status *rx_status);
bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie);
u32 mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata);
void mesh_plink_broken(struct sta_info *sta);
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 2f7273db07c8..51b5d98f6c5d 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -489,7 +489,8 @@ __mesh_sta_info_alloc(struct ieee80211_sub_if_data *sdata, u8 *hw_addr)
static struct sta_info *
mesh_sta_info_alloc(struct ieee80211_sub_if_data *sdata, u8 *addr,
- struct ieee802_11_elems *elems)
+ struct ieee802_11_elems *elems,
+ struct ieee80211_rx_status *rx_status)
{
struct sta_info *sta = NULL;
@@ -497,11 +498,17 @@ mesh_sta_info_alloc(struct ieee80211_sub_if_data *sdata, u8 *addr,
if (sdata->u.mesh.user_mpm ||
sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED) {
if (mesh_peer_accepts_plinks(elems) &&
- mesh_plink_availables(sdata))
+ mesh_plink_availables(sdata)) {
+ int sig = 0;
+
+ if (ieee80211_hw_check(&sdata->local->hw, SIGNAL_DBM))
+ sig = rx_status->signal;
+
cfg80211_notify_new_peer_candidate(sdata->dev, addr,
elems->ie_start,
elems->total_len,
- GFP_KERNEL);
+ sig, GFP_KERNEL);
+ }
} else
sta = __mesh_sta_info_alloc(sdata, addr);
@@ -514,13 +521,15 @@ mesh_sta_info_alloc(struct ieee80211_sub_if_data *sdata, u8 *addr,
* @sdata: local meshif
* @addr: peer's address
* @elems: IEs from beacon or mesh peering frame.
+ * @rx_status: rx status for the frame for signal reporting
*
* Return existing or newly allocated sta_info under RCU read lock.
* (re)initialize with given IEs.
*/
static struct sta_info *
mesh_sta_info_get(struct ieee80211_sub_if_data *sdata,
- u8 *addr, struct ieee802_11_elems *elems) __acquires(RCU)
+ u8 *addr, struct ieee802_11_elems *elems,
+ struct ieee80211_rx_status *rx_status) __acquires(RCU)
{
struct sta_info *sta = NULL;
@@ -531,7 +540,7 @@ mesh_sta_info_get(struct ieee80211_sub_if_data *sdata,
} else {
rcu_read_unlock();
/* can't run atomic */
- sta = mesh_sta_info_alloc(sdata, addr, elems);
+ sta = mesh_sta_info_alloc(sdata, addr, elems, rx_status);
if (!sta) {
rcu_read_lock();
return NULL;
@@ -552,17 +561,19 @@ mesh_sta_info_get(struct ieee80211_sub_if_data *sdata,
* @sdata: local meshif
* @addr: peer's address
* @elems: IEs from beacon or mesh peering frame
+ * @rx_status: rx status for the frame for signal reporting
*
* Initiates peering if appropriate.
*/
void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata,
u8 *hw_addr,
- struct ieee802_11_elems *elems)
+ struct ieee802_11_elems *elems,
+ struct ieee80211_rx_status *rx_status)
{
struct sta_info *sta;
u32 changed = 0;
- sta = mesh_sta_info_get(sdata, hw_addr, elems);
+ sta = mesh_sta_info_get(sdata, hw_addr, elems, rx_status);
if (!sta)
goto out;
@@ -1044,7 +1055,8 @@ out:
static void
mesh_process_plink_frame(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt,
- struct ieee802_11_elems *elems)
+ struct ieee802_11_elems *elems,
+ struct ieee80211_rx_status *rx_status)
{
struct sta_info *sta;
@@ -1109,7 +1121,7 @@ mesh_process_plink_frame(struct ieee80211_sub_if_data *sdata,
if (event == OPN_ACPT) {
rcu_read_unlock();
/* allocate sta entry if necessary and update info */
- sta = mesh_sta_info_get(sdata, mgmt->sa, elems);
+ sta = mesh_sta_info_get(sdata, mgmt->sa, elems, rx_status);
if (!sta) {
mpl_dbg(sdata, "Mesh plink: failed to init peer!\n");
goto unlock_rcu;
@@ -1175,5 +1187,5 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata,
return;
}
ieee802_11_parse_elems(baseaddr, len - baselen, true, &elems);
- mesh_process_plink_frame(sdata, mgmt, &elems);
+ mesh_process_plink_frame(sdata, mgmt, &elems, rx_status);
}
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index d604c9f91b96..398fa066d249 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -114,6 +114,9 @@ static int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
}
}
+ if (WARN_ON(connkeys && connkeys->def < 0))
+ return -EINVAL;
+
if (WARN_ON(wdev->connect_keys))
kzfree(wdev->connect_keys);
wdev->connect_keys = connkeys;
@@ -292,7 +295,7 @@ int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev,
wdev->wext.ibss.privacy = wdev->wext.default_key != -1;
- if (wdev->wext.keys) {
+ if (wdev->wext.keys && wdev->wext.keys->def != -1) {
ck = kmemdup(wdev->wext.keys, sizeof(*ck), GFP_KERNEL);
if (!ck)
return -ENOMEM;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 9a796931e55e..4b61c19a7eb0 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -889,6 +889,15 @@ nl80211_parse_connkeys(struct cfg80211_registered_device *rdev,
struct nlattr *key;
struct cfg80211_cached_keys *result;
int rem, err, def = 0;
+ bool have_key = false;
+
+ nla_for_each_nested(key, keys, rem) {
+ have_key = true;
+ break;
+ }
+
+ if (!have_key)
+ return NULL;
result = kzalloc(sizeof(*result), GFP_KERNEL);
if (!result)
@@ -934,6 +943,11 @@ nl80211_parse_connkeys(struct cfg80211_registered_device *rdev,
}
}
+ if (result->def < 0) {
+ err = -EINVAL;
+ goto error;
+ }
+
return result;
error:
kfree(result);
@@ -5872,7 +5886,7 @@ static int nl80211_update_mesh_config(struct sk_buff *skb,
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
struct wireless_dev *wdev = dev->ieee80211_ptr;
- struct mesh_config cfg;
+ struct mesh_config cfg = {};
u32 mask;
int err;
@@ -12802,7 +12816,8 @@ void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev,
}
void cfg80211_notify_new_peer_candidate(struct net_device *dev, const u8 *addr,
- const u8* ie, u8 ie_len, gfp_t gfp)
+ const u8 *ie, u8 ie_len,
+ int sig_dbm, gfp_t gfp)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
@@ -12828,7 +12843,9 @@ void cfg80211_notify_new_peer_candidate(struct net_device *dev, const u8 *addr,
nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr) ||
(ie_len && ie &&
- nla_put(msg, NL80211_ATTR_IE, ie_len , ie)))
+ nla_put(msg, NL80211_ATTR_IE, ie_len, ie)) ||
+ (sig_dbm &&
+ nla_put_u32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm)))
goto nla_put_failure;
genlmsg_end(msg, hdr);
@@ -13575,7 +13592,8 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
wdev->chandef = *chandef;
wdev->preset_chandef = *chandef;
- if (wdev->iftype == NL80211_IFTYPE_STATION &&
+ if ((wdev->iftype == NL80211_IFTYPE_STATION ||
+ wdev->iftype == NL80211_IFTYPE_P2P_CLIENT) &&
!WARN_ON(!wdev->current_bss))
wdev->current_bss->pub.channel = chandef->chan;
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index ed772d4937a9..593258220735 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -290,6 +290,15 @@ void cfg80211_conn_work(struct work_struct *work)
rtnl_unlock();
}
+static void cfg80211_step_auth_next(struct cfg80211_conn *conn,
+ struct cfg80211_bss *bss)
+{
+ memcpy(conn->bssid, bss->bssid, ETH_ALEN);
+ conn->params.bssid = conn->bssid;
+ conn->params.channel = bss->channel;
+ conn->state = CFG80211_CONN_AUTHENTICATE_NEXT;
+}
+
/* Returned bss is reference counted and must be cleaned up appropriately. */
static struct cfg80211_bss *cfg80211_get_conn_bss(struct wireless_dev *wdev)
{
@@ -307,10 +316,7 @@ static struct cfg80211_bss *cfg80211_get_conn_bss(struct wireless_dev *wdev)
if (!bss)
return NULL;
- memcpy(wdev->conn->bssid, bss->bssid, ETH_ALEN);
- wdev->conn->params.bssid = wdev->conn->bssid;
- wdev->conn->params.channel = bss->channel;
- wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT;
+ cfg80211_step_auth_next(wdev->conn, bss);
schedule_work(&rdev->conn_work);
return bss;
@@ -584,7 +590,12 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
wdev->conn->params.ssid_len = wdev->ssid_len;
/* see if we have the bss already */
- bss = cfg80211_get_conn_bss(wdev);
+ bss = cfg80211_get_bss(wdev->wiphy, wdev->conn->params.channel,
+ wdev->conn->params.bssid,
+ wdev->conn->params.ssid,
+ wdev->conn->params.ssid_len,
+ wdev->conn_bss_type,
+ IEEE80211_PRIVACY(wdev->conn->params.privacy));
if (prev_bssid) {
memcpy(wdev->conn->prev_bssid, prev_bssid, ETH_ALEN);
@@ -595,6 +606,7 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
if (bss) {
enum nl80211_timeout_reason treason;
+ cfg80211_step_auth_next(wdev->conn, bss);
err = cfg80211_conn_do_work(wdev, &treason);
cfg80211_put_bss(wdev->wiphy, bss);
} else {
@@ -1116,6 +1128,18 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
connect->crypto.ciphers_pairwise[0] = cipher;
}
}
+ } else {
+ if (WARN_ON(connkeys))
+ return -EINVAL;
+
+ /* connect can point to wdev->wext.connect which
+ * can hold key data from a previous connection
+ */
+ connect->key = NULL;
+ connect->key_len = 0;
+ connect->key_idx = 0;
+ connect->crypto.cipher_group = 0;
+ connect->crypto.n_ciphers_pairwise = 0;
}
wdev->connect_keys = connkeys;
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
index 98ff9d9e1aa9..8cc9a5f406ee 100644
--- a/net/wireless/wext-sme.c
+++ b/net/wireless/wext-sme.c
@@ -43,7 +43,7 @@ int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev,
if (!wdev->wext.connect.ssid_len)
return 0;
- if (wdev->wext.keys) {
+ if (wdev->wext.keys && wdev->wext.keys->def != -1) {
ck = kmemdup(wdev->wext.keys, sizeof(*ck), GFP_KERNEL);
if (!ck)
return -ENOMEM;
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 4875b5167858..27ececc3fa37 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -744,7 +744,8 @@ static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x,
if ((x->sel.family &&
(x->sel.family != family ||
!xfrm_selector_match(&x->sel, fl, family))) ||
- !security_xfrm_state_pol_flow_match(x, pol, fl))
+ !security_xfrm_state_pol_flow_match(x, pol,
+ &fl->u.__fl_common))
return;
if (!*best ||
@@ -759,7 +760,8 @@ static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x,
if ((!x->sel.family ||
(x->sel.family == family &&
xfrm_selector_match(&x->sel, fl, family))) &&
- security_xfrm_state_pol_flow_match(x, pol, fl))
+ security_xfrm_state_pol_flow_match(x, pol,
+ &fl->u.__fl_common))
*error = -ESRCH;
}
}
diff --git a/scripts/kconfig/lxdialog/check-lxdialog.sh b/scripts/kconfig/lxdialog/check-lxdialog.sh
index 5075ebf2d3b9..910ca1f7ce00 100755
--- a/scripts/kconfig/lxdialog/check-lxdialog.sh
+++ b/scripts/kconfig/lxdialog/check-lxdialog.sh
@@ -47,7 +47,7 @@ trap "rm -f $tmp" 0 1 2 3 15
check() {
$cc -x c - -o $tmp 2>/dev/null <<'EOF'
#include CURSES_LOC
-main() {}
+int main() { return 0; }
EOF
if [ $? != 0 ]; then
echo " *** Unable to find the ncurses libraries or the" 1>&2
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
index 25cf0c2c0c79..e5113941c961 100644
--- a/scripts/kconfig/symbol.c
+++ b/scripts/kconfig/symbol.c
@@ -1116,8 +1116,7 @@ static void sym_check_print_recursive(struct symbol *last_sym)
if (stack->sym == last_sym)
fprintf(stderr, "%s:%d:error: recursive dependency detected!\n",
prop->file->name, prop->lineno);
- fprintf(stderr, "For a resolution refer to Documentation/kbuild/kconfig-language.txt\n");
- fprintf(stderr, "subsection \"Kconfig recursive dependency limitations\"\n");
+
if (stack->expr) {
fprintf(stderr, "%s:%d:\tsymbol %s %s value contains %s\n",
prop->file->name, prop->lineno,
@@ -1147,6 +1146,11 @@ static void sym_check_print_recursive(struct symbol *last_sym)
}
}
+ fprintf(stderr,
+ "For a resolution refer to Documentation/kbuild/kconfig-language.txt\n"
+ "subsection \"Kconfig recursive dependency limitations\"\n"
+ "\n");
+
if (check_top == &cv_stack)
dep_stack_remove();
}
diff --git a/security/pfe/pfk.c b/security/pfe/pfk.c
index 2e5aa2fb6688..c49b30e66f46 100644
--- a/security/pfe/pfk.c
+++ b/security/pfe/pfk.c
@@ -198,6 +198,8 @@ static inline bool pfk_is_ready(void)
*/
static struct inode *pfk_bio_get_inode(const struct bio *bio)
{
+ struct inode *inode;
+
if (!bio)
return NULL;
if (!bio_has_data((struct bio *)bio))
@@ -207,11 +209,9 @@ static struct inode *pfk_bio_get_inode(const struct bio *bio)
if (!bio->bi_io_vec->bv_page)
return NULL;
- if (PageAnon(bio->bi_io_vec->bv_page)) {
- struct inode *inode;
-
- /* Using direct-io (O_DIRECT) without page cache */
- inode = dio_bio_get_inode((struct bio *)bio);
+ /* Using direct-io (O_DIRECT) without page cache */
+ inode = dio_bio_get_inode((struct bio *)bio);
+ if (inode) {
pr_debug("inode on direct-io, inode = 0x%p.\n", inode);
return inode;
diff --git a/security/security.c b/security/security.c
index 6546dffd1112..9c59f6f83e66 100644
--- a/security/security.c
+++ b/security/security.c
@@ -1320,15 +1320,16 @@ void security_sk_clone(const struct sock *sk, struct sock *newsk)
}
EXPORT_SYMBOL(security_sk_clone);
-void security_sk_classify_flow(struct sock *sk, struct flowi *fl)
+void security_sk_classify_flow(struct sock *sk, struct flowi_common *flic)
{
- call_void_hook(sk_getsecid, sk, &fl->flowi_secid);
+ call_void_hook(sk_getsecid, sk, &flic->flowic_secid);
}
EXPORT_SYMBOL(security_sk_classify_flow);
-void security_req_classify_flow(const struct request_sock *req, struct flowi *fl)
+void security_req_classify_flow(const struct request_sock *req,
+ struct flowi_common *flic)
{
- call_void_hook(req_classify_flow, req, fl);
+ call_void_hook(req_classify_flow, req, flic);
}
EXPORT_SYMBOL(security_req_classify_flow);
@@ -1471,7 +1472,7 @@ int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir)
int security_xfrm_state_pol_flow_match(struct xfrm_state *x,
struct xfrm_policy *xp,
- const struct flowi *fl)
+ const struct flowi_common *flic)
{
struct security_hook_list *hp;
int rc = 1;
@@ -1487,7 +1488,7 @@ int security_xfrm_state_pol_flow_match(struct xfrm_state *x,
*/
list_for_each_entry(hp, &security_hook_heads.xfrm_state_pol_flow_match,
list) {
- rc = hp->hook.xfrm_state_pol_flow_match(x, xp, fl);
+ rc = hp->hook.xfrm_state_pol_flow_match(x, xp, flic);
break;
}
return rc;
@@ -1498,9 +1499,9 @@ int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid)
return call_int_hook(xfrm_decode_session, 0, skb, secid, 1);
}
-void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl)
+void security_skb_classify_flow(struct sk_buff *skb, struct flowi_common *flic)
{
- int rc = call_int_hook(xfrm_decode_session, 0, skb, &fl->flowi_secid,
+ int rc = call_int_hook(xfrm_decode_session, 0, skb, &flic->flowic_secid,
0);
BUG_ON(rc);
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 3d24f86b423f..3bac79428c9b 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -4835,9 +4835,9 @@ static void selinux_secmark_refcount_dec(void)
}
static void selinux_req_classify_flow(const struct request_sock *req,
- struct flowi *fl)
+ struct flowi_common *flic)
{
- fl->flowi_secid = req->secid;
+ flic->flowic_secid = req->secid;
}
static int selinux_tun_dev_alloc_security(void **security)
diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
index 1450f85b946d..0f929b420059 100644
--- a/security/selinux/include/xfrm.h
+++ b/security/selinux/include/xfrm.h
@@ -25,7 +25,7 @@ int selinux_xfrm_state_delete(struct xfrm_state *x);
int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir);
int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x,
struct xfrm_policy *xp,
- const struct flowi *fl);
+ const struct flowi_common *flic);
#ifdef CONFIG_SECURITY_NETWORK_XFRM
extern atomic_t selinux_xfrm_refcount;
diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c
index 56e354fcdfc6..fda680555451 100644
--- a/security/selinux/xfrm.c
+++ b/security/selinux/xfrm.c
@@ -174,9 +174,10 @@ int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir)
*/
int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x,
struct xfrm_policy *xp,
- const struct flowi *fl)
+ const struct flowi_common *flic)
{
u32 state_sid;
+ u32 flic_sid;
if (!xp->security)
if (x->security)
@@ -195,14 +196,15 @@ int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x,
return 0;
state_sid = x->security->ctx_sid;
+ flic_sid = flic->flowic_secid;
- if (fl->flowi_secid != state_sid)
+ if (flic_sid != state_sid)
return 0;
/* We don't need a separate SA Vs. policy polmatch check since the SA
* is now of the same label as the flow and a flow Vs. policy polmatch
* check had already happened in selinux_xfrm_policy_lookup() above. */
- return (avc_has_perm(fl->flowi_secid, state_sid,
+ return (avc_has_perm(flic_sid, state_sid,
SECCLASS_ASSOCIATION, ASSOCIATION__SENDTO,
NULL) ? 0 : 1);
}
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index 11fdc1d9797e..a7a1b1c4cad9 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -108,17 +108,6 @@ static void snd_rawmidi_input_event_work(struct work_struct *work)
runtime->event(runtime->substream);
}
-/* buffer refcount management: call with runtime->lock held */
-static inline void snd_rawmidi_buffer_ref(struct snd_rawmidi_runtime *runtime)
-{
- runtime->buffer_ref++;
-}
-
-static inline void snd_rawmidi_buffer_unref(struct snd_rawmidi_runtime *runtime)
-{
- runtime->buffer_ref--;
-}
-
static int snd_rawmidi_runtime_create(struct snd_rawmidi_substream *substream)
{
struct snd_rawmidi_runtime *runtime;
@@ -676,6 +665,7 @@ int snd_rawmidi_output_params(struct snd_rawmidi_substream *substream,
runtime->buffer = newbuf;
runtime->buffer_size = params->buffer_size;
runtime->avail = runtime->buffer_size;
+ runtime->appl_ptr = runtime->hw_ptr = 0;
spin_unlock_irqrestore(&runtime->lock, flags);
if (oldbuf != newbuf)
kfree(oldbuf);
@@ -714,6 +704,7 @@ int snd_rawmidi_input_params(struct snd_rawmidi_substream *substream,
oldbuf = runtime->buffer;
runtime->buffer = newbuf;
runtime->buffer_size = params->buffer_size;
+ runtime->appl_ptr = runtime->hw_ptr = 0;
spin_unlock_irqrestore(&runtime->lock, flags);
if (oldbuf != newbuf)
kfree(oldbuf);
@@ -988,12 +979,10 @@ static long snd_rawmidi_kernel_read1(struct snd_rawmidi_substream *substream,
long result = 0, count1;
struct snd_rawmidi_runtime *runtime = substream->runtime;
unsigned long appl_ptr;
- int err = 0;
if (userbuf)
mutex_lock(&runtime->realloc_mutex);
spin_lock_irqsave(&runtime->lock, flags);
- snd_rawmidi_buffer_ref(runtime);
while (count > 0 && runtime->avail) {
count1 = runtime->buffer_size - runtime->appl_ptr;
if (count1 > count)
@@ -1012,21 +1001,20 @@ static long snd_rawmidi_kernel_read1(struct snd_rawmidi_substream *substream,
if (userbuf) {
spin_unlock_irqrestore(&runtime->lock, flags);
if (copy_to_user(userbuf + result,
- runtime->buffer + appl_ptr, count1))
- err = -EFAULT;
+ runtime->buffer + appl_ptr, count1)) {
+ mutex_unlock(&runtime->realloc_mutex);
+ return result > 0 ? result : -EFAULT;
+ }
+
spin_lock_irqsave(&runtime->lock, flags);
- if (err)
- goto out;
}
result += count1;
count -= count1;
}
- out:
- snd_rawmidi_buffer_unref(runtime);
spin_unlock_irqrestore(&runtime->lock, flags);
if (userbuf)
mutex_unlock(&runtime->realloc_mutex);
- return result > 0 ? result : err;
+ return result;
}
long snd_rawmidi_kernel_read(struct snd_rawmidi_substream *substream,
@@ -1301,7 +1289,6 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
return -EAGAIN;
}
}
- snd_rawmidi_buffer_ref(runtime);
while (count > 0 && runtime->avail > 0) {
count1 = runtime->buffer_size - runtime->appl_ptr;
if (count1 > count)
@@ -1333,7 +1320,6 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
}
__end:
count1 = runtime->avail < runtime->buffer_size;
- snd_rawmidi_buffer_unref(runtime);
spin_unlock_irqrestore(&runtime->lock, flags);
if (userbuf)
mutex_unlock(&runtime->realloc_mutex);
@@ -1653,10 +1639,8 @@ static int snd_rawmidi_free(struct snd_rawmidi *rmidi)
snd_info_free_entry(rmidi->proc_entry);
rmidi->proc_entry = NULL;
- mutex_lock(&register_mutex);
if (rmidi->ops && rmidi->ops->dev_unregister)
rmidi->ops->dev_unregister(rmidi);
- mutex_unlock(&register_mutex);
snd_rawmidi_free_substreams(&rmidi->streams[SNDRV_RAWMIDI_STREAM_INPUT]);
snd_rawmidi_free_substreams(&rmidi->streams[SNDRV_RAWMIDI_STREAM_OUTPUT]);
diff --git a/sound/core/sound_oss.c b/sound/core/sound_oss.c
index 0ca9d72b2273..6c3dca831fd0 100644
--- a/sound/core/sound_oss.c
+++ b/sound/core/sound_oss.c
@@ -179,7 +179,6 @@ int snd_unregister_oss_device(int type, struct snd_card *card, int dev)
mutex_unlock(&sound_oss_mutex);
return -ENOENT;
}
- unregister_sound_special(minor);
switch (SNDRV_MINOR_OSS_DEVICE(minor)) {
case SNDRV_MINOR_OSS_PCM:
track2 = SNDRV_MINOR_OSS(cidx, SNDRV_MINOR_OSS_AUDIO);
@@ -191,12 +190,18 @@ int snd_unregister_oss_device(int type, struct snd_card *card, int dev)
track2 = SNDRV_MINOR_OSS(cidx, SNDRV_MINOR_OSS_DMMIDI1);
break;
}
- if (track2 >= 0) {
- unregister_sound_special(track2);
+ if (track2 >= 0)
snd_oss_minors[track2] = NULL;
- }
snd_oss_minors[minor] = NULL;
mutex_unlock(&sound_oss_mutex);
+
+ /* call unregister_sound_special() outside sound_oss_mutex;
+ * otherwise may deadlock, as it can trigger the release of a card
+ */
+ unregister_sound_special(minor);
+ if (track2 >= 0)
+ unregister_sound_special(track2);
+
kfree(mptr);
return 0;
}
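
Editor's note: the sound_oss change above is a lock-ordering fix. unregister_sound_special() can end up releasing the card, which takes locks of its own, so it must not run while sound_oss_mutex is held. A rough pthread sketch of the pattern only; the lock names are invented and simply model "drop the registry lock before invoking anything that may take the card's locks":

```c
#include <pthread.h>
#include <stdio.h>

/* registry_lock plays the role of sound_oss_mutex, card_lock the role of
 * whatever the card-release path takes.  If release_card() ran under
 * registry_lock while another thread held card_lock and waited for
 * registry_lock, the two would deadlock. */
static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t card_lock = PTHREAD_MUTEX_INITIALIZER;

static void release_card(void)
{
	pthread_mutex_lock(&card_lock);
	puts("card released");
	pthread_mutex_unlock(&card_lock);
}

static void unregister_device(void)
{
	pthread_mutex_lock(&registry_lock);
	/* ... unlink the minor from the registry ... */
	pthread_mutex_unlock(&registry_lock);

	release_card();		/* safe: registry_lock already dropped */
}

int main(void)
{
	unregister_device();
	return 0;
}
```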
diff --git a/sound/soc/codecs/wcd-mbhc-v2.c b/sound/soc/codecs/wcd-mbhc-v2.c
index 04f1c7b1276c..69b4c0ecd83f 100644
--- a/sound/soc/codecs/wcd-mbhc-v2.c
+++ b/sound/soc/codecs/wcd-mbhc-v2.c
@@ -2899,9 +2899,6 @@ int wcd_mbhc_init(struct wcd_mbhc *mbhc, struct snd_soc_codec *codec,
return ret;
}
- set_bit(INPUT_PROP_NO_DUMMY_RELEASE,
- mbhc->button_jack.jack->input_dev->propbit);
-
INIT_DELAYED_WORK(&mbhc->mbhc_firmware_dwork,
wcd_mbhc_fw_read);
INIT_DELAYED_WORK(&mbhc->mbhc_btn_dwork, wcd_btn_lpress_fn);
diff --git a/sound/soc/codecs/wcd9xxx-mbhc.c b/sound/soc/codecs/wcd9xxx-mbhc.c
index 2012e4617ee1..b6a50742f501 100644
--- a/sound/soc/codecs/wcd9xxx-mbhc.c
+++ b/sound/soc/codecs/wcd9xxx-mbhc.c
@@ -5503,9 +5503,6 @@ int wcd9xxx_mbhc_init(struct wcd9xxx_mbhc *mbhc, struct wcd9xxx_resmgr *resmgr,
return ret;
}
- set_bit(INPUT_PROP_NO_DUMMY_RELEASE,
- mbhc->button_jack.jack->input_dev->propbit);
-
INIT_DELAYED_WORK(&mbhc->mbhc_firmware_dwork,
wcd9xxx_mbhc_fw_read);
INIT_DELAYED_WORK(&mbhc->mbhc_btn_dwork, wcd9xxx_btn_lpress_fn);
diff --git a/sound/soc/codecs/wcd_cpe_services.c b/sound/soc/codecs/wcd_cpe_services.c
index 18cd44d3d9c7..142dafe8490c 100644
--- a/sound/soc/codecs/wcd_cpe_services.c
+++ b/sound/soc/codecs/wcd_cpe_services.c
@@ -664,7 +664,7 @@ static void cpe_notify_cmi_client(struct cpe_info *t_info, u8 *payload,
service = CMI_HDR_GET_SERVICE(hdr);
notif.event = (enum cmi_api_event)CPE_SVC_CMI_MSG;
- notif.result = result;
+ notif.result = (enum cmi_api_result)result;
notif.message = payload;
CPE_SVC_GRAB_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
@@ -1355,7 +1355,8 @@ static enum cpe_process_result cpe_mt_process_cmd(
cpe_change_state(t_info, CPE_STATE_SENDING_MSG,
CPE_SS_MSG_SEND_INBOX);
- rc = cpe_send_msg_to_inbox(t_info, 0, m);
+ rc = (enum cpe_process_result)cpe_send_msg_to_inbox(t_info, 0,
+ m);
break;
case CPE_CMD_SEND_MSG_COMPLETE:
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-host-voice-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-host-voice-v2.c
index b02ab78684fb..6dc8289ffce1 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-host-voice-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-host-voice-v2.c
@@ -1,4 +1,5 @@
/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -644,6 +645,12 @@ static int hpcm_start_vocpcm(char *pcm_id, struct hpcm_drv *prtd,
}
}
+ if (*no_of_tp != no_of_tp_req && *no_of_tp > 2) {
+ pr_err("%s:: Invalid hpcm start request\n", __func__);
+ memset(&prtd->start_cmd, 0, sizeof(struct start_cmd));
+ return -EINVAL;
+ }
+
if ((prtd->mixer_conf.tx.enable || prtd->mixer_conf.rx.enable) &&
*no_of_tp == no_of_tp_req) {
voc_send_cvp_start_vocpcm(voc_get_session_id(sess_name),
@@ -743,6 +750,13 @@ void hpcm_notify_evt_processing(uint8_t *data, char *session,
return;
}
+ if (prtd->mixer_conf.sess_indx < VOICE_INDEX ||
+ prtd->mixer_conf.sess_indx >= MAX_SESSION) {
+ pr_err("%s:: Invalid session idx %d\n",
+ __func__, prtd->mixer_conf.sess_indx);
+ return;
+ }
+
if (notify_evt->tap_point == VSS_IVPCM_TAP_POINT_TX_DEFAULT) {
tp = &prtd->session[prtd->mixer_conf.sess_indx].tx_tap_point;
tmd = &prtd->mixer_conf.tx;
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
index 5f4225e675ad..bf2620c999b1 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
@@ -1,5 +1,5 @@
-/* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
- * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+/* Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1004,6 +1004,14 @@ static int msm_pcm_capture_copy(struct snd_pcm_substream *substream,
goto fail;
}
+ if ((size == 0 || size < prtd->pcm_count) && ((offset + size) < prtd->pcm_count)) {
+ memset(bufptr + offset + size, 0, prtd->pcm_count - size);
+ if (fbytes > prtd->pcm_count)
+ size = xfer = prtd->pcm_count;
+ else
+ size = xfer = fbytes;
+ }
+
if (copy_to_user(buf, bufptr+offset, xfer)) {
pr_err("Failed to copy buf to user\n");
ret = -EFAULT;
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index 27f90c270e13..96c59cad19c3 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -2072,6 +2072,11 @@ static void msm_pcm_routing_process_voice(u16 reg, u16 val, int set)
session_id = msm_pcm_routing_get_voc_sessionid(val);
+ if (!session_id) {
+ pr_err("%s: Invalid session_id %x\n", __func__, session_id);
+ return;
+ }
+
pr_debug("%s: FE DAI 0x%x session_id 0x%x\n",
__func__, val, session_id);
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c
index b2387a746f61..38aaa6cb8d30 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c
@@ -1,5 +1,6 @@
/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
@@ -371,6 +372,13 @@ static void voip_process_ul_pkt(uint8_t *voc_pkt,
switch (prtd->mode) {
case MODE_AMR_WB:
case MODE_AMR: {
+ if (pkt_len <= DSP_FRAME_HDR_LEN) {
+ pr_err("%s: pkt_len %d is < required len\n",
+ __func__, pkt_len);
+ spin_unlock_irqrestore(&prtd->dsp_ul_lock,
+ dsp_flags);
+ return;
+ }
/* Remove the DSP frame info header. Header format:
* Bits 0-3: Frame rate
* Bits 4-7: Frame type
@@ -391,6 +399,13 @@ static void voip_process_ul_pkt(uint8_t *voc_pkt,
case MODE_4GV_NB:
case MODE_4GV_WB:
case MODE_4GV_NW: {
+ if (pkt_len <= DSP_FRAME_HDR_LEN) {
+ pr_err("%s: pkt_len %d is < required len\n",
+ __func__, pkt_len);
+ spin_unlock_irqrestore(&prtd->dsp_ul_lock,
+ dsp_flags);
+ return;
+ }
/* Remove the DSP frame info header.
* Header format:
* Bits 0-3: frame rate
@@ -428,6 +443,14 @@ static void voip_process_ul_pkt(uint8_t *voc_pkt,
buf_node->frame.frm_hdr.timestamp = timestamp;
voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;
+ if (pkt_len <= 2 * DSP_FRAME_HDR_LEN) {
+ pr_err("%s: pkt_len %d is < required len\n",
+ __func__, pkt_len);
+ spin_unlock_irqrestore(&prtd->dsp_ul_lock,
+ dsp_flags);
+ return;
+ }
+
/* There are two frames in the buffer. Length of the
* first frame:
*/
@@ -463,6 +486,15 @@ static void voip_process_ul_pkt(uint8_t *voc_pkt,
buf_node->frame.frm_hdr.timestamp = timestamp;
voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;
+ if (pkt_len <= 2 * DSP_FRAME_HDR_LEN) {
+ pr_err(
+ "%s: pkt_len %d is < required len\n",
+ __func__, pkt_len);
+ spin_unlock_irqrestore(
+ &prtd->dsp_ul_lock,
+ dsp_flags);
+ return;
+ }
/* There are two frames in the buffer. Length
* of the second frame:
*/
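
Editor's note: the pkt_len guards added above all follow one pattern. Before a DSP frame-info header (or two of them, for the dual-frame buffers) is stripped from the uplink voice packet, the packet must be long enough to contain it; otherwise the lock is dropped and the packet is ignored. A standalone sketch of that bounds check; FRAME_HDR_LEN is an invented stand-in for DSP_FRAME_HDR_LEN:

```c
#include <stdio.h>
#include <stdint.h>

#define FRAME_HDR_LEN 1	/* stand-in for DSP_FRAME_HDR_LEN; value assumed */

/* Strip one frame-info header from the start of a voice packet, mirroring
 * the shape of the new checks in voip_process_ul_pkt(): refuse to touch
 * packets that are not longer than the header itself. */
static int strip_frame_header(const uint8_t *pkt, unsigned int pkt_len,
			      const uint8_t **payload, unsigned int *payload_len)
{
	if (pkt_len <= FRAME_HDR_LEN) {
		fprintf(stderr, "pkt_len %u is < required len\n", pkt_len);
		return -1;
	}
	*payload = pkt + FRAME_HDR_LEN;
	*payload_len = pkt_len - FRAME_HDR_LEN;
	return 0;
}

int main(void)
{
	const uint8_t good[] = { 0x34, 0xde, 0xad, 0xbe, 0xef };
	const uint8_t runt[] = { 0x34 };	/* header only, no payload */
	const uint8_t *payload;
	unsigned int payload_len;

	if (strip_frame_header(good, sizeof(good), &payload, &payload_len) == 0)
		printf("good packet: %u payload bytes\n", payload_len);

	if (strip_frame_header(runt, sizeof(runt), &payload, &payload_len) != 0)
		printf("runt packet rejected before the header was stripped\n");
	return 0;
}
```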
diff --git a/sound/soc/msm/qdsp6v2/q6afe.c b/sound/soc/msm/qdsp6v2/q6afe.c
index 610604fcfe15..21370dbadb5b 100644
--- a/sound/soc/msm/qdsp6v2/q6afe.c
+++ b/sound/soc/msm/qdsp6v2/q6afe.c
@@ -5475,6 +5475,13 @@ static int afe_sidetone_iir(u16 tx_port_id)
pr_debug("%s: adding 2 to size:%d\n", __func__, size);
size = size + 2;
}
+
+ if (size > MAX_SIDETONE_IIR_DATA_SIZE) {
+ pr_err("%s: iir_config size is out of bounds:%d\n", __func__, size);
+ mutex_unlock(&this_afe.cal_data[cal_index]->lock);
+ ret = -EINVAL;
+ goto done;
+ }
memcpy(&filter_data.iir_config, &st_iir_cal_info->iir_config, size);
mutex_unlock(&this_afe.cal_data[cal_index]->lock);
diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c
index 2f8c1e80ce32..70ae79f5f6d1 100644
--- a/sound/soc/msm/qdsp6v2/q6asm.c
+++ b/sound/soc/msm/qdsp6v2/q6asm.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Author: Brian Swetland <swetland@google.com>
*
* This software is licensed under the terms of the GNU General Public
@@ -11,6 +12,7 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/fs.h>
#include <linux/mutex.h>
@@ -2274,6 +2276,15 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv)
config_debug_fs_read_cb();
+ if (data->payload_size != (READDONE_IDX_SEQ_ID + 1) * sizeof(uint32_t)) {
+ pr_err("%s: payload size of %d is less than expected size\n",
+ __func__, data->payload_size);
+ spin_unlock_irqrestore(
+ &(session[session_id].session_lock),
+ flags);
+ return -EINVAL;
+ }
+
dev_vdbg(ac->dev, "%s: ReadDone: status=%d buff_add=0x%x act_size=%d offset=%d\n",
__func__, payload[READDONE_IDX_STATUS],
payload[READDONE_IDX_BUFADD_LSW],
@@ -2380,6 +2391,14 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv)
__func__, data->payload_size);
break;
case ASM_SESSION_CMDRSP_GET_MTMX_STRTR_PARAMS_V2:
+ payload_size = sizeof(struct asm_mtmx_strtr_get_params_cmdrsp);
+ if (data->payload_size < payload_size) {
+ pr_err("%s: insufficient payload size = %d\n",
+ __func__, data->payload_size);
+ spin_unlock_irqrestore(
+ &(session[session_id].session_lock), flags);
+ return -EINVAL;
+ }
q6asm_process_mtmx_get_param_rsp(ac, (void *) payload);
break;
case ASM_STREAM_PP_EVENT:
diff --git a/sound/soc/msm/qdsp6v2/q6core.c b/sound/soc/msm/qdsp6v2/q6core.c
index 417c5d0a12d1..8e1cb3a8b43b 100644
--- a/sound/soc/msm/qdsp6v2/q6core.c
+++ b/sound/soc/msm/qdsp6v2/q6core.c
@@ -1,4 +1,5 @@
/* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -87,7 +88,7 @@ static struct generic_get_data_ *generic_get_data;
static int parse_fwk_version_info(uint32_t *payload, uint16_t payload_size)
{
size_t ver_size;
- int num_services;
+ uint16_t num_services;
pr_debug("%s: Payload info num services %d\n",
__func__, payload[4]);
diff --git a/sound/soc/msm/qdsp6v2/q6lsm.c b/sound/soc/msm/qdsp6v2/q6lsm.c
index 90822aada3b6..807a4df9a2ba 100644
--- a/sound/soc/msm/qdsp6v2/q6lsm.c
+++ b/sound/soc/msm/qdsp6v2/q6lsm.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2013-2020, Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -338,6 +339,10 @@ static int q6lsm_apr_send_pkt(struct lsm_client *client, void *handle,
struct apr_hdr *msg_hdr = (struct apr_hdr *) data;
pr_debug("%s: enter wait %d\n", __func__, wait);
+ if (mmap_handle_p) {
+ pr_err("%s: Invalid mmap_handle\n", __func__);
+ return -EINVAL;
+ }
if (wait)
mutex_lock(&lsm_common.apr_lock);
if (mmap_p) {
@@ -381,6 +386,7 @@ static int q6lsm_apr_send_pkt(struct lsm_client *client, void *handle,
if (wait)
mutex_unlock(&lsm_common.apr_lock);
+ mmap_handle_p = NULL;
pr_debug("%s: leave ret %d\n", __func__, ret);
return ret;
}
@@ -1395,7 +1401,8 @@ static int q6lsm_mmapcallback(struct apr_client_data *data, void *priv)
case LSM_SESSION_CMDRSP_SHARED_MEM_MAP_REGIONS:
if (atomic_read(&client->cmd_state) == CMD_STATE_WAIT_RESP) {
spin_lock_irqsave(&mmap_lock, flags);
- *mmap_handle_p = command;
+ if (mmap_handle_p)
+ *mmap_handle_p = command;
/* spin_unlock_irqrestore implies barrier */
spin_unlock_irqrestore(&mmap_lock, flags);
atomic_set(&client->cmd_state, CMD_STATE_CLEARED);
diff --git a/sound/soc/msm/qdsp6v2/q6voice.c b/sound/soc/msm/qdsp6v2/q6voice.c
index e437a1c7985f..996567ff3a0e 100644
--- a/sound/soc/msm/qdsp6v2/q6voice.c
+++ b/sound/soc/msm/qdsp6v2/q6voice.c
@@ -2545,6 +2545,13 @@ static int voice_send_cvs_register_cal_cmd(struct voice_data *v)
goto unlock;
}
+ if (col_data->cal_data.size > MAX_COL_INFO_SIZE) {
+ pr_err("%s: Invalid cal data size %zu!\n",
+ __func__, col_data->cal_data.size);
+ ret = -EINVAL;
+ goto unlock;
+ }
+
memcpy(&cvs_reg_cal_cmd.cvs_cal_data.column_info[0],
(void *) &((struct audio_cal_info_voc_col *)
col_data->cal_info)->data,
diff --git a/tools/testing/selftests/wireguard/netns.sh b/tools/testing/selftests/wireguard/netns.sh
index d77f4829f1e0..b7a5ad26ef45 100755
--- a/tools/testing/selftests/wireguard/netns.sh
+++ b/tools/testing/selftests/wireguard/netns.sh
@@ -263,7 +263,11 @@ n0 ping -W 1 -c 1 192.168.241.2
n1 wg set wg0 peer "$pub2" endpoint 192.168.241.2:7
ip2 link del wg0
ip2 link del wg1
-! n0 ping -W 1 -c 10 -f 192.168.241.2 || false # Should not crash kernel
+read _ _ tx_bytes_before < <(n0 wg show wg1 transfer)
+! n0 ping -W 1 -c 10 -f 192.168.241.2 || false
+sleep 1
+read _ _ tx_bytes_after < <(n0 wg show wg1 transfer)
+(( tx_bytes_after - tx_bytes_before < 70000 ))
ip0 link del wg1
ip1 link del wg0
@@ -587,6 +591,28 @@ ip0 link set wg0 up
kill $ncat_pid
ip0 link del wg0
+# Ensure that dst_cache references don't outlive netns lifetime
+ip1 link add dev wg0 type wireguard
+ip2 link add dev wg0 type wireguard
+configure_peers
+ip1 link add veth1 type veth peer name veth2
+ip1 link set veth2 netns $netns2
+ip1 addr add fd00:aa::1/64 dev veth1
+ip2 addr add fd00:aa::2/64 dev veth2
+ip1 link set veth1 up
+ip2 link set veth2 up
+waitiface $netns1 veth1
+waitiface $netns2 veth2
+ip1 -6 route add default dev veth1 via fd00:aa::2
+ip2 -6 route add default dev veth2 via fd00:aa::1
+n1 wg set wg0 peer "$pub2" endpoint [fd00:aa::2]:2
+n2 wg set wg0 peer "$pub1" endpoint [fd00:aa::1]:1
+n1 ping6 -c 1 fd00::2
+pp ip netns delete $netns1
+pp ip netns delete $netns2
+pp ip netns add $netns1
+pp ip netns add $netns2
+
# Ensure there aren't circular reference loops
ip1 link add wg1 type wireguard
ip2 link add wg2 type wireguard
@@ -605,7 +631,7 @@ while read -t 0.1 -r line 2>/dev/null || [[ $? -ne 142 ]]; do
done < /dev/kmsg
alldeleted=1
for object in "${!objects[@]}"; do
- if [[ ${objects["$object"]} != *createddestroyed ]]; then
+ if [[ ${objects["$object"]} != *createddestroyed && ${objects["$object"]} != *createdcreateddestroyeddestroyed ]]; then
echo "Error: $object: merely ${objects["$object"]}" >&3
alldeleted=0
fi
diff --git a/tools/testing/selftests/wireguard/qemu/Makefile b/tools/testing/selftests/wireguard/qemu/Makefile
index 2dab4f57516d..d35ce15111b8 100644
--- a/tools/testing/selftests/wireguard/qemu/Makefile
+++ b/tools/testing/selftests/wireguard/qemu/Makefile
@@ -68,8 +68,10 @@ CROSS_COMPILE_FLAG := --build=$(CBUILD) --host=$(CHOST)
export CROSS_COMPILE=$(CBUILD)-
STRIP := $(CBUILD)-strip
endif
+QEMU_VPORT_RESULT :=
ifeq ($(ARCH),aarch64)
QEMU_ARCH := aarch64
+QEMU_VPORT_RESULT := virtio-serial-device
KERNEL_ARCH := arm64
KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/arm64/boot/Image
ifeq ($(HOST_ARCH),$(ARCH))
@@ -80,6 +82,7 @@ CFLAGS += -march=armv8-a -mtune=cortex-a53
endif
else ifeq ($(ARCH),aarch64_be)
QEMU_ARCH := aarch64
+QEMU_VPORT_RESULT := virtio-serial-device
KERNEL_ARCH := arm64
KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/arm64/boot/Image
ifeq ($(HOST_ARCH),$(ARCH))
@@ -90,6 +93,7 @@ CFLAGS += -march=armv8-a -mtune=cortex-a53
endif
else ifeq ($(ARCH),arm)
QEMU_ARCH := arm
+QEMU_VPORT_RESULT := virtio-serial-device
KERNEL_ARCH := arm
KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/arm/boot/zImage
ifeq ($(HOST_ARCH),$(ARCH))
@@ -100,6 +104,7 @@ CFLAGS += -march=armv7-a -mtune=cortex-a15 -mabi=aapcs-linux
endif
else ifeq ($(ARCH),armeb)
QEMU_ARCH := arm
+QEMU_VPORT_RESULT := virtio-serial-device
KERNEL_ARCH := arm
KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/arm/boot/zImage
ifeq ($(HOST_ARCH),$(ARCH))
@@ -199,7 +204,7 @@ KERNEL_ARCH := m68k
KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux
KERNEL_CMDLINE := $(shell sed -n 's/CONFIG_CMDLINE=\(.*\)/\1/p' arch/m68k.config)
ifeq ($(HOST_ARCH),$(ARCH))
-QEMU_MACHINE := -cpu host,accel=kvm -machine q800 -smp 1 -append $(KERNEL_CMDLINE)
+QEMU_MACHINE := -cpu host,accel=kvm -machine q800 -append $(KERNEL_CMDLINE)
else
QEMU_MACHINE := -machine q800 -smp 1 -append $(KERNEL_CMDLINE)
endif
@@ -212,6 +217,7 @@ MUSL_CC := $(BUILD_PATH)/musl-gcc
export CC := $(MUSL_CC)
USERSPACE_DEPS := $(MUSL_CC) $(BUILD_PATH)/include/.installed $(BUILD_PATH)/include/linux/.installed
+comma := ,
build: $(KERNEL_BZIMAGE)
qemu: $(KERNEL_BZIMAGE)
rm -f $(BUILD_PATH)/result
@@ -222,7 +228,8 @@ qemu: $(KERNEL_BZIMAGE)
$(QEMU_MACHINE) \
-m $$(grep -q CONFIG_DEBUG_KMEMLEAK=y $(KERNEL_BUILD_PATH)/.config && echo 1G || echo 256M) \
-serial stdio \
- -serial file:$(BUILD_PATH)/result \
+ -chardev file,path=$(BUILD_PATH)/result,id=result \
+ $(if $(QEMU_VPORT_RESULT),-device $(QEMU_VPORT_RESULT) -device virtserialport$(comma)chardev=result,-serial chardev:result) \
-no-reboot \
-monitor none \
-kernel $<
diff --git a/tools/testing/selftests/wireguard/qemu/arch/aarch64.config b/tools/testing/selftests/wireguard/qemu/arch/aarch64.config
index 3d063bb247bb..09016880ce03 100644
--- a/tools/testing/selftests/wireguard/qemu/arch/aarch64.config
+++ b/tools/testing/selftests/wireguard/qemu/arch/aarch64.config
@@ -1,5 +1,8 @@
CONFIG_SERIAL_AMBA_PL011=y
CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_VIRTIO_MENU=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_VIRTIO_CONSOLE=y
CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="console=ttyAMA0 wg.success=ttyAMA1"
+CONFIG_CMDLINE="console=ttyAMA0 wg.success=vport0p1 panic_on_warn=1"
CONFIG_FRAME_WARN=1280
diff --git a/tools/testing/selftests/wireguard/qemu/arch/aarch64_be.config b/tools/testing/selftests/wireguard/qemu/arch/aarch64_be.config
index dbdc7e406a7b..19ff66e4c602 100644
--- a/tools/testing/selftests/wireguard/qemu/arch/aarch64_be.config
+++ b/tools/testing/selftests/wireguard/qemu/arch/aarch64_be.config
@@ -1,6 +1,9 @@
CONFIG_CPU_BIG_ENDIAN=y
CONFIG_SERIAL_AMBA_PL011=y
CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_VIRTIO_MENU=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_VIRTIO_CONSOLE=y
CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="console=ttyAMA0 wg.success=ttyAMA1"
+CONFIG_CMDLINE="console=ttyAMA0 wg.success=vport0p1 panic_on_warn=1"
CONFIG_FRAME_WARN=1280
diff --git a/tools/testing/selftests/wireguard/qemu/arch/arm.config b/tools/testing/selftests/wireguard/qemu/arch/arm.config
index 148f49905418..fc7959bef9c2 100644
--- a/tools/testing/selftests/wireguard/qemu/arch/arm.config
+++ b/tools/testing/selftests/wireguard/qemu/arch/arm.config
@@ -4,6 +4,9 @@ CONFIG_ARCH_VIRT=y
CONFIG_THUMB2_KERNEL=n
CONFIG_SERIAL_AMBA_PL011=y
CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_VIRTIO_MENU=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_VIRTIO_CONSOLE=y
CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="console=ttyAMA0 wg.success=ttyAMA1"
+CONFIG_CMDLINE="console=ttyAMA0 wg.success=vport0p1 panic_on_warn=1"
CONFIG_FRAME_WARN=1024
diff --git a/tools/testing/selftests/wireguard/qemu/arch/armeb.config b/tools/testing/selftests/wireguard/qemu/arch/armeb.config
index bd76b07d00a2..f3066be81c19 100644
--- a/tools/testing/selftests/wireguard/qemu/arch/armeb.config
+++ b/tools/testing/selftests/wireguard/qemu/arch/armeb.config
@@ -4,7 +4,10 @@ CONFIG_ARCH_VIRT=y
CONFIG_THUMB2_KERNEL=n
CONFIG_SERIAL_AMBA_PL011=y
CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_VIRTIO_MENU=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_VIRTIO_CONSOLE=y
CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="console=ttyAMA0 wg.success=ttyAMA1"
+CONFIG_CMDLINE="console=ttyAMA0 wg.success=vport0p1 panic_on_warn=1"
CONFIG_CPU_BIG_ENDIAN=y
CONFIG_FRAME_WARN=1024
diff --git a/tools/testing/selftests/wireguard/qemu/arch/i686.config b/tools/testing/selftests/wireguard/qemu/arch/i686.config
index a85025d7206e..6d90892a85a2 100644
--- a/tools/testing/selftests/wireguard/qemu/arch/i686.config
+++ b/tools/testing/selftests/wireguard/qemu/arch/i686.config
@@ -1,5 +1,6 @@
+CONFIG_ACPI=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1"
+CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1 panic_on_warn=1"
CONFIG_FRAME_WARN=1024
diff --git a/tools/testing/selftests/wireguard/qemu/arch/m68k.config b/tools/testing/selftests/wireguard/qemu/arch/m68k.config
index 62a15bdb877e..82c925e49beb 100644
--- a/tools/testing/selftests/wireguard/qemu/arch/m68k.config
+++ b/tools/testing/selftests/wireguard/qemu/arch/m68k.config
@@ -5,5 +5,5 @@ CONFIG_MAC=y
CONFIG_SERIAL_PMACZILOG=y
CONFIG_SERIAL_PMACZILOG_TTYS=y
CONFIG_SERIAL_PMACZILOG_CONSOLE=y
-CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1"
+CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1 panic_on_warn=1"
CONFIG_FRAME_WARN=1024
diff --git a/tools/testing/selftests/wireguard/qemu/arch/mips.config b/tools/testing/selftests/wireguard/qemu/arch/mips.config
index df71d6b95546..d7ec63c17b30 100644
--- a/tools/testing/selftests/wireguard/qemu/arch/mips.config
+++ b/tools/testing/selftests/wireguard/qemu/arch/mips.config
@@ -7,5 +7,5 @@ CONFIG_POWER_RESET_SYSCON=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1"
+CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1 panic_on_warn=1"
CONFIG_FRAME_WARN=1024
diff --git a/tools/testing/selftests/wireguard/qemu/arch/mips64.config b/tools/testing/selftests/wireguard/qemu/arch/mips64.config
index 90c783f725c4..0994947e3392 100644
--- a/tools/testing/selftests/wireguard/qemu/arch/mips64.config
+++ b/tools/testing/selftests/wireguard/qemu/arch/mips64.config
@@ -10,5 +10,5 @@ CONFIG_POWER_RESET_SYSCON=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1"
+CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1 panic_on_warn=1"
CONFIG_FRAME_WARN=1280
diff --git a/tools/testing/selftests/wireguard/qemu/arch/mips64el.config b/tools/testing/selftests/wireguard/qemu/arch/mips64el.config
index 435b0b43e00c..591184342f47 100644
--- a/tools/testing/selftests/wireguard/qemu/arch/mips64el.config
+++ b/tools/testing/selftests/wireguard/qemu/arch/mips64el.config
@@ -11,5 +11,5 @@ CONFIG_POWER_RESET_SYSCON=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1"
+CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1 panic_on_warn=1"
CONFIG_FRAME_WARN=1280
diff --git a/tools/testing/selftests/wireguard/qemu/arch/mipsel.config b/tools/testing/selftests/wireguard/qemu/arch/mipsel.config
index 62bb50c4a85f..18a498293737 100644
--- a/tools/testing/selftests/wireguard/qemu/arch/mipsel.config
+++ b/tools/testing/selftests/wireguard/qemu/arch/mipsel.config
@@ -8,5 +8,5 @@ CONFIG_POWER_RESET_SYSCON=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1"
+CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1 panic_on_warn=1"
CONFIG_FRAME_WARN=1024
diff --git a/tools/testing/selftests/wireguard/qemu/arch/powerpc.config b/tools/testing/selftests/wireguard/qemu/arch/powerpc.config
index 57957093b71b..5e04882e8e35 100644
--- a/tools/testing/selftests/wireguard/qemu/arch/powerpc.config
+++ b/tools/testing/selftests/wireguard/qemu/arch/powerpc.config
@@ -6,5 +6,5 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_MATH_EMULATION=y
CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1"
+CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1 panic_on_warn=1"
CONFIG_FRAME_WARN=1024
diff --git a/tools/testing/selftests/wireguard/qemu/arch/powerpc64le.config b/tools/testing/selftests/wireguard/qemu/arch/powerpc64le.config
index f52f1e2bc7f6..8148b9d1220a 100644
--- a/tools/testing/selftests/wireguard/qemu/arch/powerpc64le.config
+++ b/tools/testing/selftests/wireguard/qemu/arch/powerpc64le.config
@@ -7,7 +7,7 @@ CONFIG_PPC_RADIX_MMU=y
CONFIG_HVC_CONSOLE=y
CONFIG_CPU_LITTLE_ENDIAN=y
CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="console=hvc0 wg.success=hvc1"
+CONFIG_CMDLINE="console=hvc0 wg.success=hvc1 panic_on_warn=1"
CONFIG_SECTION_MISMATCH_WARN_ONLY=y
CONFIG_FRAME_WARN=1280
CONFIG_THREAD_SHIFT=14
diff --git a/tools/testing/selftests/wireguard/qemu/arch/x86_64.config b/tools/testing/selftests/wireguard/qemu/arch/x86_64.config
index 00a1ef4869d5..efa00693e08b 100644
--- a/tools/testing/selftests/wireguard/qemu/arch/x86_64.config
+++ b/tools/testing/selftests/wireguard/qemu/arch/x86_64.config
@@ -1,5 +1,6 @@
+CONFIG_ACPI=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1"
+CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1 panic_on_warn=1"
CONFIG_FRAME_WARN=1280
diff --git a/tools/testing/selftests/wireguard/qemu/init.c b/tools/testing/selftests/wireguard/qemu/init.c
index c9698120ac9d..20c85dc8b39f 100644
--- a/tools/testing/selftests/wireguard/qemu/init.c
+++ b/tools/testing/selftests/wireguard/qemu/init.c
@@ -122,12 +122,6 @@ static void enable_logging(void)
panic("write(exception-trace)");
close(fd);
}
- fd = open("/proc/sys/kernel/panic_on_warn", O_WRONLY);
- if (fd >= 0) {
- if (write(fd, "1\n", 2) != 2)
- panic("write(panic_on_warn)");
- close(fd);
- }
}

static void kmod_selftests(void)