476 files changed, 3866 insertions(+), 2565 deletions(-)
diff --git a/Documentation/networking/l2tp.txt b/Documentation/networking/l2tp.txt index 4650a00ed012..9bc271cdc9a8 100644 --- a/Documentation/networking/l2tp.txt +++ b/Documentation/networking/l2tp.txt @@ -177,10 +177,10 @@ setsockopt on the PPPoX socket to set a debug mask. The following debug mask bits are available: -PPPOL2TP_MSG_DEBUG verbose debug (if compiled in) -PPPOL2TP_MSG_CONTROL userspace - kernel interface -PPPOL2TP_MSG_SEQ sequence numbers handling -PPPOL2TP_MSG_DATA data packets +L2TP_MSG_DEBUG verbose debug (if compiled in) +L2TP_MSG_CONTROL userspace - kernel interface +L2TP_MSG_SEQ sequence numbers handling +L2TP_MSG_DATA data packets If enabled, files under a l2tp debugfs directory can be used to dump kernel state about L2TP tunnels and sessions. To access it, the @@ -1,6 +1,6 @@ VERSION = 4 PATCHLEVEL = 4 -SUBLEVEL = 222 +SUBLEVEL = 226 EXTRAVERSION = NAME = Blurry Fish Butt @@ -639,7 +639,6 @@ ARCH_CFLAGS := include arch/$(SRCARCH)/Makefile KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,) -KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,) KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,) KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation) KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow) @@ -663,6 +662,7 @@ endif # Tell gcc to never replace conditional load with a non-conditional one KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0) +KBUILD_CFLAGS += $(call cc-option,-fno-allow-store-data-races) # check for 'asm goto' ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y) @@ -820,6 +820,17 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign) # disable stringop warnings in gcc 8+ KBUILD_CFLAGS += $(call cc-disable-warning, stringop-truncation) +# We'll want to enable this eventually, but it's not going away for 5.7 at least +KBUILD_CFLAGS += $(call cc-disable-warning, zero-length-bounds) +KBUILD_CFLAGS += $(call cc-disable-warning, array-bounds) +KBUILD_CFLAGS += $(call cc-disable-warning, stringop-overflow) + +# Another good warning that we'll want to enable eventually +KBUILD_CFLAGS += $(call cc-disable-warning, restrict) + +# Enabled with W=2, disabled by default as noisy +KBUILD_CFLAGS += $(call cc-disable-warning, maybe-uninitialized) + # disable invalid "can't wrap" optimizations for signed / pointers KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow) diff --git a/arch/alpha/kernel/pci-sysfs.c b/arch/alpha/kernel/pci-sysfs.c index 99e8d4796c96..92c0d460815b 100644 --- a/arch/alpha/kernel/pci-sysfs.c +++ b/arch/alpha/kernel/pci-sysfs.c @@ -77,10 +77,10 @@ static int pci_mmap_resource(struct kobject *kobj, if (i >= PCI_ROM_RESOURCE) return -ENODEV; - if (!__pci_mmap_fits(pdev, i, vma, sparse)) + if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(res->start)) return -EINVAL; - if (iomem_is_exclusive(res->start)) + if (!__pci_mmap_fits(pdev, i, vma, sparse)) return -EINVAL; pcibios_resource_to_bus(pdev->bus, &bar, res); diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile index 67273ababc30..6fe9b43b9f7e 100644 --- a/arch/arm/boot/dts/Makefile +++ b/arch/arm/boot/dts/Makefile @@ -166,6 +166,7 @@ dtb-$(CONFIG_MACH_KIRKWOOD) += \ kirkwood-ds109.dtb \ kirkwood-ds110jv10.dtb \ kirkwood-ds111.dtb \ + kirkwood-ds112.dtb \ kirkwood-ds209.dtb \ kirkwood-ds210.dtb \ kirkwood-ds212.dtb \ diff --git a/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts b/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts index 7c869fe3c30b..3baf5c4eec5b 
100644 --- a/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts +++ b/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts @@ -81,8 +81,8 @@ imx27-phycard-s-rdk { pinctrl_i2c1: i2c1grp { fsl,pins = < - MX27_PAD_I2C2_SDA__I2C2_SDA 0x0 - MX27_PAD_I2C2_SCL__I2C2_SCL 0x0 + MX27_PAD_I2C_DATA__I2C_DATA 0x0 + MX27_PAD_I2C_CLK__I2C_CLK 0x0 >; }; diff --git a/arch/arm/boot/dts/kirkwood-ds112.dts b/arch/arm/boot/dts/kirkwood-ds112.dts index bf4143c6cb8f..b84af3da8c84 100644 --- a/arch/arm/boot/dts/kirkwood-ds112.dts +++ b/arch/arm/boot/dts/kirkwood-ds112.dts @@ -14,7 +14,7 @@ #include "kirkwood-synology.dtsi" / { - model = "Synology DS111"; + model = "Synology DS112"; compatible = "synology,ds111", "marvell,kirkwood"; memory { diff --git a/arch/arm/boot/dts/kirkwood-lswvl.dts b/arch/arm/boot/dts/kirkwood-lswvl.dts index 09eed3cea0af..36eec7392ab4 100644 --- a/arch/arm/boot/dts/kirkwood-lswvl.dts +++ b/arch/arm/boot/dts/kirkwood-lswvl.dts @@ -1,7 +1,8 @@ /* * Device Tree file for Buffalo Linkstation LS-WVL/VL * - * Copyright (C) 2015, rogershimizu@gmail.com + * Copyright (C) 2015, 2016 + * Roger Shimizu <rogershimizu@gmail.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -156,21 +157,21 @@ button@1 { label = "Function Button"; linux,code = <KEY_OPTION>; - gpios = <&gpio0 45 GPIO_ACTIVE_LOW>; + gpios = <&gpio1 13 GPIO_ACTIVE_LOW>; }; button@2 { label = "Power-on Switch"; linux,code = <KEY_RESERVED>; linux,input-type = <5>; - gpios = <&gpio0 46 GPIO_ACTIVE_LOW>; + gpios = <&gpio1 14 GPIO_ACTIVE_LOW>; }; button@3 { label = "Power-auto Switch"; linux,code = <KEY_ESC>; linux,input-type = <5>; - gpios = <&gpio0 47 GPIO_ACTIVE_LOW>; + gpios = <&gpio1 15 GPIO_ACTIVE_LOW>; }; }; @@ -185,38 +186,38 @@ led@1 { label = "lswvl:red:alarm"; - gpios = <&gpio0 36 GPIO_ACTIVE_LOW>; + gpios = <&gpio1 4 GPIO_ACTIVE_HIGH>; }; led@2 { label = "lswvl:red:func"; - gpios = <&gpio0 37 GPIO_ACTIVE_LOW>; + gpios = <&gpio1 5 GPIO_ACTIVE_HIGH>; }; led@3 { label = "lswvl:amber:info"; - gpios = <&gpio0 38 GPIO_ACTIVE_LOW>; + gpios = <&gpio1 6 GPIO_ACTIVE_HIGH>; }; led@4 { label = "lswvl:blue:func"; - gpios = <&gpio0 39 GPIO_ACTIVE_LOW>; + gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>; }; led@5 { label = "lswvl:blue:power"; - gpios = <&gpio0 40 GPIO_ACTIVE_LOW>; + gpios = <&gpio1 8 GPIO_ACTIVE_LOW>; default-state = "keep"; }; led@6 { label = "lswvl:red:hdderr0"; - gpios = <&gpio0 34 GPIO_ACTIVE_LOW>; + gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>; }; led@7 { label = "lswvl:red:hdderr1"; - gpios = <&gpio0 35 GPIO_ACTIVE_LOW>; + gpios = <&gpio1 3 GPIO_ACTIVE_HIGH>; }; }; @@ -233,7 +234,7 @@ 3250 1 5000 0>; - alarm-gpios = <&gpio0 43 GPIO_ACTIVE_HIGH>; + alarm-gpios = <&gpio1 11 GPIO_ACTIVE_HIGH>; }; restart_poweroff { diff --git a/arch/arm/boot/dts/kirkwood-lswxl.dts b/arch/arm/boot/dts/kirkwood-lswxl.dts index f5db16a08597..b13ec20a7088 100644 --- a/arch/arm/boot/dts/kirkwood-lswxl.dts +++ b/arch/arm/boot/dts/kirkwood-lswxl.dts @@ -1,7 +1,8 @@ /* * Device Tree file for Buffalo Linkstation LS-WXL/WSXL * - * Copyright (C) 2015, rogershimizu@gmail.com + * Copyright (C) 2015, 2016 + * Roger Shimizu <rogershimizu@gmail.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -156,21 +157,21 @@ button@1 { label = "Function Button"; linux,code = <KEY_OPTION>; - gpios = <&gpio1 41 GPIO_ACTIVE_LOW>; + gpios = <&gpio1 9 GPIO_ACTIVE_LOW>; }; button@2 { label = "Power-on Switch"; linux,code = 
<KEY_RESERVED>; linux,input-type = <5>; - gpios = <&gpio1 42 GPIO_ACTIVE_LOW>; + gpios = <&gpio1 10 GPIO_ACTIVE_LOW>; }; button@3 { label = "Power-auto Switch"; linux,code = <KEY_ESC>; linux,input-type = <5>; - gpios = <&gpio1 43 GPIO_ACTIVE_LOW>; + gpios = <&gpio1 11 GPIO_ACTIVE_LOW>; }; }; @@ -185,12 +186,12 @@ led@1 { label = "lswxl:blue:func"; - gpios = <&gpio1 36 GPIO_ACTIVE_LOW>; + gpios = <&gpio1 4 GPIO_ACTIVE_LOW>; }; led@2 { label = "lswxl:red:alarm"; - gpios = <&gpio1 49 GPIO_ACTIVE_LOW>; + gpios = <&gpio1 17 GPIO_ACTIVE_LOW>; }; led@3 { @@ -200,23 +201,23 @@ led@4 { label = "lswxl:blue:power"; - gpios = <&gpio1 8 GPIO_ACTIVE_LOW>; + gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>; + default-state = "keep"; }; led@5 { label = "lswxl:red:func"; - gpios = <&gpio1 5 GPIO_ACTIVE_LOW>; - default-state = "keep"; + gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>; }; led@6 { label = "lswxl:red:hdderr0"; - gpios = <&gpio1 2 GPIO_ACTIVE_LOW>; + gpios = <&gpio0 8 GPIO_ACTIVE_HIGH>; }; led@7 { label = "lswxl:red:hdderr1"; - gpios = <&gpio1 3 GPIO_ACTIVE_LOW>; + gpios = <&gpio1 14 GPIO_ACTIVE_HIGH>; }; }; @@ -225,15 +226,15 @@ pinctrl-0 = <&pmx_fan_low &pmx_fan_high &pmx_fan_lock>; pinctrl-names = "default"; - gpios = <&gpio0 47 GPIO_ACTIVE_LOW - &gpio0 48 GPIO_ACTIVE_LOW>; + gpios = <&gpio1 16 GPIO_ACTIVE_LOW + &gpio1 15 GPIO_ACTIVE_LOW>; gpio-fan,speed-map = <0 3 1500 2 3250 1 5000 0>; - alarm-gpios = <&gpio1 49 GPIO_ACTIVE_HIGH>; + alarm-gpios = <&gpio1 8 GPIO_ACTIVE_HIGH>; }; restart_poweroff { @@ -256,7 +257,7 @@ enable-active-high; regulator-always-on; regulator-boot-on; - gpio = <&gpio0 37 GPIO_ACTIVE_HIGH>; + gpio = <&gpio1 5 GPIO_ACTIVE_HIGH>; }; hdd_power0: regulator@2 { compatible = "regulator-fixed"; diff --git a/arch/arm/boot/dts/orion5x-linkstation-lswtgl.dts b/arch/arm/boot/dts/orion5x-linkstation-lswtgl.dts index 3daec912b4bf..aae8a7aceab7 100644 --- a/arch/arm/boot/dts/orion5x-linkstation-lswtgl.dts +++ b/arch/arm/boot/dts/orion5x-linkstation-lswtgl.dts @@ -1,7 +1,8 @@ /* * Device Tree file for Buffalo Linkstation LS-WTGL * - * Copyright (C) 2015, Roger Shimizu <rogershimizu@gmail.com> + * Copyright (C) 2015, 2016 + * Roger Shimizu <rogershimizu@gmail.com> * * This file is dual-licensed: you can use it either under the terms * of the GPL or the X11 license, at your option. 
Note that this dual @@ -69,8 +70,6 @@ internal-regs { pinctrl: pinctrl@10000 { - pinctrl-0 = <&pmx_usb_power &pmx_power_hdd - &pmx_fan_low &pmx_fan_high &pmx_fan_lock>; pinctrl-names = "default"; pmx_led_power: pmx-leds { @@ -162,6 +161,7 @@ led@1 { label = "lswtgl:blue:power"; gpios = <&gpio0 0 GPIO_ACTIVE_LOW>; + default-state = "keep"; }; led@2 { @@ -188,7 +188,7 @@ 3250 1 5000 0>; - alarm-gpios = <&gpio0 2 GPIO_ACTIVE_HIGH>; + alarm-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>; }; restart_poweroff { @@ -228,6 +228,37 @@ }; }; +&devbus_bootcs { + status = "okay"; + devbus,keep-config; + + flash@0 { + compatible = "jedec-flash"; + reg = <0 0x40000>; + bank-width = <1>; + + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + + header@0 { + reg = <0 0x30000>; + read-only; + }; + + uboot@30000 { + reg = <0x30000 0xF000>; + read-only; + }; + + uboot_env@3F000 { + reg = <0x3F000 0x1000>; + }; + }; + }; +}; + &mdio { status = "okay"; diff --git a/arch/arm/boot/dts/r8a7740-armadillo800eva.dts b/arch/arm/boot/dts/r8a7740-armadillo800eva.dts index 105d9c95de4a..5c76dcc89df5 100644 --- a/arch/arm/boot/dts/r8a7740-armadillo800eva.dts +++ b/arch/arm/boot/dts/r8a7740-armadillo800eva.dts @@ -180,7 +180,7 @@ }; &extal1_clk { - clock-frequency = <25000000>; + clock-frequency = <24000000>; }; &extal2_clk { clock-frequency = <48000000>; diff --git a/arch/arm/boot/dts/r8a7740.dtsi b/arch/arm/boot/dts/r8a7740.dtsi index e14cb1438216..2c43e12eb99a 100644 --- a/arch/arm/boot/dts/r8a7740.dtsi +++ b/arch/arm/boot/dts/r8a7740.dtsi @@ -461,7 +461,7 @@ cpg_clocks: cpg_clocks@e6150000 { compatible = "renesas,r8a7740-cpg-clocks"; reg = <0xe6150000 0x10000>; - clocks = <&extal1_clk>, <&extalr_clk>; + clocks = <&extal1_clk>, <&extal2_clk>, <&extalr_clk>; #clock-cells = <1>; clock-output-names = "system", "pllc0", "pllc1", "pllc2", "r", diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h index cc414382dab4..561b2ba6bc28 100644 --- a/arch/arm/include/asm/futex.h +++ b/arch/arm/include/asm/futex.h @@ -162,8 +162,13 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) preempt_enable(); #endif - if (!ret) - *oval = oldval; + /* + * Store unconditionally. If ret != 0 the extra store is the least + * of the worries but GCC cannot figure out that __futex_atomic_op() + * is either setting ret to -EFAULT or storing the old value in + * oldval which results in a uninitialized warning at the call site. + */ + *oval = oldval; return ret; } diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig index 8ceda2844c4f..9aa659e4c46e 100644 --- a/arch/arm/mach-imx/Kconfig +++ b/arch/arm/mach-imx/Kconfig @@ -562,6 +562,7 @@ config SOC_IMX7D select ARM_GIC select HAVE_IMX_ANATOP select HAVE_IMX_MMDC + select HAVE_IMX_SRC help This enables support for Freescale i.MX7 Dual processor. 
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index 1bc87c29467b..fcb48eb3ecdd 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -2207,15 +2207,15 @@ static int _idle(struct omap_hwmod *oh) pr_debug("omap_hwmod: %s: idling\n", oh->name); + if (_are_all_hardreset_lines_asserted(oh)) + return 0; + if (oh->_state != _HWMOD_STATE_ENABLED) { WARN(1, "omap_hwmod: %s: idle state can only be entered from enabled state\n", oh->name); return -EINVAL; } - if (_are_all_hardreset_lines_asserted(oh)) - return 0; - if (oh->class->sysc) _idle_sysc(oh); _del_initiator_dep(oh, mpu_oh); @@ -2262,6 +2262,9 @@ static int _shutdown(struct omap_hwmod *oh) int ret, i; u8 prev_state; + if (_are_all_hardreset_lines_asserted(oh)) + return 0; + if (oh->_state != _HWMOD_STATE_IDLE && oh->_state != _HWMOD_STATE_ENABLED) { WARN(1, "omap_hwmod: %s: disabled state can only be entered from idle, or enabled state\n", @@ -2269,9 +2272,6 @@ static int _shutdown(struct omap_hwmod *oh) return -EINVAL; } - if (_are_all_hardreset_lines_asserted(oh)) - return 0; - pr_debug("omap_hwmod: %s: disabling\n", oh->name); if (oh->class->pre_shutdown) { diff --git a/arch/arm64/configs/cuttlefish_defconfig b/arch/arm64/configs/cuttlefish_defconfig index df5f2c48cbfe..fab59810920a 100644 --- a/arch/arm64/configs/cuttlefish_defconfig +++ b/arch/arm64/configs/cuttlefish_defconfig @@ -16,6 +16,7 @@ CONFIG_CGROUP_SCHEDTUNE=y CONFIG_MEMCG=y CONFIG_MEMCG_SWAP=y CONFIG_RT_GROUP_SCHED=y +CONFIG_NAMESPACES=y CONFIG_SCHED_AUTOGROUP=y CONFIG_SCHED_TUNE=y CONFIG_DEFAULT_USE_ENERGY_AWARE=y diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index 6297140dd84f..fb413052e10a 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -482,6 +482,7 @@ emit_cond_jmp: case BPF_JGE: jmp_cond = A64_COND_CS; break; + case BPF_JSET: case BPF_JNE: jmp_cond = A64_COND_NE; break; diff --git a/arch/mips/boot/dts/brcm/bcm7435.dtsi b/arch/mips/boot/dts/brcm/bcm7435.dtsi index 8b9432cc062b..27b2b8e08503 100644 --- a/arch/mips/boot/dts/brcm/bcm7435.dtsi +++ b/arch/mips/boot/dts/brcm/bcm7435.dtsi @@ -7,7 +7,7 @@ #address-cells = <1>; #size-cells = <0>; - mips-hpt-frequency = <163125000>; + mips-hpt-frequency = <175625000>; cpu@0 { compatible = "brcm,bmips5200"; diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c index ed7c4f1fc6a0..9189730bd517 100644 --- a/arch/mips/cavium-octeon/octeon-irq.c +++ b/arch/mips/cavium-octeon/octeon-irq.c @@ -1220,7 +1220,7 @@ static int octeon_irq_gpio_map(struct irq_domain *d, line = (hw + gpiod->base_hwirq) >> 6; bit = (hw + gpiod->base_hwirq) & 63; - if (line > ARRAY_SIZE(octeon_irq_ciu_to_irq) || + if (line >= ARRAY_SIZE(octeon_irq_ciu_to_irq) || octeon_irq_ciu_to_irq[line][bit] != 0) return -EINVAL; diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index cd7101fb6227..6b9c608cdff1 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c @@ -251,6 +251,17 @@ static void octeon_crash_shutdown(struct pt_regs *regs) default_machine_crash_shutdown(regs); } +#ifdef CONFIG_SMP +void octeon_crash_smp_send_stop(void) +{ + int cpu; + + /* disable watchdogs */ + for_each_online_cpu(cpu) + cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0); +} +#endif + #endif /* CONFIG_KEXEC */ #ifdef CONFIG_CAVIUM_RESERVE32 @@ -864,6 +875,9 @@ void __init prom_init(void) _machine_kexec_shutdown = octeon_shutdown; _machine_crash_shutdown = 
octeon_crash_shutdown; _machine_kexec_prepare = octeon_kexec_prepare; +#ifdef CONFIG_SMP + _crash_smp_send_stop = octeon_crash_smp_send_stop; +#endif #endif octeon_user_io_init(); diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c index 42412ba0f3bf..ac7a59c3e11b 100644 --- a/arch/mips/cavium-octeon/smp.c +++ b/arch/mips/cavium-octeon/smp.c @@ -239,6 +239,7 @@ static int octeon_cpu_disable(void) return -ENOTSUPP; set_cpu_online(cpu, false); + calculate_cpu_foreign_map(); cpumask_clear_cpu(cpu, &cpu_callin_map); octeon_fixup_irqs(); diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h index dc72fdc73719..ce8f514d4858 100644 --- a/arch/mips/include/asm/elf.h +++ b/arch/mips/include/asm/elf.h @@ -420,6 +420,7 @@ extern const char *__elf_platform; #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) #endif +/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */ #define ARCH_DLINFO \ do { \ NEW_AUX_ENT(AT_SYSINFO_EHDR, \ diff --git a/arch/mips/include/asm/kexec.h b/arch/mips/include/asm/kexec.h index b6a4d4aa548f..cfdbe66575f4 100644 --- a/arch/mips/include/asm/kexec.h +++ b/arch/mips/include/asm/kexec.h @@ -45,6 +45,7 @@ extern const unsigned char kexec_smp_wait[]; extern unsigned long secondary_kexec_args[4]; extern void (*relocated_kexec_smp_wait) (void *); extern atomic_t kexec_ready_to_reboot; +extern void (*_crash_smp_send_stop)(void); #endif #endif diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h index 82852dfd8dab..5ce0fcc81e87 100644 --- a/arch/mips/include/asm/smp.h +++ b/arch/mips/include/asm/smp.h @@ -63,6 +63,8 @@ extern cpumask_t cpu_coherent_mask; extern void asmlinkage smp_bootstrap(void); +extern void calculate_cpu_foreign_map(void); + /* * this function sends a 'reschedule' IPI to another CPU. * it goes straight through and wastes no time serializing diff --git a/arch/mips/include/uapi/asm/auxvec.h b/arch/mips/include/uapi/asm/auxvec.h index c9c7195272c4..45ba259a3618 100644 --- a/arch/mips/include/uapi/asm/auxvec.h +++ b/arch/mips/include/uapi/asm/auxvec.h @@ -14,4 +14,6 @@ /* Location of VDSO image. 
*/ #define AT_SYSINFO_EHDR 33 +#define AT_VECTOR_SIZE_ARCH 1 /* entries in ARCH_DLINFO */ + #endif /* __ASM_AUXVEC_H */ diff --git a/arch/mips/kernel/bmips_vec.S b/arch/mips/kernel/bmips_vec.S index 86495072a922..d9495f3f3fad 100644 --- a/arch/mips/kernel/bmips_vec.S +++ b/arch/mips/kernel/bmips_vec.S @@ -93,7 +93,8 @@ NESTED(bmips_reset_nmi_vec, PT_SIZE, sp) #if defined(CONFIG_CPU_BMIPS5000) mfc0 k0, CP0_PRID li k1, PRID_IMP_BMIPS5000 - andi k0, 0xff00 + /* mask with PRID_IMP_BMIPS5000 to cover both variants */ + andi k0, PRID_IMP_BMIPS5000 bne k0, k1, 1f /* if we're not on core 0, this must be the SMP boot signal */ @@ -166,10 +167,12 @@ bmips_smp_entry: 2: #endif /* CONFIG_CPU_BMIPS4350 || CONFIG_CPU_BMIPS4380 */ #if defined(CONFIG_CPU_BMIPS5000) - /* set exception vector base */ + /* mask with PRID_IMP_BMIPS5000 to cover both variants */ li k1, PRID_IMP_BMIPS5000 + andi k0, PRID_IMP_BMIPS5000 bne k0, k1, 3f + /* set exception vector base */ la k0, ebase lw k0, 0(k0) mtc0 k0, $15, 1 @@ -263,6 +266,8 @@ LEAF(bmips_enable_xks01) #endif /* CONFIG_CPU_BMIPS4380 */ #if defined(CONFIG_CPU_BMIPS5000) li t1, PRID_IMP_BMIPS5000 + /* mask with PRID_IMP_BMIPS5000 to cover both variants */ + andi t2, PRID_IMP_BMIPS5000 bne t2, t1, 2f mfc0 t0, $22, 5 diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c index 56f166a48fbc..fa3750b3b3a8 100644 --- a/arch/mips/kernel/branch.c +++ b/arch/mips/kernel/branch.c @@ -685,21 +685,9 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, } lose_fpu(1); /* Save FPU state for the emulator. */ reg = insn.i_format.rt; - bit = 0; - switch (insn.i_format.rs) { - case bc1eqz_op: - /* Test bit 0 */ - if (get_fpr32(¤t->thread.fpu.fpr[reg], 0) - & 0x1) - bit = 1; - break; - case bc1nez_op: - /* Test bit 0 */ - if (!(get_fpr32(¤t->thread.fpu.fpr[reg], 0) - & 0x1)) - bit = 1; - break; - } + bit = get_fpr32(¤t->thread.fpu.fpr[reg], 0) & 0x1; + if (insn.i_format.rs == bc1eqz_op) + bit = !bit; own_fpu(1); if (bit) epc = epc + 4 + diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index e38442d5cd6e..ee6671f0a18b 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -1447,7 +1447,10 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu) case PRID_IMP_BMIPS5000: case PRID_IMP_BMIPS5200: c->cputype = CPU_BMIPS5000; - __cpu_name[cpu] = "Broadcom BMIPS5000"; + if ((c->processor_id & PRID_IMP_MASK) == PRID_IMP_BMIPS5200) + __cpu_name[cpu] = "Broadcom BMIPS5200"; + else + __cpu_name[cpu] = "Broadcom BMIPS5000"; set_elf_platform(cpu, "bmips5000"); c->options |= MIPS_CPU_ULRI; break; diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c index 93c46c9cebb7..e757f36cea6f 100644 --- a/arch/mips/kernel/crash.c +++ b/arch/mips/kernel/crash.c @@ -50,9 +50,14 @@ static void crash_shutdown_secondary(void *passed_regs) static void crash_kexec_prepare_cpus(void) { + static int cpus_stopped; unsigned int msecs; + unsigned int ncpus; - unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */ + if (cpus_stopped) + return; + + ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */ dump_send_ipi(crash_shutdown_secondary); smp_wmb(); @@ -67,6 +72,17 @@ static void crash_kexec_prepare_cpus(void) cpu_relax(); mdelay(1); } + + cpus_stopped = 1; +} + +/* Override the weak function in kernel/panic.c */ +void crash_smp_send_stop(void) +{ + if (_crash_smp_send_stop) + _crash_smp_send_stop(); + + crash_kexec_prepare_cpus(); } #else /* !defined(CONFIG_SMP) */ diff --git 
a/arch/mips/kernel/machine_kexec.c b/arch/mips/kernel/machine_kexec.c index 92bc066e47a3..32b567e88b02 100644 --- a/arch/mips/kernel/machine_kexec.c +++ b/arch/mips/kernel/machine_kexec.c @@ -25,6 +25,7 @@ void (*_machine_crash_shutdown)(struct pt_regs *regs) = NULL; #ifdef CONFIG_SMP void (*relocated_kexec_smp_wait) (void *); atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0); +void (*_crash_smp_send_stop)(void) = NULL; #endif int diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c index 9bc1191b1ab0..93391b0c7338 100644 --- a/arch/mips/kernel/perf_event_mipsxx.c +++ b/arch/mips/kernel/perf_event_mipsxx.c @@ -825,6 +825,16 @@ static const struct mips_perf_event mipsxxcore_event_map2 [PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T }, }; +static const struct mips_perf_event i6400_event_map[PERF_COUNT_HW_MAX] = { + [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD }, + [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD }, + /* These only count dcache, not icache */ + [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x45, CNTR_EVEN | CNTR_ODD }, + [PERF_COUNT_HW_CACHE_MISSES] = { 0x48, CNTR_EVEN | CNTR_ODD }, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x15, CNTR_EVEN | CNTR_ODD }, + [PERF_COUNT_HW_BRANCH_MISSES] = { 0x16, CNTR_EVEN | CNTR_ODD }, +}; + static const struct mips_perf_event loongson3_event_map[PERF_COUNT_HW_MAX] = { [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN }, [PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, CNTR_ODD }, @@ -1015,6 +1025,46 @@ static const struct mips_perf_event mipsxxcore_cache_map2 }, }; +static const struct mips_perf_event i6400_cache_map + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { +[C(L1D)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x46, CNTR_EVEN | CNTR_ODD }, + [C(RESULT_MISS)] = { 0x49, CNTR_EVEN | CNTR_ODD }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x47, CNTR_EVEN | CNTR_ODD }, + [C(RESULT_MISS)] = { 0x4a, CNTR_EVEN | CNTR_ODD }, + }, +}, +[C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x84, CNTR_EVEN | CNTR_ODD }, + [C(RESULT_MISS)] = { 0x85, CNTR_EVEN | CNTR_ODD }, + }, +}, +[C(DTLB)] = { + /* Can't distinguish read & write */ + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x40, CNTR_EVEN | CNTR_ODD }, + [C(RESULT_MISS)] = { 0x41, CNTR_EVEN | CNTR_ODD }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x40, CNTR_EVEN | CNTR_ODD }, + [C(RESULT_MISS)] = { 0x41, CNTR_EVEN | CNTR_ODD }, + }, +}, +[C(BPU)] = { + /* Conditional branches / mispredicted */ + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x15, CNTR_EVEN | CNTR_ODD }, + [C(RESULT_MISS)] = { 0x16, CNTR_EVEN | CNTR_ODD }, + }, +}, +}; + static const struct mips_perf_event loongson3_cache_map [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] @@ -1556,7 +1606,6 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config) #endif break; case CPU_P5600: - case CPU_I6400: /* 8-bit event numbers */ raw_id = config & 0x1ff; base_id = raw_id & 0xff; @@ -1569,6 +1618,11 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config) raw_event.range = P; #endif break; + case CPU_I6400: + /* 8-bit event numbers */ + base_id = config & 0xff; + raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; + break; case CPU_1004K: if (IS_BOTH_COUNTERS_1004K_EVENT(base_id)) raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; @@ -1720,8 +1774,8 @@ init_hw_perf_events(void) break; case CPU_I6400: mipspmu.name = "mips/I6400"; - mipspmu.general_event_map = &mipsxxcore_event_map2; - 
mipspmu.cache_event_map = &mipsxxcore_cache_map2; + mipspmu.general_event_map = &i6400_event_map; + mipspmu.cache_event_map = &i6400_cache_map; break; case CPU_1004K: mipspmu.name = "mips/1004K"; diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 816c4b281c0f..5915bbe74929 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -670,9 +670,6 @@ static const struct pt_regs_offset regoffset_table[] = { REG_OFFSET_NAME(c0_badvaddr, cp0_badvaddr), REG_OFFSET_NAME(c0_cause, cp0_cause), REG_OFFSET_NAME(c0_epc, cp0_epc), -#ifdef CONFIG_MIPS_MT_SMTC - REG_OFFSET_NAME(c0_tcstatus, cp0_tcstatus), -#endif #ifdef CONFIG_CPU_CAVIUM_OCTEON REG_OFFSET_NAME(mpl0, mpl[0]), REG_OFFSET_NAME(mpl1, mpl[1]), diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index 29b0c5f978e4..7ee8c6269b22 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -35,7 +35,6 @@ NESTED(handle_sys, PT_SIZE, sp) lw t1, PT_EPC(sp) # skip syscall on return - subu v0, v0, __NR_O32_Linux # check syscall number addiu t1, 4 # skip to next instruction sw t1, PT_EPC(sp) @@ -89,6 +88,7 @@ loads_done: and t0, t1 bnez t0, syscall_trace_entry # -> yes syscall_common: + subu v0, v0, __NR_O32_Linux # check syscall number sltiu t0, v0, __NR_O32_Linux_syscalls + 1 beqz t0, illegal_syscall @@ -118,24 +118,23 @@ o32_syscall_exit: syscall_trace_entry: SAVE_STATIC - move s0, v0 move a0, sp /* * syscall number is in v0 unless we called syscall(__NR_###) * where the real syscall number is in a0 */ - addiu a1, v0, __NR_O32_Linux - bnez v0, 1f /* __NR_syscall at offset 0 */ + move a1, v0 + subu t2, v0, __NR_O32_Linux + bnez t2, 1f /* __NR_syscall at offset 0 */ lw a1, PT_R4(sp) 1: jal syscall_trace_enter bltz v0, 1f # seccomp failed? Skip syscall - move v0, s0 # restore syscall - RESTORE_STATIC + lw v0, PT_R2(sp) # Restore syscall (maybe modified) lw a0, PT_R4(sp) # Restore argument registers lw a1, PT_R5(sp) lw a2, PT_R6(sp) diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index a6323a969919..01779c315bc6 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S @@ -82,15 +82,14 @@ n64_syscall_exit: syscall_trace_entry: SAVE_STATIC - move s0, v0 move a0, sp move a1, v0 jal syscall_trace_enter bltz v0, 1f # seccomp failed? Skip syscall - move v0, s0 RESTORE_STATIC + ld v0, PT_R2(sp) # Restore syscall (maybe modified) ld a0, PT_R4(sp) # Restore argument registers ld a1, PT_R5(sp) ld a2, PT_R6(sp) diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index e0fdca8d3abe..0d22a5cc0b8b 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -42,9 +42,6 @@ NESTED(handle_sysn32, PT_SIZE, sp) #endif beqz t0, not_n32_scall - dsll t0, v0, 3 # offset into table - ld t2, (sysn32_call_table - (__NR_N32_Linux * 8))(t0) - sd a3, PT_R26(sp) # save a3 for syscall restarting li t1, _TIF_WORK_SYSCALL_ENTRY @@ -53,6 +50,9 @@ NESTED(handle_sysn32, PT_SIZE, sp) bnez t0, n32_syscall_trace_entry syscall_common: + dsll t0, v0, 3 # offset into table + ld t2, (sysn32_call_table - (__NR_N32_Linux * 8))(t0) + jalr t2 # Do The Real Thing (TM) li t0, -EMAXERRNO - 1 # error? @@ -71,21 +71,25 @@ syscall_common: n32_syscall_trace_entry: SAVE_STATIC - move s0, t2 move a0, sp move a1, v0 jal syscall_trace_enter bltz v0, 1f # seccomp failed? 
Skip syscall - move t2, s0 RESTORE_STATIC + ld v0, PT_R2(sp) # Restore syscall (maybe modified) ld a0, PT_R4(sp) # Restore argument registers ld a1, PT_R5(sp) ld a2, PT_R6(sp) ld a3, PT_R7(sp) ld a4, PT_R8(sp) ld a5, PT_R9(sp) + + dsubu t2, v0, __NR_N32_Linux # check (new) syscall number + sltiu t0, t2, __NR_N32_Linux_syscalls + 1 + beqz t0, not_n32_scall + j syscall_common 1: j syscall_exit diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 4faff3e77b25..a5cc2b2823d2 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -52,9 +52,6 @@ NESTED(handle_sys, PT_SIZE, sp) sll a2, a2, 0 sll a3, a3, 0 - dsll t0, v0, 3 # offset into table - ld t2, (sys32_call_table - (__NR_O32_Linux * 8))(t0) - sd a3, PT_R26(sp) # save a3 for syscall restarting /* @@ -88,6 +85,9 @@ loads_done: bnez t0, trace_a_syscall syscall_common: + dsll t0, v0, 3 # offset into table + ld t2, (sys32_call_table - (__NR_O32_Linux * 8))(t0) + jalr t2 # Do The Real Thing (TM) li t0, -EMAXERRNO - 1 # error? @@ -112,7 +112,6 @@ trace_a_syscall: sd a6, PT_R10(sp) sd a7, PT_R11(sp) # For indirect syscalls - move s0, t2 # Save syscall pointer move a0, sp /* * absolute syscall number is in v0 unless we called syscall(__NR_###) @@ -133,8 +132,8 @@ trace_a_syscall: bltz v0, 1f # seccomp failed? Skip syscall - move t2, s0 RESTORE_STATIC + ld v0, PT_R2(sp) # Restore syscall (maybe modified) ld a0, PT_R4(sp) # Restore argument registers ld a1, PT_R5(sp) ld a2, PT_R6(sp) @@ -143,6 +142,11 @@ trace_a_syscall: ld a5, PT_R9(sp) ld a6, PT_R10(sp) ld a7, PT_R11(sp) # For indirect syscalls + + dsubu t0, v0, __NR_O32_Linux # check (new) syscall number + sltiu t0, t0, __NR_O32_Linux_syscalls + 1 + beqz t0, not_o32_scall + j syscall_common 1: j syscall_exit diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index ded8f4ff0cdc..5c104b13c90e 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -735,7 +735,7 @@ static void __init request_crashkernel(struct resource *res) #define USE_PROM_CMDLINE IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER) #define USE_DTB_CMDLINE IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB) -#define EXTEND_WITH_PROM IS_ENABLED(CONFIG_MIPS_CMDLINE_EXTEND) +#define EXTEND_WITH_PROM IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND) static void __init arch_mem_init(char **cmdline_p) { diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index a62d24169d75..a00c4699ca10 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c @@ -362,6 +362,7 @@ static int bmips_cpu_disable(void) pr_info("SMP: CPU%d is offline\n", cpu); set_cpu_online(cpu, false); + calculate_cpu_foreign_map(); cpumask_clear_cpu(cpu, &cpu_callin_map); clear_c0_status(IE_IRQ5); diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c index 1b78309fb493..1c8a5517adb7 100644 --- a/arch/mips/kernel/smp-cps.c +++ b/arch/mips/kernel/smp-cps.c @@ -392,6 +392,7 @@ static int cps_cpu_disable(void) atomic_sub(1 << cpu_vpe_id(¤t_cpu_data), &core_cfg->vpe_mask); smp_mb__after_atomic(); set_cpu_online(cpu, false); + calculate_cpu_foreign_map(); cpumask_clear_cpu(cpu, &cpu_callin_map); return 0; diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 1ef11f46db5a..91eb386c5353 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -118,7 +118,7 @@ static inline void set_cpu_core_map(int cpu) * Calculate a new cpu_foreign_map mask whenever a * new cpu appears or disappears. 
*/ -static inline void calculate_cpu_foreign_map(void) +void calculate_cpu_foreign_map(void) { int i, k, core_present; cpumask_t temp_foreign_map; diff --git a/arch/mips/kvm/dyntrans.c b/arch/mips/kvm/dyntrans.c index 521121bdebff..4974bfc2c5c8 100644 --- a/arch/mips/kvm/dyntrans.c +++ b/arch/mips/kvm/dyntrans.c @@ -82,7 +82,7 @@ int kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu) if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) { mfc0_inst = CLEAR_TEMPLATE; - mfc0_inst |= ((rt & 0x1f) << 16); + mfc0_inst |= ((rt & 0x1f) << 11); } else { mfc0_inst = LW_TEMPLATE; mfc0_inst |= ((rt & 0x1f) << 16); diff --git a/arch/mips/loongson64/loongson-3/smp.c b/arch/mips/loongson64/loongson-3/smp.c index 509832a9836c..2525b6d38f58 100644 --- a/arch/mips/loongson64/loongson-3/smp.c +++ b/arch/mips/loongson64/loongson-3/smp.c @@ -417,6 +417,7 @@ static int loongson3_cpu_disable(void) return -EBUSY; set_cpu_online(cpu, false); + calculate_cpu_foreign_map(); cpumask_clear_cpu(cpu, &cpu_callin_map); local_irq_save(flags); fixup_irqs(); diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index ebb5e3bfe12a..56edbe569fd0 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c @@ -1199,8 +1199,7 @@ emul: if (!cpu_has_mips_r6 || delay_slot(xcp)) return SIGILL; - likely = 0; - cond = 0; + cond = likely = 0; fpr = ¤t->thread.fpu.fpr[MIPSInst_RT(ir)]; bit0 = get_fpr32(fpr, 0) & 0x1; switch (MIPSInst_RS(ir)) { diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 52cb3e09d172..8116650e14ab 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -447,6 +447,11 @@ static inline void local_r4k___flush_cache_all(void * args) r4k_blast_scache(); break; + case CPU_BMIPS5000: + r4k_blast_scache(); + __sync(); + break; + default: r4k_blast_dcache(); r4k_blast_icache(); @@ -1371,6 +1376,12 @@ static void probe_pcache(void) c->icache.flags |= MIPS_CACHE_IC_F_DC; break; + case CPU_BMIPS5000: + c->icache.flags |= MIPS_CACHE_IC_F_DC; + /* Cache aliases are handled in hardware; allow HIGHMEM */ + c->dcache.flags &= ~MIPS_CACHE_ALIASES; + break; + case CPU_LOONGSON2: /* * LOONGSON2 has 4 way icache, but when using indexed cache op, @@ -1813,8 +1824,6 @@ void r4k_cache_init(void) flush_icache_range = (void *)b5k_instruction_hazard; local_flush_icache_range = (void *)b5k_instruction_hazard; - /* Cache aliases are handled in hardware; allow HIGHMEM */ - current_cpu_data.dcache.flags &= ~MIPS_CACHE_ALIASES; /* Optimization: an L2 flush implicitly flushes the L1 */ current_cpu_data.options |= MIPS_CPU_INCLUSIVE_CACHES; diff --git a/arch/mips/mm/sc-rm7k.c b/arch/mips/mm/sc-rm7k.c index 9ac1efcfbcc7..78f900c59276 100644 --- a/arch/mips/mm/sc-rm7k.c +++ b/arch/mips/mm/sc-rm7k.c @@ -161,7 +161,7 @@ static void rm7k_tc_disable(void) local_irq_save(flags); blast_rm7k_tcache(); clear_c0_config(RM7K_CONF_TE); - local_irq_save(flags); + local_irq_restore(flags); } static void rm7k_sc_disable(void) diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index b5a0234da765..6397947eae4d 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -2329,9 +2329,7 @@ static void config_htw_params(void) if (CONFIG_PGTABLE_LEVELS >= 3) pwsize |= ilog2(PTRS_PER_PMD) << MIPS_PWSIZE_MDW_SHIFT; - /* If XPA has been enabled, PTEs are 64-bit in size. 
*/ - if (config_enabled(CONFIG_64BITS) || (read_c0_pagegrain() & PG_ELPA)) - pwsize |= 1; + pwsize |= ilog2(sizeof(pte_t)/4) << MIPS_PWSIZE_PTEW_SHIFT; write_c0_pwsize(pwsize); diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c index c0c1e9529dbd..742daf8351b9 100644 --- a/arch/mips/net/bpf_jit.c +++ b/arch/mips/net/bpf_jit.c @@ -1207,7 +1207,7 @@ void bpf_jit_compile(struct bpf_prog *fp) memset(&ctx, 0, sizeof(ctx)); - ctx.offsets = kcalloc(fp->len, sizeof(*ctx.offsets), GFP_KERNEL); + ctx.offsets = kcalloc(fp->len + 1, sizeof(*ctx.offsets), GFP_KERNEL); if (ctx.offsets == NULL) return; diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 63741f2e8d01..d72f00310683 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -604,7 +604,7 @@ void __init mem_init(void) > BITS_PER_LONG); high_memory = __va((max_pfn << PAGE_SHIFT)); - set_max_mapnr(page_to_pfn(virt_to_page(high_memory - 1)) + 1); + set_max_mapnr(max_low_pfn); free_all_bootmem(); #ifdef CONFIG_PA11 diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c index da3c4c3f4ec8..d4936615a756 100644 --- a/arch/powerpc/kernel/mce.c +++ b/arch/powerpc/kernel/mce.c @@ -92,7 +92,8 @@ void save_mce_event(struct pt_regs *regs, long handled, mce->in_use = 1; mce->initiator = MCE_INITIATOR_CPU; - if (handled) + /* Mark it recovered if we have handled it and MSR(RI=1). */ + if (handled && (regs->msr & MSR_RI)) mce->disposition = MCE_DISPOSITION_RECOVERED; else mce->disposition = MCE_DISPOSITION_NOT_RECOVERED; diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c index a38d7293460d..985b5be3bcf6 100644 --- a/arch/powerpc/kernel/pci_of_scan.c +++ b/arch/powerpc/kernel/pci_of_scan.c @@ -82,10 +82,16 @@ static void of_pci_parse_addrs(struct device_node *node, struct pci_dev *dev) const __be32 *addrs; u32 i; int proplen; + bool mark_unset = false; addrs = of_get_property(node, "assigned-addresses", &proplen); - if (!addrs) - return; + if (!addrs || !proplen) { + addrs = of_get_property(node, "reg", &proplen); + if (!addrs || !proplen) + return; + mark_unset = true; + } + pr_debug(" parse addresses (%d bytes) @ %p\n", proplen, addrs); for (; proplen >= 20; proplen -= 20, addrs += 5) { flags = pci_parse_of_flags(of_read_number(addrs, 1), 0); @@ -110,6 +116,8 @@ static void of_pci_parse_addrs(struct device_node *node, struct pci_dev *dev) continue; } res->flags = flags; + if (mark_unset) + res->flags |= IORESOURCE_UNSET; res->name = pci_name(dev); region.start = base; region.end = base + size - 1; diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S index 2d2860711e07..55e831238485 100644 --- a/arch/powerpc/kernel/tm.S +++ b/arch/powerpc/kernel/tm.S @@ -352,8 +352,6 @@ _GLOBAL(__tm_recheckpoint) */ subi r7, r7, STACK_FRAME_OVERHEAD - SET_SCRATCH0(r1) - mfmsr r6 /* R4 = original MSR to indicate whether thread used FP/Vector etc. */ @@ -482,6 +480,7 @@ restore_gprs: * until we turn MSR RI back on. 
*/ + SET_SCRATCH0(r1) ld r5, -8(r1) ld r1, -16(r1) diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c index d3787618315f..56125dbdb1e2 100644 --- a/arch/powerpc/platforms/powernv/opal.c +++ b/arch/powerpc/platforms/powernv/opal.c @@ -401,6 +401,7 @@ static int opal_recover_mce(struct pt_regs *regs, if (!(regs->msr & MSR_RI)) { /* If MSR_RI isn't set, we cannot recover */ + pr_err("Machine check interrupt unrecoverable: MSR(RI=0)\n"); recovered = 0; } else if (evt->disposition == MCE_DISPOSITION_RECOVERED) { /* Platform corrected itself */ diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index 575c9afeba9b..217b60246cbb 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -331,7 +331,8 @@ sysenter_past_esp: * Return back to the vDSO, which will pop ecx and edx. * Don't bother with DS and ES (they already contain __USER_DS). */ - ENABLE_INTERRUPTS_SYSEXIT + sti + sysexit .pushsection .fixup, "ax" 2: movl $0, PT_FS(%esp) @@ -554,11 +555,6 @@ ENTRY(native_iret) iret _ASM_EXTABLE(native_iret, iret_exc) END(native_iret) - -ENTRY(native_irq_enable_sysexit) - sti - sysexit -END(native_irq_enable_sysexit) #endif ENTRY(overflow) diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h index 3d1ec41ae09a..20370c6db74b 100644 --- a/arch/x86/include/asm/apm.h +++ b/arch/x86/include/asm/apm.h @@ -6,8 +6,6 @@ #ifndef _ASM_X86_MACH_DEFAULT_APM_H #define _ASM_X86_MACH_DEFAULT_APM_H -#include <asm/nospec-branch.h> - #ifdef APM_ZERO_SEGS # define APM_DO_ZERO_SEGS \ "pushl %%ds\n\t" \ @@ -33,7 +31,6 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in, * N.B. We do NOT need a cld after the BIOS call * because we always save and restore the flags. */ - firmware_restrict_branch_speculation_start(); __asm__ __volatile__(APM_DO_ZERO_SEGS "pushl %%edi\n\t" "pushl %%ebp\n\t" @@ -46,7 +43,6 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in, "=S" (*esi) : "a" (func), "b" (ebx_in), "c" (ecx_in) : "memory", "cc"); - firmware_restrict_branch_speculation_end(); } static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in, @@ -59,7 +55,6 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in, * N.B. We do NOT need a cld after the BIOS call * because we always save and restore the flags. 
*/ - firmware_restrict_branch_speculation_start(); __asm__ __volatile__(APM_DO_ZERO_SEGS "pushl %%edi\n\t" "pushl %%ebp\n\t" @@ -72,7 +67,6 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in, "=S" (si) : "a" (func), "b" (ebx_in), "c" (ecx_in) : "memory", "cc"); - firmware_restrict_branch_speculation_end(); return error; } diff --git a/arch/x86/include/asm/dma.h b/arch/x86/include/asm/dma.h index fe884e18fa6e..c7854a098b6b 100644 --- a/arch/x86/include/asm/dma.h +++ b/arch/x86/include/asm/dma.h @@ -73,7 +73,7 @@ #define MAX_DMA_PFN ((16UL * 1024 * 1024) >> PAGE_SHIFT) /* 4GB broken PCI/AGP hardware bus master zone */ -#define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT) +#define MAX_DMA32_PFN (1UL << (32 - PAGE_SHIFT)) #ifdef CONFIG_X86_32 /* The maximum address that we can perform a DMA transfer to on this platform */ diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index c759b3cca663..b4c5099cafee 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -938,13 +938,6 @@ extern void default_banner(void); push %ecx; push %edx; \ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \ pop %edx; pop %ecx - -#define ENABLE_INTERRUPTS_SYSEXIT \ - PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \ - CLBR_NONE, \ - jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit)) - - #else /* !CONFIG_X86_32 */ /* diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index 3d44191185f8..cc0e5a666c9e 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -162,15 +162,6 @@ struct pv_cpu_ops { u64 (*read_pmc)(int counter); -#ifdef CONFIG_X86_32 - /* - * Atomically enable interrupts and return to userspace. This - * is only used in 32-bit kernels. 64-bit kernels use - * usergs_sysret32 instead. - */ - void (*irq_enable_sysexit)(void); -#endif - /* * Switch to usermode gs and return to 64-bit usermode using * sysret. Only used in 64-bit kernels to return to 64-bit diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h index 58505f01962f..743bd2d77e51 100644 --- a/arch/x86/include/asm/stackprotector.h +++ b/arch/x86/include/asm/stackprotector.h @@ -54,8 +54,13 @@ /* * Initialize the stackprotector canary value. * - * NOTE: this must only be called from functions that never return, + * NOTE: this must only be called from functions that never return * and it must always be inlined. + * + * In addition, it should be called from a compilation unit for which + * stack protector is disabled. Alternatively, the caller should not end + * with a function call which gets tail-call optimized as that would + * lead to checking a modified canary value. 
*/ static __always_inline void boot_init_stack_canary(void) { diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 4a139465f1d4..7554075414d4 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -648,9 +648,9 @@ static __init void map_mmioh_high_uv3(int index, int min_pnode, int max_pnode) l = li; } addr1 = (base << shift) + - f * (unsigned long)(1 << m_io); + f * (1ULL << m_io); addr2 = (base << shift) + - (l + 1) * (unsigned long)(1 << m_io); + (l + 1) * (1ULL << m_io); pr_info("UV: %s[%03d..%03d] NASID 0x%04x ADDR 0x%016lx - 0x%016lx\n", id, fi, li, lnasid, addr1, addr2); if (max_io < l) diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index 052c9c3026cc..dfdbe01ef9f2 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c @@ -239,6 +239,7 @@ #include <asm/olpc.h> #include <asm/paravirt.h> #include <asm/reboot.h> +#include <asm/nospec-branch.h> #if defined(CONFIG_APM_DISPLAY_BLANK) && defined(CONFIG_VT) extern int (*console_blank_hook)(int); @@ -613,11 +614,13 @@ static long __apm_bios_call(void *_call) gdt[0x40 / 8] = bad_bios_desc; apm_irq_save(flags); + firmware_restrict_branch_speculation_start(); APM_DO_SAVE_SEGS; apm_bios_call_asm(call->func, call->ebx, call->ecx, &call->eax, &call->ebx, &call->ecx, &call->edx, &call->esi); APM_DO_RESTORE_SEGS; + firmware_restrict_branch_speculation_end(); apm_irq_restore(flags); gdt[0x40 / 8] = save_desc_40; put_cpu(); @@ -689,10 +692,12 @@ static long __apm_bios_call_simple(void *_call) gdt[0x40 / 8] = bad_bios_desc; apm_irq_save(flags); + firmware_restrict_branch_speculation_start(); APM_DO_SAVE_SEGS; error = apm_bios_call_simple_asm(call->func, call->ebx, call->ecx, &call->eax); APM_DO_RESTORE_SEGS; + firmware_restrict_branch_speculation_end(); apm_irq_restore(flags); gdt[0x40 / 8] = save_desc_40; put_cpu(); diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c index 439df975bc7a..84a7524b202c 100644 --- a/arch/x86/kernel/asm-offsets.c +++ b/arch/x86/kernel/asm-offsets.c @@ -65,9 +65,6 @@ void common(void) { OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable); OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable); OFFSET(PV_CPU_iret, pv_cpu_ops, iret); -#ifdef CONFIG_X86_32 - OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit); -#endif OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0); OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2); #endif diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index fbf2edc3eb35..b983d3dc4e6c 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1550,6 +1550,7 @@ static void __init filter_events(struct attribute **attrs) { struct device_attribute *d; struct perf_pmu_events_attr *pmu_attr; + int offset = 0; int i, j; for (i = 0; attrs[i]; i++) { @@ -1558,7 +1559,7 @@ static void __init filter_events(struct attribute **attrs) /* str trumps id */ if (pmu_attr->event_str) continue; - if (x86_pmu.event_map(i)) + if (x86_pmu.event_map(i + offset)) continue; for (j = i; attrs[j]; j++) @@ -1566,6 +1567,14 @@ static void __init filter_events(struct attribute **attrs) /* Check the shifted attr. */ i--; + + /* + * event_map() is index based, the attrs array is organized + * by increasing event index. 
If we shift the events, then + * we need to compensate for the event_map(), otherwise + * we are looking up the wrong event in the map + */ + offset++; } } diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 632195b41688..2cd05f34c0b6 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -168,9 +168,6 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf, ret = paravirt_patch_ident_64(insnbuf, len); else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) || -#ifdef CONFIG_X86_32 - type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) || -#endif type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret32) || type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret64)) /* If operation requires a jmp, then jmp */ @@ -226,7 +223,6 @@ static u64 native_steal_clock(int cpu) /* These are in entry.S */ extern void native_iret(void); -extern void native_irq_enable_sysexit(void); extern void native_usergs_sysret32(void); extern void native_usergs_sysret64(void); @@ -385,9 +381,6 @@ __visible struct pv_cpu_ops pv_cpu_ops = { .load_sp0 = native_load_sp0, -#if defined(CONFIG_X86_32) - .irq_enable_sysexit = native_irq_enable_sysexit, -#endif #ifdef CONFIG_X86_64 #ifdef CONFIG_IA32_EMULATION .usergs_sysret32 = native_usergs_sysret32, diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c index c89f50a76e97..158dc0650d5d 100644 --- a/arch/x86/kernel/paravirt_patch_32.c +++ b/arch/x86/kernel/paravirt_patch_32.c @@ -5,7 +5,6 @@ DEF_NATIVE(pv_irq_ops, irq_enable, "sti"); DEF_NATIVE(pv_irq_ops, restore_fl, "push %eax; popf"); DEF_NATIVE(pv_irq_ops, save_fl, "pushf; pop %eax"); DEF_NATIVE(pv_cpu_ops, iret, "iret"); -DEF_NATIVE(pv_cpu_ops, irq_enable_sysexit, "sti; sysexit"); DEF_NATIVE(pv_mmu_ops, read_cr2, "mov %cr2, %eax"); DEF_NATIVE(pv_mmu_ops, write_cr3, "mov %eax, %cr3"); DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax"); @@ -46,7 +45,6 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf, PATCH_SITE(pv_irq_ops, restore_fl); PATCH_SITE(pv_irq_ops, save_fl); PATCH_SITE(pv_cpu_ops, iret); - PATCH_SITE(pv_cpu_ops, irq_enable_sysexit); PATCH_SITE(pv_mmu_ops, read_cr2); PATCH_SITE(pv_mmu_ops, read_cr3); PATCH_SITE(pv_mmu_ops, write_cr3); diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c index 0677bf8d3a42..03c6a8cf33c4 100644 --- a/arch/x86/kernel/paravirt_patch_64.c +++ b/arch/x86/kernel/paravirt_patch_64.c @@ -12,7 +12,6 @@ DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3"); DEF_NATIVE(pv_cpu_ops, clts, "clts"); DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd"); -DEF_NATIVE(pv_cpu_ops, irq_enable_sysexit, "swapgs; sti; sysexit"); DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq"); DEF_NATIVE(pv_cpu_ops, usergs_sysret32, "swapgs; sysretl"); DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs"); diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 618565fecb1c..1a79d451cd34 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -128,7 +128,7 @@ void release_thread(struct task_struct *dead_task) if (dead_task->mm->context.ldt) { pr_warn("WARNING: dead process %s still has LDT? 
<%p/%d>\n", dead_task->comm, - dead_task->mm->context.ldt, + dead_task->mm->context.ldt->entries, dead_task->mm->context.ldt->size); BUG(); } diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index c017f1c71560..0512af683871 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -243,6 +243,14 @@ static void notrace start_secondary(void *unused) wmb(); cpu_startup_entry(CPUHP_ONLINE); + + /* + * Prevent tail call to cpu_startup_entry() because the stack protector + * guard has been changed a couple of function calls up, in + * boot_init_stack_canary() and must not be checked before tail calling + * another function. + */ + prevent_tail_call_optimization(); } void __init smp_store_boot_cpu_info(void) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 3adc255e69cb..aac60d1605ff 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2941,7 +2941,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu, unsigned bank_num = mcg_cap & 0xff, bank; r = -EINVAL; - if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS) + if (!bank_num || bank_num > KVM_MAX_MCE_BANKS) goto out; if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000)) goto out; diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 82fd84d5e1aa..79aff24eed65 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -1240,10 +1240,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = { .iret = xen_iret, #ifdef CONFIG_X86_64 - .usergs_sysret32 = xen_sysret32, .usergs_sysret64 = xen_sysret64, -#else - .irq_enable_sysexit = xen_sysexit, #endif .load_tr_desc = paravirt_nop, diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 29e50d1229bc..ee48506ca151 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -116,6 +116,7 @@ asmlinkage __visible void cpu_bringup_and_idle(int cpu) #endif cpu_bringup(); cpu_startup_entry(CPUHP_ONLINE); + prevent_tail_call_optimization(); } static void xen_smp_intr_free(unsigned int cpu) diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S index fd92a64d748e..feb6d40a0860 100644 --- a/arch/x86/xen/xen-asm_32.S +++ b/arch/x86/xen/xen-asm_32.S @@ -35,20 +35,6 @@ check_events: ret /* - * We can't use sysexit directly, because we're not running in ring0. - * But we can easily fake it up using iret. Assuming xen_sysexit is - * jumped to with a standard stack frame, we can just strip it back to - * a standard iret frame and use iret. - */ -ENTRY(xen_sysexit) - movl PT_EAX(%esp), %eax /* Shouldn't be necessary? 
*/ - orl $X86_EFLAGS_IF, PT_EFLAGS(%esp) - lea PT_EIP(%esp), %esp - - jmp xen_iret -ENDPROC(xen_sysexit) - -/* * This is run where a normal iret would be run, with the same stack setup: * 8: eflags * 4: cs diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index 1399423f3418..4140b070f2e9 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h @@ -139,9 +139,6 @@ DECL_ASM(void, xen_restore_fl_direct, unsigned long); /* These are not functions, and cannot be called normally */ __visible void xen_iret(void); -#ifdef CONFIG_X86_32 -__visible void xen_sysexit(void); -#endif __visible void xen_sysret32(void); __visible void xen_sysret64(void); __visible void xen_adjust_exception_frame(void); diff --git a/block/blk-core.c b/block/blk-core.c index ea1f8715c8f7..96433980aee4 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -728,6 +728,9 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) kobject_init(&q->kobj, &blk_queue_ktype); +#ifdef CONFIG_BLK_DEV_IO_TRACE + mutex_init(&q->blk_trace_mutex); +#endif mutex_init(&q->sysfs_lock); spin_lock_init(&q->__queue_lock); diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index a07ca3488d96..c1c654319287 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -481,6 +481,11 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn, struct blk_mq_hw_ctx *hctx; int i; + /* + * Avoid potential races with things like queue removal. + */ + if (!percpu_ref_tryget(&q->q_usage_counter)) + return; queue_for_each_hw_ctx(q, hctx, i) { struct blk_mq_tags *tags = hctx->tags; @@ -497,7 +502,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn, bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv, false); } - + blk_queue_exit(q); } static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt) diff --git a/block/blk-mq.c b/block/blk-mq.c index d65ddc18d7e4..18d0a0270470 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -628,6 +628,22 @@ static void blk_mq_rq_timer(unsigned long priv) }; int i; + /* A deadlock might occur if a request is stuck requiring a + * timeout at the same time a queue freeze is waiting + * completion, since the timeout code would not be able to + * acquire the queue reference here. + * + * That's why we don't use blk_queue_enter here; instead, we use + * percpu_ref_tryget directly, because we need to be able to + * obtain a reference even in the short window between the queue + * starting to freeze, by dropping the first reference in + * blk_mq_freeze_queue_start, and the moment the last request is + * consumed, marked by the instant q_usage_counter reaches + * zero. 
+ */ + if (!percpu_ref_tryget(&q->q_usage_counter)) + return; + blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data); if (data.next_set) { @@ -642,6 +658,7 @@ static void blk_mq_rq_timer(unsigned long priv) blk_mq_tag_idle(hctx); } } + blk_queue_exit(q); } /* @@ -1491,7 +1508,7 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set, int to_do; void *p; - while (left < order_to_size(this_order - 1) && this_order) + while (this_order && left < order_to_size(this_order - 1)) this_order--; do { diff --git a/block/blk-timeout.c b/block/blk-timeout.c index aa40aa93381b..2bc03df554a6 100644 --- a/block/blk-timeout.c +++ b/block/blk-timeout.c @@ -134,6 +134,8 @@ void blk_rq_timed_out_timer(unsigned long data) struct request *rq, *tmp; int next_set = 0; + if (blk_queue_enter(q, GFP_NOWAIT)) + return; spin_lock_irqsave(q->queue_lock, flags); list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list) @@ -143,6 +145,7 @@ void blk_rq_timed_out_timer(unsigned long data) mod_timer(&q->timeout, round_jiffies_up(next)); spin_unlock_irqrestore(q->queue_lock, flags); + blk_queue_exit(q); } /** diff --git a/crypto/lrw.c b/crypto/lrw.c index d38a382b09eb..fc3d4fec8ddd 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c @@ -377,7 +377,7 @@ out_put_alg: return inst; } -static void free(struct crypto_instance *inst) +static void free_inst(struct crypto_instance *inst) { crypto_drop_spawn(crypto_instance_ctx(inst)); kfree(inst); @@ -386,7 +386,7 @@ static void free(struct crypto_instance *inst) static struct crypto_template crypto_tmpl = { .name = "lrw", .alloc = alloc, - .free = free, + .free = free_inst, .module = THIS_MODULE, }; diff --git a/crypto/xts.c b/crypto/xts.c index f6fd43f100c8..4ee09c440d12 100644 --- a/crypto/xts.c +++ b/crypto/xts.c @@ -334,7 +334,7 @@ out_put_alg: return inst; } -static void free(struct crypto_instance *inst) +static void free_inst(struct crypto_instance *inst) { crypto_drop_spawn(crypto_instance_ctx(inst)); kfree(inst); @@ -343,7 +343,7 @@ static void free(struct crypto_instance *inst) static struct crypto_template crypto_tmpl = { .name = "xts", .alloc = alloc, - .free = free, + .free = free_inst, .module = THIS_MODULE, }; diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index e54e6170981b..a17337fd8f37 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -704,8 +704,13 @@ static int acpi_lpss_platform_notify(struct notifier_block *nb, } switch (action) { - case BUS_NOTIFY_ADD_DEVICE: + case BUS_NOTIFY_BOUND_DRIVER: pdev->dev.pm_domain = &acpi_lpss_pm_domain; + break; + case BUS_NOTIFY_UNBOUND_DRIVER: + pdev->dev.pm_domain = NULL; + break; + case BUS_NOTIFY_ADD_DEVICE: if (pdata->dev_desc->flags & LPSS_LTR) return sysfs_create_group(&pdev->dev.kobj, &lpss_attr_group); @@ -713,7 +718,6 @@ static int acpi_lpss_platform_notify(struct notifier_block *nb, case BUS_NOTIFY_DEL_DEVICE: if (pdata->dev_desc->flags & LPSS_LTR) sysfs_remove_group(&pdev->dev.kobj, &lpss_attr_group); - pdev->dev.pm_domain = NULL; break; default: break; diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index 8c5503c0bad7..0936b68eff80 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -289,17 +289,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Dell System XPS L702X"), }, }, - { - /* https://bugzilla.redhat.com/show_bug.cgi?id=1204476 */ - /* https://bugs.launchpad.net/ubuntu/+source/linux-lts-trusty/+bug/1416940 */ - .callback = 
video_detect_force_native, - .ident = "HP Pavilion dv6", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv6 Notebook PC"), - }, - }, - { }, }; diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c index 902034991517..7a7faca0ddcd 100644 --- a/drivers/ata/sata_dwc_460ex.c +++ b/drivers/ata/sata_dwc_460ex.c @@ -924,15 +924,13 @@ static void sata_dwc_exec_command_by_tag(struct ata_port *ap, struct ata_taskfile *tf, u8 tag, u32 cmd_issued) { - unsigned long flags; struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); dev_dbg(ap->dev, "%s cmd(0x%02x): %s tag=%d\n", __func__, tf->command, ata_get_cmd_descript(tf->command), tag); - spin_lock_irqsave(&ap->host->lock, flags); hsdevp->cmd_issued[tag] = cmd_issued; - spin_unlock_irqrestore(&ap->host->lock, flags); + /* * Clear SError before executing a new command. * sata_dwc_scr_write and read can not be used here. Clearing the PM diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index c1093c0d4dea..991d07b219cb 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c @@ -1303,15 +1303,17 @@ static int assign_firmware_buf(struct firmware *fw, struct device *device, /* called from request_firmware() and request_firmware_work_func() */ static int _request_firmware(struct fw_desc *desc) { - struct firmware *fw; + struct firmware *fw = NULL; long timeout; int ret; if (!desc->firmware_p) return -EINVAL; - if (!desc->name || desc->name[0] == '\0') - return -EINVAL; + if (!desc->name || desc->name[0] == '\0') { + ret = -EINVAL; + goto out; + } ret = _request_firmware_prepare(&fw, desc); if (ret <= 0) /* error or already assigned */ diff --git a/drivers/base/isa.c b/drivers/base/isa.c index 901d8185309e..372d10af2600 100644 --- a/drivers/base/isa.c +++ b/drivers/base/isa.c @@ -180,4 +180,4 @@ static int __init isa_bus_init(void) return error; } -device_initcall(isa_bus_init); +postcore_initcall(isa_bus_init); diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c index 71ea2a3af293..9c6843af1c6b 100644 --- a/drivers/bluetooth/btmrvl_sdio.c +++ b/drivers/bluetooth/btmrvl_sdio.c @@ -1112,7 +1112,8 @@ static int btmrvl_sdio_download_fw(struct btmrvl_sdio_card *card) */ if (btmrvl_sdio_verify_fw_download(card, pollnum)) { BT_ERR("FW failed to be active in time!"); - return -ETIMEDOUT; + ret = -ETIMEDOUT; + goto done; } sdio_release_host(card->func); diff --git a/drivers/char/hw_random/exynos-rng.c b/drivers/char/hw_random/exynos-rng.c index 7ba0ae060d61..66115ef979b1 100644 --- a/drivers/char/hw_random/exynos-rng.c +++ b/drivers/char/hw_random/exynos-rng.c @@ -155,6 +155,14 @@ static int exynos_rng_probe(struct platform_device *pdev) return ret; } +static int exynos_rng_remove(struct platform_device *pdev) +{ + pm_runtime_dont_use_autosuspend(&pdev->dev); + pm_runtime_disable(&pdev->dev); + + return 0; +} + static int __maybe_unused exynos_rng_runtime_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); @@ -212,6 +220,7 @@ static struct platform_driver exynos_rng_driver = { .of_match_table = exynos_rng_dt_match, }, .probe = exynos_rng_probe, + .remove = exynos_rng_remove, }; module_platform_driver(exynos_rng_driver); diff --git a/drivers/clk/clk-gpio.c b/drivers/clk/clk-gpio.c index 335322dc403f..9bc801f3a7ba 100644 --- a/drivers/clk/clk-gpio.c +++ b/drivers/clk/clk-gpio.c @@ -287,12 +287,14 @@ static void __init of_gpio_clk_setup(struct device_node *node, const char 
**parent_names; int i, num_parents; + num_parents = of_clk_get_parent_count(node); + if (num_parents < 0) + return; + data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return; - num_parents = of_clk_get_parent_count(node); - parent_names = kcalloc(num_parents, sizeof(char *), GFP_KERNEL); if (!parent_names) return; diff --git a/drivers/clk/clk-multiplier.c b/drivers/clk/clk-multiplier.c index fe7806506bf3..e9fb8a111f71 100644 --- a/drivers/clk/clk-multiplier.c +++ b/drivers/clk/clk-multiplier.c @@ -54,14 +54,28 @@ static unsigned long __bestmult(struct clk_hw *hw, unsigned long rate, unsigned long *best_parent_rate, u8 width, unsigned long flags) { + struct clk_multiplier *mult = to_clk_multiplier(hw); unsigned long orig_parent_rate = *best_parent_rate; unsigned long parent_rate, current_rate, best_rate = ~0; unsigned int i, bestmult = 0; + unsigned int maxmult = (1 << width) - 1; + + if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) { + bestmult = rate / orig_parent_rate; + + /* Make sure we don't end up with a 0 multiplier */ + if ((bestmult == 0) && + !(mult->flags & CLK_MULTIPLIER_ZERO_BYPASS)) + bestmult = 1; - if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) - return rate / *best_parent_rate; + /* Make sure we don't overflow the multiplier */ + if (bestmult > maxmult) + bestmult = maxmult; + + return bestmult; + } - for (i = 1; i < ((1 << width) - 1); i++) { + for (i = 1; i < maxmult; i++) { if (rate == orig_parent_rate * i) { /* * This is the best case for us if we have a diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c index b134a8b15e2c..5fea58713293 100644 --- a/drivers/clk/clk-xgene.c +++ b/drivers/clk/clk-xgene.c @@ -218,22 +218,20 @@ static int xgene_clk_enable(struct clk_hw *hw) struct xgene_clk *pclk = to_xgene_clk(hw); unsigned long flags = 0; u32 data; - phys_addr_t reg; if (pclk->lock) spin_lock_irqsave(pclk->lock, flags); if (pclk->param.csr_reg != NULL) { pr_debug("%s clock enabled\n", clk_hw_get_name(hw)); - reg = __pa(pclk->param.csr_reg); /* First enable the clock */ data = xgene_clk_read(pclk->param.csr_reg + pclk->param.reg_clk_offset); data |= pclk->param.reg_clk_mask; xgene_clk_write(data, pclk->param.csr_reg + pclk->param.reg_clk_offset); - pr_debug("%s clock PADDR base %pa clk offset 0x%08X mask 0x%08X value 0x%08X\n", - clk_hw_get_name(hw), &reg, + pr_debug("%s clk offset 0x%08X mask 0x%08X value 0x%08X\n", + clk_hw_get_name(hw), pclk->param.reg_clk_offset, pclk->param.reg_clk_mask, data); @@ -243,8 +241,8 @@ static int xgene_clk_enable(struct clk_hw *hw) data &= ~pclk->param.reg_csr_mask; xgene_clk_write(data, pclk->param.csr_reg + pclk->param.reg_csr_offset); - pr_debug("%s CSR RESET PADDR base %pa csr offset 0x%08X mask 0x%08X value 0x%08X\n", - clk_hw_get_name(hw), &reg, + pr_debug("%s csr offset 0x%08X mask 0x%08X value 0x%08X\n", + clk_hw_get_name(hw), pclk->param.reg_csr_offset, pclk->param.reg_csr_mask, data); } diff --git a/drivers/clk/imx/clk-pllv3.c b/drivers/clk/imx/clk-pllv3.c index 6addf8f58b97..cbecbd584624 100644 --- a/drivers/clk/imx/clk-pllv3.c +++ b/drivers/clk/imx/clk-pllv3.c @@ -76,9 +76,9 @@ static int clk_pllv3_prepare(struct clk_hw *hw) val = readl_relaxed(pll->base); if (pll->powerup_set) - val |= BM_PLL_POWER; + val |= pll->powerdown; else - val &= ~BM_PLL_POWER; + val &= ~pll->powerdown; writel_relaxed(val, pll->base); return clk_pllv3_wait_lock(pll); @@ -91,9 +91,9 @@ static void clk_pllv3_unprepare(struct clk_hw *hw) val = readl_relaxed(pll->base); if (pll->powerup_set) - val &= ~BM_PLL_POWER; + val &=
~pll->powerdown; else - val |= BM_PLL_POWER; + val |= pll->powerdown; writel_relaxed(val, pll->base); } diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c index 2b289581d570..b513a2bbfcc5 100644 --- a/drivers/clk/rockchip/clk-mmc-phase.c +++ b/drivers/clk/rockchip/clk-mmc-phase.c @@ -41,8 +41,6 @@ static unsigned long rockchip_mmc_recalc(struct clk_hw *hw, #define ROCKCHIP_MMC_DEGREE_MASK 0x3 #define ROCKCHIP_MMC_DELAYNUM_OFFSET 2 #define ROCKCHIP_MMC_DELAYNUM_MASK (0xff << ROCKCHIP_MMC_DELAYNUM_OFFSET) -#define ROCKCHIP_MMC_INIT_STATE_RESET 0x1 -#define ROCKCHIP_MMC_INIT_STATE_SHIFT 1 #define PSECS_PER_SEC 1000000000000LL @@ -183,15 +181,6 @@ struct clk *rockchip_clk_register_mmc(const char *name, mmc_clock->reg = reg; mmc_clock->shift = shift; - /* - * Assert init_state to soft reset the CLKGEN - * for mmc tuning phase and degree - */ - if (mmc_clock->shift == ROCKCHIP_MMC_INIT_STATE_SHIFT) - writel(HIWORD_UPDATE(ROCKCHIP_MMC_INIT_STATE_RESET, - ROCKCHIP_MMC_INIT_STATE_RESET, - mmc_clock->shift), mmc_clock->reg); - clk = clk_register(NULL, &mmc_clock->hw); if (IS_ERR(clk)) goto err_free; diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c index 576cd0354d48..ccb324d97160 100644 --- a/drivers/clk/st/clkgen-fsyn.c +++ b/drivers/clk/st/clkgen-fsyn.c @@ -549,19 +549,20 @@ static int clk_fs660c32_vco_get_params(unsigned long input, return 0; } -static long quadfs_pll_fs660c32_round_rate(struct clk_hw *hw, unsigned long rate - , unsigned long *prate) +static long quadfs_pll_fs660c32_round_rate(struct clk_hw *hw, + unsigned long rate, + unsigned long *prate) { struct stm_fs params; - if (!clk_fs660c32_vco_get_params(*prate, rate, &params)) - clk_fs660c32_vco_get_rate(*prate, &params, &rate); + if (clk_fs660c32_vco_get_params(*prate, rate, &params)) + return rate; - pr_debug("%s: %s new rate %ld [sdiv=0x%x,md=0x%x,pe=0x%x,nsdiv3=%u]\n", + clk_fs660c32_vco_get_rate(*prate, &params, &rate); + + pr_debug("%s: %s new rate %ld [ndiv=%u]\n", __func__, clk_hw_get_name(hw), - rate, (unsigned int)params.sdiv, - (unsigned int)params.mdiv, - (unsigned int)params.pe, (unsigned int)params.nsdiv); + rate, (unsigned int)params.ndiv); return rate; } diff --git a/drivers/clk/ti/dpll3xxx.c b/drivers/clk/ti/dpll3xxx.c index 0e9119fae760..fa53bf6bd041 100644 --- a/drivers/clk/ti/dpll3xxx.c +++ b/drivers/clk/ti/dpll3xxx.c @@ -437,7 +437,8 @@ int omap3_noncore_dpll_enable(struct clk_hw *hw) parent = clk_hw_get_parent(hw); - if (clk_hw_get_rate(hw) == clk_get_rate(dd->clk_bypass)) { + if (clk_hw_get_rate(hw) == + clk_hw_get_rate(__clk_get_hw(dd->clk_bypass))) { WARN_ON(parent != __clk_get_hw(dd->clk_bypass)); r = _omap3_noncore_dpll_bypass(clk); } else { diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 496a73e5bc29..ce93f49e03a0 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -2285,10 +2285,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, return ret; } - up_write(&policy->rwsem); ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); - down_write(&policy->rwsem); - if (ret) { pr_err("%s: Failed to Exit Governor: %s (%d)\n", __func__, old_gov->name, ret); @@ -2304,9 +2301,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, if (!ret) goto out; - up_write(&policy->rwsem); __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); - down_write(&policy->rwsem); } /* new governor failed, so re-start old one */ diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 17521fcf226f..3cca3055ebd4 100644
--- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c @@ -2439,7 +2439,13 @@ static struct platform_driver edma_driver = { }, }; +static int edma_tptc_probe(struct platform_device *pdev) +{ + return 0; +} + static struct platform_driver edma_tptc_driver = { + .probe = edma_tptc_probe, .driver = { .name = "edma3-tptc", .of_match_table = edma_tptc_of_ids, diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index 3df0422607d5..ac9aede1bfbe 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c @@ -364,6 +364,8 @@ static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac) gen_pool_free(gpool, (unsigned long)tdmac->desc_arr, size); tdmac->desc_arr = NULL; + if (tdmac->status == DMA_ERROR) + tdmac->status = DMA_COMPLETE; return; } diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index 113605f6fe20..32517003e118 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c @@ -877,6 +877,7 @@ static int pch_dma_probe(struct pci_dev *pdev, } pci_set_master(pdev); + pd->dma.dev = &pdev->dev; err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd); if (err) { @@ -892,7 +893,6 @@ static int pch_dma_probe(struct pci_dev *pdev, goto err_free_irq; } - pd->dma.dev = &pdev->dev; INIT_LIST_HEAD(&pd->dma.channels); diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index e449f22c8f29..edec88b21ff1 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -1256,7 +1256,7 @@ retry: goto fail; plane = mode_set->crtc->primary; - plane_mask |= drm_plane_index(plane); + plane_mask |= (1 << drm_plane_index(plane)); plane->old_fb = plane->fb; } diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index ce5adef21a00..ec8fe4bebb3a 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -289,14 +289,15 @@ static void put_pages(struct drm_gem_object *obj) struct msm_gem_object *msm_obj = to_msm_bo(obj); if (msm_obj->pages) { - if (msm_obj->flags & MSM_BO_LOCKED) { - unprotect_pages(msm_obj); - msm_obj->flags &= ~MSM_BO_LOCKED; - } + if (msm_obj->sgt) { + if (msm_obj->flags & MSM_BO_LOCKED) { + unprotect_pages(msm_obj); + msm_obj->flags &= ~MSM_BO_LOCKED; + } - if (msm_obj->sgt) sg_free_table(msm_obj->sgt); - kfree(msm_obj->sgt); + kfree(msm_obj->sgt); + } if (use_pages(obj)) { if (msm_obj->flags & MSM_BO_SVM) { diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c index d8066ac1e764..ae09f004a33f 100644 --- a/drivers/gpu/drm/qxl/qxl_cmd.c +++ b/drivers/gpu/drm/qxl/qxl_cmd.c @@ -529,8 +529,8 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev, /* no need to add a release to the fence for this surface bo, since it is only released when we ask to destroy the surface and it would never signal otherwise */ - qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); qxl_release_fence_buffer_objects(release); + qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); surf->hw_surf_alloc = true; spin_lock(&qdev->surf_id_idr_lock); @@ -572,9 +572,8 @@ int qxl_hw_surface_dealloc(struct qxl_device *qdev, cmd->surface_id = id; qxl_release_unmap(qdev, release, &cmd->release_info); - qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); - qxl_release_fence_buffer_objects(release); + qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); return 0; } diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 5edebf495c07..0d6cc396cc16 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ 
b/drivers/gpu/drm/qxl/qxl_display.c @@ -292,8 +292,8 @@ qxl_hide_cursor(struct qxl_device *qdev) cmd->type = QXL_CURSOR_HIDE; qxl_release_unmap(qdev, release, &cmd->release_info); - qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); qxl_release_fence_buffer_objects(release); + qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); return 0; } @@ -390,8 +390,8 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc, cmd->u.set.visible = 1; qxl_release_unmap(qdev, release, &cmd->release_info); - qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); qxl_release_fence_buffer_objects(release); + qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); /* finish with the userspace bo */ ret = qxl_bo_reserve(user_bo, false); @@ -450,8 +450,8 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc, cmd->u.position.y = qcrtc->cur_y + qcrtc->hot_spot_y; qxl_release_unmap(qdev, release, &cmd->release_info); - qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); qxl_release_fence_buffer_objects(release); + qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); return 0; } diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c index 6e6c76080d6a..47da124b7ebf 100644 --- a/drivers/gpu/drm/qxl/qxl_draw.c +++ b/drivers/gpu/drm/qxl/qxl_draw.c @@ -245,8 +245,8 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image, qxl_bo_physical_address(qdev, dimage->bo, 0); qxl_release_unmap(qdev, release, &drawable->release_info); - qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); qxl_release_fence_buffer_objects(release); + qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); out_free_palette: if (palette_bo) @@ -352,9 +352,10 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev, goto out_release_backoff; rects = drawable_set_clipping(qdev, drawable, num_clips, clips_bo); - if (!rects) + if (!rects) { + ret = -EINVAL; goto out_release_backoff; - + } drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); drawable->clip.type = SPICE_CLIP_TYPE_RECTS; @@ -385,8 +386,8 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev, } qxl_bo_kunmap(clips_bo); - qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); qxl_release_fence_buffer_objects(release); + qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); out_release_backoff: if (ret) @@ -436,8 +437,8 @@ void qxl_draw_copyarea(struct qxl_device *qdev, drawable->u.copy_bits.src_pos.y = sy; qxl_release_unmap(qdev, release, &drawable->release_info); - qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); qxl_release_fence_buffer_objects(release); + qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); out_free_release: if (ret) @@ -480,8 +481,8 @@ void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec) qxl_release_unmap(qdev, release, &drawable->release_info); - qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); qxl_release_fence_buffer_objects(release); + qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); out_free_release: if (ret) diff --git a/drivers/gpu/drm/qxl/qxl_image.c b/drivers/gpu/drm/qxl/qxl_image.c index 7fbcc35e8ad3..c89c10055641 100644 --- a/drivers/gpu/drm/qxl/qxl_image.c +++ b/drivers/gpu/drm/qxl/qxl_image.c @@ -210,7 +210,8 @@ qxl_image_init_helper(struct qxl_device *qdev, break; default: DRM_ERROR("unsupported image bit depth\n"); - return -EINVAL; /* TODO: cleanup */ + qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr); + 
return -EINVAL; } image->u.bitmap.flags = QXL_BITMAP_TOP_DOWN; image->u.bitmap.x = width; diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c index 7c2e78201ead..4d852449a7d1 100644 --- a/drivers/gpu/drm/qxl/qxl_ioctl.c +++ b/drivers/gpu/drm/qxl/qxl_ioctl.c @@ -257,11 +257,8 @@ static int qxl_process_single_command(struct qxl_device *qdev, apply_surf_reloc(qdev, &reloc_info[i]); } + qxl_release_fence_buffer_objects(release); ret = qxl_push_command_ring_release(qdev, release, cmd->type, true); - if (ret) - qxl_release_backoff_reserve_list(release); - else - qxl_release_fence_buffer_objects(release); out_free_bos: out_free_release: diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index ef4120f4b173..e4ac19eeeae0 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -2119,7 +2119,6 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60) }, { HID_USB_DEVICE(USB_VENDOR_ID_VALVE, USB_DEVICE_ID_STEAM_CONTROLLER) }, { HID_USB_DEVICE(USB_VENDOR_ID_VALVE, USB_DEVICE_ID_STEAM_CONTROLLER_WIRELESS) }, - { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_VALVE, USB_DEVICE_ID_STEAM_CONTROLLER_BT) }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) }, diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 020a4bfa2629..1721449541e4 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -319,6 +319,7 @@ #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7349 0x7349 #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_73F7 0x73f7 #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001 0xa001 +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_C002 0xc002 #define USB_VENDOR_ID_ELAN 0x04f3 @@ -906,7 +907,6 @@ #define USB_VENDOR_ID_VALVE 0x28de #define USB_DEVICE_ID_STEAM_CONTROLLER 0x1102 #define USB_DEVICE_ID_STEAM_CONTROLLER_WIRELESS 0x1142 -#define USB_DEVICE_ID_STEAM_CONTROLLER_BT 0x1106 #define USB_VENDOR_ID_STEELSERIES 0x1038 #define USB_DEVICE_ID_STEELSERIES_SRWS1 0x1410 diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 9de379c1b3fd..56c4a81d3ea2 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -1300,6 +1300,9 @@ static const struct hid_device_id mt_devices[] = { { .driver_data = MT_CLS_EGALAX_SERIAL, MT_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) }, + { .driver_data = MT_CLS_EGALAX, + MT_USB_DEVICE(USB_VENDOR_ID_DWAV, + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_C002) }, /* Elitegroup panel */ { .driver_data = MT_CLS_SERIAL, diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c index 44e1eefc5b24..a4a6c90c8134 100644 --- a/drivers/hid/hid-steam.c +++ b/drivers/hid/hid-steam.c @@ -768,8 +768,12 @@ static int steam_probe(struct hid_device *hdev, if (steam->quirks & STEAM_QUIRK_WIRELESS) { hid_info(hdev, "Steam wireless receiver connected"); + /* If using a wireless adaptor ask for connection status */ + steam->connected = false; steam_request_conn_status(steam); } else { + /* A wired connection is always present */ + steam->connected = true; ret = steam_register(steam); if (ret) { hid_err(hdev, diff --git a/drivers/hv/hv_utils_transport.c b/drivers/hv/hv_utils_transport.c index 1505ee6e6605..24b2766a6d34 100644 --- a/drivers/hv/hv_utils_transport.c +++ b/drivers/hv/hv_utils_transport.c @@ -80,11 +80,10 @@ static ssize_t 
hvt_op_write(struct file *file, const char __user *buf, hvt = container_of(file->f_op, struct hvutil_transport, fops); - inmsg = kzalloc(count, GFP_KERNEL); - if (copy_from_user(inmsg, buf, count)) { - kfree(inmsg); - return -EFAULT; - } + inmsg = memdup_user(buf, count); + if (IS_ERR(inmsg)) + return PTR_ERR(inmsg); + if (hvt->on_msg(inmsg, count)) return -EFAULT; kfree(inmsg); diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c index e56b774e7cf9..7584f292e2fd 100644 --- a/drivers/i2c/i2c-dev.c +++ b/drivers/i2c/i2c-dev.c @@ -22,6 +22,7 @@ /* The I2C_RDWR ioctl code is written by Kolja Waschk <waschk@telos.de> */ +#include <linux/cdev.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/device.h> @@ -46,10 +47,11 @@ struct i2c_dev { struct list_head list; struct i2c_adapter *adap; - struct device *dev; + struct device dev; + struct cdev cdev; }; -#define I2C_MINORS 256 +#define I2C_MINORS MINORMASK static LIST_HEAD(i2c_dev_list); static DEFINE_SPINLOCK(i2c_dev_list_lock); @@ -89,12 +91,14 @@ static struct i2c_dev *get_free_i2c_dev(struct i2c_adapter *adap) return i2c_dev; } -static void return_i2c_dev(struct i2c_dev *i2c_dev) +static void put_i2c_dev(struct i2c_dev *i2c_dev, bool del_cdev) { spin_lock(&i2c_dev_list_lock); list_del(&i2c_dev->list); spin_unlock(&i2c_dev_list_lock); - kfree(i2c_dev); + if (del_cdev) + cdev_device_del(&i2c_dev->cdev, &i2c_dev->dev); + put_device(&i2c_dev->dev); } static ssize_t name_show(struct device *dev, @@ -490,13 +494,8 @@ static int i2cdev_open(struct inode *inode, struct file *file) unsigned int minor = iminor(inode); struct i2c_client *client; struct i2c_adapter *adap; - struct i2c_dev *i2c_dev; - - i2c_dev = i2c_dev_get_by_minor(minor); - if (!i2c_dev) - return -ENODEV; - adap = i2c_get_adapter(i2c_dev->adap->nr); + adap = i2c_get_adapter(minor); if (!adap) return -ENODEV; @@ -545,6 +544,14 @@ static const struct file_operations i2cdev_fops = { static struct class *i2c_dev_class; +static void i2cdev_dev_release(struct device *dev) +{ + struct i2c_dev *i2c_dev; + + i2c_dev = container_of(dev, struct i2c_dev, dev); + kfree(i2c_dev); +} + static int i2cdev_attach_adapter(struct device *dev, void *dummy) { struct i2c_adapter *adap; @@ -559,21 +566,25 @@ static int i2cdev_attach_adapter(struct device *dev, void *dummy) if (IS_ERR(i2c_dev)) return PTR_ERR(i2c_dev); - /* register this i2c device with the driver core */ - i2c_dev->dev = device_create(i2c_dev_class, &adap->dev, - MKDEV(I2C_MAJOR, adap->nr), NULL, - "i2c-%d", adap->nr); - if (IS_ERR(i2c_dev->dev)) { - res = PTR_ERR(i2c_dev->dev); - goto error; + cdev_init(&i2c_dev->cdev, &i2cdev_fops); + i2c_dev->cdev.owner = THIS_MODULE; + + device_initialize(&i2c_dev->dev); + i2c_dev->dev.devt = MKDEV(I2C_MAJOR, adap->nr); + i2c_dev->dev.class = i2c_dev_class; + i2c_dev->dev.parent = &adap->dev; + i2c_dev->dev.release = i2cdev_dev_release; + dev_set_name(&i2c_dev->dev, "i2c-%d", adap->nr); + + res = cdev_device_add(&i2c_dev->cdev, &i2c_dev->dev); + if (res) { + put_i2c_dev(i2c_dev, false); + return res; } pr_debug("i2c-dev: adapter [%s] registered as minor %d\n", adap->name, adap->nr); return 0; -error: - return_i2c_dev(i2c_dev); - return res; } static int i2cdev_detach_adapter(struct device *dev, void *dummy) @@ -589,8 +600,7 @@ static int i2cdev_detach_adapter(struct device *dev, void *dummy) if (!i2c_dev) /* attach_adapter must have failed */ return 0; - return_i2c_dev(i2c_dev); - device_destroy(i2c_dev_class, MKDEV(I2C_MAJOR, adap->nr)); + put_i2c_dev(i2c_dev, true); 
pr_debug("i2c-dev: adapter [%s] unregistered\n", adap->name); return 0; @@ -627,7 +637,7 @@ static int __init i2c_dev_init(void) printk(KERN_INFO "i2c /dev entries driver\n"); - res = register_chrdev(I2C_MAJOR, "i2c", &i2cdev_fops); + res = register_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS, "i2c"); if (res) goto out; @@ -651,7 +661,7 @@ static int __init i2c_dev_init(void) out_unreg_class: class_destroy(i2c_dev_class); out_unreg_chrdev: - unregister_chrdev(I2C_MAJOR, "i2c"); + unregister_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS); out: printk(KERN_ERR "%s: Driver Initialisation failed\n", __FILE__); return res; @@ -662,7 +672,7 @@ static void __exit i2c_dev_exit(void) bus_unregister_notifier(&i2c_bus_type, &i2cdev_notifier); i2c_for_each_dev(NULL, i2cdev_detach_adapter); class_destroy(i2c_dev_class); - unregister_chrdev(I2C_MAJOR, "i2c"); + unregister_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS); } MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl> and " diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c index 91d34ed756ea..fe0c5a155e21 100644 --- a/drivers/iio/adc/ad7793.c +++ b/drivers/iio/adc/ad7793.c @@ -579,7 +579,7 @@ static const struct iio_info ad7797_info = { .read_raw = &ad7793_read_raw, .write_raw = &ad7793_write_raw, .write_raw_get_fmt = &ad7793_write_raw_get_fmt, - .attrs = &ad7793_attribute_group, + .attrs = &ad7797_attribute_group, .validate_trigger = ad_sd_validate_trigger, .driver_module = THIS_MODULE, }; diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index 68835de07e07..a8349100854e 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -293,9 +293,9 @@ static int addr6_resolve(struct sockaddr_in6 *src_in, fl6.saddr = src_in->sin6_addr; fl6.flowi6_oif = addr->bound_dev_if; - ret = ipv6_stub->ipv6_dst_lookup(addr->net, NULL, &dst, &fl6); - if (ret < 0) - goto put; + dst = ipv6_stub->ipv6_dst_lookup_flow(addr->net, NULL, &fl6, NULL); + if (IS_ERR(dst)) + return PTR_ERR(dst); if (ipv6_addr_any(&fl6.saddr)) { ret = ipv6_dev_get_saddr(addr->net, ip6_dst_idev(dst)->dev, diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c index de1c61b417d6..ada2e5009c86 100644 --- a/drivers/infiniband/hw/cxgb3/cxio_hal.c +++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c @@ -327,7 +327,7 @@ int cxio_destroy_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq) kfree(cq->sw_queue); dma_free_coherent(&(rdev_p->rnic_info.pdev->dev), (1UL << (cq->size_log2)) - * sizeof(struct t3_cqe), cq->queue, + * sizeof(struct t3_cqe) + 1, cq->queue, dma_unmap_addr(cq, mapping)); cxio_hal_put_cqid(rdev_p->rscp, cq->cqid); return err; diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c index fc21bdbb8b32..005ea5524e09 100644 --- a/drivers/infiniband/hw/mlx4/ah.c +++ b/drivers/infiniband/hw/mlx4/ah.c @@ -107,6 +107,7 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr return ERR_PTR(ret); ah->av.eth.gid_index = ret; ah->av.eth.vlan = cpu_to_be16(vlan_tag); + ah->av.eth.hop_limit = ah_attr->grh.hop_limit; if (ah_attr->static_rate) { ah->av.eth.stat_rate = ah_attr->static_rate + MLX4_STAT_RATE_OFFSET; while (ah->av.eth.stat_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET && diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 348828271cb0..ecd461ee6dbe 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -2156,6 +2156,7 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp, int 
send_size; int header_size; int spc; + int err; int i; if (wr->wr.opcode != IB_WR_SEND) @@ -2190,7 +2191,9 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp, sqp->ud_header.lrh.virtual_lane = 0; sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED); - ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey); + err = ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey); + if (err) + return err; sqp->ud_header.bth.pkey = cpu_to_be16(pkey); if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER) sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn); @@ -2423,9 +2426,14 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr, } sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED); if (!sqp->qp.ibqp.qp_num) - ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey); + err = ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, + &pkey); else - ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index, &pkey); + err = ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index, + &pkey); + if (err) + return err; + sqp->ud_header.bth.pkey = cpu_to_be16(pkey); sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn); sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index dbd5adc62c3f..1b731ab63ede 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -905,7 +905,7 @@ static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr, { struct mlx5_ib_dev *dev = container_of(device, struct mlx5_ib_dev, ib_dev.dev); - return sprintf(buf, "%d.%d.%d\n", fw_rev_maj(dev->mdev), + return sprintf(buf, "%d.%d.%04d\n", fw_rev_maj(dev->mdev), fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev)); } diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index eac5f5eff8d2..b8e71187c700 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -271,8 +271,10 @@ static int sq_overhead(enum ib_qp_type qp_type) /* fall through */ case IB_QPT_RC: size += sizeof(struct mlx5_wqe_ctrl_seg) + - sizeof(struct mlx5_wqe_atomic_seg) + - sizeof(struct mlx5_wqe_raddr_seg); + max(sizeof(struct mlx5_wqe_atomic_seg) + + sizeof(struct mlx5_wqe_raddr_seg), + sizeof(struct mlx5_wqe_umr_ctrl_seg) + + sizeof(struct mlx5_mkey_seg)); break; case IB_QPT_XRC_TGT: @@ -280,9 +282,9 @@ static int sq_overhead(enum ib_qp_type qp_type) case IB_QPT_UC: size += sizeof(struct mlx5_wqe_ctrl_seg) + - sizeof(struct mlx5_wqe_raddr_seg) + - sizeof(struct mlx5_wqe_umr_ctrl_seg) + - sizeof(struct mlx5_mkey_seg); + max(sizeof(struct mlx5_wqe_raddr_seg), + sizeof(struct mlx5_wqe_umr_ctrl_seg) + + sizeof(struct mlx5_mkey_seg)); break; case IB_QPT_UD: diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c index 3ae82202cdb5..b33565f4409f 100644 --- a/drivers/infiniband/hw/qib/qib_sysfs.c +++ b/drivers/infiniband/hw/qib/qib_sysfs.c @@ -703,7 +703,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num, qib_dev_err(dd, "Skipping linkcontrol sysfs info, (err %d) port %u\n", ret, port_num); - goto bail; + goto bail_link; } kobject_uevent(&ppd->pport_kobj, KOBJ_ADD); @@ -713,7 +713,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num, qib_dev_err(dd, "Skipping sl2vl sysfs info, (err %d) port %u\n", ret, port_num); - goto bail_link; + goto bail_sl; } kobject_uevent(&ppd->sl2vl_kobj, KOBJ_ADD); @@ 
-723,7 +723,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num, qib_dev_err(dd, "Skipping diag_counters sysfs info, (err %d) port %u\n", ret, port_num); - goto bail_sl; + goto bail_diagc; } kobject_uevent(&ppd->diagc_kobj, KOBJ_ADD); @@ -736,7 +736,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num, qib_dev_err(dd, "Skipping Congestion Control sysfs info, (err %d) port %u\n", ret, port_num); - goto bail_diagc; + goto bail_cc; } kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD); @@ -818,6 +818,7 @@ void qib_verbs_unregister_sysfs(struct qib_devdata *dd) &cc_table_bin_attr); kobject_put(&ppd->pport_cc_kobj); } + kobject_put(&ppd->diagc_kobj); kobject_put(&ppd->sl2vl_kobj); kobject_put(&ppd->pport_kobj); } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index ffd88af80de3..bf39ce88360a 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -945,19 +945,6 @@ static inline int update_parent_pkey(struct ipoib_dev_priv *priv) */ priv->dev->broadcast[8] = priv->pkey >> 8; priv->dev->broadcast[9] = priv->pkey & 0xff; - - /* - * Update the broadcast address in the priv->broadcast object, - * in case it already exists, otherwise no one will do that. - */ - if (priv->broadcast) { - spin_lock_irq(&priv->lock); - memcpy(priv->broadcast->mcmember.mgid.raw, - priv->dev->broadcast + 4, - sizeof(union ib_gid)); - spin_unlock_irq(&priv->lock); - } - return 0; } diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c index e9ae3d500a55..700f018df668 100644 --- a/drivers/input/evdev.c +++ b/drivers/input/evdev.c @@ -342,20 +342,6 @@ static int evdev_fasync(int fd, struct file *file, int on) return fasync_helper(fd, file, on, &client->fasync); } -static int evdev_flush(struct file *file, fl_owner_t id) -{ - struct evdev_client *client = file->private_data; - struct evdev *evdev = client->evdev; - - mutex_lock(&evdev->mutex); - - if (evdev->exist && !client->revoked) - input_flush_device(&evdev->handle, file); - - mutex_unlock(&evdev->mutex); - return 0; -} - static void evdev_free(struct device *dev) { struct evdev *evdev = container_of(dev, struct evdev, dev); @@ -469,6 +455,10 @@ static int evdev_release(struct inode *inode, struct file *file) unsigned int i; mutex_lock(&evdev->mutex); + + if (evdev->exist && !client->revoked) + input_flush_device(&evdev->handle, file); + evdev_ungrab(evdev, client); mutex_unlock(&evdev->mutex); @@ -1331,7 +1321,6 @@ static const struct file_operations evdev_fops = { .compat_ioctl = evdev_ioctl_compat, #endif .fasync = evdev_fasync, - .flush = evdev_flush, .llseek = no_llseek, }; diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index 26476a64e663..54a6691d7d87 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -476,6 +476,16 @@ static const u8 xboxone_fw2015_init[] = { }; /* + * This packet is required for Xbox One S (0x045e:0x02ea) + * and Xbox One Elite Series 2 (0x045e:0x0b00) pads to + * initialize the controller that was previously used in + * Bluetooth mode. + */ +static const u8 xboxone_s_init[] = { + 0x05, 0x20, 0x00, 0x0f, 0x06 +}; + +/* * This packet is required for the Titanfall 2 Xbox One pads * (0x0e6f:0x0165) to finish initialization and for Hori pads * (0x0f0d:0x0067) to make the analog sticks work. 
@@ -533,6 +543,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = { XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init), XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init), XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init), + XBOXONE_INIT_PKT(0x045e, 0x02ea, xboxone_s_init), + XBOXONE_INIT_PKT(0x045e, 0x0b00, xboxone_s_init), XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init1), XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init2), XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init), diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c index c93dd193a496..6ca05326754f 100644 --- a/drivers/input/keyboard/gpio_keys.c +++ b/drivers/input/keyboard/gpio_keys.c @@ -105,7 +105,7 @@ static void gpio_keys_syscore_resume(void); * Return value of this function can be used to allocate bitmap * large enough to hold all bits for given type. */ -static inline int get_n_events_by_type(int type) +static int get_n_events_by_type(int type) { BUG_ON(type != EV_SW && type != EV_KEY); @@ -113,6 +113,22 @@ static inline int get_n_events_by_type(int type) } /** + * get_bm_events_by_type() - returns bitmap of supported events per @type + * @input: input device from which bitmap is retrieved + * @type: type of button (%EV_KEY, %EV_SW) + * + * Return value of this function can be used to allocate bitmap + * large enough to hold all bits for given type. + */ +static const unsigned long *get_bm_events_by_type(struct input_dev *dev, + int type) +{ + BUG_ON(type != EV_SW && type != EV_KEY); + + return (type == EV_KEY) ? dev->keybit : dev->swbit; +} + +/** * gpio_keys_disable_button() - disables given GPIO button * @bdata: button data for button to be disabled * @@ -222,6 +238,7 @@ static ssize_t gpio_keys_attr_store_helper(struct gpio_keys_drvdata *ddata, const char *buf, unsigned int type) { int n_events = get_n_events_by_type(type); + const unsigned long *bitmap = get_bm_events_by_type(ddata->input, type); unsigned long *bits; ssize_t error; int i; @@ -235,6 +252,11 @@ static ssize_t gpio_keys_attr_store_helper(struct gpio_keys_drvdata *ddata, goto out; /* First validate */ + if (!bitmap_subset(bits, bitmap, n_events)) { + error = -EINVAL; + goto out; + } + for (i = 0; i < ddata->pdata->nbuttons; i++) { struct gpio_button_data *bdata = &ddata->data[i]; @@ -248,11 +270,6 @@ static ssize_t gpio_keys_attr_store_helper(struct gpio_keys_drvdata *ddata, } } - if (i == ddata->pdata->nbuttons) { - error = -EINVAL; - goto out; - } - mutex_lock(&ddata->disable_lock); for (i = 0; i < ddata->pdata->nbuttons; i++) { diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index a4e76084a2af..fd1e79013cf8 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -738,6 +738,13 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"), }, }, + { + /* Lenovo ThinkPad Twist S230u */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "33474HU"), + }, + }, { } }; diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c index a9d97d577a7e..5e535d0bbe16 100644 --- a/drivers/input/touchscreen/edt-ft5x06.c +++ b/drivers/input/touchscreen/edt-ft5x06.c @@ -822,16 +822,22 @@ static void edt_ft5x06_ts_get_defaults(struct device *dev, int error; error = device_property_read_u32(dev, "threshold", &val); - if (!error) - reg_addr->reg_threshold = val; + if (!error) { + 
edt_ft5x06_register_write(tsdata, reg_addr->reg_threshold, val); + tsdata->threshold = val; + } error = device_property_read_u32(dev, "gain", &val); - if (!error) - reg_addr->reg_gain = val; + if (!error) { + edt_ft5x06_register_write(tsdata, reg_addr->reg_gain, val); + tsdata->gain = val; + } error = device_property_read_u32(dev, "offset", &val); - if (!error) - reg_addr->reg_offset = val; + if (!error) { + edt_ft5x06_register_write(tsdata, reg_addr->reg_offset, val); + tsdata->offset = val; + } } static void diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c index 2c41107240de..499402a975b3 100644 --- a/drivers/input/touchscreen/usbtouchscreen.c +++ b/drivers/input/touchscreen/usbtouchscreen.c @@ -197,6 +197,7 @@ static const struct usb_device_id usbtouch_devices[] = { #endif #ifdef CONFIG_TOUCHSCREEN_USB_IRTOUCH + {USB_DEVICE(0x255e, 0x0001), .driver_info = DEVTYPE_IRTOUCH}, {USB_DEVICE(0x595a, 0x0001), .driver_info = DEVTYPE_IRTOUCH}, {USB_DEVICE(0x6615, 0x0001), .driver_info = DEVTYPE_IRTOUCH}, {USB_DEVICE(0x6615, 0x0012), .driver_info = DEVTYPE_IRTOUCH_HIRES}, diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 041c42fb511f..899cee22f535 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -152,12 +152,15 @@ int dma_direction_to_prot(enum dma_data_direction dir, bool coherent) } } -static struct iova *__alloc_iova(struct iova_domain *iovad, size_t size, +static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size, dma_addr_t dma_limit) { + struct iova_domain *iovad = domain->iova_cookie; unsigned long shift = iova_shift(iovad); unsigned long length = iova_align(iovad, size) >> shift; + if (domain->geometry.force_aperture) + dma_limit = min(dma_limit, domain->geometry.aperture_end); /* * Enforce size-alignment to be safe - there could perhaps be an * attribute to control this per-device, or at least per-domain... 
@@ -297,7 +300,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, if (!pages) return NULL; - iova = __alloc_iova(iovad, size, dev->coherent_dma_mask); + iova = __alloc_iova(domain, size, dev->coherent_dma_mask); if (!iova) goto out_free_pages; @@ -369,7 +372,7 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, phys_addr_t phys = page_to_phys(page) + offset; size_t iova_off = iova_offset(iovad, phys); size_t len = iova_align(iovad, size + iova_off); - struct iova *iova = __alloc_iova(iovad, len, dma_get_mask(dev)); + struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev)); if (!iova) return DMA_ERROR_CODE; @@ -483,7 +486,7 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, prev = s; } - iova = __alloc_iova(iovad, iova_len, dma_get_mask(dev)); + iova = __alloc_iova(domain, iova_len, dma_get_mask(dev)); if (!iova) goto out_restore_sg; diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 449877d148da..413c038b13ba 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -209,7 +209,7 @@ again: mutex_lock(&iommu_group_mutex); ida_remove(&iommu_group_ida, group->id); mutex_unlock(&iommu_group_mutex); - kfree(group); + kobject_put(&group->kobj); return ERR_PTR(ret); } diff --git a/drivers/md/dm.c b/drivers/md/dm.c index ce007d22cf35..13945aee9e01 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -2202,7 +2202,7 @@ static void dm_request_fn(struct request_queue *q) goto out; delay_and_out: - blk_delay_queue(q, HZ / 100); + blk_delay_queue(q, 10); out: dm_put_live_table(md, srcu_idx); } diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c index 7b39440192d6..0ca9506f4654 100644 --- a/drivers/media/media-device.c +++ b/drivers/media/media-device.c @@ -24,6 +24,7 @@ #include <linux/export.h> #include <linux/ioctl.h> #include <linux/media.h> +#include <linux/slab.h> #include <linux/types.h> #include <media/media-device.h> @@ -234,7 +235,7 @@ static long media_device_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct media_devnode *devnode = media_devnode_data(filp); - struct media_device *dev = to_media_device(devnode); + struct media_device *dev = devnode->media_dev; long ret; switch (cmd) { @@ -303,7 +304,7 @@ static long media_device_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct media_devnode *devnode = media_devnode_data(filp); - struct media_device *dev = to_media_device(devnode); + struct media_device *dev = devnode->media_dev; long ret; switch (cmd) { @@ -344,7 +345,8 @@ static const struct media_file_operations media_device_fops = { static ssize_t show_model(struct device *cd, struct device_attribute *attr, char *buf) { - struct media_device *mdev = to_media_device(to_media_devnode(cd)); + struct media_devnode *devnode = to_media_devnode(cd); + struct media_device *mdev = devnode->media_dev; return sprintf(buf, "%.*s\n", (int)sizeof(mdev->model), mdev->model); } @@ -372,6 +374,7 @@ static void media_device_release(struct media_devnode *mdev) int __must_check __media_device_register(struct media_device *mdev, struct module *owner) { + struct media_devnode *devnode; int ret; if (WARN_ON(mdev->dev == NULL || mdev->model[0] == 0)) @@ -382,17 +385,28 @@ int __must_check __media_device_register(struct media_device *mdev, spin_lock_init(&mdev->lock); mutex_init(&mdev->graph_mutex); + devnode = kzalloc(sizeof(*devnode), GFP_KERNEL); + if (!devnode) + return -ENOMEM; + /* Register the device node. 
*/ - mdev->devnode.fops = &media_device_fops; - mdev->devnode.parent = mdev->dev; - mdev->devnode.release = media_device_release; - ret = media_devnode_register(&mdev->devnode, owner); - if (ret < 0) + mdev->devnode = devnode; + devnode->fops = &media_device_fops; + devnode->parent = mdev->dev; + devnode->release = media_device_release; + ret = media_devnode_register(mdev, devnode, owner); + if (ret < 0) { + /* devnode free is handled in media_devnode_*() */ + mdev->devnode = NULL; return ret; + } - ret = device_create_file(&mdev->devnode.dev, &dev_attr_model); + ret = device_create_file(&devnode->dev, &dev_attr_model); if (ret < 0) { - media_devnode_unregister(&mdev->devnode); + /* devnode free is handled in media_devnode_*() */ + mdev->devnode = NULL; + media_devnode_unregister_prepare(devnode); + media_devnode_unregister(devnode); return ret; } @@ -410,11 +424,16 @@ void media_device_unregister(struct media_device *mdev) struct media_entity *entity; struct media_entity *next; + /* Clear the devnode register bit to avoid races with media dev open */ + media_devnode_unregister_prepare(mdev->devnode); + list_for_each_entry_safe(entity, next, &mdev->entities, list) media_device_unregister_entity(entity); - device_remove_file(&mdev->devnode.dev, &dev_attr_model); - media_devnode_unregister(&mdev->devnode); + device_remove_file(&mdev->devnode->dev, &dev_attr_model); + media_devnode_unregister(mdev->devnode); + /* devnode free is handled in media_devnode_*() */ + mdev->devnode = NULL; } EXPORT_SYMBOL_GPL(media_device_unregister); diff --git a/drivers/media/media-devnode.c b/drivers/media/media-devnode.c index ebf9626e5ae5..e887120d19aa 100644 --- a/drivers/media/media-devnode.c +++ b/drivers/media/media-devnode.c @@ -44,6 +44,7 @@ #include <linux/uaccess.h> #include <media/media-devnode.h> +#include <media/media-device.h> #define MEDIA_NUM_DEVICES 256 #define MEDIA_NAME "media" @@ -59,21 +60,19 @@ static DECLARE_BITMAP(media_devnode_nums, MEDIA_NUM_DEVICES); /* Called when the last user of the media device exits. */ static void media_devnode_release(struct device *cd) { - struct media_devnode *mdev = to_media_devnode(cd); + struct media_devnode *devnode = to_media_devnode(cd); mutex_lock(&media_devnode_lock); - - /* Delete the cdev on this minor as well */ - cdev_del(&mdev->cdev); - /* Mark device node number as free */ - clear_bit(mdev->minor, media_devnode_nums); - + clear_bit(devnode->minor, media_devnode_nums); mutex_unlock(&media_devnode_lock); /* Release media_devnode and perform other cleanups as needed. 
*/ - if (mdev->release) - mdev->release(mdev); + if (devnode->release) + devnode->release(devnode); + + kfree(devnode); + pr_debug("%s: Media Devnode Deallocated\n", __func__); } static struct bus_type media_bus_type = { @@ -83,37 +82,37 @@ static struct bus_type media_bus_type = { static ssize_t media_read(struct file *filp, char __user *buf, size_t sz, loff_t *off) { - struct media_devnode *mdev = media_devnode_data(filp); + struct media_devnode *devnode = media_devnode_data(filp); - if (!mdev->fops->read) + if (!devnode->fops->read) return -EINVAL; - if (!media_devnode_is_registered(mdev)) + if (!media_devnode_is_registered(devnode)) return -EIO; - return mdev->fops->read(filp, buf, sz, off); + return devnode->fops->read(filp, buf, sz, off); } static ssize_t media_write(struct file *filp, const char __user *buf, size_t sz, loff_t *off) { - struct media_devnode *mdev = media_devnode_data(filp); + struct media_devnode *devnode = media_devnode_data(filp); - if (!mdev->fops->write) + if (!devnode->fops->write) return -EINVAL; - if (!media_devnode_is_registered(mdev)) + if (!media_devnode_is_registered(devnode)) return -EIO; - return mdev->fops->write(filp, buf, sz, off); + return devnode->fops->write(filp, buf, sz, off); } static unsigned int media_poll(struct file *filp, struct poll_table_struct *poll) { - struct media_devnode *mdev = media_devnode_data(filp); + struct media_devnode *devnode = media_devnode_data(filp); - if (!media_devnode_is_registered(mdev)) + if (!media_devnode_is_registered(devnode)) return POLLERR | POLLHUP; - if (!mdev->fops->poll) + if (!devnode->fops->poll) return DEFAULT_POLLMASK; - return mdev->fops->poll(filp, poll); + return devnode->fops->poll(filp, poll); } static long @@ -121,12 +120,12 @@ __media_ioctl(struct file *filp, unsigned int cmd, unsigned long arg, long (*ioctl_func)(struct file *filp, unsigned int cmd, unsigned long arg)) { - struct media_devnode *mdev = media_devnode_data(filp); + struct media_devnode *devnode = media_devnode_data(filp); if (!ioctl_func) return -ENOTTY; - if (!media_devnode_is_registered(mdev)) + if (!media_devnode_is_registered(devnode)) return -EIO; return ioctl_func(filp, cmd, arg); @@ -134,9 +133,9 @@ __media_ioctl(struct file *filp, unsigned int cmd, unsigned long arg, static long media_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { - struct media_devnode *mdev = media_devnode_data(filp); + struct media_devnode *devnode = media_devnode_data(filp); - return __media_ioctl(filp, cmd, arg, mdev->fops->ioctl); + return __media_ioctl(filp, cmd, arg, devnode->fops->ioctl); } #ifdef CONFIG_COMPAT @@ -144,9 +143,9 @@ static long media_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) static long media_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { - struct media_devnode *mdev = media_devnode_data(filp); + struct media_devnode *devnode = media_devnode_data(filp); - return __media_ioctl(filp, cmd, arg, mdev->fops->compat_ioctl); + return __media_ioctl(filp, cmd, arg, devnode->fops->compat_ioctl); } #endif /* CONFIG_COMPAT */ @@ -154,7 +153,7 @@ static long media_compat_ioctl(struct file *filp, unsigned int cmd, /* Override for the open function */ static int media_open(struct inode *inode, struct file *filp) { - struct media_devnode *mdev; + struct media_devnode *devnode; int ret; /* Check if the media device is available. This needs to be done with @@ -164,23 +163,24 @@ static int media_open(struct inode *inode, struct file *filp) * a crash. 
*/ mutex_lock(&media_devnode_lock); - mdev = container_of(inode->i_cdev, struct media_devnode, cdev); + devnode = container_of(inode->i_cdev, struct media_devnode, cdev); /* return ENXIO if the media device has been removed already or if it is not registered anymore. */ - if (!media_devnode_is_registered(mdev)) { + if (!media_devnode_is_registered(devnode)) { mutex_unlock(&media_devnode_lock); return -ENXIO; } /* and increase the device refcount */ - get_device(&mdev->dev); + get_device(&devnode->dev); mutex_unlock(&media_devnode_lock); - filp->private_data = mdev; + filp->private_data = devnode; - if (mdev->fops->open) { - ret = mdev->fops->open(filp); + if (devnode->fops->open) { + ret = devnode->fops->open(filp); if (ret) { - put_device(&mdev->dev); + put_device(&devnode->dev); + filp->private_data = NULL; return ret; } } @@ -191,15 +191,18 @@ static int media_open(struct inode *inode, struct file *filp) /* Override for the release function */ static int media_release(struct inode *inode, struct file *filp) { - struct media_devnode *mdev = media_devnode_data(filp); + struct media_devnode *devnode = media_devnode_data(filp); + + if (devnode->fops->release) + devnode->fops->release(filp); - if (mdev->fops->release) - mdev->fops->release(filp); + filp->private_data = NULL; /* decrease the refcount unconditionally since the release() return value is ignored. */ - put_device(&mdev->dev); - filp->private_data = NULL; + put_device(&devnode->dev); + + pr_debug("%s: Media Release\n", __func__); return 0; } @@ -219,7 +222,8 @@ static const struct file_operations media_devnode_fops = { /** * media_devnode_register - register a media device node - * @mdev: media device node structure we want to register + * @media_dev: struct media_device we want to register a device node + * @devnode: media device node structure we want to register * * The registration code assigns minor numbers and registers the new device node * with the kernel. An error is returned if no free minor number can be found, @@ -231,7 +235,8 @@ static const struct file_operations media_devnode_fops = { * the media_devnode structure is *not* called, so the caller is responsible for * freeing any data. 
*/ -int __must_check media_devnode_register(struct media_devnode *mdev, +int __must_check media_devnode_register(struct media_device *mdev, + struct media_devnode *devnode, struct module *owner) { int minor; @@ -243,68 +248,89 @@ int __must_check media_devnode_register(struct media_devnode *mdev, if (minor == MEDIA_NUM_DEVICES) { mutex_unlock(&media_devnode_lock); pr_err("could not get a free minor\n"); + kfree(devnode); return -ENFILE; } set_bit(minor, media_devnode_nums); mutex_unlock(&media_devnode_lock); - mdev->minor = minor; + devnode->minor = minor; + devnode->media_dev = mdev; + + /* Part 1: Initialize dev now to use dev.kobj for cdev.kobj.parent */ + devnode->dev.bus = &media_bus_type; + devnode->dev.devt = MKDEV(MAJOR(media_dev_t), devnode->minor); + devnode->dev.release = media_devnode_release; + if (devnode->parent) + devnode->dev.parent = devnode->parent; + dev_set_name(&devnode->dev, "media%d", devnode->minor); + device_initialize(&devnode->dev); /* Part 2: Initialize and register the character device */ - cdev_init(&mdev->cdev, &media_devnode_fops); - mdev->cdev.owner = owner; + cdev_init(&devnode->cdev, &media_devnode_fops); + devnode->cdev.owner = owner; + devnode->cdev.kobj.parent = &devnode->dev.kobj; - ret = cdev_add(&mdev->cdev, MKDEV(MAJOR(media_dev_t), mdev->minor), 1); + ret = cdev_add(&devnode->cdev, MKDEV(MAJOR(media_dev_t), devnode->minor), 1); if (ret < 0) { pr_err("%s: cdev_add failed\n", __func__); - goto error; + goto cdev_add_error; } - /* Part 3: Register the media device */ - mdev->dev.bus = &media_bus_type; - mdev->dev.devt = MKDEV(MAJOR(media_dev_t), mdev->minor); - mdev->dev.release = media_devnode_release; - if (mdev->parent) - mdev->dev.parent = mdev->parent; - dev_set_name(&mdev->dev, "media%d", mdev->minor); - ret = device_register(&mdev->dev); + /* Part 3: Add the media device */ + ret = device_add(&devnode->dev); if (ret < 0) { - pr_err("%s: device_register failed\n", __func__); - goto error; + pr_err("%s: device_add failed\n", __func__); + goto device_add_error; } /* Part 4: Activate this minor. The char device can now be used. */ - set_bit(MEDIA_FLAG_REGISTERED, &mdev->flags); + set_bit(MEDIA_FLAG_REGISTERED, &devnode->flags); return 0; -error: - cdev_del(&mdev->cdev); - clear_bit(mdev->minor, media_devnode_nums); +device_add_error: + cdev_del(&devnode->cdev); +cdev_add_error: + mutex_lock(&media_devnode_lock); + clear_bit(devnode->minor, media_devnode_nums); + devnode->media_dev = NULL; + mutex_unlock(&media_devnode_lock); + + put_device(&devnode->dev); return ret; } +void media_devnode_unregister_prepare(struct media_devnode *devnode) +{ + /* Check if devnode was ever registered at all */ + if (!media_devnode_is_registered(devnode)) + return; + + mutex_lock(&media_devnode_lock); + clear_bit(MEDIA_FLAG_REGISTERED, &devnode->flags); + mutex_unlock(&media_devnode_lock); +} + /** * media_devnode_unregister - unregister a media device node - * @mdev: the device node to unregister + * @devnode: the device node to unregister * * This unregisters the passed device. Future open calls will be met with * errors. * - * This function can safely be called if the device node has never been - * registered or has already been unregistered. 
+ * Should be called after media_devnode_unregister_prepare() */ -void media_devnode_unregister(struct media_devnode *mdev) +void media_devnode_unregister(struct media_devnode *devnode) { - /* Check if mdev was ever registered at all */ - if (!media_devnode_is_registered(mdev)) - return; - mutex_lock(&media_devnode_lock); - clear_bit(MEDIA_FLAG_REGISTERED, &mdev->flags); + /* Delete the cdev on this minor as well */ + cdev_del(&devnode->cdev); mutex_unlock(&media_devnode_lock); - device_unregister(&mdev->dev); + device_del(&devnode->dev); + devnode->media_dev = NULL; + put_device(&devnode->dev); } /* diff --git a/drivers/media/pci/cx23885/cx23885-av.c b/drivers/media/pci/cx23885/cx23885-av.c index 877dad89107e..e7d4406f9abd 100644 --- a/drivers/media/pci/cx23885/cx23885-av.c +++ b/drivers/media/pci/cx23885/cx23885-av.c @@ -24,7 +24,7 @@ void cx23885_av_work_handler(struct work_struct *work) { struct cx23885_dev *dev = container_of(work, struct cx23885_dev, cx25840_work); - bool handled; + bool handled = false; v4l2_subdev_call(dev->sd_cx25840, core, interrupt_service_routine, PCI_MSK_AV_CORE, &handled); diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c index 36add3c463f7..1256af0dde1d 100644 --- a/drivers/media/platform/am437x/am437x-vpfe.c +++ b/drivers/media/platform/am437x/am437x-vpfe.c @@ -1047,7 +1047,7 @@ static int vpfe_get_ccdc_image_format(struct vpfe_device *vpfe, static int vpfe_config_ccdc_image_format(struct vpfe_device *vpfe) { enum ccdc_frmfmt frm_fmt = CCDC_FRMFMT_INTERLACED; - int ret; + int ret = 0; vpfe_dbg(2, vpfe, "vpfe_config_ccdc_image_format\n"); diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c index a43404cad3e3..ed307488ccbd 100644 --- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c +++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c @@ -49,7 +49,7 @@ MODULE_FIRMWARE(FIRMWARE_MEMDMA); #define PID_TABLE_SIZE 1024 #define POLL_MSECS 50 -static int load_c8sectpfe_fw_step1(struct c8sectpfei *fei); +static int load_c8sectpfe_fw(struct c8sectpfei *fei); #define TS_PKT_SIZE 188 #define HEADER_SIZE (4) @@ -143,6 +143,7 @@ static int c8sectpfe_start_feed(struct dvb_demux_feed *dvbdmxfeed) struct channel_info *channel; u32 tmp; unsigned long *bitmap; + int ret; switch (dvbdmxfeed->type) { case DMX_TYPE_TS: @@ -171,8 +172,9 @@ static int c8sectpfe_start_feed(struct dvb_demux_feed *dvbdmxfeed) } if (!atomic_read(&fei->fw_loaded)) { - dev_err(fei->dev, "%s: c8sectpfe fw not loaded\n", __func__); - return -EINVAL; + ret = load_c8sectpfe_fw(fei); + if (ret) + return ret; } mutex_lock(&fei->lock); @@ -267,8 +269,9 @@ static int c8sectpfe_stop_feed(struct dvb_demux_feed *dvbdmxfeed) unsigned long *bitmap; if (!atomic_read(&fei->fw_loaded)) { - dev_err(fei->dev, "%s: c8sectpfe fw not loaded\n", __func__); - return -EINVAL; + ret = load_c8sectpfe_fw(fei); + if (ret) + return ret; } mutex_lock(&fei->lock); @@ -882,13 +885,6 @@ static int c8sectpfe_probe(struct platform_device *pdev) goto err_clk_disable; } - /* ensure all other init has been done before requesting firmware */ - ret = load_c8sectpfe_fw_step1(fei); - if (ret) { - dev_err(dev, "Couldn't load slim core firmware\n"); - goto err_clk_disable; - } - c8sectpfe_debugfs_init(fei); return 0; @@ -1093,15 +1089,14 @@ static void load_dmem_segment(struct c8sectpfei *fei, Elf32_Phdr *phdr, phdr->p_memsz - phdr->p_filesz); } -static int load_slim_core_fw(const struct firmware *fw, void 
*context) +static int load_slim_core_fw(const struct firmware *fw, struct c8sectpfei *fei) { - struct c8sectpfei *fei = context; Elf32_Ehdr *ehdr; Elf32_Phdr *phdr; u8 __iomem *dst; int err = 0, i; - if (!fw || !context) + if (!fw || !fei) return -EINVAL; ehdr = (Elf32_Ehdr *)fw->data; @@ -1153,29 +1148,35 @@ static int load_slim_core_fw(const struct firmware *fw, void *context) return err; } -static void load_c8sectpfe_fw_cb(const struct firmware *fw, void *context) +static int load_c8sectpfe_fw(struct c8sectpfei *fei) { - struct c8sectpfei *fei = context; + const struct firmware *fw; int err; + dev_info(fei->dev, "Loading firmware: %s\n", FIRMWARE_MEMDMA); + + err = request_firmware(&fw, FIRMWARE_MEMDMA, fei->dev); + if (err) + return err; + err = c8sectpfe_elf_sanity_check(fei, fw); if (err) { dev_err(fei->dev, "c8sectpfe_elf_sanity_check failed err=(%d)\n" , err); - goto err; + return err; } - err = load_slim_core_fw(fw, context); + err = load_slim_core_fw(fw, fei); if (err) { dev_err(fei->dev, "load_slim_core_fw failed err=(%d)\n", err); - goto err; + return err; } /* now the firmware is loaded configure the input blocks */ err = configure_channels(fei); if (err) { dev_err(fei->dev, "configure_channels failed err=(%d)\n", err); - goto err; + return err; } /* @@ -1188,28 +1189,6 @@ static void load_c8sectpfe_fw_cb(const struct firmware *fw, void *context) writel(0x1, fei->io + DMA_CPU_RUN); atomic_set(&fei->fw_loaded, 1); -err: - complete_all(&fei->fw_ack); -} - -static int load_c8sectpfe_fw_step1(struct c8sectpfei *fei) -{ - int err; - - dev_info(fei->dev, "Loading firmware: %s\n", FIRMWARE_MEMDMA); - - init_completion(&fei->fw_ack); - atomic_set(&fei->fw_loaded, 0); - - err = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG, - FIRMWARE_MEMDMA, fei->dev, GFP_KERNEL, fei, - load_c8sectpfe_fw_cb); - - if (err) { - dev_err(fei->dev, "request_firmware_nowait err: %d.\n", err); - complete_all(&fei->fw_ack); - return err; - } return 0; } diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c index 3f0f71adabb4..ea1008cf14a3 100644 --- a/drivers/media/rc/rc-main.c +++ b/drivers/media/rc/rc-main.c @@ -61,7 +61,7 @@ struct rc_map *rc_map_get(const char *name) struct rc_map_list *map; map = seek_rc_map(name); -#ifdef MODULE +#ifdef CONFIG_MODULES if (!map) { int rc = request_module("%s", name); if (rc < 0) { diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c index 9cd0268b2767..f353ab569b8e 100644 --- a/drivers/media/usb/uvc/uvc_driver.c +++ b/drivers/media/usb/uvc/uvc_driver.c @@ -1800,7 +1800,7 @@ static void uvc_delete(struct uvc_device *dev) if (dev->vdev.dev) v4l2_device_unregister(&dev->vdev); #ifdef CONFIG_MEDIA_CONTROLLER - if (media_devnode_is_registered(&dev->mdev.devnode)) + if (media_devnode_is_registered(dev->mdev.devnode)) media_device_unregister(&dev->mdev); #endif diff --git a/drivers/memory/tegra/tegra124.c b/drivers/memory/tegra/tegra124.c index 234e74f97a4b..9f68a56f2727 100644 --- a/drivers/memory/tegra/tegra124.c +++ b/drivers/memory/tegra/tegra124.c @@ -1007,6 +1007,7 @@ static const struct tegra_smmu_soc tegra124_smmu_soc = { .num_swgroups = ARRAY_SIZE(tegra124_swgroups), .supports_round_robin_arbitration = true, .supports_request_limit = true, + .num_tlb_lines = 32, .num_asids = 128, }; diff --git a/drivers/mfd/lp8788-irq.c b/drivers/mfd/lp8788-irq.c index c7a9825aa4ce..792d51bae20f 100644 --- a/drivers/mfd/lp8788-irq.c +++ b/drivers/mfd/lp8788-irq.c @@ -112,7 +112,7 @@ static irqreturn_t lp8788_irq_handler(int irq, 
void *ptr) struct lp8788_irq_data *irqd = ptr; struct lp8788 *lp = irqd->lp; u8 status[NUM_REGS], addr, mask; - bool handled; + bool handled = false; int i; if (lp8788_read_multi_bytes(lp, LP8788_INT_1, status, NUM_REGS)) diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c index 81c3f75b7330..8f9c26b77089 100644 --- a/drivers/misc/cxl/fault.c +++ b/drivers/misc/cxl/fault.c @@ -152,7 +152,7 @@ static void cxl_handle_page_fault(struct cxl_context *ctx, access = _PAGE_PRESENT; if (dsisr & CXL_PSL_DSISR_An_S) access |= _PAGE_RW; - if ((!ctx->kernel) || ~(dar & (1ULL << 63))) + if ((!ctx->kernel) || (REGION_ID(dar) == USER_REGION_ID)) access |= _PAGE_USER; if (dsisr & DSISR_NOHPTE) diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index df268365e04e..c8e3995b8cb7 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c @@ -276,6 +276,7 @@ void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid) down_write(&dev->me_clients_rwsem); me_cl = __mei_me_cl_by_uuid(dev, uuid); __mei_me_cl_del(dev, me_cl); + mei_me_cl_put(me_cl); up_write(&dev->me_clients_rwsem); } @@ -297,6 +298,7 @@ void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id) down_write(&dev->me_clients_rwsem); me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id); __mei_me_cl_del(dev, me_cl); + mei_me_cl_put(me_cl); up_write(&dev->me_clients_rwsem); } diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index 153c229054fe..b849811aaf33 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -1290,8 +1290,10 @@ static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev, } md = mmc_blk_get(bdev->bd_disk); - if (!md) + if (!md) { + err = -EINVAL; goto cmd_err; + } card = md->queue.card; if (IS_ERR(card)) { diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c index da7940e3b9cb..4f3204961c0d 100644 --- a/drivers/mmc/core/debugfs.c +++ b/drivers/mmc/core/debugfs.c @@ -191,7 +191,7 @@ static int mmc_ios_show(struct seq_file *s, void *data) str = "invalid"; break; } - seq_printf(s, "signal voltage:\t%u (%s)\n", ios->chip_select, str); + seq_printf(s, "signal voltage:\t%u (%s)\n", ios->signal_voltage, str); switch (ios->drv_type) { case MMC_SET_DRIVER_TYPE_A: diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c index 9becebeeccd1..b2c482da5dd7 100644 --- a/drivers/mmc/host/dw_mmc-rockchip.c +++ b/drivers/mmc/host/dw_mmc-rockchip.c @@ -78,6 +78,70 @@ static void dw_mci_rk3288_set_ios(struct dw_mci *host, struct mmc_ios *ios) /* Make sure we use phases which we can enumerate with */ if (!IS_ERR(priv->sample_clk)) clk_set_phase(priv->sample_clk, priv->default_sample_phase); + + /* + * Set the drive phase offset based on speed mode to achieve hold times. + * + * NOTE: this is _not_ a value that is dynamically tuned and is also + * _not_ a value that will vary from board to board. It is a value + * that could vary between different SoC models if they had massively + * different output clock delays inside their dw_mmc IP block (delay_o), + * but since it's OK to overshoot a little we don't need to do complex + * calculations and can pick values that will just work for everyone. + * + * When picking values we'll stick with picking 0/90/180/270 since + * those can be made very accurately on all known Rockchip SoCs. + * + * Note that these values match values from the DesignWare Databook + * tables for the most part except for SDR12 and "ID mode". 
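As a quick sanity check on the drive-phase reasoning in this dw_mmc-rockchip hunk: a fixed phase offset corresponds to a delay of (phase / 360) x (1 / f_clk), which is where the roughly 1.67 ns figure for 90 degrees at 150 MHz comes from. A tiny standalone calculation, not driver code:

#include <stdio.h>

static double phase_to_delay_ns(double phase_deg, double clk_hz)
{
    return (phase_deg / 360.0) * 1e9 / clk_hz;
}

int main(void)
{
    /* 90 degrees at 150 MHz: ~1.67 ns */
    printf("%.2f ns\n", phase_to_delay_ns(90, 150e6));
    /* the same 90 degrees at 50 MHz already gives 5 ns of delay */
    printf("%.2f ns\n", phase_to_delay_ns(90, 50e6));
    return 0;
}

Slower clocks get proportionally more delay from the same phase, which is consistent with the comment's choice of 90 degrees as the safe default and 180 degrees only for the fastest modes.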
For those + * two modes the databook calculations assume a clock in of 50MHz. As + * seen above, we always use a clock in rate that is exactly the + * card's input clock (times RK3288_CLKGEN_DIV, but that gets divided + * back out before the controller sees it). + * + * From measurement of a single device, it appears that delay_o is + * about .5 ns. Since we try to leave a bit of margin, it's expected + * that numbers here will be fine even with much larger delay_o + * (the 1.4 ns assumed by the DesignWare Databook would result in the + * same results, for instance). + */ + if (!IS_ERR(priv->drv_clk)) { + int phase; + + /* + * In almost all cases a 90 degree phase offset will provide + * sufficient hold times across all valid input clock rates + * assuming delay_o is not absurd for a given SoC. We'll use + * that as a default. + */ + phase = 90; + + switch (ios->timing) { + case MMC_TIMING_MMC_DDR52: + /* + * Since clock in rate with MMC_DDR52 is doubled when + * bus width is 8 we need to double the phase offset + * to get the same timings. + */ + if (ios->bus_width == MMC_BUS_WIDTH_8) + phase = 180; + break; + case MMC_TIMING_UHS_SDR104: + case MMC_TIMING_MMC_HS200: + /* + * In the case of 150 MHz clock (typical max for + * Rockchip SoCs), 90 degree offset will add a delay + * of 1.67 ns. That will meet min hold time of .8 ns + * as long as clock output delay is < .87 ns. On + * SoCs measured this seems to be OK, but it doesn't + * hurt to give margin here, so we use 180. + */ + phase = 180; + break; + } + + clk_set_phase(priv->drv_clk, phase); + } } #define NUM_PHASES 360 diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c index 79905ce895ad..bbad309679cf 100644 --- a/drivers/mmc/host/moxart-mmc.c +++ b/drivers/mmc/host/moxart-mmc.c @@ -257,7 +257,7 @@ static void moxart_dma_complete(void *param) static void moxart_transfer_dma(struct mmc_data *data, struct moxart_host *host) { u32 len, dir_data, dir_slave; - unsigned long dma_time; + long dma_time; struct dma_async_tx_descriptor *desc = NULL; struct dma_chan *dma_chan; @@ -397,7 +397,8 @@ static void moxart_prepare_data(struct moxart_host *host) static void moxart_request(struct mmc_host *mmc, struct mmc_request *mrq) { struct moxart_host *host = mmc_priv(mmc); - unsigned long pio_time, flags; + long pio_time; + unsigned long flags; u32 status; spin_lock_irqsave(&host->lock, flags); diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c index f5edf9d3a18a..0535827b02ee 100644 --- a/drivers/mmc/host/sdhci-pxav3.c +++ b/drivers/mmc/host/sdhci-pxav3.c @@ -307,8 +307,30 @@ static void pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs) __func__, uhs, ctrl_2); } +static void pxav3_set_power(struct sdhci_host *host, unsigned char mode, + unsigned short vdd) +{ + struct mmc_host *mmc = host->mmc; + u8 pwr = host->pwr; + + sdhci_set_power(host, mode, vdd); + + if (host->pwr == pwr) + return; + + if (host->pwr == 0) + vdd = 0; + + if (!IS_ERR(mmc->supply.vmmc)) { + spin_unlock_irq(&host->lock); + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); + spin_lock_irq(&host->lock); + } +} + static const struct sdhci_ops pxav3_sdhci_ops = { .set_clock = sdhci_set_clock, + .set_power = pxav3_set_power, .platform_send_init_74_clocks = pxav3_gen_init_74_clocks, .get_max_clock = sdhci_pltfm_clk_get_max_clock, .set_bus_width = sdhci_set_bus_width, diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index ae1d4067da5d..7f6db432f205 100644 --- a/drivers/mmc/host/sdhci.c +++ 
b/drivers/mmc/host/sdhci.c @@ -1453,24 +1453,25 @@ clock_set: } EXPORT_SYMBOL_GPL(sdhci_set_clock); -static void sdhci_set_power(struct sdhci_host *host, unsigned char mode, - unsigned short vdd) +static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode, + unsigned short vdd) { struct mmc_host *mmc = host->mmc; - u8 pwr = 0; - if (!IS_ERR(mmc->supply.vmmc)) { - spin_unlock_irq(&host->lock); - mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); - spin_lock_irq(&host->lock); + spin_unlock_irq(&host->lock); + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); + spin_lock_irq(&host->lock); - if (mode != MMC_POWER_OFF) - sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL); - else - sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); + if (mode != MMC_POWER_OFF) + sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL); + else + sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); +} - return; - } +void sdhci_set_power(struct sdhci_host *host, unsigned char mode, + unsigned short vdd) +{ + u8 pwr = 0; if (mode != MMC_POWER_OFF) { switch (1 << vdd) { @@ -1503,7 +1504,6 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode, host->ops->check_power_status(host, REQ_BUS_OFF); if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) sdhci_runtime_pm_bus_off(host); - vdd = 0; } else { /* * Spec says that we should clear the power reg before setting @@ -1542,6 +1542,20 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode, mdelay(10); } } +EXPORT_SYMBOL_GPL(sdhci_set_power); + +static void __sdhci_set_power(struct sdhci_host *host, unsigned char mode, + unsigned short vdd) +{ + struct mmc_host *mmc = host->mmc; + + if (host->ops->set_power) + host->ops->set_power(host, mode, vdd); + else if (!IS_ERR(mmc->supply.vmmc)) + sdhci_set_power_reg(host, mode, vdd); + else + sdhci_set_power(host, mode, vdd); +} /*****************************************************************************\ * * @@ -1934,7 +1948,7 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios) pr_err("%s: enabling controller clock: failed: %d\n", mmc_hostname(host->mmc), ret); } else { - sdhci_set_power(host, ios->power_mode, ios->vdd); + __sdhci_set_power(host, ios->power_mode, ios->vdd); } } } @@ -1952,7 +1966,7 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios) if (!host->ops->enable_controller_clock && (ios->power_mode & (MMC_POWER_UP | MMC_POWER_ON))) - sdhci_set_power(host, ios->power_mode, ios->vdd); + __sdhci_set_power(host, ios->power_mode, ios->vdd); spin_lock_irqsave(&host->lock, flags); diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 86b7066a81c2..c6178c099039 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -642,6 +642,8 @@ struct sdhci_ops { #endif void (*set_clock)(struct sdhci_host *host, unsigned int clock); + void (*set_power)(struct sdhci_host *host, unsigned char mode, + unsigned short vdd); int (*enable_dma)(struct sdhci_host *host); unsigned int (*get_max_clock)(struct sdhci_host *host); @@ -806,6 +808,8 @@ static inline bool sdhci_sdio_irq_enabled(struct sdhci_host *host) } void sdhci_set_clock(struct sdhci_host *host, unsigned int clock); +void sdhci_set_power(struct sdhci_host *host, unsigned char mode, + unsigned short vdd); void sdhci_set_bus_width(struct sdhci_host *host, int width); void sdhci_reset(struct sdhci_host *host, u8 mask); void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing); diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c 
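The sdhci changes just above introduce an optional .set_power host operation and a fallback chain in __sdhci_set_power(): use the driver hook if present, otherwise the regulator-backed helper, otherwise the plain register write. A compilable userspace sketch of that dispatch shape; struct host, do_set_power() and the pxav3_like_* names are invented for illustration:

#include <stdio.h>
#include <stdbool.h>

struct host;

struct host_ops {
    /* optional override, may be NULL */
    void (*set_power)(struct host *h, int mode, unsigned short vdd);
};

struct host {
    const struct host_ops *ops;
    bool has_regulator;
};

static void set_power_reg(struct host *h, int mode, unsigned short vdd)
{
    printf("regulator path: mode=%d vdd=%u\n", mode, vdd);
}

static void set_power_default(struct host *h, int mode, unsigned short vdd)
{
    printf("register-only path: mode=%d vdd=%u\n", mode, vdd);
}

/* mirrors the shape of __sdhci_set_power(): driver hook first,
 * then the regulator helper, then the default register write */
static void do_set_power(struct host *h, int mode, unsigned short vdd)
{
    if (h->ops && h->ops->set_power)
        h->ops->set_power(h, mode, vdd);
    else if (h->has_regulator)
        set_power_reg(h, mode, vdd);
    else
        set_power_default(h, mode, vdd);
}

static void pxav3_like_set_power(struct host *h, int mode, unsigned short vdd)
{
    set_power_default(h, mode, vdd);    /* reuse the core helper ... */
    printf("... then notify the regulator outside the core path\n");
}

int main(void)
{
    const struct host_ops pxav3_ops = { .set_power = pxav3_like_set_power };
    struct host plain    = { .ops = NULL,       .has_regulator = false };
    struct host with_reg = { .ops = NULL,       .has_regulator = true  };
    struct host pxav3    = { .ops = &pxav3_ops, .has_regulator = true  };

    do_set_power(&plain, 1, 18);
    do_set_power(&with_reg, 1, 18);
    do_set_power(&pxav3, 1, 18);
    return 0;
}

This is the reason pxav3 can call the now-exported sdhci_set_power() inside its own hook and only add the regulator notification on top.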
index 67eb2be0db87..9a5035cac129 100644 --- a/drivers/mtd/nand/denali.c +++ b/drivers/mtd/nand/denali.c @@ -1622,9 +1622,16 @@ EXPORT_SYMBOL(denali_init); /* driver exit point */ void denali_remove(struct denali_nand_info *denali) { + /* + * Pre-compute DMA buffer size to avoid any problems in case + * nand_release() ever changes in a way that mtd->writesize and + * mtd->oobsize are not reliable after this call. + */ + int bufsize = denali->mtd.writesize + denali->mtd.oobsize; + + nand_release(&denali->mtd); denali_irq_cleanup(denali->irq, denali); - dma_unmap_single(denali->dev, denali->buf.dma_buf, - denali->mtd.writesize + denali->mtd.oobsize, + dma_unmap_single(denali->dev, denali->buf.dma_buf, bufsize, DMA_BIDIRECTIONAL); } EXPORT_SYMBOL(denali_remove); diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 399c627b15cc..22ebdf4d8cc4 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -100,11 +100,14 @@ enum ad_link_speed_type { #define MAC_ADDRESS_EQUAL(A, B) \ ether_addr_equal_64bits((const u8 *)A, (const u8 *)B) -static struct mac_addr null_mac_addr = { { 0, 0, 0, 0, 0, 0 } }; +static const u8 null_mac_addr[ETH_ALEN + 2] __long_aligned = { + 0, 0, 0, 0, 0, 0 +}; static u16 ad_ticks_per_sec; static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000; -static const u8 lacpdu_mcast_addr[ETH_ALEN] = MULTICAST_LACPDU_ADDR; +static const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned = + MULTICAST_LACPDU_ADDR; /* ================= main 802.3ad protocol functions ================== */ static int ad_lacpdu_send(struct port *port); @@ -1701,7 +1704,7 @@ static void ad_clear_agg(struct aggregator *aggregator) aggregator->is_individual = false; aggregator->actor_admin_aggregator_key = 0; aggregator->actor_oper_aggregator_key = 0; - aggregator->partner_system = null_mac_addr; + eth_zero_addr(aggregator->partner_system.mac_addr_value); aggregator->partner_system_priority = 0; aggregator->partner_oper_aggregator_key = 0; aggregator->receive_state = 0; @@ -1723,7 +1726,7 @@ static void ad_initialize_agg(struct aggregator *aggregator) if (aggregator) { ad_clear_agg(aggregator); - aggregator->aggregator_mac_address = null_mac_addr; + eth_zero_addr(aggregator->aggregator_mac_address.mac_addr_value); aggregator->aggregator_identifier = 0; aggregator->slave = NULL; } diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 41bd9186d383..295d86ba63d3 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -42,13 +42,10 @@ -#ifndef __long_aligned -#define __long_aligned __attribute__((aligned((sizeof(long))))) -#endif -static const u8 mac_bcast[ETH_ALEN] __long_aligned = { +static const u8 mac_bcast[ETH_ALEN + 2] __long_aligned = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; -static const u8 mac_v6_allmcast[ETH_ALEN] __long_aligned = { +static const u8 mac_v6_allmcast[ETH_ALEN + 2] __long_aligned = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x01 }; static const int alb_delta_in_ticks = HZ / ALB_TIMER_TICKS_PER_SEC; diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c index b8df0f5e8c25..3f320f470345 100644 --- a/drivers/net/bonding/bond_netlink.c +++ b/drivers/net/bonding/bond_netlink.c @@ -628,8 +628,7 @@ static int bond_fill_info(struct sk_buff *skb, goto nla_put_failure; if (nla_put(skb, IFLA_BOND_AD_ACTOR_SYSTEM, - sizeof(bond->params.ad_actor_system), - &bond->params.ad_actor_system)) + ETH_ALEN, &bond->params.ad_actor_system)) goto nla_put_failure; } if 
(!bond_3ad_get_active_agg_info(bond, &info)) { diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c index 641a532b67cb..3f756fa2f603 100644 --- a/drivers/net/bonding/bond_sysfs_slave.c +++ b/drivers/net/bonding/bond_sysfs_slave.c @@ -153,8 +153,10 @@ int bond_sysfs_slave_add(struct slave *slave) err = kobject_init_and_add(&slave->kobj, &slave_ktype, &(slave->dev->dev.kobj), "bonding_slave"); - if (err) + if (err) { + kobject_put(&slave->kobj); return err; + } for (a = slave_attrs; *a; ++a) { err = sysfs_create_file(&slave->kobj, &((*a)->attr)); diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index e2414f2d7ba9..68ef738a7689 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -2064,9 +2064,9 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) * the other bits clear. */ reg = 1 << port; - /* Disable learning for DSA and CPU ports */ - if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) - reg = PORT_ASSOC_VECTOR_LOCKED_PORT; + /* Disable learning for CPU port */ + if (dsa_is_cpu_port(ds, port)) + reg = 0; ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_ASSOC_VECTOR, reg); if (ret) @@ -2150,7 +2150,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) * database, and allow every port to egress frames on all other ports. */ reg = BIT(ps->num_ports) - 1; /* all ports */ - ret = _mv88e6xxx_port_vlan_map_set(ds, port, reg & ~port); + reg &= ~BIT(port); /* except itself */ + ret = _mv88e6xxx_port_vlan_map_set(ds, port, reg); if (ret) goto abort; diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c index e0f3d197e7f2..8ff10fd70b02 100644 --- a/drivers/net/ethernet/agere/et131x.c +++ b/drivers/net/ethernet/agere/et131x.c @@ -3854,7 +3854,7 @@ static void et131x_tx_timeout(struct net_device *netdev) unsigned long flags; /* If the device is closed, ignore the timeout */ - if (~(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE)) + if (!(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE)) return; /* Any nonrecoverable hardware error? diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 3cb99ce7325b..94f06c35ad9c 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -396,7 +396,7 @@ static void bcm_sysport_get_stats(struct net_device *dev, else p = (char *)priv; p += s->stat_offset; - data[i] = *(u32 *)p; + data[i] = *(unsigned long *)p; } } @@ -526,7 +526,8 @@ static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv, dma_addr_t mapping; /* Allocate a new SKB for a new packet */ - skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH); + skb = __netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH, + GFP_ATOMIC | __GFP_NOWARN); if (!skb) { priv->mib.alloc_rx_buff_failed++; netif_err(priv, rx_err, ndev, "SKB alloc failed\n"); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index d91953eabfeb..a3949c1a0c23 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4250,6 +4250,10 @@ static void bnxt_del_napi(struct bnxt *bp) napi_hash_del(&bnapi->napi); netif_napi_del(&bnapi->napi); } + /* We called napi_hash_del() before netif_napi_del(), we need + * to respect an RCU grace period before freeing napi structures. 
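Two of the fixes a little above, et131x_tx_timeout() and the mv88e6xxx port VLAN map, are the same class of C mistake: a bitwise operator where a logical test or a proper bit mask was intended. A small self-contained demo of both, with made-up flag values:

#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
    unsigned int flags = 0;             /* feature bit NOT set */
    unsigned int feature = BIT(3);

    /* '~' is bitwise NOT: ~(0) == 0xffffffff, still "true",
     * so this branch fires regardless of the flag */
    if (~(flags & feature))
        printf("bitwise ~: taken even though the flag is clear\n");

    /* '!' is the logical test that was actually intended */
    if (!(flags & feature))
        printf("logical !: correctly reports the flag as clear\n");

    /* port mask: clear only this port's bit from an "all ports" mask */
    unsigned int num_ports = 7, port = 2;
    unsigned int all = BIT(num_ports) - 1;          /* 0x7f */

    printf("all & ~port      = 0x%02x (port number used as a mask: clears bit 1)\n",
           all & ~port);
    printf("all & ~BIT(port) = 0x%02x (clears only bit 2, as intended)\n",
           all & ~BIT(port));
    return 0;
}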
+ */ + synchronize_net(); } static void bnxt_init_napi(struct bnxt *bp) @@ -4306,9 +4310,7 @@ static void bnxt_tx_disable(struct bnxt *bp) bnapi = bp->bnapi[i]; txr = &bnapi->tx_ring; txq = netdev_get_tx_queue(bp->dev, i); - __netif_tx_lock(txq, smp_processor_id()); txr->dev_state = BNXT_DEV_STATE_CLOSING; - __netif_tx_unlock(txq); } } /* Stop all TX queues */ diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 34fae5576b60..bae8df951780 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -927,7 +927,11 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev, else p = (char *)priv; p += s->stat_offset; - data[i] = *(u32 *)p; + if (sizeof(unsigned long) != sizeof(u32) && + s->stat_sizeof == sizeof(unsigned long)) + data[i] = *(unsigned long *)p; + else + data[i] = *(u32 *)p; } } @@ -1346,7 +1350,7 @@ static int bcmgenet_xmit_single(struct net_device *dev, tx_cb_ptr->skb = skb; - skb_len = skb_headlen(skb) < ETH_ZLEN ? ETH_ZLEN : skb_headlen(skb); + skb_len = skb_headlen(skb); mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE); ret = dma_mapping_error(kdev, mapping); @@ -1575,7 +1579,8 @@ static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv, dma_addr_t mapping; /* Allocate a new Rx skb */ - skb = netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT); + skb = __netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT, + GFP_ATOMIC | __GFP_NOWARN); if (!skb) { priv->mib.alloc_rx_buff_failed++; netif_err(priv, rx_err, priv->dev, diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c index 18672ad773fb..856b7abe4b8a 100644 --- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c +++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c @@ -31,7 +31,7 @@ #define BNAD_NUM_TXF_COUNTERS 12 #define BNAD_NUM_RXF_COUNTERS 10 #define BNAD_NUM_CQ_COUNTERS (3 + 5) -#define BNAD_NUM_RXQ_COUNTERS 6 +#define BNAD_NUM_RXQ_COUNTERS 7 #define BNAD_NUM_TXQ_COUNTERS 5 #define BNAD_ETHTOOL_STATS_NUM \ @@ -658,6 +658,8 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string) string += ETH_GSTRING_LEN; sprintf(string, "rxq%d_allocbuf_failed", q_num); string += ETH_GSTRING_LEN; + sprintf(string, "rxq%d_mapbuf_failed", q_num); + string += ETH_GSTRING_LEN; sprintf(string, "rxq%d_producer_index", q_num); string += ETH_GSTRING_LEN; sprintf(string, "rxq%d_consumer_index", q_num); @@ -678,6 +680,9 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string) sprintf(string, "rxq%d_allocbuf_failed", q_num); string += ETH_GSTRING_LEN; + sprintf(string, "rxq%d_mapbuf_failed", + q_num); + string += ETH_GSTRING_LEN; sprintf(string, "rxq%d_producer_index", q_num); string += ETH_GSTRING_LEN; diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index 75bdb6aad352..78803e7de360 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -1104,7 +1104,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) - macb_writel(bp, ISR, MACB_BIT(RXUBR)); + queue_writel(queue, ISR, MACB_BIT(RXUBR)); } if (status & MACB_BIT(ISR_ROVR)) { @@ -2904,7 +2904,7 @@ static int macb_probe(struct platform_device *pdev) dev->irq = platform_get_irq(pdev, 0); if (dev->irq < 0) { err = dev->irq; - goto err_disable_clocks; + goto 
err_out_free_netdev; } mac = of_get_mac_address(np); diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index b7b93e7a643d..d579f4770ee3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -1165,7 +1165,7 @@ out_free: dev_kfree_skb_any(skb); /* Discard the packet if the length is greater than mtu */ max_pkt_len = ETH_HLEN + dev->mtu; - if (skb_vlan_tag_present(skb)) + if (skb_vlan_tagged(skb)) max_pkt_len += VLAN_HLEN; if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len))) goto out_free; diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index ec8ffd7eae33..735bcdeaa7de 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -1188,7 +1188,7 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev) /* Discard the packet if the length is greater than mtu */ max_pkt_len = ETH_HLEN + dev->mtu; - if (skb_vlan_tag_present(skb)) + if (skb_vlan_tagged(skb)) max_pkt_len += VLAN_HLEN; if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len))) goto out_free; diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c index 796ee362ad70..24f69034f52c 100644 --- a/drivers/net/ethernet/cirrus/ep93xx_eth.c +++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c @@ -468,6 +468,9 @@ static void ep93xx_free_buffers(struct ep93xx_priv *ep) struct device *dev = ep->dev->dev.parent; int i; + if (!ep->descs) + return; + for (i = 0; i < RX_QUEUE_ENTRIES; i++) { dma_addr_t d; @@ -490,6 +493,7 @@ static void ep93xx_free_buffers(struct ep93xx_priv *ep) dma_free_coherent(dev, sizeof(struct ep93xx_descs), ep->descs, ep->descs_dma_addr); + ep->descs = NULL; } static int ep93xx_alloc_buffers(struct ep93xx_priv *ep) diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 9b9793333816..3fd1cba0c7ec 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -1708,7 +1708,7 @@ static int enic_open(struct net_device *netdev) { struct enic *enic = netdev_priv(netdev); unsigned int i; - int err; + int err, ret; err = enic_request_intr(enic); if (err) { @@ -1766,10 +1766,9 @@ static int enic_open(struct net_device *netdev) err_out_free_rq: for (i = 0; i < enic->rq_count; i++) { - err = vnic_rq_disable(&enic->rq[i]); - if (err) - return err; - vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); + ret = vnic_rq_disable(&enic->rq[i]); + if (!ret) + vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); } enic_dev_notify_unset(enic); err_out_free_intr: diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 6ee78c203eca..e5fb5cf5401b 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -531,6 +531,7 @@ struct be_adapter { struct delayed_work be_err_detection_work; u8 err_flags; + bool pcicfg_mapped; /* pcicfg obtained via pci_iomap() */ u32 flags; u32 cmd_privileges; /* Ethtool knobs and info */ diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 7524a33b7032..7cd39324106d 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -5526,6 +5526,8 @@ static void be_unmap_pci_bars(struct be_adapter *adapter) pci_iounmap(adapter->pdev, adapter->csr); if (adapter->db) pci_iounmap(adapter->pdev, 
adapter->db); + if (adapter->pcicfg && adapter->pcicfg_mapped) + pci_iounmap(adapter->pdev, adapter->pcicfg); } static int db_bar(struct be_adapter *adapter) @@ -5577,8 +5579,10 @@ static int be_map_pci_bars(struct be_adapter *adapter) if (!addr) goto pci_map_err; adapter->pcicfg = addr; + adapter->pcicfg_mapped = true; } else { adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET; + adapter->pcicfg_mapped = false; } } diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index 52f2230062e7..4d80d9f85c5d 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -1088,7 +1088,7 @@ static int ethoc_probe(struct platform_device *pdev) if (!priv->iobase) { dev_err(&pdev->dev, "cannot remap I/O memory space\n"); ret = -ENXIO; - goto error; + goto free; } if (netdev->mem_end) { @@ -1097,7 +1097,7 @@ static int ethoc_probe(struct platform_device *pdev) if (!priv->membase) { dev_err(&pdev->dev, "cannot remap memory space\n"); ret = -ENXIO; - goto error; + goto free; } } else { /* Allocate buffer memory */ @@ -1108,7 +1108,7 @@ static int ethoc_probe(struct platform_device *pdev) dev_err(&pdev->dev, "cannot allocate %dB buffer\n", buffer_size); ret = -ENOMEM; - goto error; + goto free; } netdev->mem_end = netdev->mem_start + buffer_size; priv->dma_alloc = buffer_size; @@ -1122,7 +1122,7 @@ static int ethoc_probe(struct platform_device *pdev) 128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ); if (num_bd < 4) { ret = -ENODEV; - goto error; + goto free; } priv->num_bd = num_bd; /* num_tx must be a power of two */ @@ -1135,7 +1135,7 @@ static int ethoc_probe(struct platform_device *pdev) priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void *), GFP_KERNEL); if (!priv->vma) { ret = -ENOMEM; - goto error; + goto free; } /* Allow the platform setup code to pass in a MAC address. 
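The ethoc_probe() hunks above change several `goto error` statements to `goto free` so the already-allocated netdev is unwound on failure. A minimal standalone sketch of that goto-ladder rule, namely that every failure jumps to the label that undoes everything allocated so far; setup_thing() and its fields are invented:

#include <stdio.h>
#include <stdlib.h>

struct thing { char *buf_a, *buf_b; };

static int setup_thing(struct thing *t, int fail_second)
{
    int ret;

    t->buf_a = malloc(64);
    if (!t->buf_a)
        return -1;                  /* nothing to unwind yet */

    t->buf_b = fail_second ? NULL : malloc(64);
    if (!t->buf_b) {
        ret = -1;
        goto free_a;                /* not a bare return: buf_a must be freed */
    }
    return 0;

free_a:
    free(t->buf_a);
    t->buf_a = NULL;
    return ret;
}

int main(void)
{
    struct thing t = { 0 };

    if (setup_thing(&t, 1))
        printf("second allocation failed, first one was unwound\n");
    return 0;
}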
*/ diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c index 3ce41efe8a94..4dd57e6bb3f9 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.c +++ b/drivers/net/ethernet/hisilicon/hns/hnae.c @@ -331,8 +331,10 @@ struct hnae_handle *hnae_get_handle(struct device *owner_dev, return ERR_PTR(-ENODEV); handle = dev->ops->get_handle(dev, port_id); - if (IS_ERR(handle)) + if (IS_ERR(handle)) { + put_device(&dev->cls_dev); return handle; + } handle->dev = dev; handle->owner_dev = owner_dev; @@ -355,6 +357,8 @@ out_when_init_queue: for (j = i - 1; j >= 0; j--) hnae_fini_queue(handle->qs[j]); + put_device(&dev->cls_dev); + return ERR_PTR(-ENOMEM); } EXPORT_SYMBOL(hnae_get_handle); @@ -376,6 +380,8 @@ void hnae_put_handle(struct hnae_handle *h) dev->ops->put_handle(h); module_put(dev->owner); + + put_device(&dev->cls_dev); } EXPORT_SYMBOL(hnae_put_handle); diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index fdbba588c6db..efe84ca20da7 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -1169,16 +1169,15 @@ static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe) ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe); portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe); port = ehea_get_port(adapter, portnum); + if (!port) { + netdev_err(NULL, "unknown portnum %x\n", portnum); + return; + } dev = port->netdev; switch (ec) { case EHEA_EC_PORTSTATE_CHG: /* port state change */ - if (!port) { - netdev_err(dev, "unknown portnum %x\n", portnum); - break; - } - if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) { if (!netif_carrier_ok(dev)) { ret = ehea_sense_port_attr(port); diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c index 5ebe12d56ebf..a7c7b1d9b7c8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c +++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c @@ -49,7 +49,7 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw, struct i40e_hmc_sd_entry *sd_entry; bool dma_mem_alloc_done = false; struct i40e_dma_mem mem; - i40e_status ret_code; + i40e_status ret_code = I40E_SUCCESS; u64 alloc_len; if (NULL == hmc_info->sd_table.sd_entry) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index dd4e6ea9e0e1..af7f97791320 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -266,7 +266,7 @@ static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer, * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) * @data: word read from the Shadow RAM * - * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register. + * Reads one 16 bit word from the Shadow RAM using the AdminQ **/ static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset, u16 *data) @@ -280,27 +280,49 @@ static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset, } /** - * i40e_read_nvm_word - Reads Shadow RAM + * __i40e_read_nvm_word - Reads nvm word, assumes called does the locking * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) * @data: word read from the Shadow RAM * - * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register. + * Reads one 16 bit word from the Shadow RAM. + * + * Do not use this function except in cases where the nvm lock is already + * taken via i40e_acquire_nvm(). 
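The i40e NVM rework around this point splits each reader into a lock-free __ helper plus a public wrapper that acquires and releases the NVM resource, so a multi-word operation such as the checksum can hold the lock once instead of skipping it or re-taking it per word. A userspace sketch of that pattern with a pthread mutex standing in for i40e_acquire_nvm()/i40e_release_nvm(); read_word(), __read_word() and checksum_words() are invented names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t nvm_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned short shadow_ram[16] = { [2] = 0xbeef };

/* caller must hold nvm_lock */
static int __read_word(unsigned int offset, unsigned short *data)
{
    if (offset >= 16)
        return -1;
    *data = shadow_ram[offset];
    return 0;
}

/* public API: takes the lock around the lock-free helper */
static int read_word(unsigned int offset, unsigned short *data)
{
    int ret;

    pthread_mutex_lock(&nvm_lock);
    ret = __read_word(offset, data);
    pthread_mutex_unlock(&nvm_lock);
    return ret;
}

/* a longer operation that wants one lock span for several reads */
static unsigned int checksum_words(unsigned int n)
{
    unsigned short w;
    unsigned int sum = 0;

    pthread_mutex_lock(&nvm_lock);
    for (unsigned int i = 0; i < n; i++)
        if (!__read_word(i, &w))    /* lock already held: use the __ helper */
            sum += w;
    pthread_mutex_unlock(&nvm_lock);
    return sum;
}

int main(void)
{
    unsigned short w = 0;

    read_word(2, &w);
    printf("word 2 = 0x%04x, checksum of first 4 words = 0x%04x\n",
           (unsigned)w, checksum_words(4));
    return 0;
}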
+ **/ +static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw, + u16 offset, u16 *data) +{ + i40e_status ret_code = 0; + + if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) + ret_code = i40e_read_nvm_word_aq(hw, offset, data); + else + ret_code = i40e_read_nvm_word_srctl(hw, offset, data); + return ret_code; +} + +/** + * i40e_read_nvm_word - Reads nvm word and acquire lock if necessary + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) + * @data: word read from the Shadow RAM + * + * Reads one 16 bit word from the Shadow RAM. **/ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, u16 *data) { - enum i40e_status_code ret_code = 0; + i40e_status ret_code = 0; ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); - if (!ret_code) { - if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) { - ret_code = i40e_read_nvm_word_aq(hw, offset, data); - } else { - ret_code = i40e_read_nvm_word_srctl(hw, offset, data); - } - i40e_release_nvm(hw); - } + if (ret_code) + return ret_code; + + ret_code = __i40e_read_nvm_word(hw, offset, data); + + i40e_release_nvm(hw); + return ret_code; } @@ -393,31 +415,25 @@ read_nvm_buffer_aq_exit: } /** - * i40e_read_nvm_buffer - Reads Shadow RAM buffer + * __i40e_read_nvm_buffer - Reads nvm buffer, caller must acquire lock * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). * @words: (in) number of words to read; (out) number of words actually read * @data: words read from the Shadow RAM * * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd() - * method. The buffer read is preceded by the NVM ownership take - * and followed by the release. + * method. **/ -i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, - u16 *words, u16 *data) +static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw, + u16 offset, u16 *words, + u16 *data) { - enum i40e_status_code ret_code = 0; + i40e_status ret_code = 0; - if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) { - ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); - if (!ret_code) { - ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, - data); - i40e_release_nvm(hw); - } - } else { + if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) + ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, data); + else ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data); - } return ret_code; } @@ -499,15 +515,15 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw, data = (u16 *)vmem.va; /* read pointer to VPD area */ - ret_code = i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module); + ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module); if (ret_code) { ret_code = I40E_ERR_NVM_CHECKSUM; goto i40e_calc_nvm_checksum_exit; } /* read pointer to PCIe Alt Auto-load module */ - ret_code = i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR, - &pcie_alt_module); + ret_code = __i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR, + &pcie_alt_module); if (ret_code) { ret_code = I40E_ERR_NVM_CHECKSUM; goto i40e_calc_nvm_checksum_exit; @@ -521,7 +537,7 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw, if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) { u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS; - ret_code = i40e_read_nvm_buffer(hw, i, &words, data); + ret_code = __i40e_read_nvm_buffer(hw, i, &words, data); if (ret_code) { ret_code = I40E_ERR_NVM_CHECKSUM; goto i40e_calc_nvm_checksum_exit; @@ -593,14 +609,19 @@ i40e_status 
i40e_validate_nvm_checksum(struct i40e_hw *hw, u16 checksum_sr = 0; u16 checksum_local = 0; + /* We must acquire the NVM lock in order to correctly synchronize the + * NVM accesses across multiple PFs. Without doing so it is possible + * for one of the PFs to read invalid data potentially indicating that + * the checksum is invalid. + */ + ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (ret_code) + return ret_code; ret_code = i40e_calc_nvm_checksum(hw, &checksum_local); + __i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr); + i40e_release_nvm(hw); if (ret_code) - goto i40e_validate_nvm_checksum_exit; - - /* Do not use i40e_read_nvm_word() because we do not want to take - * the synchronization semaphores twice here. - */ - i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr); + return ret_code; /* Verify read checksum from EEPROM is the same as * calculated checksum @@ -612,7 +633,6 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw, if (checksum) *checksum = checksum_local; -i40e_validate_nvm_checksum_exit: return ret_code; } @@ -958,6 +978,7 @@ retry: break; case I40E_NVMUPD_CSUM_CON: + /* Assumes the caller has acquired the nvm */ status = i40e_update_nvm_checksum(hw); if (status) { *perrno = hw->aq.asq_last_status ? @@ -971,6 +992,7 @@ retry: break; case I40E_NVMUPD_CSUM_LCB: + /* Assumes the caller has acquired the nvm */ status = i40e_update_nvm_checksum(hw); if (status) { *perrno = hw->aq.asq_last_status ? diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index bb9d583e5416..6caa2ab0ad74 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -282,8 +282,6 @@ i40e_status i40e_acquire_nvm(struct i40e_hw *hw, void i40e_release_nvm(struct i40e_hw *hw); i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, u16 *data); -i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, - u16 *words, u16 *data); i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw); i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw, u16 *checksum); diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 9404f38d9d0d..2cf5c581c7e0 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -3296,7 +3296,7 @@ void igb_configure_tx_ring(struct igb_adapter *adapter, tdba & 0x00000000ffffffffULL); wr32(E1000_TDBAH(reg_idx), tdba >> 32); - ring->tail = hw->hw_addr + E1000_TDT(reg_idx); + ring->tail = adapter->io_addr + E1000_TDT(reg_idx); wr32(E1000_TDH(reg_idx), 0); writel(0, ring->tail); @@ -3652,7 +3652,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter, ring->count * sizeof(union e1000_adv_rx_desc)); /* initialize head and tail */ - ring->tail = hw->hw_addr + E1000_RDT(reg_idx); + ring->tail = adapter->io_addr + E1000_RDT(reg_idx); wr32(E1000_RDH(reg_idx), 0); writel(0, ring->tail); diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 82f080a5ed5c..7fe6c2cf1c62 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -762,10 +762,10 @@ txq_put_data_tso(struct net_device *dev, struct tx_queue *txq, if (length <= 8 && (uintptr_t)data & 0x7) { /* Copy unaligned small data fragment to TSO header data area */ - memcpy(txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE, + memcpy(txq->tso_hdrs + tx_index * TSO_HEADER_SIZE, 
data, length); desc->buf_ptr = txq->tso_hdrs_dma - + txq->tx_curr_desc * TSO_HEADER_SIZE; + + tx_index * TSO_HEADER_SIZE; } else { /* Alignment is okay, map buffer and hand off to hardware */ txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE; diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 1c300259d70a..575da945f151 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -3058,26 +3058,25 @@ static void mvneta_ethtool_update_stats(struct mvneta_port *pp) const struct mvneta_statistic *s; void __iomem *base = pp->base; u32 high, low, val; + u64 val64; int i; for (i = 0, s = mvneta_statistics; s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics); s++, i++) { - val = 0; - switch (s->type) { case T_REG_32: val = readl_relaxed(base + s->offset); + pp->ethtool_stats[i] += val; break; case T_REG_64: /* Docs say to read low 32-bit then high */ low = readl_relaxed(base + s->offset); high = readl_relaxed(base + s->offset + 4); - val = (u64)high << 32 | low; + val64 = (u64)high << 32 | low; + pp->ethtool_stats[i] += val64; break; } - - pp->ethtool_stats[i] += val; } } @@ -3406,7 +3405,7 @@ static int mvneta_probe(struct platform_device *pdev) dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; dev->hw_features |= dev->features; dev->vlan_features |= dev->features; - dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE; + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; dev->gso_max_segs = MVNETA_MAX_TSO_SEGS; err = register_netdev(dev); diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 03f0d20aa08b..b508345aeca6 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -3305,7 +3305,7 @@ static void mvpp2_cls_init(struct mvpp2 *priv) mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK); /* Clear classifier flow table */ - memset(&fe.data, 0, MVPP2_CLS_FLOWS_TBL_DATA_WORDS); + memset(&fe.data, 0, sizeof(fe.data)); for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) { fe.index = index; mvpp2_cls_flow_write(priv, &fe); diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c index e203d0c4e5a3..53daa6ca5d83 100644 --- a/drivers/net/ethernet/mellanox/mlx4/catas.c +++ b/drivers/net/ethernet/mellanox/mlx4/catas.c @@ -182,10 +182,17 @@ void mlx4_enter_error_state(struct mlx4_dev_persistent *persist) err = mlx4_reset_slave(dev); else err = mlx4_reset_master(dev); - BUG_ON(err != 0); + if (!err) { + mlx4_err(dev, "device was reset successfully\n"); + } else { + /* EEH could have disabled the PCI channel during reset. That's + * recoverable and the PCI error flow will handle it. 
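Two of the fixes above are worth a standalone illustration: mvneta accumulated a 64-bit hardware counter through a u32 temporary, silently dropping the top half, and mvpp2 passed an element count to memset(), which takes a byte count. Both in one small compilable demo with made-up values, not driver code:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
    /* 64-bit hardware counter read as two 32-bit halves */
    uint32_t low = 0x00000010, high = 0x00000002;

    uint32_t val32 = (uint64_t)high << 32 | low;    /* truncated to 0x10 */
    uint64_t val64 = (uint64_t)high << 32 | low;    /* full 0x200000010 */
    printf("through u32: 0x%llx, through u64: 0x%llx\n",
           (unsigned long long)val32, (unsigned long long)val64);

    /* memset over an array: element count vs byte count */
    uint32_t table[8];

    memset(table, 0xff, sizeof(table));
    memset(table, 0, 8);             /* clears only 8 BYTES = 2 entries */
    printf("after memset(.., 0, 8): table[1]=0x%x table[2]=0x%x\n",
           table[1], table[2]);
    memset(table, 0, sizeof(table)); /* clears the whole table */
    return 0;
}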
+ */ + if (!pci_channel_offline(dev->persist->pdev)) + BUG_ON(1); + } dev->persist->state |= MLX4_DEVICE_STATE_INTERNAL_ERROR; - mlx4_err(dev, "device was reset successfully\n"); mutex_unlock(&persist->device_state_mutex); /* At that step HW was already reset, now notify clients */ diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 9e104dcfa9dd..dc1cb6fc5b4e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -2451,6 +2451,7 @@ err_comm_admin: kfree(priv->mfunc.master.slave_state); err_comm: iounmap(priv->mfunc.comm); + priv->mfunc.comm = NULL; err_vhcr: dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE, priv->mfunc.vhcr, @@ -2518,6 +2519,13 @@ void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev) int slave; u32 slave_read; + /* If the comm channel has not yet been initialized, + * skip reporting the internal error event to all + * the communication channels. + */ + if (!priv->mfunc.comm) + return; + /* Report an internal error event to all * communication channels. */ @@ -2552,6 +2560,7 @@ void mlx4_multi_func_cleanup(struct mlx4_dev *dev) } iounmap(priv->mfunc.comm); + priv->mfunc.comm = NULL; } void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 232191417b93..7d61a5de9d5a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -424,14 +424,18 @@ static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, mutex_lock(&mdev->state_lock); if (mdev->device_up && priv->port_up) { err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); - if (err) + if (err) { en_err(priv, "Failed configuring VLAN filter\n"); + goto out; + } } - if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx)) - en_dbg(HW, priv, "failed adding vlan %d\n", vid); - mutex_unlock(&mdev->state_lock); + err = mlx4_register_vlan(mdev->dev, priv->port, vid, &idx); + if (err) + en_dbg(HW, priv, "Failed adding vlan %d\n", vid); - return 0; +out: + mutex_unlock(&mdev->state_lock); + return err; } static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, @@ -439,7 +443,7 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; - int err; + int err = 0; en_dbg(HW, priv, "Killing VID:%d\n", vid); @@ -456,7 +460,7 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, } mutex_unlock(&mdev->state_lock); - return 0; + return err; } static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac) @@ -1716,6 +1720,16 @@ int mlx4_en_start_port(struct net_device *dev) vxlan_get_rx_port(dev); #endif priv->port_up = true; + + /* Process all completions if exist to prevent + * the queues freezing if they are full + */ + for (i = 0; i < priv->rx_ring_num; i++) { + local_bh_disable(); + napi_schedule(&priv->rx_cq[i]->napi); + local_bh_enable(); + } + netif_tx_start_all_queues(dev); netif_device_attach(dev); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c index 3904b5fc0b7c..96fc35a9bb05 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_port.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c @@ -164,7 +164,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) return PTR_ERR(mailbox); err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0, MLX4_CMD_DUMP_ETH_STATS, 
MLX4_CMD_TIME_CLASS_B, - MLX4_CMD_WRAPPED); + MLX4_CMD_NATIVE); if (err) goto out; @@ -325,7 +325,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL, 0, MLX4_CMD_DUMP_ETH_STATS, - MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); if (err) goto out; } diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 033f99d2f15c..0a4e9731d33b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -610,8 +610,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port, MLX4_GET(func_cap->phys_port_id, outbox, QUERY_FUNC_CAP_PHYS_PORT_ID); - MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET); - func_cap->flags |= (field & QUERY_FUNC_CAP_PHV_BIT); + MLX4_GET(func_cap->flags0, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET); /* All other resources are allocated by the master, but we still report * 'num' and 'reserved' capabilities as follows: @@ -2523,7 +2522,7 @@ void mlx4_opreq_action(struct work_struct *work) if (err) { mlx4_err(dev, "Failed to retrieve required operation: %d\n", err); - return; + goto out; } MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET); MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET); @@ -2840,7 +2839,7 @@ int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv) memset(&func_cap, 0, sizeof(func_cap)); err = mlx4_QUERY_FUNC_CAP(dev, port, &func_cap); if (!err) - *phv = func_cap.flags & QUERY_FUNC_CAP_PHV_BIT; + *phv = func_cap.flags0 & QUERY_FUNC_CAP_PHV_BIT; return err; } EXPORT_SYMBOL(get_phv_bit); diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h index 08de5555c2f4..074631be342b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.h +++ b/drivers/net/ethernet/mellanox/mlx4/fw.h @@ -150,7 +150,7 @@ struct mlx4_func_cap { u32 qp1_proxy_qpn; u32 reserved_lkey; u8 physical_port; - u8 port_flags; + u8 flags0; u8 flags1; u64 phys_port_id; u32 extra_flags; diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c index 1a134e08f010..cfc2a7632201 100644 --- a/drivers/net/ethernet/mellanox/mlx4/intf.c +++ b/drivers/net/ethernet/mellanox/mlx4/intf.c @@ -217,6 +217,9 @@ void mlx4_unregister_device(struct mlx4_dev *dev) struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_interface *intf; + if (!(dev->persist->interface_state & MLX4_INTERFACE_STATE_UP)) + return; + mlx4_stop_catas_poll(dev); if (dev->persist->interface_state & MLX4_INTERFACE_STATE_DELETION && mlx4_is_slave(dev)) { diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index a7d3144c2388..b774ba64bd4b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -2295,6 +2295,7 @@ static int mlx4_allocate_default_counters(struct mlx4_dev *dev) if (!err || err == -ENOSPC) { priv->def_counter[port] = idx; + err = 0; } else if (err == -ENOENT) { err = 0; continue; @@ -2344,7 +2345,8 @@ int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); if (!err) *idx = get_param_l(&out_param); - + if (WARN_ON(err == -ENOSPC)) + err = -EINVAL; return err; } return __mlx4_counter_alloc(dev, idx); @@ -3854,45 +3856,53 @@ static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev) { struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); struct mlx4_dev *dev = persist->dev; - 
struct mlx4_priv *priv = mlx4_priv(dev); - int ret; - int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; - int total_vfs; + int err; mlx4_err(dev, "mlx4_pci_slot_reset was called\n"); - ret = pci_enable_device(pdev); - if (ret) { - mlx4_err(dev, "Can not re-enable device, ret=%d\n", ret); + err = pci_enable_device(pdev); + if (err) { + mlx4_err(dev, "Can not re-enable device, err=%d\n", err); return PCI_ERS_RESULT_DISCONNECT; } pci_set_master(pdev); pci_restore_state(pdev); pci_save_state(pdev); + return PCI_ERS_RESULT_RECOVERED; +} +static void mlx4_pci_resume(struct pci_dev *pdev) +{ + struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); + struct mlx4_dev *dev = persist->dev; + struct mlx4_priv *priv = mlx4_priv(dev); + int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; + int total_vfs; + int err; + + mlx4_err(dev, "%s was called\n", __func__); total_vfs = dev->persist->num_vfs; memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs)); mutex_lock(&persist->interface_state_mutex); if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) { - ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs, + err = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs, priv, 1); - if (ret) { - mlx4_err(dev, "%s: mlx4_load_one failed, ret=%d\n", - __func__, ret); + if (err) { + mlx4_err(dev, "%s: mlx4_load_one failed, err=%d\n", + __func__, err); goto end; } - ret = restore_current_port_types(dev, dev->persist-> + err = restore_current_port_types(dev, dev->persist-> curr_port_type, dev->persist-> curr_port_poss_type); - if (ret) - mlx4_err(dev, "could not restore original port types (%d)\n", ret); + if (err) + mlx4_err(dev, "could not restore original port types (%d)\n", err); } end: mutex_unlock(&persist->interface_state_mutex); - return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; } static void mlx4_shutdown(struct pci_dev *pdev) @@ -3909,6 +3919,7 @@ static void mlx4_shutdown(struct pci_dev *pdev) static const struct pci_error_handlers mlx4_err_handler = { .error_detected = mlx4_pci_err_detected, .slot_reset = mlx4_pci_slot_reset, + .resume = mlx4_pci_resume, }; static struct pci_driver mlx4_driver = { diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index 3bf63de3a725..15c8f53f2497 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -1109,7 +1109,7 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], struct mlx4_cmd_mailbox *mailbox; struct mlx4_mgm *mgm; u32 members_count; - int index, prev; + int index = -1, prev; int link = 0; int i; int err; @@ -1188,7 +1188,7 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], goto out; out: - if (prot == MLX4_PROT_ETH) { + if (prot == MLX4_PROT_ETH && index != -1) { /* manage the steering entry for promisc mode */ if (new_entry) err = new_steering_entry(dev, port, steer, @@ -1464,7 +1464,12 @@ EXPORT_SYMBOL_GPL(mlx4_multicast_detach); int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, u32 qpn, enum mlx4_net_trans_promisc_mode mode) { - struct mlx4_net_trans_rule rule; + struct mlx4_net_trans_rule rule = { + .queue_mode = MLX4_NET_TRANS_Q_FIFO, + .exclusive = 0, + .allow_loopback = 1, + }; + u64 *regid_p; switch (mode) { diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index db40387ffaf6..b0af462f5e04 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -143,9 +143,10 
@@ enum mlx4_resource { RES_MTT, RES_MAC, RES_VLAN, - RES_EQ, + RES_NPORT_ID, RES_COUNTER, RES_FS_RULE, + RES_EQ, MLX4_NUM_OF_RESOURCE_TYPE }; @@ -1312,8 +1313,6 @@ int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_cmd_info *cmd); int mlx4_common_set_vlan_fltr(struct mlx4_dev *dev, int function, int port, void *buf); -int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave, u32 in_mod, - struct mlx4_cmd_mailbox *outbox); int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index a9c4818448f9..d764081ef675 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -1155,24 +1155,13 @@ int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave, return err; } -int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave, - u32 in_mod, struct mlx4_cmd_mailbox *outbox) -{ - return mlx4_cmd_box(dev, 0, outbox->dma, in_mod, 0, - MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B, - MLX4_CMD_NATIVE); -} - int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd) { - if (slave != dev->caps.function) - return 0; - return mlx4_common_dump_eth_stats(dev, slave, - vhcr->in_modifier, outbox); + return 0; } int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid, diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 170a49a6803e..6466edfc833b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -918,11 +918,13 @@ static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port, spin_lock_irq(mlx4_tlock(dev)); r = find_res(dev, counter_index, RES_COUNTER); - if (!r || r->owner != slave) + if (!r || r->owner != slave) { ret = -EINVAL; - counter = container_of(r, struct res_counter, com); - if (!counter->port) - counter->port = port; + } else { + counter = container_of(r, struct res_counter, com); + if (!counter->port) + counter->port = port; + } spin_unlock_irq(mlx4_tlock(dev)); return ret; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 9ac14df0ca3b..3f98c8361ccd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -634,17 +634,43 @@ static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg); static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg); +static u16 msg_to_opcode(struct mlx5_cmd_msg *in) +{ + struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data); + + return be16_to_cpu(hdr->opcode); +} + +static void cb_timeout_handler(struct work_struct *work) +{ + struct delayed_work *dwork = container_of(work, struct delayed_work, + work); + struct mlx5_cmd_work_ent *ent = container_of(dwork, + struct mlx5_cmd_work_ent, + cb_timeout_work); + struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev, + cmd); + + ent->ret = -ETIMEDOUT; + mlx5_core_warn(dev, "%s(0x%x) timeout. 
Will cause a leak of a command resource\n", + mlx5_command_str(msg_to_opcode(ent->in)), + msg_to_opcode(ent->in)); + mlx5_cmd_comp_handler(dev, 1UL << ent->idx); +} + static void cmd_work_handler(struct work_struct *work) { struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work); struct mlx5_cmd *cmd = ent->cmd; struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd); + unsigned long cb_timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC); struct mlx5_cmd_layout *lay; struct semaphore *sem; unsigned long flags; int alloc_ret; int cmd_mode; + complete(&ent->handling); sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem; down(sem); if (!ent->page_queue) { @@ -691,6 +717,9 @@ static void cmd_work_handler(struct work_struct *work) ent->ts1 = ktime_get_ns(); cmd_mode = cmd->mode; + if (ent->callback) + schedule_delayed_work(&ent->cb_timeout_work, cb_timeout); + /* ring doorbell after the descriptor is valid */ mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx); wmb(); @@ -735,32 +764,36 @@ static const char *deliv_status_to_str(u8 status) } } -static u16 msg_to_opcode(struct mlx5_cmd_msg *in) -{ - struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data); - - return be16_to_cpu(hdr->opcode); -} - static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) { unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC); struct mlx5_cmd *cmd = &dev->cmd; int err; + if (!wait_for_completion_timeout(&ent->handling, timeout) && + cancel_work_sync(&ent->work)) { + ent->ret = -ECANCELED; + goto out_err; + } + if (cmd->mode == CMD_MODE_POLLING) { wait_for_completion(&ent->done); - err = ent->ret; - } else { - if (!wait_for_completion_timeout(&ent->done, timeout)) - err = -ETIMEDOUT; - else - err = 0; + } else if (!wait_for_completion_timeout(&ent->done, timeout)) { + ent->ret = -ETIMEDOUT; + mlx5_cmd_comp_handler(dev, 1UL << ent->idx); } + +out_err: + err = ent->ret; + if (err == -ETIMEDOUT) { mlx5_core_warn(dev, "%s(0x%x) timeout. 
Will cause a leak of a command resource\n", mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in)); + } else if (err == -ECANCELED) { + mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n", + mlx5_command_str(msg_to_opcode(ent->in)), + msg_to_opcode(ent->in)); } mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", err, deliv_status_to_str(ent->status), ent->status); @@ -805,9 +838,11 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, ent->token = token; + init_completion(&ent->handling); if (!callback) init_completion(&ent->done); + INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler); INIT_WORK(&ent->work, cmd_work_handler); if (page_queue) { cmd_work_handler(&ent->work); @@ -817,28 +852,28 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, goto out_free; } - if (!callback) { - err = wait_func(dev, ent); - if (err == -ETIMEDOUT) - goto out; - - ds = ent->ts2 - ent->ts1; - op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode); - if (op < ARRAY_SIZE(cmd->stats)) { - stats = &cmd->stats[op]; - spin_lock_irq(&stats->lock); - stats->sum += ds; - ++stats->n; - spin_unlock_irq(&stats->lock); - } - mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME, - "fw exec time for %s is %lld nsec\n", - mlx5_command_str(op), ds); - *status = ent->status; - free_cmd(ent); - } + if (callback) + goto out; + if (err == -ECANCELED) + goto out_free; - return err; + err = wait_func(dev, ent); + if (err == -ETIMEDOUT) + goto out_free; + + ds = ent->ts2 - ent->ts1; + op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode); + if (op < ARRAY_SIZE(cmd->stats)) { + stats = &cmd->stats[op]; + spin_lock_irq(&stats->lock); + stats->sum += ds; + ++stats->n; + spin_unlock_irq(&stats->lock); + } + mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME, + "fw exec time for %s is %lld nsec\n", + mlx5_command_str(op), ds); + *status = ent->status; out_free: free_cmd(ent); @@ -1230,41 +1265,30 @@ err_dbg: return err; } -void mlx5_cmd_use_events(struct mlx5_core_dev *dev) +static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode) { struct mlx5_cmd *cmd = &dev->cmd; int i; for (i = 0; i < cmd->max_reg_cmds; i++) down(&cmd->sem); - down(&cmd->pages_sem); - flush_workqueue(cmd->wq); - - cmd->mode = CMD_MODE_EVENTS; + cmd->mode = mode; up(&cmd->pages_sem); for (i = 0; i < cmd->max_reg_cmds; i++) up(&cmd->sem); } -void mlx5_cmd_use_polling(struct mlx5_core_dev *dev) +void mlx5_cmd_use_events(struct mlx5_core_dev *dev) { - struct mlx5_cmd *cmd = &dev->cmd; - int i; - - for (i = 0; i < cmd->max_reg_cmds; i++) - down(&cmd->sem); - - down(&cmd->pages_sem); - - flush_workqueue(cmd->wq); - cmd->mode = CMD_MODE_POLLING; + mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS); +} - up(&cmd->pages_sem); - for (i = 0; i < cmd->max_reg_cmds; i++) - up(&cmd->sem); +void mlx5_cmd_use_polling(struct mlx5_core_dev *dev) +{ + mlx5_cmd_change_mod(dev, CMD_MODE_POLLING); } static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg) @@ -1300,6 +1324,8 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec) struct semaphore *sem; ent = cmd->ent_arr[i]; + if (ent->callback) + cancel_delayed_work(&ent->cb_timeout_work); if (ent->page_queue) sem = &cmd->pages_sem; else diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 7a716733d9ca..717a381bcfd1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -543,7 +543,7 @@ enum 
mlx5e_link_mode { MLX5E_100GBASE_KR4 = 22, MLX5E_100GBASE_LR4 = 23, MLX5E_100BASE_TX = 24, - MLX5E_100BASE_T = 25, + MLX5E_1000BASE_T = 25, MLX5E_10GBASE_T = 26, MLX5E_25GBASE_CR = 27, MLX5E_25GBASE_KR = 28, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index c1dd75fe935f..392fa74f1952 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -138,10 +138,10 @@ static const struct { [MLX5E_100BASE_TX] = { .speed = 100, }, - [MLX5E_100BASE_T] = { - .supported = SUPPORTED_100baseT_Full, - .advertised = ADVERTISED_100baseT_Full, - .speed = 100, + [MLX5E_1000BASE_T] = { + .supported = SUPPORTED_1000baseT_Full, + .advertised = ADVERTISED_1000baseT_Full, + .speed = 1000, }, [MLX5E_10GBASE_T] = { .supported = SUPPORTED_10000baseT_Full, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 1341b1d3c421..7e2026429d26 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -124,7 +124,7 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq, * headers and occur before the data gather. * Therefore these headers must be copied into the WQE */ -#define MLX5E_MIN_INLINE ETH_HLEN +#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN) if (bf) { u16 ihs = skb_headlen(skb); @@ -136,7 +136,7 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq, return skb_headlen(skb); } - return MLX5E_MIN_INLINE; + return max(skb_network_offset(skb), MLX5E_MIN_INLINE); } static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs) @@ -290,7 +290,8 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) while ((sq->pc & wq->sz_m1) > sq->edge) mlx5e_send_nop(sq, false); - sq->bf_budget = bf ? 
sq->bf_budget - 1 : 0; + if (bf) + sq->bf_budget--; sq->stats.packets++; return NETDEV_TX_OK; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index f5deb642d0d6..94594d47cae6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -108,15 +108,21 @@ static int in_fatal(struct mlx5_core_dev *dev) void mlx5_enter_error_state(struct mlx5_core_dev *dev) { + mutex_lock(&dev->intf_state_mutex); if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) - return; + goto unlock; mlx5_core_err(dev, "start\n"); - if (pci_channel_offline(dev->pdev) || in_fatal(dev)) + if (pci_channel_offline(dev->pdev) || in_fatal(dev)) { dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; + trigger_cmd_completions(dev); + } mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0); mlx5_core_err(dev, "end\n"); + +unlock: + mutex_unlock(&dev->intf_state_mutex); } static void mlx5_handle_bad_state(struct mlx5_core_dev *dev) @@ -245,7 +251,6 @@ static void poll_health(unsigned long data) u32 count; if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { - trigger_cmd_completions(dev); mod_timer(&health->timer, get_next_poll_jiffies()); return; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 35bcc6dbada9..e88605de84cc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -933,7 +933,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv) if (err) { dev_err(&dev->pdev->dev, "Firmware over %d MS in pre-initializing state, aborting\n", FW_PRE_INIT_TIMEOUT_MILI); - goto out; + goto out_err; } err = mlx5_cmd_init(dev); @@ -1276,15 +1276,43 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev, dev_info(&pdev->dev, "%s was called\n", __func__); mlx5_enter_error_state(dev); mlx5_unload_one(dev, priv); + pci_save_state(pdev); mlx5_pci_disable_device(dev); return state == pci_channel_io_perm_failure ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET; } +/* wait for the device to show vital signs by waiting + * for the health counter to start counting. + */ +static int wait_vital(struct pci_dev *pdev) +{ + struct mlx5_core_dev *dev = pci_get_drvdata(pdev); + struct mlx5_core_health *health = &dev->priv.health; + const int niter = 100; + u32 last_count = 0; + u32 count; + int i; + + for (i = 0; i < niter; i++) { + count = ioread32be(health->health_counter); + if (count && count != 0xffffffff) { + if (last_count && last_count != count) { + dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i); + return 0; + } + last_count = count; + } + msleep(50); + } + + return -ETIMEDOUT; +} + static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev) { struct mlx5_core_dev *dev = pci_get_drvdata(pdev); - int err = 0; + int err; dev_info(&pdev->dev, "%s was called\n", __func__); @@ -1294,11 +1322,16 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev) , __func__, err); return PCI_ERS_RESULT_DISCONNECT; } + pci_set_master(pdev); - pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); - return err ? 
PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; + if (wait_vital(pdev)) { + dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__); + return PCI_ERS_RESULT_DISCONNECT; + } + + return PCI_ERS_RESULT_RECOVERED; } void mlx5_disable_device(struct mlx5_core_dev *dev) @@ -1306,48 +1339,6 @@ void mlx5_disable_device(struct mlx5_core_dev *dev) mlx5_pci_err_detected(dev->pdev, 0); } -/* wait for the device to show vital signs. For now we check - * that we can read the device ID and that the health buffer - * shows a non zero value which is different than 0xffffffff - */ -static void wait_vital(struct pci_dev *pdev) -{ - struct mlx5_core_dev *dev = pci_get_drvdata(pdev); - struct mlx5_core_health *health = &dev->priv.health; - const int niter = 100; - u32 count; - u16 did; - int i; - - /* Wait for firmware to be ready after reset */ - msleep(1000); - for (i = 0; i < niter; i++) { - if (pci_read_config_word(pdev, 2, &did)) { - dev_warn(&pdev->dev, "failed reading config word\n"); - break; - } - if (did == pdev->device) { - dev_info(&pdev->dev, "device ID correctly read after %d iterations\n", i); - break; - } - msleep(50); - } - if (i == niter) - dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__); - - for (i = 0; i < niter; i++) { - count = ioread32be(health->health_counter); - if (count && count != 0xffffffff) { - dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i); - break; - } - msleep(50); - } - - if (i == niter) - dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__); -} - static void mlx5_pci_resume(struct pci_dev *pdev) { struct mlx5_core_dev *dev = pci_get_drvdata(pdev); @@ -1356,9 +1347,6 @@ static void mlx5_pci_resume(struct pci_dev *pdev) dev_info(&pdev->dev, "%s was called\n", __func__); - pci_save_state(pdev); - wait_vital(pdev); - err = mlx5_load_one(dev, priv); if (err) dev_err(&pdev->dev, "%s: mlx5_load_one failed with error code: %d\n" diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index 4d3377b12657..9a6bd830d104 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -243,6 +243,7 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr) static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id) { struct page *page; + u64 zero_addr = 1; u64 addr; int err; int nid = dev_to_node(&dev->pdev->dev); @@ -252,26 +253,35 @@ static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id) mlx5_core_warn(dev, "failed to allocate page\n"); return -ENOMEM; } +map: addr = dma_map_page(&dev->pdev->dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL); if (dma_mapping_error(&dev->pdev->dev, addr)) { mlx5_core_warn(dev, "failed dma mapping page\n"); err = -ENOMEM; - goto out_alloc; + goto err_mapping; } + + /* Firmware doesn't support page with physical address 0 */ + if (addr == 0) { + zero_addr = addr; + goto map; + } + err = insert_page(dev, addr, page, func_id); if (err) { mlx5_core_err(dev, "failed to track allocated page\n"); - goto out_mapping; + dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, + DMA_BIDIRECTIONAL); } - return 0; - -out_mapping: - dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL); +err_mapping: + if (err) + __free_page(page); -out_alloc: - __free_page(page); + if (zero_addr == 0) + dma_unmap_page(&dev->pdev->dev, zero_addr, PAGE_SIZE, + DMA_BIDIRECTIONAL); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c 
b/drivers/net/ethernet/mellanox/mlx5/core/qp.c index 30e2ba3f5f16..a4f8c1b99f71 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c @@ -393,7 +393,7 @@ int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn) if (out.hdr.status) err = mlx5_cmd_status_to_err(&out.hdr); else - *xrcdn = be32_to_cpu(out.xrcdn); + *xrcdn = be32_to_cpu(out.xrcdn) & 0xffffff; return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c index ce21ee5b2357..821a087c7ae2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c @@ -75,14 +75,14 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node); if (err) { - mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err); + mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err); return err; } err = mlx5_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq), &wq_ctrl->buf, param->buf_numa_node); if (err) { - mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err); + mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err); goto err_db_free; } @@ -111,14 +111,14 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node); if (err) { - mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err); + mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err); return err; } err = mlx5_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq), &wq_ctrl->buf, param->buf_numa_node); if (err) { - mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err); + mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err); goto err_db_free; } @@ -148,13 +148,14 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node); if (err) { - mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err); + mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err); return err; } - err = mlx5_buf_alloc(mdev, mlx5_wq_ll_get_byte_size(wq), &wq_ctrl->buf); + err = mlx5_buf_alloc_node(mdev, mlx5_wq_ll_get_byte_size(wq), + &wq_ctrl->buf, param->buf_numa_node); if (err) { - mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err); + mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err); goto err_db_free; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index de69e719dc9d..75a590d1ae40 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -215,7 +215,7 @@ mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q) { int index = q->producer_counter & (q->count - 1); - if ((q->producer_counter - q->consumer_counter) == q->count) + if ((u16) (q->producer_counter - q->consumer_counter) == q->count) return NULL; return mlxsw_pci_queue_elem_info_get(q, index); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h index 726f5435b32f..ae65b9940aed 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/port.h +++ b/drivers/net/ethernet/mellanox/mlxsw/port.h @@ -49,7 +49,7 @@ #define MLXSW_PORT_MID 0xd000 #define MLXSW_PORT_MAX_PHY_PORTS 0x40 -#define MLXSW_PORT_MAX_PORTS MLXSW_PORT_MAX_PHY_PORTS +#define MLXSW_PORT_MAX_PORTS (MLXSW_PORT_MAX_PHY_PORTS + 1) #define MLXSW_PORT_DEVID_BITS_OFFSET 10 #define 
MLXSW_PORT_PHY_BITS_OFFSET 4 diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index cb165c2d4803..b23f508de811 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -399,7 +399,11 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb, } mlxsw_sp_txhdr_construct(skb, &tx_info); - len = skb->len; + /* TX header is consumed by HW on the way so we shouldn't count its + * bytes as being sent. + */ + len = skb->len - MLXSW_TXHDR_LEN; + /* Due to a race we might fail here because of a full queue. In that * unlikely case we simply drop the packet. */ @@ -1100,7 +1104,8 @@ static int mlxsw_sp_port_get_settings(struct net_device *dev, cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) | mlxsw_sp_from_ptys_supported_link(eth_proto_cap) | - SUPPORTED_Pause | SUPPORTED_Asym_Pause; + SUPPORTED_Pause | SUPPORTED_Asym_Pause | + SUPPORTED_Autoneg; cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin); mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper, cmd); @@ -1256,7 +1261,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port) /* Each packet needs to have a Tx header (metadata) on top all other * headers. */ - dev->hard_header_len += MLXSW_TXHDR_LEN; + dev->needed_headroom = MLXSW_TXHDR_LEN; err = mlxsw_sp_port_module_check(mlxsw_sp_port, &usable); if (err) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index d4c4c2b5156c..a1df4227ed9d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -87,14 +87,14 @@ static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port, int err; switch (state) { - case BR_STATE_DISABLED: /* fall-through */ case BR_STATE_FORWARDING: spms_state = MLXSW_REG_SPMS_STATE_FORWARDING; break; - case BR_STATE_LISTENING: /* fall-through */ case BR_STATE_LEARNING: spms_state = MLXSW_REG_SPMS_STATE_LEARNING; break; + case BR_STATE_LISTENING: /* fall-through */ + case BR_STATE_DISABLED: /* fall-through */ case BR_STATE_BLOCKING: spms_state = MLXSW_REG_SPMS_STATE_DISCARDING; break; diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index fb2d9a82ce3d..4d7b3edf6662 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -993,7 +993,7 @@ static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port) /* Each packet needs to have a Tx header (metadata) on top all other * headers. 
*/ - dev->hard_header_len += MLXSW_TXHDR_LEN; + dev->needed_headroom = MLXSW_TXHDR_LEN; err = mlxsw_sx_port_module_check(mlxsw_sx_port, &usable); if (err) { @@ -1074,6 +1074,7 @@ err_port_stp_state_set: err_port_admin_status_set: err_port_mtu_set: err_port_speed_set: + mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT); err_port_swid_set: err_port_system_port_mapping_set: port_not_usable: diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c index 09d2e16fd6b0..cb0102dd7f70 100644 --- a/drivers/net/ethernet/micrel/ks8842.c +++ b/drivers/net/ethernet/micrel/ks8842.c @@ -561,8 +561,8 @@ static int __ks8842_start_new_rx_dma(struct net_device *netdev) sg_init_table(sg, 1); sg_dma_address(sg) = dma_map_single(adapter->dev, ctl->skb->data, DMA_BUFFER_SIZE, DMA_FROM_DEVICE); - err = dma_mapping_error(adapter->dev, sg_dma_address(sg)); - if (unlikely(err)) { + if (dma_mapping_error(adapter->dev, sg_dma_address(sg))) { + err = -ENOMEM; sg_dma_address(sg) = 0; goto out; } @@ -572,8 +572,10 @@ static int __ks8842_start_new_rx_dma(struct net_device *netdev) ctl->adesc = dmaengine_prep_slave_sg(ctl->chan, sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT); - if (!ctl->adesc) + if (!ctl->adesc) { + err = -ENOMEM; goto out; + } ctl->adesc->callback_param = netdev; ctl->adesc->callback = ks8842_dma_rx_cb; @@ -584,7 +586,7 @@ static int __ks8842_start_new_rx_dma(struct net_device *netdev) goto out; } - return err; + return 0; out: if (sg_dma_address(sg)) dma_unmap_single(adapter->dev, sg_dma_address(sg), diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index a10c928bbd6b..374e691b11da 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -460,9 +460,9 @@ static int moxart_mac_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ndev->base_addr = res->start; priv->base = devm_ioremap_resource(p_dev, res); - ret = IS_ERR(priv->base); - if (ret) { + if (IS_ERR(priv->base)) { dev_err(p_dev, "devm_ioremap_resource failed\n"); + ret = PTR_ERR(priv->base); goto init_fail; } @@ -541,7 +541,7 @@ static int moxart_remove(struct platform_device *pdev) struct net_device *ndev = platform_get_drvdata(pdev); unregister_netdev(ndev); - free_irq(ndev->irq, ndev); + devm_free_irq(&pdev->dev, ndev->irq, ndev); moxart_mac_free_memory(ndev); free_netdev(ndev); diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c index acf3f11e38cc..68d2f31921ff 100644 --- a/drivers/net/ethernet/natsemi/jazzsonic.c +++ b/drivers/net/ethernet/natsemi/jazzsonic.c @@ -247,13 +247,15 @@ static int jazz_sonic_probe(struct platform_device *pdev) goto out; err = register_netdev(dev); if (err) - goto out1; + goto undo_probe1; printk("%s: MAC %pM IRQ %d\n", dev->name, dev->dev_addr, dev->irq); return 0; -out1: +undo_probe1: + dma_free_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), + lp->descriptors, lp->descriptors_laddr); release_mem_region(dev->base_addr, SONIC_MEM_SIZE); out: free_netdev(dev); diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 8b63c9d183a2..c677b69bbb0b 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -400,7 +400,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, u8 xmit_type; u16 idx; u16 hlen; - bool data_split; + bool data_split = false; /* Get tx-queue 
context and netdev index */ txq_index = skb_get_queue_mapping(skb); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 7f7aea9758e7..75ac5cc2fc23 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -3609,7 +3609,7 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev) ahw->diag_cnt = 0; ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST); if (ret) - goto fail_diag_irq; + goto fail_mbx_args; if (adapter->flags & QLCNIC_MSIX_ENABLED) intrpt_id = ahw->intr_tbl[0].id; @@ -3639,6 +3639,8 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev) done: qlcnic_free_mbx_args(&cmd); + +fail_mbx_args: qlcnic_83xx_diag_free_res(netdev, drv_sds_rings); fail_diag_irq: diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 98042a3701b5..621eac53ab01 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -2220,7 +2220,7 @@ void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring) if (!opcode) return; - ring = QLCNIC_FETCH_RING_ID(qlcnic_83xx_hndl(sts_data[0])); + ring = QLCNIC_FETCH_RING_ID(sts_data[0]); qlcnic_83xx_process_rcv_diag(adapter, ring, sts_data); desc = &sds_ring->desc_head[consumer]; desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c index e5ea8e972b91..5174e0bd75d1 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c @@ -1419,6 +1419,7 @@ void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter) struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump; struct pci_dev *pdev = adapter->pdev; bool extended = false; + int ret; prev_version = adapter->fw_version; current_version = qlcnic_83xx_get_fw_version(adapter); @@ -1429,8 +1430,11 @@ void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter) if (qlcnic_83xx_md_check_extended_dump_capability(adapter)) extended = !qlcnic_83xx_extend_md_capab(adapter); - if (!qlcnic_fw_cmd_get_minidump_temp(adapter)) - dev_info(&pdev->dev, "Supports FW dump capability\n"); + ret = qlcnic_fw_cmd_get_minidump_temp(adapter); + if (ret) + return; + + dev_info(&pdev->dev, "Supports FW dump capability\n"); /* Once we have minidump template with extended iSCSI dump * capability, update the minidump capture mask to 0x1f as diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index fedfd94699cb..5b6320f9c935 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -1528,6 +1528,8 @@ static int ravb_close(struct net_device *ndev) priv->phydev = NULL; } + if (priv->chip_id == RCAR_GEN3) + free_irq(priv->emac_irq, ndev); free_irq(ndev->irq, ndev); napi_disable(&priv->napi[RAVB_NC]); diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 6dcd436e6e32..e289cb47e6ab 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -1304,13 +1304,14 @@ static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask) } #if BITS_PER_LONG == 64 + BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 2); mask[0] = raw_mask[0]; mask[1] = raw_mask[1]; #else + BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 3); 
mask[0] = raw_mask[0] & 0xffffffff; mask[1] = raw_mask[0] >> 32; mask[2] = raw_mask[1] & 0xffffffff; - mask[3] = raw_mask[1] >> 32; #endif } diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index a3c42a376741..167ccb27e2f1 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -479,6 +479,9 @@ efx_copy_channel(const struct efx_channel *old_channel) *channel = *old_channel; channel->napi_dev = NULL; + INIT_HLIST_NODE(&channel->napi_str.napi_hash_node); + channel->napi_str.napi_id = 0; + channel->napi_str.state = 0; memset(&channel->eventq, 0, sizeof(channel->eventq)); for (j = 0; j < EFX_TXQ_TYPES; j++) { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c index b1e5f24708c9..05e46a82cdb1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c @@ -53,7 +53,17 @@ static int dwmac_generic_probe(struct platform_device *pdev) return ret; } - return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + if (ret) + goto err_exit; + + return 0; + +err_exit: + if (plat_dat->exit) + plat_dat->exit(pdev, plat_dat->bsp_priv); + + return ret; } static const struct of_device_id dwmac_generic_match[] = { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index 68a58333bd74..f2f24f99d086 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -600,7 +600,16 @@ static int rk_gmac_probe(struct platform_device *pdev) if (ret) return ret; - return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + if (ret) + goto err_gmac_exit; + + return 0; + +err_gmac_exit: + rk_gmac_exit(pdev, plat_dat->bsp_priv); + + return ret; } static const struct of_device_id rk_gmac_dwmac_match[] = { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c index 58c05acc2aab..a1ce018bf844 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c @@ -365,7 +365,16 @@ static int sti_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + if (ret) + goto err_dwmac_exit; + + return 0; + +err_dwmac_exit: + sti_dwmac_exit(pdev, plat_dat->bsp_priv); + + return ret; } static const struct sti_dwmac_of_data stih4xx_dwmac_data = { diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index 062bce9acde6..bfe7b55f9714 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -4980,7 +4980,7 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) cas_cacheline_size)) { dev_err(&pdev->dev, "Could not set PCI cache " "line size\n"); - goto err_write_cacheline; + goto err_out_free_res; } } #endif @@ -5151,7 +5151,6 @@ err_out_iounmap: err_out_free_res: pci_release_regions(pdev); -err_write_cacheline: /* Try to restore it in case the error occurred after we * set it. 
*/ diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c index e9cc61e1ec74..7b0dfdced517 100644 --- a/drivers/net/ethernet/ti/cpsw-phy-sel.c +++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c @@ -154,9 +154,12 @@ void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave) } dev = bus_find_device(&platform_bus_type, NULL, node, match); + of_node_put(node); priv = dev_get_drvdata(dev); priv->cpsw_phy_sel(priv, phy_mode, slave); + + put_device(dev); } EXPORT_SYMBOL_GPL(cpsw_phy_sel); diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 9a9cb6b11e4c..6ee0bd72d89b 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -2060,7 +2060,11 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, slave_data->phy_node = of_parse_phandle(slave_node, "phy-handle", 0); parp = of_get_property(slave_node, "phy_id", &lenp); - if (of_phy_is_fixed_link(slave_node)) { + if (slave_data->phy_node) { + dev_dbg(&pdev->dev, + "slave[%d] using phy-handle=\"%s\"\n", + i, slave_data->phy_node->full_name); + } else if (of_phy_is_fixed_link(slave_node)) { struct device_node *phy_node; struct phy_device *phy_dev; @@ -2097,7 +2101,9 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, PHY_ID_FMT, mdio->name, phyid); put_device(&mdio->dev); } else { - dev_err(&pdev->dev, "No slave[%d] phy_id or fixed-link property\n", i); + dev_err(&pdev->dev, + "No slave[%d] phy_id, phy-handle, or fixed-link property\n", + i); goto no_phy_slave; } slave_data->phy_if = of_get_phy_mode(slave_node); @@ -2526,12 +2532,14 @@ static int cpsw_probe(struct platform_device *pdev) ret = cpsw_probe_dual_emac(pdev, priv); if (ret) { cpsw_err(priv, probe, "error probe slave 2 emac interface\n"); - goto clean_ale_ret; + goto clean_unregister_netdev_ret; } } return 0; +clean_unregister_netdev_ret: + unregister_netdev(ndev); clean_ale_ret: cpsw_ale_destroy(priv->ale); clean_dma_ret: diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 8ecb24186b7f..e4c4747bdf32 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -1512,7 +1512,10 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd) /* TODO: Add phy read and write and private statistics get feature */ - return phy_mii_ioctl(priv->phydev, ifrq, cmd); + if (priv->phydev) + return phy_mii_ioctl(priv->phydev, ifrq, cmd); + else + return -EOPNOTSUPP; } static int match_first_device(struct device *dev, void *data) @@ -1885,8 +1888,6 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv) pdata->hw_ram_addr = auxdata->hw_ram_addr; } - pdev->dev.platform_data = pdata; - return pdata; } diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 7f1a57bb2ab1..44870fc37f54 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -1602,9 +1602,9 @@ static int axienet_probe(struct platform_device *pdev) /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */ np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0); - if (IS_ERR(np)) { + if (!np) { dev_err(&pdev->dev, "could not find DMA node\n"); - ret = PTR_ERR(np); + ret = -ENODEV; goto free_netdev; } ret = of_address_to_resource(np, 0, &dmares); diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index f0961cbaf87e..ec13e2ae6d16 100644 --- 
a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -781,7 +781,9 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb, fl6->daddr = geneve->remote.sin6.sin6_addr; } - if (ipv6_stub->ipv6_dst_lookup(geneve->net, gs6->sock->sk, &dst, fl6)) { + dst = ipv6_stub->ipv6_dst_lookup_flow(geneve->net, gs6->sock->sk, fl6, + NULL); + if (IS_ERR(dst)) { netdev_dbg(dev, "no route to %pI6\n", &fl6->daddr); return ERR_PTR(-ENETUNREACH); } @@ -1340,6 +1342,7 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name, { struct nlattr *tb[IFLA_MAX + 1]; struct net_device *dev; + LIST_HEAD(list_kill); int err; memset(tb, 0, sizeof(tb)); @@ -1350,8 +1353,10 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name, err = geneve_configure(net, dev, &geneve_remote_unspec, 0, 0, 0, htons(dst_port), true); - if (err) - goto err; + if (err) { + free_netdev(dev); + return ERR_PTR(err); + } /* openvswitch users expect packet sizes to be unrestricted, * so set the largest MTU we can. @@ -1360,10 +1365,15 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name, if (err) goto err; + err = rtnl_configure_link(dev, NULL); + if (err < 0) + goto err; + return dev; err: - free_netdev(dev); + geneve_dellink(dev, &list_kill); + unregister_netdevice_many(&list_kill); return ERR_PTR(err); } EXPORT_SYMBOL_GPL(geneve_dev_create_fb); diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index d5d4d109ee10..0c4e1ef80355 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -305,6 +305,8 @@ static void macvlan_process_broadcast(struct work_struct *w) rcu_read_unlock(); + if (src) + dev_put(src->dev); kfree_skb(skb); cond_resched(); @@ -312,6 +314,7 @@ static void macvlan_process_broadcast(struct work_struct *w) } static void macvlan_broadcast_enqueue(struct macvlan_port *port, + const struct macvlan_dev *src, struct sk_buff *skb) { struct sk_buff *nskb; @@ -321,8 +324,12 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port, if (!nskb) goto err; + MACVLAN_SKB_CB(nskb)->src = src; + spin_lock(&port->bc_queue.lock); if (skb_queue_len(&port->bc_queue) < MACVLAN_BC_QUEUE_LEN) { + if (src) + dev_hold(src->dev); __skb_queue_tail(&port->bc_queue, nskb); err = 0; } @@ -432,8 +439,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) goto out; } - MACVLAN_SKB_CB(skb)->src = src; - macvlan_broadcast_enqueue(port, skb); + macvlan_broadcast_enqueue(port, src, skb); return RX_HANDLER_PASS; } diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index ed96fdefd8e5..3a76ca395103 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -373,7 +373,7 @@ static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb) goto wake_up; } - kfree_skb(skb); + consume_skb(skb); while (segs) { struct sk_buff *nskb = segs->next; diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index 37333d38b576..f88e7cc813ef 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -198,7 +198,7 @@ static int at803x_probe(struct phy_device *phydev) if (!priv) return -ENOMEM; - gpiod_reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); + gpiod_reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(gpiod_reset)) return PTR_ERR(gpiod_reset); @@ -274,10 +274,10 @@ static void at803x_link_change_notify(struct phy_device *phydev) at803x_context_save(phydev, &context); - gpiod_set_value(priv->gpiod_reset, 0); - msleep(1); gpiod_set_value(priv->gpiod_reset, 1); msleep(1); + 
gpiod_set_value(priv->gpiod_reset, 0); + msleep(1); at803x_context_restore(phydev, &context); diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c index bffa70e46202..b7bc27a89454 100644 --- a/drivers/net/phy/bcm7xxx.c +++ b/drivers/net/phy/bcm7xxx.c @@ -270,7 +270,7 @@ static int bcm7xxx_config_init(struct phy_device *phydev) phy_write(phydev, MII_BCM7XXX_100TX_FALSE_CAR, 0x7555); /* reset shadow mode 2 */ - ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, MII_BCM7XXX_SHD_MODE_2, 0); + ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 0, MII_BCM7XXX_SHD_MODE_2); if (ret < 0) return ret; diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index e6f564d50663..847c9fc10f9a 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -1107,7 +1107,7 @@ static struct dp83640_clock *dp83640_clock_get_bus(struct mii_bus *bus) goto out; } dp83640_clock_init(clock, bus); - list_add_tail(&phyter_clocks, &clock->list); + list_add_tail(&clock->list, &phyter_clocks); out: mutex_unlock(&phyter_clocks_lock); diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c index afd76e07088b..0e8dd446e8c1 100644 --- a/drivers/net/phy/mdio-sun4i.c +++ b/drivers/net/phy/mdio-sun4i.c @@ -134,6 +134,7 @@ static int sun4i_mdio_probe(struct platform_device *pdev) } dev_info(&pdev->dev, "no regulator found\n"); + data->regulator = NULL; } else { ret = regulator_enable(data->regulator); if (ret) @@ -149,7 +150,8 @@ static int sun4i_mdio_probe(struct platform_device *pdev) return 0; err_out_disable_regulator: - regulator_disable(data->regulator); + if (data->regulator) + regulator_disable(data->regulator); err_out_free_mdiobus: mdiobus_free(bus); return ret; diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index ba84fc3637b1..d31e944b9c24 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -482,9 +482,17 @@ static int ksz9031_config_init(struct phy_device *phydev) "txd2-skew-ps", "txd3-skew-ps" }; static const char *control_skews[2] = {"txen-skew-ps", "rxdv-skew-ps"}; + const struct device *dev_walker; - if (!of_node && dev->parent->of_node) - of_node = dev->parent->of_node; + /* The Micrel driver has a deprecated option to place phy OF + * properties in the MAC node. Walk up the tree of devices to + * find a device with an OF node. 
+ */ + dev_walker = &phydev->dev; + do { + of_node = dev_walker->of_node; + dev_walker = dev_walker->parent; + } while (!of_node && dev_walker); if (of_node) { ksz9031_of_load_skew_values(phydev, of_node, @@ -584,6 +592,21 @@ ksz9021_wr_mmd_phyreg(struct phy_device *phydev, int ptrad, int devnum, { } +static int kszphy_resume(struct phy_device *phydev) +{ + int value; + + mutex_lock(&phydev->lock); + + value = phy_read(phydev, MII_BMCR); + phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN); + + kszphy_config_intr(phydev); + mutex_unlock(&phydev->lock); + + return 0; +} + static int kszphy_probe(struct phy_device *phydev) { const struct kszphy_type *type = phydev->drv->driver_data; @@ -775,7 +798,7 @@ static struct phy_driver ksphy_driver[] = { .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, .suspend = genphy_suspend, - .resume = genphy_resume, + .resume = kszphy_resume, .driver = { .owner = THIS_MODULE,}, }, { .phy_id = PHY_ID_KSZ8061, diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 7d2cf015c5e7..8d09d21f4cbf 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -699,25 +699,29 @@ void phy_change(struct work_struct *work) struct phy_device *phydev = container_of(work, struct phy_device, phy_queue); - if (phydev->drv->did_interrupt && - !phydev->drv->did_interrupt(phydev)) - goto ignore; + if (phy_interrupt_is_valid(phydev)) { + if (phydev->drv->did_interrupt && + !phydev->drv->did_interrupt(phydev)) + goto ignore; - if (phy_disable_interrupts(phydev)) - goto phy_err; + if (phy_disable_interrupts(phydev)) + goto phy_err; + } mutex_lock(&phydev->lock); if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state)) phydev->state = PHY_CHANGELINK; mutex_unlock(&phydev->lock); - atomic_dec(&phydev->irq_disable); - enable_irq(phydev->irq); + if (phy_interrupt_is_valid(phydev)) { + atomic_dec(&phydev->irq_disable); + enable_irq(phydev->irq); - /* Reenable interrupts */ - if (PHY_HALTED != phydev->state && - phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED)) - goto irq_enable_err; + /* Reenable interrupts */ + if (PHY_HALTED != phydev->state && + phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED)) + goto irq_enable_err; + } /* reschedule state queue work to run as soon as possible */ cancel_delayed_work_sync(&phydev->state_queue); @@ -1021,9 +1025,10 @@ void phy_state_machine(struct work_struct *work) void phy_mac_interrupt(struct phy_device *phydev, int new_link) { - cancel_work_sync(&phydev->phy_queue); phydev->link = new_link; - schedule_work(&phydev->phy_queue); + + /* Trigger a state machine change */ + queue_work(system_power_efficient_wq, &phydev->phy_queue); } EXPORT_SYMBOL(phy_mac_interrupt); diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index b15eceb8b442..3b2b853ee3d3 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -522,6 +522,7 @@ struct phy_device *phy_connect(struct net_device *dev, const char *bus_id, phydev = to_phy_device(d); rc = phy_connect_direct(dev, phydev, handler, interface); + put_device(d); if (rc) return ERR_PTR(rc); @@ -721,6 +722,7 @@ struct phy_device *phy_attach(struct net_device *dev, const char *bus_id, phydev = to_phy_device(d); rc = phy_attach_direct(dev, phydev, phydev->dev_flags, interface); + put_device(d); if (rc) return ERR_PTR(rc); diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index d3d59122a357..27fd5640a273 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -71,41 +71,6 @@ struct pcpu_dstats { struct u64_stats_sync syncp; }; 
-static struct dst_entry *vrf_ip_check(struct dst_entry *dst, u32 cookie) -{ - return dst; -} - -static int vrf_ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb) -{ - return ip_local_out(net, sk, skb); -} - -static unsigned int vrf_v4_mtu(const struct dst_entry *dst) -{ - /* TO-DO: return max ethernet size? */ - return dst->dev->mtu; -} - -static void vrf_dst_destroy(struct dst_entry *dst) -{ - /* our dst lives forever - or until the device is closed */ -} - -static unsigned int vrf_default_advmss(const struct dst_entry *dst) -{ - return 65535 - 40; -} - -static struct dst_ops vrf_dst_ops = { - .family = AF_INET, - .local_out = vrf_ip_local_out, - .check = vrf_ip_check, - .mtu = vrf_v4_mtu, - .destroy = vrf_dst_destroy, - .default_advmss = vrf_default_advmss, -}; - /* neighbor handling is done with actual device; do not want * to flip skb->dev for those ndisc packets. This really fails * for multiple next protocols (e.g., NEXTHDR_HOP). But it is @@ -363,46 +328,6 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev) } #if IS_ENABLED(CONFIG_IPV6) -static struct dst_entry *vrf_ip6_check(struct dst_entry *dst, u32 cookie) -{ - return dst; -} - -static struct dst_ops vrf_dst_ops6 = { - .family = AF_INET6, - .local_out = ip6_local_out, - .check = vrf_ip6_check, - .mtu = vrf_v4_mtu, - .destroy = vrf_dst_destroy, - .default_advmss = vrf_default_advmss, -}; - -static int init_dst_ops6_kmem_cachep(void) -{ - vrf_dst_ops6.kmem_cachep = kmem_cache_create("vrf_ip6_dst_cache", - sizeof(struct rt6_info), - 0, - SLAB_HWCACHE_ALIGN, - NULL); - - if (!vrf_dst_ops6.kmem_cachep) - return -ENOMEM; - - return 0; -} - -static void free_dst_ops6_kmem_cachep(void) -{ - kmem_cache_destroy(vrf_dst_ops6.kmem_cachep); -} - -static int vrf_input6(struct sk_buff *skb) -{ - skb->dev->stats.rx_errors++; - kfree_skb(skb); - return 0; -} - /* modelled after ip6_finish_output2 */ static int vrf_finish_output6(struct net *net, struct sock *sk, struct sk_buff *skb) @@ -445,67 +370,34 @@ static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb) !(IP6CB(skb)->flags & IP6SKB_REROUTED)); } -static void vrf_rt6_destroy(struct net_vrf *vrf) +static void vrf_rt6_release(struct net_vrf *vrf) { - dst_destroy(&vrf->rt6->dst); - free_percpu(vrf->rt6->rt6i_pcpu); + dst_release(&vrf->rt6->dst); vrf->rt6 = NULL; } static int vrf_rt6_create(struct net_device *dev) { struct net_vrf *vrf = netdev_priv(dev); - struct dst_entry *dst; + struct net *net = dev_net(dev); struct rt6_info *rt6; - int cpu; int rc = -ENOMEM; - rt6 = dst_alloc(&vrf_dst_ops6, dev, 0, - DST_OBSOLETE_NONE, - (DST_HOST | DST_NOPOLICY | DST_NOXFRM)); + rt6 = ip6_dst_alloc(net, dev, + DST_HOST | DST_NOPOLICY | DST_NOXFRM | DST_NOCACHE); if (!rt6) goto out; - dst = &rt6->dst; - - rt6->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_KERNEL); - if (!rt6->rt6i_pcpu) { - dst_destroy(dst); - goto out; - } - for_each_possible_cpu(cpu) { - struct rt6_info **p = per_cpu_ptr(rt6->rt6i_pcpu, cpu); - *p = NULL; - } - - memset(dst + 1, 0, sizeof(*rt6) - sizeof(*dst)); - - INIT_LIST_HEAD(&rt6->rt6i_siblings); - INIT_LIST_HEAD(&rt6->rt6i_uncached); - - rt6->dst.input = vrf_input6; rt6->dst.output = vrf_output6; - - rt6->rt6i_table = fib6_get_table(dev_net(dev), vrf->tb_id); - - atomic_set(&rt6->dst.__refcnt, 2); - + rt6->rt6i_table = fib6_get_table(net, vrf->tb_id); + dst_hold(&rt6->dst); vrf->rt6 = rt6; rc = 0; out: return rc; } #else -static int init_dst_ops6_kmem_cachep(void) -{ - return 0; -} - -static void 
free_dst_ops6_kmem_cachep(void) -{ -} - -static void vrf_rt6_destroy(struct net_vrf *vrf) +static void vrf_rt6_release(struct net_vrf *vrf) { } @@ -577,11 +469,11 @@ static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb) !(IPCB(skb)->flags & IPSKB_REROUTED)); } -static void vrf_rtable_destroy(struct net_vrf *vrf) +static void vrf_rtable_release(struct net_vrf *vrf) { struct dst_entry *dst = (struct dst_entry *)vrf->rth; - dst_destroy(dst); + dst_release(dst); vrf->rth = NULL; } @@ -590,22 +482,10 @@ static struct rtable *vrf_rtable_create(struct net_device *dev) struct net_vrf *vrf = netdev_priv(dev); struct rtable *rth; - rth = dst_alloc(&vrf_dst_ops, dev, 2, - DST_OBSOLETE_NONE, - (DST_HOST | DST_NOPOLICY | DST_NOXFRM)); + rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0); if (rth) { rth->dst.output = vrf_output; - rth->rt_genid = rt_genid_ipv4(dev_net(dev)); - rth->rt_flags = 0; - rth->rt_type = RTN_UNICAST; - rth->rt_is_input = 0; - rth->rt_iif = 0; - rth->rt_pmtu = 0; - rth->rt_gateway = 0; - rth->rt_uses_gateway = 0; rth->rt_table_id = vrf->tb_id; - INIT_LIST_HEAD(&rth->rt_uncached); - rth->rt_uncached_list = NULL; } return rth; @@ -739,8 +619,8 @@ static void vrf_dev_uninit(struct net_device *dev) // struct list_head *head = &queue->all_slaves; // struct slave *slave, *next; - vrf_rtable_destroy(vrf); - vrf_rt6_destroy(vrf); + vrf_rtable_release(vrf); + vrf_rt6_release(vrf); // list_for_each_entry_safe(slave, next, head, list) // vrf_del_slave(dev, slave->dev); @@ -772,7 +652,7 @@ static int vrf_dev_init(struct net_device *dev) return 0; out_rth: - vrf_rtable_destroy(vrf); + vrf_rtable_release(vrf); out_stats: free_percpu(dev->dstats); dev->dstats = NULL; @@ -805,7 +685,7 @@ static struct rtable *vrf_get_rtable(const struct net_device *dev, struct net_vrf *vrf = netdev_priv(dev); rth = vrf->rth; - atomic_inc(&rth->dst.__refcnt); + dst_hold(&rth->dst); } return rth; @@ -856,7 +736,7 @@ static struct dst_entry *vrf_get_rt6_dst(const struct net_device *dev, struct net_vrf *vrf = netdev_priv(dev); rt = vrf->rt6; - atomic_inc(&rt->dst.__refcnt); + dst_hold(&rt->dst); } return (struct dst_entry *)rt; @@ -1003,19 +883,6 @@ static int __init vrf_init_module(void) { int rc; - vrf_dst_ops.kmem_cachep = - kmem_cache_create("vrf_ip_dst_cache", - sizeof(struct rtable), 0, - SLAB_HWCACHE_ALIGN, - NULL); - - if (!vrf_dst_ops.kmem_cachep) - return -ENOMEM; - - rc = init_dst_ops6_kmem_cachep(); - if (rc != 0) - goto error2; - register_netdevice_notifier(&vrf_notifier_block); rc = rtnl_link_register(&vrf_link_ops); @@ -1026,22 +893,10 @@ static int __init vrf_init_module(void) error: unregister_netdevice_notifier(&vrf_notifier_block); - free_dst_ops6_kmem_cachep(); -error2: - kmem_cache_destroy(vrf_dst_ops.kmem_cachep); return rc; } -static void __exit vrf_cleanup_module(void) -{ - rtnl_link_unregister(&vrf_link_ops); - unregister_netdevice_notifier(&vrf_notifier_block); - kmem_cache_destroy(vrf_dst_ops.kmem_cachep); - free_dst_ops6_kmem_cachep(); -} - module_init(vrf_init_module); -module_exit(vrf_cleanup_module); MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern"); MODULE_DESCRIPTION("Device driver to instantiate VRF domains"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index d294949005bd..d6ae6d3c98ed 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1864,7 +1864,6 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, { struct dst_entry *ndst; struct flowi6 fl6; - int err; memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_oif 
= oif; @@ -1873,11 +1872,10 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, fl6.flowi6_mark = skb->mark; fl6.flowi6_proto = IPPROTO_UDP; - err = ipv6_stub->ipv6_dst_lookup(vxlan->net, - vxlan->vn6_sock->sock->sk, - &ndst, &fl6); - if (err < 0) - return ERR_PTR(err); + ndst = ipv6_stub->ipv6_dst_lookup_flow(vxlan->net, vxlan->vn6_sock->sock->sk, + &fl6, NULL); + if (unlikely(IS_ERR(ndst))) + return ERR_PTR(-ENETUNREACH); *saddr = fl6.saddr; return ndst; @@ -2054,7 +2052,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, } /* Bypass encapsulation if the destination is local */ - if (rt->rt_flags & RTCF_LOCAL && + if (!info && rt->rt_flags & RTCF_LOCAL && !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) { struct vxlan_dev *dst_vxlan; @@ -2112,7 +2110,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, /* Bypass encapsulation if the destination is local */ rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags; - if (rt6i_flags & RTF_LOCAL && + if (!info && rt6i_flags & RTF_LOCAL && !(rt6i_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) { struct vxlan_dev *dst_vxlan; @@ -2927,30 +2925,6 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, return 0; } -struct net_device *vxlan_dev_create(struct net *net, const char *name, - u8 name_assign_type, struct vxlan_config *conf) -{ - struct nlattr *tb[IFLA_MAX+1]; - struct net_device *dev; - int err; - - memset(&tb, 0, sizeof(tb)); - - dev = rtnl_create_link(net, name, name_assign_type, - &vxlan_link_ops, tb); - if (IS_ERR(dev)) - return dev; - - err = vxlan_dev_configure(net, dev, conf); - if (err < 0) { - free_netdev(dev); - return ERR_PTR(err); - } - - return dev; -} -EXPORT_SYMBOL_GPL(vxlan_dev_create); - static int vxlan_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { @@ -3218,6 +3192,40 @@ static struct rtnl_link_ops vxlan_link_ops __read_mostly = { .get_link_net = vxlan_get_link_net, }; +struct net_device *vxlan_dev_create(struct net *net, const char *name, + u8 name_assign_type, + struct vxlan_config *conf) +{ + struct nlattr *tb[IFLA_MAX + 1]; + struct net_device *dev; + int err; + + memset(&tb, 0, sizeof(tb)); + + dev = rtnl_create_link(net, name, name_assign_type, + &vxlan_link_ops, tb); + if (IS_ERR(dev)) + return dev; + + err = vxlan_dev_configure(net, dev, conf); + if (err < 0) { + free_netdev(dev); + return ERR_PTR(err); + } + + err = rtnl_configure_link(dev, NULL); + if (err < 0) { + LIST_HEAD(list_kill); + + vxlan_dellink(dev, &list_kill); + unregister_netdevice_many(&list_kill); + return ERR_PTR(err); + } + + return dev; +} +EXPORT_SYMBOL_GPL(vxlan_dev_create); + static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn, struct net_device *dev) { diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c index e74664b84925..4e4167976acf 100644 --- a/drivers/net/wimax/i2400m/usb-fw.c +++ b/drivers/net/wimax/i2400m/usb-fw.c @@ -354,6 +354,7 @@ out: usb_autopm_put_interface(i2400mu->usb_iface); d_fnend(8, dev, "(i2400m %p ack %p size %zu) = %ld\n", i2400m, ack, ack_size, (long) result); + usb_put_urb(¬if_urb); return result; error_exceeded: diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c index 2294709ee8b0..fd85f996c554 100644 --- a/drivers/net/wireless/ath/ath9k/htc_hst.c +++ b/drivers/net/wireless/ath/ath9k/htc_hst.c @@ -414,7 +414,7 @@ void ath9k_htc_rx_msg(struct htc_target *htc_handle, return; } - if (epid >= 
ENDPOINT_MAX) { + if (epid < 0 || epid >= ENDPOINT_MAX) { if (pipe_id != USB_REG_IN_PIPE) dev_kfree_skb_any(skb); else diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c index 4029fbf0ce36..febc7d3e94c8 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c @@ -2419,12 +2419,14 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev, const u8 *mac, struct station_info *sinfo) { struct brcmf_if *ifp = netdev_priv(ndev); + struct brcmf_scb_val_le scb_val; s32 err = 0; struct brcmf_sta_info_le sta_info_le; u32 sta_flags; u32 is_tdls_peer; s32 total_rssi; s32 count_rssi; + int rssi; u32 i; brcmf_dbg(TRACE, "Enter, MAC %pM\n", mac); @@ -2505,6 +2507,20 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev, sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); total_rssi /= count_rssi; sinfo->signal = total_rssi; + } else if (test_bit(BRCMF_VIF_STATUS_CONNECTED, + &ifp->vif->sme_state)) { + memset(&scb_val, 0, sizeof(scb_val)); + err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_RSSI, + &scb_val, sizeof(scb_val)); + if (err) { + brcmf_err("Could not get rssi (%d)\n", err); + goto done; + } else { + rssi = le32_to_cpu(scb_val.val); + sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); + sinfo->signal = rssi; + brcmf_dbg(CONN, "RSSI %d dBm\n", rssi); + } } } done: diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c index 086cac3f86d6..7b120d841aed 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c @@ -2262,10 +2262,22 @@ void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb) void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked) { struct brcmf_fws_info *fws = drvr->fws; + struct brcmf_if *ifp; + int i; - fws->bus_flow_blocked = flow_blocked; - if (!flow_blocked) - brcmf_fws_schedule_deq(fws); - else - fws->stats.bus_flow_block++; + if (fws->avoid_queueing) { + for (i = 0; i < BRCMF_MAX_IFS; i++) { + ifp = drvr->iflist[i]; + if (!ifp || !ifp->ndev) + continue; + brcmf_txflowblock_if(ifp, BRCMF_NETIF_STOP_REASON_FLOW, + flow_blocked); + } + } else { + fws->bus_flow_blocked = flow_blocked; + if (!flow_blocked) + brcmf_fws_schedule_deq(fws); + else + fws->stats.bus_flow_block++; + } } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c index 6f7138cea555..f944f356d9c5 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c @@ -1155,6 +1155,8 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf) brcmu_pkt_buf_free_skb(skb); return; } + + skb->protocol = eth_type_trans(skb, ifp->ndev); brcmf_netif_rx(ifp, skb); } diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c index b340aa00b7f5..c2dbe0fd235e 100644 --- a/drivers/net/wireless/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/iwlwifi/iwl-7000.c @@ -70,7 +70,7 @@ /* Highest firmware API version supported */ #define IWL7260_UCODE_API_MAX 17 -#define IWL7265_UCODE_API_MAX 19 +#define IWL7265_UCODE_API_MAX 17 #define IWL7265D_UCODE_API_MAX 19 /* Oldest version we won't warn about */ diff --git a/drivers/net/wireless/mwifiex/pcie.h b/drivers/net/wireless/mwifiex/pcie.h index 48e549c3b285..347ba45f1f2a 100644 --- a/drivers/net/wireless/mwifiex/pcie.h +++ 
b/drivers/net/wireless/mwifiex/pcie.h @@ -210,17 +210,17 @@ static const struct mwifiex_pcie_card_reg mwifiex_reg_8997 = { .cmdrsp_addr_lo = PCIE_SCRATCH_4_REG, .cmdrsp_addr_hi = PCIE_SCRATCH_5_REG, .tx_rdptr = 0xC1A4, - .tx_wrptr = 0xC1A8, - .rx_rdptr = 0xC1A8, + .tx_wrptr = 0xC174, + .rx_rdptr = 0xC174, .rx_wrptr = 0xC1A4, .evt_rdptr = PCIE_SCRATCH_10_REG, .evt_wrptr = PCIE_SCRATCH_11_REG, .drv_rdy = PCIE_SCRATCH_12_REG, .tx_start_ptr = 16, .tx_mask = 0x0FFF0000, - .tx_wrap_mask = 0x01FF0000, + .tx_wrap_mask = 0x1FFF0000, .rx_mask = 0x00000FFF, - .rx_wrap_mask = 0x000001FF, + .rx_wrap_mask = 0x00001FFF, .tx_rollover_ind = BIT(28), .rx_rollover_ind = BIT(12), .evt_rollover_ind = MWIFIEX_BD_FLAG_EVT_ROLLOVER_IND, @@ -342,6 +342,7 @@ mwifiex_pcie_txbd_empty(struct pcie_service_card *card, u32 rdptr) return 1; break; case PCIE_DEVICE_ID_MARVELL_88W8897: + case PCIE_DEVICE_ID_MARVELL_88W8997: if (((card->txbd_wrptr & reg->tx_mask) == (rdptr & reg->tx_mask)) && ((card->txbd_wrptr & reg->tx_rollover_ind) == diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c index ff3ee9dfbbd5..23bae87d4d3d 100644 --- a/drivers/net/wireless/mwifiex/sta_event.c +++ b/drivers/net/wireless/mwifiex/sta_event.c @@ -607,11 +607,13 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) case EVENT_PS_AWAKE: mwifiex_dbg(adapter, EVENT, "info: EVENT: AWAKE\n"); - if (!adapter->pps_uapsd_mode && priv->port_open && + if (!adapter->pps_uapsd_mode && + (priv->port_open || + (priv->bss_mode == NL80211_IFTYPE_ADHOC)) && priv->media_connected && adapter->sleep_period.period) { - adapter->pps_uapsd_mode = true; - mwifiex_dbg(adapter, EVENT, - "event: PPS/UAPSD mode activated\n"); + adapter->pps_uapsd_mode = true; + mwifiex_dbg(adapter, EVENT, + "event: PPS/UAPSD mode activated\n"); } adapter->tx_lock_flag = false; if (adapter->pps_uapsd_mode && adapter->gen_null_pkt) { diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c index 3a2ecb6cf1c3..cad399221b61 100644 --- a/drivers/net/wireless/mwifiex/wmm.c +++ b/drivers/net/wireless/mwifiex/wmm.c @@ -475,7 +475,8 @@ mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter) priv = adapter->priv[i]; if (!priv) continue; - if (!priv->port_open) + if (!priv->port_open && + (priv->bss_mode != NL80211_IFTYPE_ADHOC)) continue; if (adapter->if_ops.is_port_ready && !adapter->if_ops.is_port_ready(priv)) @@ -1109,7 +1110,8 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter, priv_tmp = adapter->bss_prio_tbl[j].bss_prio_cur->priv; - if (!priv_tmp->port_open || + if (((priv_tmp->bss_mode != NL80211_IFTYPE_ADHOC) && + !priv_tmp->port_open) || (atomic_read(&priv_tmp->wmm.tx_pkts_queued) == 0)) continue; diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index 957234272ef7..727eaf203463 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c @@ -443,9 +443,9 @@ static int btt_log_init(struct arena_info *arena) static int btt_freelist_init(struct arena_info *arena) { - int old, new, ret; + int new, ret; u32 i, map_entry; - struct log_entry log_new, log_old; + struct log_entry log_new; arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry), GFP_KERNEL); @@ -453,10 +453,6 @@ static int btt_freelist_init(struct arena_info *arena) return -ENOMEM; for (i = 0; i < arena->nfree; i++) { - old = btt_log_read(arena, i, &log_old, LOG_OLD_ENT); - if (old < 0) - return old; - new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT); if (new < 0) return new; diff --git a/drivers/of/of_mdio.c 
b/drivers/of/of_mdio.c index a87a868fed64..2b1ccb806249 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -334,8 +334,11 @@ int of_phy_register_fixed_link(struct device_node *np) status.link = 1; status.duplex = of_property_read_bool(fixed_link_node, "full-duplex"); - if (of_property_read_u32(fixed_link_node, "speed", &status.speed)) + if (of_property_read_u32(fixed_link_node, "speed", + &status.speed)) { + of_node_put(fixed_link_node); return -EINVAL; + } status.pause = of_property_read_bool(fixed_link_node, "pause"); status.asym_pause = of_property_read_bool(fixed_link_node, "asym-pause"); diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 6ac6618c1c10..ac9c1172c84a 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -1027,6 +1027,9 @@ static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr, if (i >= PCI_ROM_RESOURCE) return -ENODEV; + if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(res->start)) + return -EINVAL; + if (!pci_mmap_fits(pdev, i, vma, PCI_MMAP_SYSFS)) { WARN(1, "process \"%s\" tried to map 0x%08lx bytes at page 0x%08lx on %s BAR %d (start 0x%16Lx, size 0x%16Lx)\n", current->comm, vma->vm_end-vma->vm_start, vma->vm_pgoff, @@ -1043,10 +1046,6 @@ static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr, pci_resource_to_user(pdev, i, res, &start, &end); vma->vm_pgoff += start >> PAGE_SHIFT; mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io; - - if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(start)) - return -EINVAL; - return pci_mmap_page_range(pdev, vma, mmap_type, write_combine); } diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c index 17dd8fe12b54..4ae15edde037 100644 --- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c +++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c @@ -795,7 +795,7 @@ static int bcm2835_pctl_dt_node_to_map(struct pinctrl_dev *pctldev, return 0; out: - kfree(maps); + bcm2835_pctl_dt_free_map(pctldev, maps, num_pins * maps_per_pin); return err; } diff --git a/drivers/pinctrl/pinctrl-tegra.c b/drivers/pinctrl/pinctrl-tegra.c index a30e967d75c2..d3d1dceaec2d 100644 --- a/drivers/pinctrl/pinctrl-tegra.c +++ b/drivers/pinctrl/pinctrl-tegra.c @@ -418,7 +418,7 @@ static int tegra_pinconf_reg(struct tegra_pmx *pmx, return -ENOTSUPP; } - if (*reg < 0 || *bit > 31) { + if (*reg < 0 || *bit < 0) { if (report_err) { const char *prop = "unknown"; int i; diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c index f5585740a765..95121bff2d3e 100644 --- a/drivers/platform/x86/alienware-wmi.c +++ b/drivers/platform/x86/alienware-wmi.c @@ -449,23 +449,22 @@ static acpi_status alienware_hdmi_command(struct hdmi_args *in_args, input.length = (acpi_size) sizeof(*in_args); input.pointer = in_args; - if (out_data != NULL) { + if (out_data) { output.length = ACPI_ALLOCATE_BUFFER; output.pointer = NULL; status = wmi_evaluate_method(WMAX_CONTROL_GUID, 1, command, &input, &output); - } else + if (ACPI_SUCCESS(status)) { + obj = (union acpi_object *)output.pointer; + if (obj && obj->type == ACPI_TYPE_INTEGER) + *out_data = (u32)obj->integer.value; + } + kfree(output.pointer); + } else { status = wmi_evaluate_method(WMAX_CONTROL_GUID, 1, command, &input, NULL); - - if (ACPI_SUCCESS(status) && out_data != NULL) { - obj = (union acpi_object *)output.pointer; - if (obj && obj->type == ACPI_TYPE_INTEGER) - *out_data = (u32) obj->integer.value; } - kfree(output.pointer); return status; - } static ssize_t 
show_hdmi_cable(struct device *dev, diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c index cccf250cd1e3..ee64c9512a3a 100644 --- a/drivers/platform/x86/asus-nb-wmi.c +++ b/drivers/platform/x86/asus-nb-wmi.c @@ -551,9 +551,33 @@ static struct asus_wmi_driver asus_nb_wmi_driver = { .detect_quirks = asus_nb_wmi_quirks, }; +static const struct dmi_system_id asus_nb_wmi_blacklist[] __initconst = { + { + /* + * asus-nb-wm adds no functionality. The T100TA has a detachable + * USB kbd, so no hotkeys and it has no WMI rfkill; and loading + * asus-nb-wm causes the camera LED to turn and _stay_ on. + */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TA"), + }, + }, + { + /* The Asus T200TA has the same issue as the T100TA */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T200TA"), + }, + }, + {} /* Terminating entry */ +}; static int __init asus_nb_wmi_init(void) { + if (dmi_check_system(asus_nb_wmi_blacklist)) + return -ENODEV; + return asus_wmi_register_driver(&asus_nb_wmi_driver); } diff --git a/drivers/power/bq27xxx_battery.c b/drivers/power/bq27xxx_battery.c index 6c3a447f378b..286122df3e01 100644 --- a/drivers/power/bq27xxx_battery.c +++ b/drivers/power/bq27xxx_battery.c @@ -198,10 +198,10 @@ static u8 bq27500_regs[] = { INVALID_REG_ADDR, /* TTECP - NA */ 0x0c, /* NAC */ 0x12, /* LMD(FCC) */ - 0x1e, /* CYCT */ + 0x2a, /* CYCT */ INVALID_REG_ADDR, /* AE - NA */ - 0x20, /* SOC(RSOC) */ - 0x2e, /* DCAP(ILMD) */ + 0x2c, /* SOC(RSOC) */ + 0x3c, /* DCAP(ILMD) */ INVALID_REG_ADDR, /* AP - NA */ }; @@ -242,7 +242,7 @@ static u8 bq27541_regs[] = { INVALID_REG_ADDR, /* AE - NA */ 0x2c, /* SOC(RSOC) */ 0x3c, /* DCAP */ - 0x76, /* AP */ + 0x24, /* AP */ }; static u8 bq27545_regs[] = { @@ -471,7 +471,10 @@ static int bq27xxx_battery_read_soc(struct bq27xxx_device_info *di) { int soc; - soc = bq27xxx_read(di, BQ27XXX_REG_SOC, false); + if (di->chip == BQ27000 || di->chip == BQ27010) + soc = bq27xxx_read(di, BQ27XXX_REG_SOC, true); + else + soc = bq27xxx_read(di, BQ27XXX_REG_SOC, false); if (soc < 0) dev_dbg(di->dev, "error reading State-of-Charge\n"); @@ -536,7 +539,10 @@ static int bq27xxx_battery_read_dcap(struct bq27xxx_device_info *di) { int dcap; - dcap = bq27xxx_read(di, BQ27XXX_REG_DCAP, false); + if (di->chip == BQ27000 || di->chip == BQ27010) + dcap = bq27xxx_read(di, BQ27XXX_REG_DCAP, true); + else + dcap = bq27xxx_read(di, BQ27XXX_REG_DCAP, false); if (dcap < 0) { dev_dbg(di->dev, "error reading initial last measured discharge\n"); @@ -544,7 +550,7 @@ static int bq27xxx_battery_read_dcap(struct bq27xxx_device_info *di) } if (di->chip == BQ27000 || di->chip == BQ27010) - dcap *= BQ27XXX_CURRENT_CONSTANT / BQ27XXX_RS; + dcap = (dcap << 8) * BQ27XXX_CURRENT_CONSTANT / BQ27XXX_RS; else dcap *= 1000; diff --git a/drivers/power/ipaq_micro_battery.c b/drivers/power/ipaq_micro_battery.c index f03014ea1dc4..65e9921c5a11 100644 --- a/drivers/power/ipaq_micro_battery.c +++ b/drivers/power/ipaq_micro_battery.c @@ -261,7 +261,7 @@ static int micro_batt_probe(struct platform_device *pdev) return 0; ac_err: - power_supply_unregister(micro_ac_power); + power_supply_unregister(micro_batt_power); batt_err: cancel_delayed_work_sync(&mb->update); destroy_workqueue(mb->wq); diff --git a/drivers/power/test_power.c b/drivers/power/test_power.c index 83c42ea88f2b..57246cdbd042 100644 --- a/drivers/power/test_power.c +++ b/drivers/power/test_power.c @@ 
-301,6 +301,8 @@ static int map_get_value(struct battery_property_map *map, const char *key, buf[MAX_KEYLENGTH-1] = '\0'; cr = strnlen(buf, MAX_KEYLENGTH) - 1; + if (cr < 0) + return def_val; if (buf[cr] == '\n') buf[cr] = '\0'; diff --git a/drivers/power/tps65217_charger.c b/drivers/power/tps65217_charger.c index 040a40b4b173..4c56e54af6ac 100644 --- a/drivers/power/tps65217_charger.c +++ b/drivers/power/tps65217_charger.c @@ -197,6 +197,7 @@ static int tps65217_charger_probe(struct platform_device *pdev) { struct tps65217 *tps = dev_get_drvdata(pdev->dev.parent); struct tps65217_charger *charger; + struct power_supply_config cfg = {}; int ret; dev_dbg(&pdev->dev, "%s\n", __func__); @@ -209,9 +210,12 @@ static int tps65217_charger_probe(struct platform_device *pdev) charger->tps = tps; charger->dev = &pdev->dev; + cfg.of_node = pdev->dev.of_node; + cfg.drv_data = charger; + charger->ac = devm_power_supply_register(&pdev->dev, &tps65217_charger_desc, - NULL); + &cfg); if (IS_ERR(charger->ac)) { dev_err(&pdev->dev, "failed: power supply register\n"); return PTR_ERR(charger->ac); diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c index 60a5e0c63a13..efe68b13704d 100644 --- a/drivers/ptp/ptp_clock.c +++ b/drivers/ptp/ptp_clock.c @@ -171,10 +171,11 @@ static struct posix_clock_operations ptp_clock_ops = { .read = ptp_read, }; -static void delete_ptp_clock(struct posix_clock *pc) +static void ptp_clock_release(struct device *dev) { - struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock); + struct ptp_clock *ptp = container_of(dev, struct ptp_clock, dev); + ptp_cleanup_pin_groups(ptp); mutex_destroy(&ptp->tsevq_mux); mutex_destroy(&ptp->pincfg_mux); ida_simple_remove(&ptp_clocks_map, ptp->index); @@ -205,7 +206,6 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, } ptp->clock.ops = ptp_clock_ops; - ptp->clock.release = delete_ptp_clock; ptp->info = info; ptp->devid = MKDEV(major, index); ptp->index = index; @@ -214,17 +214,9 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, mutex_init(&ptp->pincfg_mux); init_waitqueue_head(&ptp->tsev_wq); - /* Create a new device in our class. */ - ptp->dev = device_create(ptp_class, parent, ptp->devid, ptp, - "ptp%d", ptp->index); - if (IS_ERR(ptp->dev)) - goto no_device; - - dev_set_drvdata(ptp->dev, ptp); - - err = ptp_populate_sysfs(ptp); + err = ptp_populate_pin_groups(ptp); if (err) - goto no_sysfs; + goto no_pin_groups; /* Register a new PPS source. */ if (info->pps) { @@ -235,13 +227,24 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, pps.owner = info->owner; ptp->pps_source = pps_register_source(&pps, PTP_PPS_DEFAULTS); if (!ptp->pps_source) { + err = -EINVAL; pr_err("failed to register pps source\n"); goto no_pps; } } - /* Create a posix clock. */ - err = posix_clock_register(&ptp->clock, ptp->devid); + /* Initialize a new device of our class in our clock structure. */ + device_initialize(&ptp->dev); + ptp->dev.devt = ptp->devid; + ptp->dev.class = ptp_class; + ptp->dev.parent = parent; + ptp->dev.groups = ptp->pin_attr_groups; + ptp->dev.release = ptp_clock_release; + dev_set_drvdata(&ptp->dev, ptp); + dev_set_name(&ptp->dev, "ptp%d", ptp->index); + + /* Create a posix clock and link it to the device. 
*/ + err = posix_clock_register(&ptp->clock, &ptp->dev); if (err) { pr_err("failed to create posix clock\n"); goto no_clock; @@ -253,10 +256,8 @@ no_clock: if (ptp->pps_source) pps_unregister_source(ptp->pps_source); no_pps: - ptp_cleanup_sysfs(ptp); -no_sysfs: - device_destroy(ptp_class, ptp->devid); -no_device: + ptp_cleanup_pin_groups(ptp); +no_pin_groups: mutex_destroy(&ptp->tsevq_mux); mutex_destroy(&ptp->pincfg_mux); no_slot: @@ -274,10 +275,9 @@ int ptp_clock_unregister(struct ptp_clock *ptp) /* Release the clock's resources. */ if (ptp->pps_source) pps_unregister_source(ptp->pps_source); - ptp_cleanup_sysfs(ptp); - device_destroy(ptp_class, ptp->devid); posix_clock_unregister(&ptp->clock); + return 0; } EXPORT_SYMBOL(ptp_clock_unregister); diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h index 9c5d41421b65..15346e840caa 100644 --- a/drivers/ptp/ptp_private.h +++ b/drivers/ptp/ptp_private.h @@ -40,7 +40,7 @@ struct timestamp_event_queue { struct ptp_clock { struct posix_clock clock; - struct device *dev; + struct device dev; struct ptp_clock_info *info; dev_t devid; int index; /* index into clocks.map */ @@ -54,6 +54,8 @@ struct ptp_clock { struct device_attribute *pin_dev_attr; struct attribute **pin_attr; struct attribute_group pin_attr_group; + /* 1st entry is a pointer to the real group, 2nd is NULL terminator */ + const struct attribute_group *pin_attr_groups[2]; }; /* @@ -94,8 +96,7 @@ uint ptp_poll(struct posix_clock *pc, extern const struct attribute_group *ptp_groups[]; -int ptp_cleanup_sysfs(struct ptp_clock *ptp); - -int ptp_populate_sysfs(struct ptp_clock *ptp); +int ptp_populate_pin_groups(struct ptp_clock *ptp); +void ptp_cleanup_pin_groups(struct ptp_clock *ptp); #endif diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c index 302e626fe6b0..731d0423c8aa 100644 --- a/drivers/ptp/ptp_sysfs.c +++ b/drivers/ptp/ptp_sysfs.c @@ -46,27 +46,6 @@ PTP_SHOW_INT(n_periodic_outputs, n_per_out); PTP_SHOW_INT(n_programmable_pins, n_pins); PTP_SHOW_INT(pps_available, pps); -static struct attribute *ptp_attrs[] = { - &dev_attr_clock_name.attr, - &dev_attr_max_adjustment.attr, - &dev_attr_n_alarms.attr, - &dev_attr_n_external_timestamps.attr, - &dev_attr_n_periodic_outputs.attr, - &dev_attr_n_programmable_pins.attr, - &dev_attr_pps_available.attr, - NULL, -}; - -static const struct attribute_group ptp_group = { - .attrs = ptp_attrs, -}; - -const struct attribute_group *ptp_groups[] = { - &ptp_group, - NULL, -}; - - static ssize_t extts_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -91,6 +70,7 @@ static ssize_t extts_enable_store(struct device *dev, out: return err; } +static DEVICE_ATTR(extts_enable, 0220, NULL, extts_enable_store); static ssize_t extts_fifo_show(struct device *dev, struct device_attribute *attr, char *page) @@ -124,6 +104,7 @@ out: mutex_unlock(&ptp->tsevq_mux); return cnt; } +static DEVICE_ATTR(fifo, 0444, extts_fifo_show, NULL); static ssize_t period_store(struct device *dev, struct device_attribute *attr, @@ -151,6 +132,7 @@ static ssize_t period_store(struct device *dev, out: return err; } +static DEVICE_ATTR(period, 0220, NULL, period_store); static ssize_t pps_enable_store(struct device *dev, struct device_attribute *attr, @@ -177,6 +159,57 @@ static ssize_t pps_enable_store(struct device *dev, out: return err; } +static DEVICE_ATTR(pps_enable, 0220, NULL, pps_enable_store); + +static struct attribute *ptp_attrs[] = { + &dev_attr_clock_name.attr, + + &dev_attr_max_adjustment.attr, + 
&dev_attr_n_alarms.attr, + &dev_attr_n_external_timestamps.attr, + &dev_attr_n_periodic_outputs.attr, + &dev_attr_n_programmable_pins.attr, + &dev_attr_pps_available.attr, + + &dev_attr_extts_enable.attr, + &dev_attr_fifo.attr, + &dev_attr_period.attr, + &dev_attr_pps_enable.attr, + NULL +}; + +static umode_t ptp_is_attribute_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + struct device *dev = kobj_to_dev(kobj); + struct ptp_clock *ptp = dev_get_drvdata(dev); + struct ptp_clock_info *info = ptp->info; + umode_t mode = attr->mode; + + if (attr == &dev_attr_extts_enable.attr || + attr == &dev_attr_fifo.attr) { + if (!info->n_ext_ts) + mode = 0; + } else if (attr == &dev_attr_period.attr) { + if (!info->n_per_out) + mode = 0; + } else if (attr == &dev_attr_pps_enable.attr) { + if (!info->pps) + mode = 0; + } + + return mode; +} + +static const struct attribute_group ptp_group = { + .is_visible = ptp_is_attribute_visible, + .attrs = ptp_attrs, +}; + +const struct attribute_group *ptp_groups[] = { + &ptp_group, + NULL +}; static int ptp_pin_name2index(struct ptp_clock *ptp, const char *name) { @@ -235,40 +268,14 @@ static ssize_t ptp_pin_store(struct device *dev, struct device_attribute *attr, return count; } -static DEVICE_ATTR(extts_enable, 0220, NULL, extts_enable_store); -static DEVICE_ATTR(fifo, 0444, extts_fifo_show, NULL); -static DEVICE_ATTR(period, 0220, NULL, period_store); -static DEVICE_ATTR(pps_enable, 0220, NULL, pps_enable_store); - -int ptp_cleanup_sysfs(struct ptp_clock *ptp) +int ptp_populate_pin_groups(struct ptp_clock *ptp) { - struct device *dev = ptp->dev; - struct ptp_clock_info *info = ptp->info; - - if (info->n_ext_ts) { - device_remove_file(dev, &dev_attr_extts_enable); - device_remove_file(dev, &dev_attr_fifo); - } - if (info->n_per_out) - device_remove_file(dev, &dev_attr_period); - - if (info->pps) - device_remove_file(dev, &dev_attr_pps_enable); - - if (info->n_pins) { - sysfs_remove_group(&dev->kobj, &ptp->pin_attr_group); - kfree(ptp->pin_attr); - kfree(ptp->pin_dev_attr); - } - return 0; -} - -static int ptp_populate_pins(struct ptp_clock *ptp) -{ - struct device *dev = ptp->dev; struct ptp_clock_info *info = ptp->info; int err = -ENOMEM, i, n_pins = info->n_pins; + if (!n_pins) + return 0; + ptp->pin_dev_attr = kzalloc(n_pins * sizeof(*ptp->pin_dev_attr), GFP_KERNEL); if (!ptp->pin_dev_attr) @@ -292,61 +299,18 @@ static int ptp_populate_pins(struct ptp_clock *ptp) ptp->pin_attr_group.name = "pins"; ptp->pin_attr_group.attrs = ptp->pin_attr; - err = sysfs_create_group(&dev->kobj, &ptp->pin_attr_group); - if (err) - goto no_group; + ptp->pin_attr_groups[0] = &ptp->pin_attr_group; + return 0; -no_group: - kfree(ptp->pin_attr); no_pin_attr: kfree(ptp->pin_dev_attr); no_dev_attr: return err; } -int ptp_populate_sysfs(struct ptp_clock *ptp) +void ptp_cleanup_pin_groups(struct ptp_clock *ptp) { - struct device *dev = ptp->dev; - struct ptp_clock_info *info = ptp->info; - int err; - - if (info->n_ext_ts) { - err = device_create_file(dev, &dev_attr_extts_enable); - if (err) - goto out1; - err = device_create_file(dev, &dev_attr_fifo); - if (err) - goto out2; - } - if (info->n_per_out) { - err = device_create_file(dev, &dev_attr_period); - if (err) - goto out3; - } - if (info->pps) { - err = device_create_file(dev, &dev_attr_pps_enable); - if (err) - goto out4; - } - if (info->n_pins) { - err = ptp_populate_pins(ptp); - if (err) - goto out5; - } - return 0; -out5: - if (info->pps) - device_remove_file(dev, &dev_attr_pps_enable); -out4: - if 
(info->n_per_out) - device_remove_file(dev, &dev_attr_period); -out3: - if (info->n_ext_ts) - device_remove_file(dev, &dev_attr_fifo); -out2: - if (info->n_ext_ts) - device_remove_file(dev, &dev_attr_extts_enable); -out1: - return err; + kfree(ptp->pin_attr); + kfree(ptp->pin_dev_attr); } diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 188920367e84..07d44fe12f41 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -1085,18 +1085,18 @@ static int set_machine_constraints(struct regulator_dev *rdev, ret = machine_constraints_voltage(rdev, rdev->constraints); if (ret != 0) - goto out; + return ret; ret = machine_constraints_current(rdev, rdev->constraints); if (ret != 0) - goto out; + return ret; if (rdev->constraints->ilim_uA && ops->set_input_current_limit) { ret = ops->set_input_current_limit(rdev, rdev->constraints->ilim_uA); if (ret < 0) { rdev_err(rdev, "failed to set input limit\n"); - goto out; + return ret; } } @@ -1105,21 +1105,20 @@ static int set_machine_constraints(struct regulator_dev *rdev, ret = suspend_prepare(rdev, rdev->constraints->initial_state); if (ret < 0) { rdev_err(rdev, "failed to set suspend state\n"); - goto out; + return ret; } } if (rdev->constraints->initial_mode) { if (!ops->set_mode) { rdev_err(rdev, "no set_mode operation\n"); - ret = -EINVAL; - goto out; + return -EINVAL; } ret = ops->set_mode(rdev, rdev->constraints->initial_mode); if (ret < 0) { rdev_err(rdev, "failed to set initial mode: %d\n", ret); - goto out; + return ret; } } @@ -1130,7 +1129,7 @@ static int set_machine_constraints(struct regulator_dev *rdev, ret = _regulator_do_enable(rdev); if (ret < 0 && ret != -EINVAL) { rdev_err(rdev, "failed to enable\n"); - goto out; + return ret; } } @@ -1139,7 +1138,7 @@ static int set_machine_constraints(struct regulator_dev *rdev, ret = ops->set_ramp_delay(rdev, rdev->constraints->ramp_delay); if (ret < 0) { rdev_err(rdev, "failed to set ramp_delay\n"); - goto out; + return ret; } } @@ -1147,7 +1146,7 @@ static int set_machine_constraints(struct regulator_dev *rdev, ret = ops->set_pull_down(rdev); if (ret < 0) { rdev_err(rdev, "failed to set pull down\n"); - goto out; + return ret; } } @@ -1155,7 +1154,7 @@ static int set_machine_constraints(struct regulator_dev *rdev, ret = ops->set_soft_start(rdev); if (ret < 0) { rdev_err(rdev, "failed to set soft start\n"); - goto out; + return ret; } } @@ -1164,16 +1163,12 @@ static int set_machine_constraints(struct regulator_dev *rdev, ret = ops->set_over_current_protection(rdev); if (ret < 0) { rdev_err(rdev, "failed to set over current protection\n"); - goto out; + return ret; } } print_constraints(rdev); return 0; -out: - kfree(rdev->constraints); - rdev->constraints = NULL; - return ret; } /** @@ -4416,7 +4411,7 @@ unset_supplies: scrub: regulator_ena_gpio_free(rdev); - kfree(rdev->constraints); + wash: device_unregister(&rdev->dev); /* device core frees rdev */ diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index a3aaef4c53a3..0d2bcb33697f 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -1594,6 +1594,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port) { struct zfcp_qdio *qdio = wka_port->adapter->qdio; struct zfcp_fsf_req *req; + unsigned long req_id = 0; int retval = -EIO; spin_lock_irq(&qdio->req_q_lock); @@ -1616,6 +1617,8 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port) hton24(req->qtcb->bottom.support.d_id, wka_port->d_id); req->data = wka_port; + req_id = req->req_id; + 
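	/*
	 * The req_id saved above lets zfcp_dbf_rec_run_wka() run after the
	 * send without touching req again: once zfcp_fsf_req_send() succeeds
	 * the request may already have completed and been freed, so only the
	 * saved id is used for the trace below.
	 */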
zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); retval = zfcp_fsf_req_send(req); if (retval) @@ -1623,7 +1626,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port) out: spin_unlock_irq(&qdio->req_q_lock); if (!retval) - zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id); + zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req_id); return retval; } @@ -1649,6 +1652,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port) { struct zfcp_qdio *qdio = wka_port->adapter->qdio; struct zfcp_fsf_req *req; + unsigned long req_id = 0; int retval = -EIO; spin_lock_irq(&qdio->req_q_lock); @@ -1671,6 +1675,8 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port) req->data = wka_port; req->qtcb->header.port_handle = wka_port->handle; + req_id = req->req_id; + zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); retval = zfcp_fsf_req_send(req); if (retval) @@ -1678,7 +1684,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port) out: spin_unlock_irq(&qdio->req_q_lock); if (!retval) - zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id); + zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req_id); return retval; } diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index 5eaf14c15590..59cb3af9a318 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -692,6 +692,7 @@ static struct rt6_info *find_route_ipv6(const struct in6_addr *saddr, { struct flowi6 fl; + memset(&fl, 0, sizeof(fl)); if (saddr) memcpy(&fl.saddr, saddr, sizeof(struct in6_addr)); if (daddr) diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 0fdc8c417035..b4fbcf4cade8 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -1982,7 +1982,7 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) ISCSI_DBG_EH(session, "scsi cmd %p timedout\n", sc); - spin_lock(&session->frwd_lock); + spin_lock_bh(&session->frwd_lock); task = (struct iscsi_task *)sc->SCp.ptr; if (!task) { /* @@ -2109,7 +2109,7 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) done: if (task) task->last_timeout = jiffies; - spin_unlock(&session->frwd_lock); + spin_unlock_bh(&session->frwd_lock); ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ? "timer reset" : "shutdown or nh"); return rc; diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 41a646696bab..0772804dbc27 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -364,8 +364,8 @@ qla24xx_abort_sp_done(void *data, void *ptr, int res) srb_t *sp = (srb_t *)ptr; struct srb_iocb *abt = &sp->u.iocb_cmd; - del_timer(&sp->u.iocb_cmd.timer); - complete(&abt->u.abt.comp); + if (del_timer(&sp->u.iocb_cmd.timer)) + complete(&abt->u.abt.comp); } static int diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 354719a0e965..237628742ecf 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -706,8 +706,10 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) hp->flags = input_size; /* structure abuse ... 
*/ hp->pack_id = old_hdr.pack_id; hp->usr_ptr = NULL; - if (__copy_from_user(cmnd, buf, cmd_size)) + if (__copy_from_user(cmnd, buf, cmd_size)) { + sg_remove_request(sfp, srp); return -EFAULT; + } /* * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV, * but is is possible that the app intended SG_DXFER_TO_DEV, because there diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c index 87a0e47eeae6..4edd38d03b93 100644 --- a/drivers/spi/spi-dw.c +++ b/drivers/spi/spi-dw.c @@ -180,9 +180,11 @@ static inline u32 rx_max(struct dw_spi *dws) static void dw_writer(struct dw_spi *dws) { - u32 max = tx_max(dws); + u32 max; u16 txw = 0; + spin_lock(&dws->buf_lock); + max = tx_max(dws); while (max--) { /* Set the tx word if the transfer's original "tx" is not null */ if (dws->tx_end - dws->len) { @@ -194,13 +196,16 @@ static void dw_writer(struct dw_spi *dws) dw_write_io_reg(dws, DW_SPI_DR, txw); dws->tx += dws->n_bytes; } + spin_unlock(&dws->buf_lock); } static void dw_reader(struct dw_spi *dws) { - u32 max = rx_max(dws); + u32 max; u16 rxw; + spin_lock(&dws->buf_lock); + max = rx_max(dws); while (max--) { rxw = dw_read_io_reg(dws, DW_SPI_DR); /* Care rx only if the transfer's original "rx" is not null */ @@ -212,6 +217,7 @@ static void dw_reader(struct dw_spi *dws) } dws->rx += dws->n_bytes; } + spin_unlock(&dws->buf_lock); } static void int_error_stop(struct dw_spi *dws, const char *msg) @@ -284,6 +290,7 @@ static int dw_spi_transfer_one(struct spi_master *master, { struct dw_spi *dws = spi_master_get_devdata(master); struct chip_data *chip = spi_get_ctldata(spi); + unsigned long flags; u8 imask = 0; u16 txlevel = 0; u16 clk_div; @@ -291,12 +298,13 @@ static int dw_spi_transfer_one(struct spi_master *master, int ret; dws->dma_mapped = 0; - + spin_lock_irqsave(&dws->buf_lock, flags); dws->tx = (void *)transfer->tx_buf; dws->tx_end = dws->tx + transfer->len; dws->rx = transfer->rx_buf; dws->rx_end = dws->rx + transfer->len; dws->len = transfer->len; + spin_unlock_irqrestore(&dws->buf_lock, flags); spi_enable_chip(dws, 0); @@ -488,6 +496,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) dws->dma_inited = 0; dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR); snprintf(dws->name, sizeof(dws->name), "dw_spi%d", dws->bus_num); + spin_lock_init(&dws->buf_lock); ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, dws->name, master); if (ret < 0) { diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h index 35589a270468..d05b216ea3f8 100644 --- a/drivers/spi/spi-dw.h +++ b/drivers/spi/spi-dw.h @@ -117,6 +117,7 @@ struct dw_spi { size_t len; void *tx; void *tx_end; + spinlock_t buf_lock; void *rx; void *rx_end; int dma_mapped; diff --git a/drivers/staging/iio/accel/sca3000_ring.c b/drivers/staging/iio/accel/sca3000_ring.c index 20b878d35ea2..fc8b6f179ec6 100644 --- a/drivers/staging/iio/accel/sca3000_ring.c +++ b/drivers/staging/iio/accel/sca3000_ring.c @@ -56,7 +56,7 @@ static int sca3000_read_data(struct sca3000_state *st, st->tx[0] = SCA3000_READ_REG(reg_address_high); ret = spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer)); if (ret) { - dev_err(get_device(&st->us->dev), "problem reading register"); + dev_err(&st->us->dev, "problem reading register"); goto error_free_rx; } diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c index 8eb7179da342..4a12a3ea3f25 100644 --- a/drivers/staging/iio/resolver/ad2s1210.c +++ b/drivers/staging/iio/resolver/ad2s1210.c @@ -125,17 +125,24 @@ static int ad2s1210_config_write(struct 
ad2s1210_state *st, u8 data) static int ad2s1210_config_read(struct ad2s1210_state *st, unsigned char address) { - struct spi_transfer xfer = { - .len = 2, - .rx_buf = st->rx, - .tx_buf = st->tx, + struct spi_transfer xfers[] = { + { + .len = 1, + .rx_buf = &st->rx[0], + .tx_buf = &st->tx[0], + .cs_change = 1, + }, { + .len = 1, + .rx_buf = &st->rx[1], + .tx_buf = &st->tx[1], + }, }; int ret = 0; ad2s1210_set_mode(MOD_CONFIG, st); st->tx[0] = address | AD2S1210_MSB_IS_HIGH; st->tx[1] = AD2S1210_REG_FAULT; - ret = spi_sync_transfer(st->sdev, &xfer, 1); + ret = spi_sync_transfer(st->sdev, xfers, 2); if (ret < 0) return ret; st->old_data = true; diff --git a/drivers/staging/media/lirc/lirc_imon.c b/drivers/staging/media/lirc/lirc_imon.c index 534b8103ae80..ff1926ca1f96 100644 --- a/drivers/staging/media/lirc/lirc_imon.c +++ b/drivers/staging/media/lirc/lirc_imon.c @@ -885,12 +885,14 @@ static int imon_probe(struct usb_interface *interface, vendor, product, ifnum, usbdev->bus->busnum, usbdev->devnum); /* Everything went fine. Just unlock and return retval (with is 0) */ + mutex_unlock(&context->ctx_lock); goto driver_unlock; unregister_lirc: lirc_unregister_driver(driver->minor); free_tx_urb: + mutex_unlock(&context->ctx_lock); usb_free_urb(tx_urb); free_rx_urb: diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c index 0f6bc6b8e4c6..1e0d2a33787e 100644 --- a/drivers/staging/rtl8192u/r8192U_core.c +++ b/drivers/staging/rtl8192u/r8192U_core.c @@ -1050,7 +1050,7 @@ static void rtl8192_hard_data_xmit(struct sk_buff *skb, struct net_device *dev, spin_lock_irqsave(&priv->tx_lock, flags); - memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev)); + *(struct net_device **)(skb->cb) = dev; tcb_desc->bTxEnableFwCalcDur = 1; skb_push(skb, priv->ieee80211->tx_headroom); ret = rtl8192_tx(dev, skb); @@ -1092,7 +1092,7 @@ static int rtl8192_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) static void rtl8192_tx_isr(struct urb *tx_urb) { struct sk_buff *skb = (struct sk_buff *)tx_urb->context; - struct net_device *dev = (struct net_device *)(skb->cb); + struct net_device *dev = *(struct net_device **)(skb->cb); struct r8192_priv *priv = NULL; cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE); u8 queue_index = tcb_desc->queue_index; diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index b9b9ffde4c7a..d2ceefe4a076 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -1980,14 +1980,14 @@ static ssize_t target_dev_lba_map_store(struct config_item *item, struct se_device *dev = to_device(item); struct t10_alua_lba_map *lba_map = NULL; struct list_head lba_list; - char *map_entries, *ptr; + char *map_entries, *orig, *ptr; char state; int pg_num = -1, pg; int ret = 0, num = 0, pg_id, alua_state; unsigned long start_lba = -1, end_lba = -1; unsigned long segment_size = -1, segment_mult = -1; - map_entries = kstrdup(page, GFP_KERNEL); + orig = map_entries = kstrdup(page, GFP_KERNEL); if (!map_entries) return -ENOMEM; @@ -2085,7 +2085,7 @@ out: } else core_alua_set_lba_map(dev, &lba_list, segment_size, segment_mult); - kfree(map_entries); + kfree(orig); return count; } diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c index 1922d87ea30d..12746a90d8e5 100644 --- a/drivers/tty/serial/samsung.c +++ b/drivers/tty/serial/samsung.c @@ -1841,8 +1841,6 @@ static int s3c24xx_serial_probe(struct platform_device *pdev) ourport->min_dma_size = max_t(int, 
ourport->port.fifosize, dma_get_cache_alignment()); - probe_index++; - dbg("%s: initialising port %p...\n", __func__, ourport); ret = s3c24xx_serial_init_port(ourport, pdev); @@ -1872,6 +1870,8 @@ static int s3c24xx_serial_probe(struct platform_device *pdev) if (ret < 0) dev_err(&pdev->dev, "failed to add cpufreq notifier\n"); + probe_index++; + return 0; } diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c index a3dfefa33e3c..3f82014269f0 100644 --- a/drivers/tty/serial/sc16is7xx.c +++ b/drivers/tty/serial/sc16is7xx.c @@ -1489,10 +1489,12 @@ static int __init sc16is7xx_init(void) #endif return ret; +#ifdef CONFIG_SERIAL_SC16IS7XX_SPI err_spi: #ifdef CONFIG_SERIAL_SC16IS7XX_I2C i2c_del_driver(&sc16is7xx_i2c_uart_driver); #endif +#endif err_i2c: uart_unregister_driver(&sc16is7xx_uart); return ret; diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index e3bff71dfbf3..3cc31863cb0b 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -1081,11 +1081,11 @@ void usb_disable_endpoint(struct usb_device *dev, unsigned int epaddr, if (usb_endpoint_out(epaddr)) { ep = dev->ep_out[epnum]; - if (reset_hardware) + if (reset_hardware && epnum != 0) dev->ep_out[epnum] = NULL; } else { ep = dev->ep_in[epnum]; - if (reset_hardware) + if (reset_hardware && epnum != 0) dev->ep_in[epnum] = NULL; } if (ep) { diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index a1d8e495d1e5..f41dd75a6011 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c @@ -303,6 +303,9 @@ static ssize_t gadget_dev_desc_UDC_store(struct config_item *item, char *name; int ret; + if (strlen(page) < len) + return -EOVERFLOW; + name = kstrdup(page, GFP_KERNEL); if (!name) return -ENOMEM; diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c index 1bcfe819fad3..5819f6503f75 100644 --- a/drivers/usb/gadget/function/f_acm.c +++ b/drivers/usb/gadget/function/f_acm.c @@ -779,10 +779,10 @@ static ssize_t f_acm_port_num_show(struct config_item *item, char *page) return sprintf(page, "%u\n", to_f_serial_opts(item)->port_num); } -CONFIGFS_ATTR_RO(f_acm_port_, num); +CONFIGFS_ATTR_RO(f_acm_, port_num); static struct configfs_attribute *acm_attrs[] = { - &f_acm_port_attr_num, + &f_acm_attr_port_num, NULL, }; diff --git a/drivers/usb/gadget/legacy/audio.c b/drivers/usb/gadget/legacy/audio.c index c8682ed59d79..78ee92cdfb59 100644 --- a/drivers/usb/gadget/legacy/audio.c +++ b/drivers/usb/gadget/legacy/audio.c @@ -303,8 +303,10 @@ static int audio_bind(struct usb_composite_dev *cdev) struct usb_descriptor_header *usb_desc; usb_desc = usb_otg_descriptor_alloc(cdev->gadget); - if (!usb_desc) + if (!usb_desc) { + status = -ENOMEM; goto fail; + } usb_otg_descriptor_init(cdev->gadget, usb_desc); otg_desc[0] = usb_desc; otg_desc[1] = NULL; diff --git a/drivers/usb/gadget/legacy/cdc2.c b/drivers/usb/gadget/legacy/cdc2.c index d70e7d43241a..2bac77a6f4b6 100644 --- a/drivers/usb/gadget/legacy/cdc2.c +++ b/drivers/usb/gadget/legacy/cdc2.c @@ -183,8 +183,10 @@ static int cdc_bind(struct usb_composite_dev *cdev) struct usb_descriptor_header *usb_desc; usb_desc = usb_otg_descriptor_alloc(gadget); - if (!usb_desc) + if (!usb_desc) { + status = -ENOMEM; goto fail1; + } usb_otg_descriptor_init(gadget, usb_desc); otg_desc[0] = usb_desc; otg_desc[1] = NULL; diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index 81f3c9cb333c..b95900168a6b 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ 
b/drivers/usb/gadget/legacy/inode.c @@ -1360,7 +1360,6 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) req->buf = dev->rbuf; req->context = NULL; - value = -EOPNOTSUPP; switch (ctrl->bRequest) { case USB_REQ_GET_DESCRIPTOR: @@ -1806,7 +1805,7 @@ static ssize_t dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) { struct dev_data *dev = fd->private_data; - ssize_t value = len, length = len; + ssize_t value, length = len; unsigned total; u32 tag; char *kbuf; diff --git a/drivers/usb/gadget/legacy/ncm.c b/drivers/usb/gadget/legacy/ncm.c index cc3ffacbade1..0d45eb497063 100644 --- a/drivers/usb/gadget/legacy/ncm.c +++ b/drivers/usb/gadget/legacy/ncm.c @@ -162,8 +162,10 @@ static int gncm_bind(struct usb_composite_dev *cdev) struct usb_descriptor_header *usb_desc; usb_desc = usb_otg_descriptor_alloc(gadget); - if (!usb_desc) + if (!usb_desc) { + status = -ENOMEM; goto fail; + } usb_otg_descriptor_init(gadget, usb_desc); otg_desc[0] = usb_desc; otg_desc[1] = NULL; diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c index 553922c3be85..285e21ffa711 100644 --- a/drivers/usb/gadget/udc/net2272.c +++ b/drivers/usb/gadget/udc/net2272.c @@ -2670,6 +2670,8 @@ net2272_plat_probe(struct platform_device *pdev) err_req: release_mem_region(base, len); err: + kfree(dev); + return ret; } diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c index 2806457b4748..3fd603494e86 100644 --- a/drivers/usb/gadget/udc/pch_udc.c +++ b/drivers/usb/gadget/udc/pch_udc.c @@ -1488,11 +1488,11 @@ static void complete_req(struct pch_udc_ep *ep, struct pch_udc_request *req, req->dma_mapped = 0; } ep->halted = 1; - spin_lock(&dev->lock); + spin_unlock(&dev->lock); if (!ep->in) pch_udc_ep_clear_rrdy(ep); usb_gadget_giveback_request(&ep->ep, &req->req); - spin_unlock(&dev->lock); + spin_lock(&dev->lock); ep->halted = halted; } @@ -1731,14 +1731,12 @@ static int pch_udc_pcd_ep_enable(struct usb_ep *usbep, static int pch_udc_pcd_ep_disable(struct usb_ep *usbep) { struct pch_udc_ep *ep; - struct pch_udc_dev *dev; unsigned long iflags; if (!usbep) return -EINVAL; ep = container_of(usbep, struct pch_udc_ep, ep); - dev = ep->dev; if ((usbep->name == ep0_string) || !ep->ep.desc) return -EINVAL; @@ -1769,12 +1767,10 @@ static struct usb_request *pch_udc_alloc_request(struct usb_ep *usbep, struct pch_udc_request *req; struct pch_udc_ep *ep; struct pch_udc_data_dma_desc *dma_desc; - struct pch_udc_dev *dev; if (!usbep) return NULL; ep = container_of(usbep, struct pch_udc_ep, ep); - dev = ep->dev; req = kzalloc(sizeof *req, gfp); if (!req) return NULL; @@ -1947,12 +1943,10 @@ static int pch_udc_pcd_dequeue(struct usb_ep *usbep, { struct pch_udc_ep *ep; struct pch_udc_request *req; - struct pch_udc_dev *dev; unsigned long flags; int ret = -EINVAL; ep = container_of(usbep, struct pch_udc_ep, ep); - dev = ep->dev; if (!usbep || !usbreq || (!ep->ep.desc && ep->num)) return ret; req = container_of(usbreq, struct pch_udc_request, req); @@ -1984,14 +1978,12 @@ static int pch_udc_pcd_dequeue(struct usb_ep *usbep, static int pch_udc_pcd_set_halt(struct usb_ep *usbep, int halt) { struct pch_udc_ep *ep; - struct pch_udc_dev *dev; unsigned long iflags; int ret; if (!usbep) return -EINVAL; ep = container_of(usbep, struct pch_udc_ep, ep); - dev = ep->dev; if (!ep->ep.desc && !ep->num) return -EINVAL; if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN)) @@ -2029,14 +2021,12 @@ static int pch_udc_pcd_set_halt(struct usb_ep 
*usbep, int halt) static int pch_udc_pcd_set_wedge(struct usb_ep *usbep) { struct pch_udc_ep *ep; - struct pch_udc_dev *dev; unsigned long iflags; int ret; if (!usbep) return -EINVAL; ep = container_of(usbep, struct pch_udc_ep, ep); - dev = ep->dev; if (!ep->ep.desc && !ep->num) return -EINVAL; if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN)) @@ -2593,9 +2583,9 @@ static void pch_udc_svc_ur_interrupt(struct pch_udc_dev *dev) empty_req_queue(ep); } if (dev->driver) { - spin_lock(&dev->lock); - usb_gadget_udc_reset(&dev->gadget, dev->driver); spin_unlock(&dev->lock); + usb_gadget_udc_reset(&dev->gadget, dev->driver); + spin_lock(&dev->lock); } } @@ -2646,7 +2636,7 @@ static void pch_udc_svc_enum_interrupt(struct pch_udc_dev *dev) static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev) { u32 reg, dev_stat = 0; - int i, ret; + int i; dev_stat = pch_udc_read_device_status(dev); dev->cfg_data.cur_intf = (dev_stat & UDC_DEVSTS_INTF_MASK) >> @@ -2674,9 +2664,9 @@ static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev) dev->ep[i].halted = 0; } dev->stall = 0; - spin_lock(&dev->lock); - ret = dev->driver->setup(&dev->gadget, &dev->setup_data); spin_unlock(&dev->lock); + dev->driver->setup(&dev->gadget, &dev->setup_data); + spin_lock(&dev->lock); } /** @@ -2686,7 +2676,7 @@ static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev) */ static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev) { - int i, ret; + int i; u32 reg, dev_stat = 0; dev_stat = pch_udc_read_device_status(dev); @@ -2711,9 +2701,9 @@ static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev) dev->stall = 0; /* call gadget zero with setup data received */ - spin_lock(&dev->lock); - ret = dev->driver->setup(&dev->gadget, &dev->setup_data); spin_unlock(&dev->lock); + dev->driver->setup(&dev->gadget, &dev->setup_data); + spin_lock(&dev->lock); } /** diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c index 476ac5e511a4..21a7915d6a4a 100644 --- a/drivers/usb/gadget/udc/udc-core.c +++ b/drivers/usb/gadget/udc/udc-core.c @@ -97,7 +97,7 @@ void usb_gadget_unmap_request(struct usb_gadget *gadget, return; if (req->num_mapped_sgs) { - dma_unmap_sg(gadget->dev.parent, req->sg, req->num_mapped_sgs, + dma_unmap_sg(gadget->dev.parent, req->sg, req->num_sgs, is_in ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE); req->num_mapped_sgs = 0; diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c index 5731621984c6..82c9c9d068be 100644 --- a/drivers/usb/renesas_usbhs/mod_gadget.c +++ b/drivers/usb/renesas_usbhs/mod_gadget.c @@ -1103,7 +1103,6 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv) ret = -ENOMEM; goto usbhs_mod_gadget_probe_err_gpriv; } - spin_lock_init(&uep->lock); gpriv->transceiver = usb_get_phy(USB_PHY_TYPE_UNDEFINED); dev_info(dev, "%stransceiver found\n", @@ -1151,6 +1150,7 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv) uep->ep.name = uep->ep_name; uep->ep.ops = &usbhsg_ep_ops; INIT_LIST_HEAD(&uep->ep.ep_list); + spin_lock_init(&uep->lock); /* init DCP */ if (usbhsg_is_dcp(uep)) { diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c index 2220c1b9df10..c2c50d5d4ad1 100644 --- a/drivers/usb/serial/garmin_gps.c +++ b/drivers/usb/serial/garmin_gps.c @@ -1162,8 +1162,8 @@ static void garmin_read_process(struct garmin_data *garmin_data_p, send it directly to the tty port */ if (garmin_data_p->flags & FLAGS_QUEUING) { pkt_add(garmin_data_p, data, data_length); - } else if (bulk_data || - getLayerId(data) == GARMIN_LAYERID_APPL) { + } else if (bulk_data || (data_length >= sizeof(u32) && + getLayerId(data) == GARMIN_LAYERID_APPL)) { spin_lock_irqsave(&garmin_data_p->lock, flags); garmin_data_p->flags |= APP_RESP_SEEN; diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index fb6dc16c754a..06916ddc3159 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -177,6 +177,7 @@ static const struct usb_device_id id_table[] = { {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ {DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */ {DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */ + {DEVICE_SWI(0x413c, 0x81cc)}, /* Dell Wireless 5816e */ {DEVICE_SWI(0x413c, 0x81cf)}, /* Dell Wireless 5819 */ {DEVICE_SWI(0x413c, 0x81d0)}, /* Dell Wireless 5819 */ {DEVICE_SWI(0x413c, 0x81d1)}, /* Dell Wireless 5818 */ diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index 9aad6825947c..cb0af57aad6e 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h @@ -40,6 +40,13 @@ * and don't forget to CC: the USB development list <linux-usb@vger.kernel.org> */ +/* Reported-by: Julian Groß <julian.g@posteo.de> */ +UNUSUAL_DEV(0x059f, 0x105f, 0x0000, 0x9999, + "LaCie", + "2Big Quadra USB3", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_NO_REPORT_OPCODES), + /* * Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI * commands in UAS mode. Observed with the 1.28 firmware; are there others? 
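The udc-core hunk above (usb_gadget_unmap_request) matches the DMA API contract: dma_unmap_sg() takes the nents value that was originally passed to dma_map_sg(), not the possibly smaller mapped count dma_map_sg() returned, which is why the unmap now uses req->num_sgs instead of req->num_mapped_sgs. A minimal sketch of that pairing, with hypothetical names (sketch_xfer, dev, sg, nents) that are not part of this patch:

	#include <linux/device.h>
	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>

	static int sketch_xfer(struct device *dev, struct scatterlist *sg, int nents)
	{
		/* dma_map_sg() may coalesce entries and returns the mapped count */
		int mapped = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);

		if (!mapped)
			return -ENOMEM;

		/* ... program the controller with the "mapped" entries ... */

		/* unmap with the original nents, never with "mapped" */
		dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);
		return 0;
	}
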
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c index ad5929fbceb1..98a12be76c9c 100644 --- a/drivers/vfio/pci/vfio_pci_config.c +++ b/drivers/vfio/pci/vfio_pci_config.c @@ -698,7 +698,8 @@ static int vfio_vpd_config_write(struct vfio_pci_device *vdev, int pos, if (pci_write_vpd(pdev, addr & ~PCI_VPD_ADDR_F, 4, &data) != 4) return count; } else { - if (pci_read_vpd(pdev, addr, 4, &data) != 4) + data = 0; + if (pci_read_vpd(pdev, addr, 4, &data) < 0) return count; *pdata = cpu_to_le32(data); } diff --git a/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c b/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c index da5356f48d0b..d4030d0c38e9 100644 --- a/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c +++ b/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c @@ -110,7 +110,7 @@ int vfio_platform_amdxgbe_reset(struct vfio_platform_device *vdev) usleep_range(10, 15); count = 2000; - while (count-- && (ioread32(xgmac_regs->ioaddr + DMA_MR) & 1)) + while (--count && (ioread32(xgmac_regs->ioaddr + DMA_MR) & 1)) usleep_range(500, 600); if (!count) diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 91abe314a771..3eb05f441451 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -1101,6 +1101,18 @@ static int load_elf_binary(struct linux_binprm *bprm) current->mm->start_stack = bprm->p; if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) { + /* + * For architectures with ELF randomization, when executing + * a loader directly (i.e. no interpreter listed in ELF + * headers), move the brk area out of the mmap region + * (since it grows up, and may collide early with the stack + * growing down), and into the unused ELF_ET_DYN_BASE region. + */ + if (IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) && + loc->elf_ex.e_type == ET_DYN && !interpreter) + current->mm->brk = current->mm->start_brk = + ELF_ET_DYN_BASE; + current->mm->brk = current->mm->start_brk = arch_randomize_brk(current->mm); #ifdef compat_brk_randomized @@ -1718,7 +1730,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t, (!regset->active || regset->active(t->task, regset) > 0)) { int ret; size_t size = regset->n * regset->size; - void *data = kmalloc(size, GFP_KERNEL); + void *data = kzalloc(size, GFP_KERNEL); if (unlikely(!data)) return 0; ret = regset->get(t->task, regset, diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 774728143b63..de63cb9bc64b 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1750,7 +1750,7 @@ static int cleaner_kthread(void *arg) */ btrfs_delete_unused_bgs(root->fs_info); sleep: - if (!try_to_freeze() && !again) { + if (!again) { set_current_state(TASK_INTERRUPTIBLE); if (!kthread_should_stop()) schedule(); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 34ffc125763f..3bb731b2156c 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -10688,7 +10688,7 @@ int btrfs_init_space_info(struct btrfs_fs_info *fs_info) disk_super = fs_info->super_copy; if (!btrfs_super_root(disk_super)) - return 1; + return -EINVAL; features = btrfs_super_incompat_flags(disk_super); if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c index c05ab2ec0fef..5df898fd0a0a 100644 --- a/fs/cachefiles/rdwr.c +++ b/fs/cachefiles/rdwr.c @@ -64,9 +64,9 @@ static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode, object = container_of(op->op.object, struct cachefiles_object, fscache); spin_lock(&object->work_lock); list_add_tail(&monitor->op_link, &op->to_do); + 
fscache_enqueue_retrieval(op); spin_unlock(&object->work_lock); - fscache_enqueue_retrieval(op); fscache_put_retrieval(op); return 0; } diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index efdf81ea3b5f..3d0497421e62 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -3293,6 +3293,7 @@ retry: WARN_ON(1); tsession = NULL; target = -1; + mutex_lock(&session->s_mutex); } goto retry; diff --git a/fs/char_dev.c b/fs/char_dev.c index f1f3bb812799..9154a2d7b195 100644 --- a/fs/char_dev.c +++ b/fs/char_dev.c @@ -472,6 +472,85 @@ int cdev_add(struct cdev *p, dev_t dev, unsigned count) return 0; } +/** + * cdev_set_parent() - set the parent kobject for a char device + * @p: the cdev structure + * @kobj: the kobject to take a reference to + * + * cdev_set_parent() sets a parent kobject which will be referenced + * appropriately so the parent is not freed before the cdev. This + * should be called before cdev_add. + */ +void cdev_set_parent(struct cdev *p, struct kobject *kobj) +{ + WARN_ON(!kobj->state_initialized); + p->kobj.parent = kobj; +} + +/** + * cdev_device_add() - add a char device and it's corresponding + * struct device, linkink + * @dev: the device structure + * @cdev: the cdev structure + * + * cdev_device_add() adds the char device represented by @cdev to the system, + * just as cdev_add does. It then adds @dev to the system using device_add + * The dev_t for the char device will be taken from the struct device which + * needs to be initialized first. This helper function correctly takes a + * reference to the parent device so the parent will not get released until + * all references to the cdev are released. + * + * This helper uses dev->devt for the device number. If it is not set + * it will not add the cdev and it will be equivalent to device_add. + * + * This function should be used whenever the struct cdev and the + * struct device are members of the same structure whose lifetime is + * managed by the struct device. + * + * NOTE: Callers must assume that userspace was able to open the cdev and + * can call cdev fops callbacks at any time, even if this function fails. + */ +int cdev_device_add(struct cdev *cdev, struct device *dev) +{ + int rc = 0; + + if (dev->devt) { + cdev_set_parent(cdev, &dev->kobj); + + rc = cdev_add(cdev, dev->devt, 1); + if (rc) + return rc; + } + + rc = device_add(dev); + if (rc) + cdev_del(cdev); + + return rc; +} + +/** + * cdev_device_del() - inverse of cdev_device_add + * @dev: the device structure + * @cdev: the cdev structure + * + * cdev_device_del() is a helper function to call cdev_del and device_del. + * It should be used whenever cdev_device_add is used. + * + * If dev->devt is not set it will not remove the cdev and will be equivalent + * to device_del. + * + * NOTE: This guarantees that associated sysfs callbacks are not running + * or runnable, however any cdevs already open will remain and their fops + * will still be callable even after this function returns. + */ +void cdev_device_del(struct cdev *cdev, struct device *dev) +{ + device_del(dev); + if (dev->devt) + cdev_del(cdev); +} + static void cdev_unmap(dev_t dev, unsigned count) { kobj_unmap(cdev_map, dev, count); @@ -483,6 +562,10 @@ static void cdev_unmap(dev_t dev, unsigned count) * * cdev_del() removes @p from the system, possibly freeing the structure * itself. + * + * NOTE: This guarantees that cdev device will no longer be able to be + * opened, however any cdevs already open will remain and their fops will + * still be callable even after cdev_del returns. 
*/ void cdev_del(struct cdev *p) { @@ -571,5 +654,8 @@ EXPORT_SYMBOL(cdev_init); EXPORT_SYMBOL(cdev_alloc); EXPORT_SYMBOL(cdev_del); EXPORT_SYMBOL(cdev_add); +EXPORT_SYMBOL(cdev_set_parent); +EXPORT_SYMBOL(cdev_device_add); +EXPORT_SYMBOL(cdev_device_del); EXPORT_SYMBOL(__register_chrdev); EXPORT_SYMBOL(__unregister_chrdev); diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index b9b8f19dce0e..fa07f7cb85a5 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -184,6 +184,18 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command) * reconnect the same SMB session */ mutex_lock(&ses->session_mutex); + + /* + * Recheck after acquire mutex. If another thread is negotiating + * and the server never sends an answer the socket will be closed + * and tcpStatus set to reconnect. + */ + if (server->tcpStatus == CifsNeedReconnect) { + rc = -EHOSTDOWN; + mutex_unlock(&ses->session_mutex); + goto out; + } + rc = cifs_negotiate_protocol(0, ses); if (rc == 0 && ses->need_reconnect) rc = cifs_setup_session(0, ses, nls_codepage); diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index cf104bbe30a1..21ddfd77966e 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -338,8 +338,10 @@ static int reconn_set_ipaddr(struct TCP_Server_Info *server) return rc; } + spin_lock(&cifs_tcp_ses_lock); rc = cifs_convert_address((struct sockaddr *)&server->dstaddr, ipaddr, strlen(ipaddr)); + spin_unlock(&cifs_tcp_ses_lock); kfree(ipaddr); return !rc ? -1 : 0; @@ -546,20 +548,21 @@ static bool server_unresponsive(struct TCP_Server_Info *server) { /* - * We need to wait 2 echo intervals to make sure we handle such + * We need to wait 3 echo intervals to make sure we handle such * situations right: * 1s client sends a normal SMB request - * 2s client gets a response + * 3s client gets a response * 30s echo workqueue job pops, and decides we got a response recently * and don't need to send another * ... * 65s kernel_recvmsg times out, and we see that we haven't gotten * a response in >60s. */ - if (server->tcpStatus == CifsGood && - time_after(jiffies, server->lstrp + 2 * SMB_ECHO_INTERVAL)) { + if ((server->tcpStatus == CifsGood || + server->tcpStatus == CifsNeedNegotiate) && + time_after(jiffies, server->lstrp + 3 * SMB_ECHO_INTERVAL)) { cifs_dbg(VFS, "Server %s has not responded in %d seconds. Reconnecting...\n", - server->hostname, (2 * SMB_ECHO_INTERVAL) / HZ); + server->hostname, (3 * SMB_ECHO_INTERVAL) / HZ); cifs_reconnect(server); wake_up(&server->response_q); return true; diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 18ecfa4d9757..a5008d692c2a 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -3230,7 +3230,7 @@ cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset) * than it negotiated since it will refuse the read * then. */ - if ((tcon->ses) && !(tcon->ses->capabilities & + if (!(tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)) { current_read_size = min_t(uint, current_read_size, CIFSMaxBufSize); diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index d4472a494758..4ffd5e177288 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -249,6 +249,18 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon) * the same SMB session */ mutex_lock(&tcon->ses->session_mutex); + + /* + * Recheck after acquire mutex. If another thread is negotiating + * and the server never sends an answer the socket will be closed + * and tcpStatus set to reconnect. 
+ */ + if (server->tcpStatus == CifsNeedReconnect) { + rc = -EHOSTDOWN; + mutex_unlock(&tcon->ses->session_mutex); + goto out; + } + rc = cifs_negotiate_protocol(0, tcon->ses); if (!rc && tcon->ses->need_reconnect) { rc = cifs_setup_session(0, tcon->ses, nls_codepage); diff --git a/fs/exec.c b/fs/exec.c index 422ab6b07574..84b6c88fcc7a 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1124,6 +1124,8 @@ int flush_old_exec(struct linux_binprm * bprm) */ set_mm_exe_file(bprm->mm, bprm->file); + would_dump(bprm, bprm->file); + /* * Release all of the old mmap stuff */ @@ -1634,8 +1636,6 @@ static int do_execveat_common(int fd, struct filename *filename, if (retval < 0) goto out; - would_dump(bprm, bprm->file); - retval = exec_binprm(bprm); if (retval < 0) goto out; diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c index 01f4d0d3250c..e66047d6943f 100644 --- a/fs/ext2/xattr.c +++ b/fs/ext2/xattr.c @@ -55,7 +55,6 @@ #include <linux/buffer_head.h> #include <linux/init.h> -#include <linux/printk.h> #include <linux/slab.h> #include <linux/mbcache2.h> #include <linux/quotaops.h> @@ -86,8 +85,8 @@ printk("\n"); \ } while (0) #else -# define ea_idebug(inode, f...) no_printk(f) -# define ea_bdebug(bh, f...) no_printk(f) +# define ea_idebug(f...) +# define ea_bdebug(f...) #endif static int ext2_xattr_set2(struct inode *, struct buffer_head *, diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c index ccd80f2b3b19..d5055b3adccc 100644 --- a/fs/ext4/block_validity.c +++ b/fs/ext4/block_validity.c @@ -152,6 +152,7 @@ static int ext4_protect_reserved_inode(struct super_block *sb, u32 ino) return PTR_ERR(inode); num = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits; while (i < num) { + cond_resched(); map.m_lblk = i; map.m_len = num - i; n = ext4_map_blocks(NULL, inode, &map, 0); diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index fdea338ce8e1..157187e6e060 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -139,31 +139,26 @@ static __le32 ext4_xattr_block_csum(struct inode *inode, } static int ext4_xattr_block_csum_verify(struct inode *inode, - sector_t block_nr, - struct ext4_xattr_header *hdr) + struct buffer_head *bh) { - if (ext4_has_metadata_csum(inode->i_sb) && - (hdr->h_checksum != ext4_xattr_block_csum(inode, block_nr, hdr))) - return 0; - return 1; -} - -static void ext4_xattr_block_csum_set(struct inode *inode, - sector_t block_nr, - struct ext4_xattr_header *hdr) -{ - if (!ext4_has_metadata_csum(inode->i_sb)) - return; + struct ext4_xattr_header *hdr = BHDR(bh); + int ret = 1; - hdr->h_checksum = ext4_xattr_block_csum(inode, block_nr, hdr); + if (ext4_has_metadata_csum(inode->i_sb)) { + lock_buffer(bh); + ret = (hdr->h_checksum == ext4_xattr_block_csum(inode, + bh->b_blocknr, hdr)); + unlock_buffer(bh); + } + return ret; } -static inline int ext4_handle_dirty_xattr_block(handle_t *handle, - struct inode *inode, - struct buffer_head *bh) +static void ext4_xattr_block_csum_set(struct inode *inode, + struct buffer_head *bh) { - ext4_xattr_block_csum_set(inode, bh->b_blocknr, BHDR(bh)); - return ext4_handle_dirty_metadata(handle, inode, bh); + if (ext4_has_metadata_csum(inode->i_sb)) + BHDR(bh)->h_checksum = ext4_xattr_block_csum(inode, + bh->b_blocknr, BHDR(bh)); } static inline const struct xattr_handler * @@ -226,7 +221,7 @@ ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh) if (buffer_verified(bh)) return 0; - if (!ext4_xattr_block_csum_verify(inode, bh->b_blocknr, BHDR(bh))) + if (!ext4_xattr_block_csum_verify(inode, bh)) return -EFSBADCRC; error = 
ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size, bh->b_data); @@ -591,23 +586,23 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode, EXT4_FREE_BLOCKS_FORGET); } else { le32_add_cpu(&BHDR(bh)->h_refcount, -1); + + ext4_xattr_block_csum_set(inode, bh); /* * Beware of this ugliness: Releasing of xattr block references * from different inodes can race and so we have to protect * from a race where someone else frees the block (and releases * its journal_head) before we are done dirtying the buffer. In * nojournal mode this race is harmless and we actually cannot - * call ext4_handle_dirty_xattr_block() with locked buffer as + * call ext4_handle_dirty_metadata() with locked buffer as * that function can call sync_dirty_buffer() so for that case * we handle the dirtying after unlocking the buffer. */ if (ext4_handle_valid(handle)) - error = ext4_handle_dirty_xattr_block(handle, inode, - bh); + error = ext4_handle_dirty_metadata(handle, inode, bh); unlock_buffer(bh); if (!ext4_handle_valid(handle)) - error = ext4_handle_dirty_xattr_block(handle, inode, - bh); + error = ext4_handle_dirty_metadata(handle, inode, bh); if (IS_SYNC(inode)) ext4_handle_sync(handle); dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1)); @@ -841,13 +836,14 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode, ext4_xattr_rehash(header(s->base), s->here); } + ext4_xattr_block_csum_set(inode, bs->bh); unlock_buffer(bs->bh); if (error == -EFSCORRUPTED) goto bad_block; if (!error) - error = ext4_handle_dirty_xattr_block(handle, - inode, - bs->bh); + error = ext4_handle_dirty_metadata(handle, + inode, + bs->bh); if (error) goto cleanup; goto inserted; @@ -937,10 +933,11 @@ inserted: le32_add_cpu(&BHDR(new_bh)->h_refcount, 1); ea_bdebug(new_bh, "reusing; refcount now=%d", le32_to_cpu(BHDR(new_bh)->h_refcount)); + ext4_xattr_block_csum_set(inode, new_bh); unlock_buffer(new_bh); - error = ext4_handle_dirty_xattr_block(handle, - inode, - new_bh); + error = ext4_handle_dirty_metadata(handle, + inode, + new_bh); if (error) goto cleanup_dquot; } @@ -991,11 +988,12 @@ getblk_failed: goto getblk_failed; } memcpy(new_bh->b_data, s->base, new_bh->b_size); + ext4_xattr_block_csum_set(inode, new_bh); set_buffer_uptodate(new_bh); unlock_buffer(new_bh); ext4_xattr_cache_insert(ext4_mb_cache, new_bh); - error = ext4_handle_dirty_xattr_block(handle, - inode, new_bh); + error = ext4_handle_dirty_metadata(handle, inode, + new_bh); if (error) goto cleanup; } diff --git a/fs/file.c b/fs/file.c index 7e9eb65a2912..090015401c55 100644 --- a/fs/file.c +++ b/fs/file.c @@ -88,7 +88,7 @@ static void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt, */ static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt) { - unsigned int cpy, set; + size_t cpy, set; BUG_ON(nfdt->max_fds < ofdt->max_fds); diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index 8744bd773823..dec23fb358ec 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -1035,7 +1035,10 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl) if (fl_gh->gh_state == state) goto out; locks_lock_file_wait(file, - &(struct file_lock){.fl_type = F_UNLCK}); + &(struct file_lock) { + .fl_type = F_UNLCK, + .fl_flags = FL_FLOCK + }); gfs2_glock_dq(fl_gh); gfs2_holder_reinit(state, flags, fl_gh); } else { diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index f80ffccb0316..1eb737c466dd 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -541,9 +541,6 @@ __acquires(&gl->gl_lockref.lock) goto out_unlock; if (nonblock) goto out_sched; - 
smp_mb(); - if (atomic_read(&gl->gl_revokes) != 0) - goto out_sched; set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags); GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE); gl->gl_target = gl->gl_demote_state; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 08207001d475..0308b5689638 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -6054,6 +6054,7 @@ static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *reques static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) { struct nfs_inode *nfsi = NFS_I(state->inode); + struct nfs4_state_owner *sp = state->owner; unsigned char fl_flags = request->fl_flags; int status = -ENOLCK; @@ -6068,6 +6069,7 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock status = do_vfs_lock(state->inode, request); if (status < 0) goto out; + mutex_lock(&sp->so_delegreturn_mutex); down_read(&nfsi->rwsem); if (test_bit(NFS_DELEGATED_STATE, &state->flags)) { /* Yes: cache locks! */ @@ -6075,9 +6077,11 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock request->fl_flags = fl_flags & ~FL_SLEEP; status = do_vfs_lock(state->inode, request); up_read(&nfsi->rwsem); + mutex_unlock(&sp->so_delegreturn_mutex); goto out; } up_read(&nfsi->rwsem); + mutex_unlock(&sp->so_delegreturn_mutex); status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW); out: request->fl_flags = fl_flags; diff --git a/include/asm-generic/asm-prototypes.h b/include/asm-generic/asm-prototypes.h index df13637e4017..939869c772b1 100644 --- a/include/asm-generic/asm-prototypes.h +++ b/include/asm-generic/asm-prototypes.h @@ -1,7 +1,13 @@ #include <linux/bitops.h> +#undef __memset extern void *__memset(void *, int, __kernel_size_t); +#undef __memcpy extern void *__memcpy(void *, const void *, __kernel_size_t); +#undef __memmove extern void *__memmove(void *, const void *, __kernel_size_t); +#undef memset extern void *memset(void *, int, __kernel_size_t); +#undef memcpy extern void *memcpy(void *, const void *, __kernel_size_t); +#undef memmove extern void *memmove(void *, const void *, __kernel_size_t); diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h index 5d2add1a6c96..864fcfa1df41 100644 --- a/include/asm-generic/topology.h +++ b/include/asm-generic/topology.h @@ -51,7 +51,7 @@ #ifdef CONFIG_NEED_MULTIPLE_NODES #define cpumask_of_node(node) ((node) == 0 ? cpu_online_mask : cpu_none_mask) #else - #define cpumask_of_node(node) ((void)node, cpu_online_mask) + #define cpumask_of_node(node) ((void)(node), cpu_online_mask) #endif #endif #ifndef pcibus_to_node diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index f00c4221685b..56ebb19e1768 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -434,7 +434,8 @@ struct request_queue { unsigned int sg_reserved_size; int node; #ifdef CONFIG_BLK_DEV_IO_TRACE - struct blk_trace *blk_trace; + struct blk_trace __rcu *blk_trace; + struct mutex blk_trace_mutex; #endif /* * for flush operations diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index afc1343df3c7..e644bfe50019 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h @@ -51,9 +51,13 @@ void __trace_note_message(struct blk_trace *, const char *fmt, ...); **/ #define blk_add_trace_msg(q, fmt, ...) 
\ do { \ - struct blk_trace *bt = (q)->blk_trace; \ + struct blk_trace *bt; \ + \ + rcu_read_lock(); \ + bt = rcu_dereference((q)->blk_trace); \ if (unlikely(bt)) \ __trace_note_message(bt, fmt, ##__VA_ARGS__); \ + rcu_read_unlock(); \ } while (0) #define BLK_TN_MAX_MSG 128 diff --git a/include/linux/cdev.h b/include/linux/cdev.h index f8763615a5f2..408bc09ce497 100644 --- a/include/linux/cdev.h +++ b/include/linux/cdev.h @@ -4,6 +4,7 @@ #include <linux/kobject.h> #include <linux/kdev_t.h> #include <linux/list.h> +#include <linux/device.h> struct file_operations; struct inode; @@ -26,6 +27,10 @@ void cdev_put(struct cdev *p); int cdev_add(struct cdev *, dev_t, unsigned); +void cdev_set_parent(struct cdev *p, struct kobject *kobj); +int cdev_device_add(struct cdev *cdev, struct device *dev); +void cdev_device_del(struct cdev *cdev, struct device *dev); + void cdev_del(struct cdev *); void cd_forget(struct inode *); diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 5f8749440c6a..e5d349d65ae9 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -556,4 +556,11 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s # define __kprobes # define nokprobe_inline inline #endif + +/* + * This is needed in functions which generate the stack canary, see + * arch/x86/kernel/smpboot.c::start_secondary() for an example. + */ +#define prevent_tail_call_optimization() mb() + #endif /* __LINUX_COMPILER_H */ diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 0c84c69d54f8..9302d016b89f 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -100,10 +100,6 @@ struct cpufreq_policy { * - Any routine that will write to the policy structure and/or may take away * the policy altogether (eg. CPU hotplug), will hold this lock in write * mode before doing so. - * - * Additional rules: - * - Lock should not be held across - * __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT); */ struct rw_semaphore rwsem; diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index e0bfecde4e7b..50f16bd149c0 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -174,6 +174,8 @@ static inline unsigned int cpumask_local_spread(unsigned int i, int node) for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) #define for_each_cpu_not(cpu, mask) \ for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) +#define for_each_cpu_wrap(cpu, mask, start) \ + for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)(start)) #define for_each_cpu_and(cpu, mask, and) \ for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and) #else @@ -246,6 +248,23 @@ unsigned int cpumask_local_spread(unsigned int i, int node); (cpu) = cpumask_next_zero((cpu), (mask)), \ (cpu) < nr_cpu_ids;) +extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap); + +/** + * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location + * @cpu: the (optionally unsigned) integer iterator + * @mask: the cpumask poiter + * @start: the start location + * + * The implementation does not assume any bit in @mask is set (including @start). + * + * After the loop, cpu is >= nr_cpu_ids. 
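An illustrative userspace re-implementation of the wrap-around semantics documented just above (the kernel macro body follows in the next hunk): every set bit of the mask is visited exactly once, starting at an arbitrary position and wrapping past the last bit. This only models the behaviour; it is not the kernel implementation.

#include <stdint.h>
#include <stdio.h>

#define NBITS 64

static void for_each_bit_wrap(uint64_t mask, unsigned int start,
			      void (*visit)(unsigned int))
{
	unsigned int i, bit;

	for (i = 0; i < NBITS; i++) {
		bit = (start + i) % NBITS;	/* wrap past the last bit */
		if (mask & (UINT64_C(1) << bit))
			visit(bit);
	}
}

static void print_cpu(unsigned int cpu)
{
	printf("cpu %u\n", cpu);
}

int main(void)
{
	/* bits 1, 3 and 62 set; starting at 3 yields 3, 62, 1 */
	for_each_bit_wrap((UINT64_C(1) << 1) | (UINT64_C(1) << 3) |
			  (UINT64_C(1) << 62), 3, print_cpu);
	return 0;
}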
+ */ +#define for_each_cpu_wrap(cpu, mask, start) \ + for ((cpu) = cpumask_next_wrap((start)-1, (mask), (start), false); \ + (cpu) < nr_cpumask_bits; \ + (cpu) = cpumask_next_wrap((cpu), (mask), (start), true)) + /** * for_each_cpu_and - iterate over every cpu in both masks * @cpu: the (optionally unsigned) integer iterator diff --git a/include/linux/fs.h b/include/linux/fs.h index cb599dd90018..5c06fcdd44a9 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -953,7 +953,7 @@ struct file_handle { __u32 handle_bytes; int handle_type; /* file identifier */ - unsigned char f_handle[0]; + unsigned char f_handle[]; }; static inline struct file *get_file(struct file *f) diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index c2f46cffaab4..99b7ed47fc11 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -607,6 +607,15 @@ static inline bool ieee80211_is_qos_nullfunc(__le16 fc) } /** + * ieee80211_is_any_nullfunc - check if frame is regular or QoS nullfunc frame + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee80211_is_any_nullfunc(__le16 fc) +{ + return (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)); +} + +/** * ieee80211_is_bufferable_mmpdu - check if frame is bufferable MMPDU * @fc: frame control field in little-endian byteorder */ diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 412aa988c6ad..626139de5917 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -54,7 +54,7 @@ enum { /* one minute for the sake of bringup. Generally, commands must always * complete and we may need to increase this timeout value */ - MLX5_CMD_TIMEOUT_MSEC = 7200 * 1000, + MLX5_CMD_TIMEOUT_MSEC = 60 * 1000, MLX5_CMD_WQ_MAX_NAME = 32, }; @@ -566,8 +566,10 @@ struct mlx5_cmd_work_ent { void *uout; int uout_size; mlx5_cmd_cbk_t callback; + struct delayed_work cb_timeout_work; void *context; int idx; + struct completion handling; struct completion done; struct mlx5_cmd *cmd; struct work_struct work; diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index a8786d27ab81..489fc317746a 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -539,6 +539,7 @@ struct mlx5_modify_qp_mbox_in { __be32 optparam; u8 rsvd1[4]; struct mlx5_qp_context ctx; + u8 rsvd2[16]; }; struct mlx5_modify_qp_mbox_out { diff --git a/include/linux/mm.h b/include/linux/mm.h index b6ad99903617..ab42cba931df 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -462,7 +462,6 @@ static inline void page_mapcount_reset(struct page *page) static inline int page_mapcount(struct page *page) { - VM_BUG_ON_PAGE(PageSlab(page), page); return atomic_read(&page->_mapcount) + 1; } diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 5a9d1d4c2487..93fc37200793 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -276,7 +276,7 @@ struct nand_onfi_params { __le16 t_r; __le16 t_ccs; __le16 src_sync_timing_mode; - __le16 src_ssync_features; + u8 src_ssync_features; __le16 clk_pin_capacitance_typ; __le16 io_pin_capacitance_typ; __le16 input_pin_capacitance_typ; @@ -284,7 +284,7 @@ struct nand_onfi_params { u8 driver_strength_support; __le16 t_int_r; __le16 t_ald; - u8 reserved4[7]; + u8 reserved4[8]; /* vendor */ __le16 vendor_revision; diff --git a/include/linux/net.h b/include/linux/net.h index c00b8d182226..6de18ead3dfe 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -291,6 +291,9 @@ int kernel_sendpage(struct socket *sock, struct page 
*page, int offset, int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg); int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how); +/* Routine returns the IP overhead imposed by a (caller-protected) socket. */ +u32 kernel_sock_ip_overhead(struct sock *sk); + #define MODULE_ALIAS_NETPROTO(proto) \ MODULE_ALIAS("net-pf-" __stringify(proto)) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index ef891fd26353..5faf056cfd9b 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2013,7 +2013,10 @@ struct napi_gro_cb { /* Number of gro_receive callbacks this packet already went through */ u8 recursion_counter:4; - /* 3 bit hole */ + /* Used in GRE, set in fou/gue_gro_receive */ + u8 is_fou:1; + + /* 2 bit hole */ /* used to support CHECKSUM_COMPLETE for tunneling protocols */ __wsum csum; diff --git a/include/linux/netfilter/nf_conntrack_pptp.h b/include/linux/netfilter/nf_conntrack_pptp.h index 2ab2830316b7..aca42a2e79cf 100644 --- a/include/linux/netfilter/nf_conntrack_pptp.h +++ b/include/linux/netfilter/nf_conntrack_pptp.h @@ -4,7 +4,7 @@ #include <linux/netfilter/nf_conntrack_common.h> -extern const char *const pptp_msg_name[]; +const char *pptp_msg_name(u_int16_t msg); /* state of the control session */ enum pptp_ctrlsess_state { diff --git a/include/linux/padata.h b/include/linux/padata.h index 438694650471..547a8d1e4a3b 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -24,7 +24,6 @@ #include <linux/workqueue.h> #include <linux/spinlock.h> #include <linux/list.h> -#include <linux/timer.h> #include <linux/notifier.h> #include <linux/kobject.h> @@ -37,6 +36,7 @@ * @list: List entry, to attach to the padata lists. * @pd: Pointer to the internal control structure. * @cb_cpu: Callback cpu for serializatioon. + * @cpu: Cpu for parallelization. * @seq_nr: Sequence number of the parallelized data object. * @info: Used to pass information from the parallel to the serial function. * @parallel: Parallel execution function. @@ -46,6 +46,7 @@ struct padata_priv { struct list_head list; struct parallel_data *pd; int cb_cpu; + int cpu; int info; void (*parallel)(struct padata_priv *padata); void (*serial)(struct padata_priv *padata); @@ -83,7 +84,6 @@ struct padata_serial_queue { * @serial: List to wait for serialization after reordering. * @pwork: work struct for parallelization. * @swork: work struct for serialization. - * @pd: Backpointer to the internal control structure. * @work: work struct for parallelization. * @num_obj: Number of objects that are processed by this cpu. * @cpu_index: Index of the cpu. @@ -91,7 +91,6 @@ struct padata_serial_queue { struct padata_parallel_queue { struct padata_list parallel; struct padata_list reorder; - struct parallel_data *pd; struct work_struct work; atomic_t num_obj; int cpu_index; @@ -118,10 +117,10 @@ struct padata_cpumask { * @reorder_objects: Number of objects waiting in the reorder queues. * @refcnt: Number of objects holding a reference on this parallel_data. * @max_seq_nr: Maximal used sequence number. + * @cpu: Next CPU to be processed. * @cpumask: The cpumasks in use for parallel and serial workers. + * @reorder_work: work struct for reordering. * @lock: Reorder lock. - * @processed: Number of already processed objects. - * @timer: Reorder timer. 
*/ struct parallel_data { struct padata_instance *pinst; @@ -130,10 +129,10 @@ struct parallel_data { atomic_t reorder_objects; atomic_t refcnt; atomic_t seq_nr; + int cpu; struct padata_cpumask cpumask; + struct work_struct reorder_work; spinlock_t lock ____cacheline_aligned; - unsigned int processed; - struct timer_list timer; }; /** diff --git a/include/linux/pnp.h b/include/linux/pnp.h index 5df733b8f704..c03a368b5911 100644 --- a/include/linux/pnp.h +++ b/include/linux/pnp.h @@ -219,10 +219,8 @@ struct pnp_card { #define global_to_pnp_card(n) list_entry(n, struct pnp_card, global_list) #define protocol_to_pnp_card(n) list_entry(n, struct pnp_card, protocol_list) #define to_pnp_card(n) container_of(n, struct pnp_card, dev) -#define pnp_for_each_card(card) \ - for((card) = global_to_pnp_card(pnp_cards.next); \ - (card) != global_to_pnp_card(&pnp_cards); \ - (card) = global_to_pnp_card((card)->global_list.next)) +#define pnp_for_each_card(card) \ + list_for_each_entry(card, &pnp_cards, global_list) struct pnp_card_link { struct pnp_card *card; @@ -275,14 +273,9 @@ struct pnp_dev { #define card_to_pnp_dev(n) list_entry(n, struct pnp_dev, card_list) #define protocol_to_pnp_dev(n) list_entry(n, struct pnp_dev, protocol_list) #define to_pnp_dev(n) container_of(n, struct pnp_dev, dev) -#define pnp_for_each_dev(dev) \ - for((dev) = global_to_pnp_dev(pnp_global.next); \ - (dev) != global_to_pnp_dev(&pnp_global); \ - (dev) = global_to_pnp_dev((dev)->global_list.next)) -#define card_for_each_dev(card,dev) \ - for((dev) = card_to_pnp_dev((card)->devices.next); \ - (dev) != card_to_pnp_dev(&(card)->devices); \ - (dev) = card_to_pnp_dev((dev)->card_list.next)) +#define pnp_for_each_dev(dev) list_for_each_entry(dev, &pnp_global, global_list) +#define card_for_each_dev(card, dev) \ + list_for_each_entry(dev, &(card)->devices, card_list) #define pnp_dev_name(dev) (dev)->name static inline void *pnp_get_drvdata(struct pnp_dev *pdev) @@ -434,14 +427,10 @@ struct pnp_protocol { }; #define to_pnp_protocol(n) list_entry(n, struct pnp_protocol, protocol_list) -#define protocol_for_each_card(protocol,card) \ - for((card) = protocol_to_pnp_card((protocol)->cards.next); \ - (card) != protocol_to_pnp_card(&(protocol)->cards); \ - (card) = protocol_to_pnp_card((card)->protocol_list.next)) -#define protocol_for_each_dev(protocol,dev) \ - for((dev) = protocol_to_pnp_dev((protocol)->devices.next); \ - (dev) != protocol_to_pnp_dev(&(protocol)->devices); \ - (dev) = protocol_to_pnp_dev((dev)->protocol_list.next)) +#define protocol_for_each_card(protocol, card) \ + list_for_each_entry(card, &(protocol)->cards, protocol_list) +#define protocol_for_each_dev(protocol, dev) \ + list_for_each_entry(dev, &(protocol)->devices, protocol_list) extern struct bus_type pnp_bus_type; diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h index 83b22ae9ae12..b39420a0321c 100644 --- a/include/linux/posix-clock.h +++ b/include/linux/posix-clock.h @@ -104,29 +104,32 @@ struct posix_clock_operations { * * @ops: Functional interface to the clock * @cdev: Character device instance for this clock - * @kref: Reference count. + * @dev: Pointer to the clock's device. * @rwsem: Protects the 'zombie' field from concurrent access. * @zombie: If 'zombie' is true, then the hardware has disappeared. - * @release: A function to free the structure when the reference count reaches - * zero. May be NULL if structure is statically allocated. 
* * Drivers should embed their struct posix_clock within a private * structure, obtaining a reference to it during callbacks using * container_of(). + * + * Drivers should supply an initialized but not exposed struct device + * to posix_clock_register(). It is used to manage lifetime of the + * driver's private structure. It's 'release' field should be set to + * a release function for this private structure. */ struct posix_clock { struct posix_clock_operations ops; struct cdev cdev; - struct kref kref; + struct device *dev; struct rw_semaphore rwsem; bool zombie; - void (*release)(struct posix_clock *clk); }; /** * posix_clock_register() - register a new clock - * @clk: Pointer to the clock. Caller must provide 'ops' and 'release' - * @devid: Allocated device id + * @clk: Pointer to the clock. Caller must provide 'ops' field + * @dev: Pointer to the initialized device. Caller must provide + * 'release' field * * A clock driver calls this function to register itself with the * clock device subsystem. If 'clk' points to dynamically allocated @@ -135,7 +138,7 @@ struct posix_clock { * * Returns zero on success, non-zero otherwise. */ -int posix_clock_register(struct posix_clock *clk, dev_t devid); +int posix_clock_register(struct posix_clock *clk, struct device *dev); /** * posix_clock_unregister() - unregister a clock diff --git a/include/linux/printk.h b/include/linux/printk.h index 9729565c25ff..9ccbdf2c1453 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -106,13 +106,13 @@ struct va_format { /* * Dummy printk for disabled debugging statements to use whilst maintaining - * gcc's format and side-effect checking. + * gcc's format checking. */ -static inline __printf(1, 2) -int no_printk(const char *fmt, ...) -{ - return 0; -} +#define no_printk(fmt, ...) \ +do { \ + if (0) \ + printk(fmt, ##__VA_ARGS__); \ +} while (0) #ifdef CONFIG_EARLY_PRINTK extern asmlinkage __printf(1, 2) diff --git a/include/linux/sunrpc/msg_prot.h b/include/linux/sunrpc/msg_prot.h index 807371357160..59cbf16eaeb5 100644 --- a/include/linux/sunrpc/msg_prot.h +++ b/include/linux/sunrpc/msg_prot.h @@ -158,9 +158,9 @@ typedef __be32 rpc_fraghdr; /* * Note that RFC 1833 does not put any size restrictions on the - * netid string, but all currently defined netid's fit in 4 bytes. + * netid string, but all currently defined netid's fit in 5 bytes. */ -#define RPCBIND_MAXNETIDLEN (4u) +#define RPCBIND_MAXNETIDLEN (5u) /* * Universal addresses are introduced in RFC 1833 and further spelled diff --git a/include/linux/tty.h b/include/linux/tty.h index 1c1bb90f6819..006b82142f7b 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -64,7 +64,7 @@ struct tty_buffer { int read; int flags; /* Data points here */ - unsigned long data[0]; + unsigned long data[]; }; /* Values for .flags field of tty_buffer */ diff --git a/include/media/media-device.h b/include/media/media-device.h index 6e6db78f1ee2..00bbd679864a 100644 --- a/include/media/media-device.h +++ b/include/media/media-device.h @@ -60,7 +60,7 @@ struct device; struct media_device { /* dev->driver_data points to this struct. 
*/ struct device *dev; - struct media_devnode devnode; + struct media_devnode *devnode; char model[32]; char serial[40]; @@ -84,9 +84,6 @@ struct media_device { #define MEDIA_DEV_NOTIFY_PRE_LINK_CH 0 #define MEDIA_DEV_NOTIFY_POST_LINK_CH 1 -/* media_devnode to media_device */ -#define to_media_device(node) container_of(node, struct media_device, devnode) - int __must_check __media_device_register(struct media_device *mdev, struct module *owner); #define media_device_register(mdev) __media_device_register(mdev, THIS_MODULE) diff --git a/include/media/media-devnode.h b/include/media/media-devnode.h index 17ddae32060d..d5ff95bf2d4b 100644 --- a/include/media/media-devnode.h +++ b/include/media/media-devnode.h @@ -33,6 +33,8 @@ #include <linux/device.h> #include <linux/cdev.h> +struct media_device; + /* * Flag to mark the media_devnode struct as registered. Drivers must not touch * this flag directly, it will be set and cleared by media_devnode_register and @@ -67,6 +69,8 @@ struct media_file_operations { * before registering the node. */ struct media_devnode { + struct media_device *media_dev; + /* device ops */ const struct media_file_operations *fops; @@ -80,24 +84,42 @@ struct media_devnode { unsigned long flags; /* Use bitops to access flags */ /* callbacks */ - void (*release)(struct media_devnode *mdev); + void (*release)(struct media_devnode *devnode); }; /* dev to media_devnode */ #define to_media_devnode(cd) container_of(cd, struct media_devnode, dev) -int __must_check media_devnode_register(struct media_devnode *mdev, +int __must_check media_devnode_register(struct media_device *mdev, + struct media_devnode *devnode, struct module *owner); -void media_devnode_unregister(struct media_devnode *mdev); + +/** + * media_devnode_unregister_prepare - clear the media device node register bit + * @devnode: the device node to prepare for unregister + * + * This clears the passed device register bit. Future open calls will be met + * with errors. Should be called before media_devnode_unregister() to avoid + * races with unregister and device file open calls. + * + * This function can safely be called if the device node has never been + * registered or has already been unregistered. 
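A userspace model of the two-step teardown described above: the prepare step clears the registered flag so new opens start failing, and the real unregister then runs without racing fresh open calls. The devnode structure and the error code are illustrative only.

#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>

struct devnode {
	atomic_bool registered;
};

static int devnode_open(struct devnode *d)
{
	if (!atomic_load(&d->registered))
		return -ENXIO;		/* unregister already in progress */
	/* ... normal open path ... */
	return 0;
}

static void devnode_unregister_prepare(struct devnode *d)
{
	/* Safe even if the node was never registered. */
	atomic_store(&d->registered, false);
}

static void devnode_unregister(struct devnode *d)
{
	(void)d;	/* ... free resources; no new opens can race in ... */
}

int main(void)
{
	struct devnode d;

	atomic_init(&d.registered, true);
	printf("open before prepare: %d\n", devnode_open(&d));	/* 0 */
	devnode_unregister_prepare(&d);
	printf("open after prepare:  %d\n", devnode_open(&d));	/* -ENXIO */
	devnode_unregister(&d);
	return 0;
}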
+ */ +void media_devnode_unregister_prepare(struct media_devnode *devnode); + +void media_devnode_unregister(struct media_devnode *devnode); static inline struct media_devnode *media_devnode_data(struct file *filp) { return filp->private_data; } -static inline int media_devnode_is_registered(struct media_devnode *mdev) +static inline int media_devnode_is_registered(struct media_devnode *devnode) { - return test_bit(MEDIA_FLAG_REGISTERED, &mdev->flags); + if (!devnode) + return false; + + return test_bit(MEDIA_FLAG_REGISTERED, &devnode->flags); } #endif /* _MEDIA_DEVNODE_H */ diff --git a/include/net/addrconf.h b/include/net/addrconf.h index c796f93aaa70..17a2c281d860 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -193,8 +193,10 @@ struct ipv6_stub { const struct in6_addr *addr); int (*ipv6_sock_mc_drop)(struct sock *sk, int ifindex, const struct in6_addr *addr); - int (*ipv6_dst_lookup)(struct net *net, struct sock *sk, - struct dst_entry **dst, struct flowi6 *fl6); + struct dst_entry *(*ipv6_dst_lookup_flow)(struct net *net, + const struct sock *sk, + struct flowi6 *fl6, + const struct in6_addr *final_dst); void (*udpv6_encap_enable)(void); void (*ndisc_send_na)(struct net_device *dev, const struct in6_addr *daddr, const struct in6_addr *solicited_addr, diff --git a/include/net/bonding.h b/include/net/bonding.h index d5abd3a80896..6fbfc21b27b1 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -34,6 +34,9 @@ #define BOND_DEFAULT_MIIMON 100 +#ifndef __long_aligned +#define __long_aligned __attribute__((aligned((sizeof(long))))) +#endif /* * Less bad way to call ioctl from within the kernel; this needs to be * done some other way to get the call out of interrupt context. @@ -138,7 +141,9 @@ struct bond_params { struct reciprocal_value reciprocal_packets_per_slave; u16 ad_actor_sys_prio; u16 ad_user_port_key; - u8 ad_actor_system[ETH_ALEN]; + + /* 2 bytes of padding : see ether_addr_equal_64bits() */ + u8 ad_actor_system[ETH_ALEN + 2]; }; struct bond_parm_tbl { diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index a39430e2fdcc..33f28e62b790 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -103,6 +103,9 @@ void fib6_force_start_gc(struct net *net); struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, const struct in6_addr *addr, bool anycast); +struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev, + int flags); + /* * support functions for ND * diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index f6ff83b2ac87..b8dfab88c877 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -112,6 +112,7 @@ struct fib_info { unsigned char fib_scope; unsigned char fib_type; __be32 fib_prefsrc; + u32 fib_tb_id; u32 fib_priority; struct dst_metrics *fib_metrics; #define fib_mtu fib_metrics->metrics[RTAX_MTU-1] @@ -320,7 +321,7 @@ void fib_flush_external(struct net *net); /* Exported by fib_semantics.c */ int ip_fib_check_default(__be32 gw, struct net_device *dev); int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force); -int fib_sync_down_addr(struct net *net, __be32 local); +int fib_sync_down_addr(struct net_device *dev, __be32 local); int fib_sync_up(struct net_device *dev, unsigned int nh_flags); void fib_sync_mtu(struct net_device *dev, u32 orig_mtu); diff --git a/include/net/ipv6.h b/include/net/ipv6.h index c07cf9596b6f..94880f07bc06 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -853,7 +853,7 @@ static inline struct sk_buff *ip6_finish_skb(struct sock 
*sk) int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6); -struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6, +struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, struct flowi6 *fl6, const struct in6_addr *final_dst); struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, const struct in6_addr *final_dst); @@ -915,6 +915,8 @@ int compat_ipv6_setsockopt(struct sock *sk, int level, int optname, int compat_ipv6_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); +int __ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, + int addr_len); int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len); int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *addr, int addr_len); diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index 636e9e11bd5f..e3f73fd1d53a 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -98,7 +98,7 @@ struct nf_conn { possible_net_t ct_net; /* all members below initialized via memset */ - u8 __nfct_init_offset[0]; + struct { } __nfct_init_offset; /* If we were expected by an expectation, this will be it */ struct nf_conn *master; diff --git a/include/net/route.h b/include/net/route.h index 11dfd0df0e67..f828a9d83012 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -210,6 +210,9 @@ unsigned int inet_addr_type_dev_table(struct net *net, void ip_rt_multicast_event(struct in_device *); int ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg); void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt); +struct rtable *rt_dst_alloc(struct net_device *dev, + unsigned int flags, u16 type, + bool nopolicy, bool noxfrm, bool will_cache); struct in_ifaddr; void fib_add_ifaddr(struct in_ifaddr *); diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index ccd2a964dad7..d236ce450da3 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -674,9 +674,11 @@ static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch) /* we can reuse ->gso_skb because peek isn't called for root qdiscs */ if (!sch->gso_skb) { sch->gso_skb = sch->dequeue(sch); - if (sch->gso_skb) + if (sch->gso_skb) { /* it's still part of the queue */ + qdisc_qstats_backlog_inc(sch, sch->gso_skb); sch->q.qlen++; + } } return sch->gso_skb; @@ -689,6 +691,7 @@ static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch) if (skb) { sch->gso_skb = NULL; + qdisc_qstats_backlog_dec(sch, skb); sch->q.qlen--; } else { skb = sch->dequeue(sch); diff --git a/include/net/sock.h b/include/net/sock.h index f5bc965dced4..e6c3505c7f02 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1208,11 +1208,13 @@ static inline void memcg_memory_allocated_add(struct cg_proto *prot, unsigned long amt, int *parent_status) { - page_counter_charge(&prot->memory_allocated, amt); + struct page_counter *counter; + + if (page_counter_try_charge(&prot->memory_allocated, amt, &counter)) + return; - if (page_counter_read(&prot->memory_allocated) > - prot->memory_allocated.limit) - *parent_status = OVER_LIMIT; + page_counter_charge(&prot->memory_allocated, amt); + *parent_status = OVER_LIMIT; } static inline void memcg_memory_allocated_sub(struct cg_proto *prot, @@ -1655,7 +1657,13 @@ static inline void sock_put(struct sock *sk) */ void sock_gen_put(struct sock *sk); -int 
sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested); +int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested, + unsigned int trim_cap); +static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb, + const int nested) +{ + return __sk_receive_skb(sk, skb, nested, 1); +} static inline void sk_tx_queue_set(struct sock *sk, int tx_queue) { diff --git a/include/net/xfrm.h b/include/net/xfrm.h index c5c03fa47f8c..4b8d8af7942a 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -1554,8 +1554,10 @@ int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family); void xfrm4_local_error(struct sk_buff *skb, u32 mtu); int xfrm6_extract_header(struct sk_buff *skb); int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb); -int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi); +int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi, + struct ip6_tnl *t); int xfrm6_transport_finish(struct sk_buff *skb, int async); +int xfrm6_rcv_tnl(struct sk_buff *skb, struct ip6_tnl *t); int xfrm6_rcv(struct sk_buff *skb); int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto); diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h index d77416963f05..72f3b0d65435 100644 --- a/include/rdma/ib_addr.h +++ b/include/rdma/ib_addr.h @@ -200,11 +200,13 @@ static inline void iboe_addr_get_sgid(struct rdma_dev_addr *dev_addr, dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if); if (dev) { ip4 = in_dev_get(dev); - if (ip4 && ip4->ifa_list && ip4->ifa_list->ifa_address) { + if (ip4 && ip4->ifa_list && ip4->ifa_list->ifa_address) ipv6_addr_set_v4mapped(ip4->ifa_list->ifa_address, (struct in6_addr *)gid); + + if (ip4) in_dev_put(ip4); - } + dev_put(dev); } } diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h index 3e0171ea47f3..afffa756357a 100644 --- a/include/sound/rawmidi.h +++ b/include/sound/rawmidi.h @@ -76,6 +76,7 @@ struct snd_rawmidi_runtime { size_t avail_min; /* min avail for wakeup */ size_t avail; /* max used buffer for wakeup */ size_t xruns; /* over/underruns counter */ + int buffer_ref; /* buffer reference count */ /* misc */ spinlock_t lock; struct mutex realloc_mutex; diff --git a/include/uapi/linux/if_pppol2tp.h b/include/uapi/linux/if_pppol2tp.h index 163e8adac2d6..de246e9f4974 100644 --- a/include/uapi/linux/if_pppol2tp.h +++ b/include/uapi/linux/if_pppol2tp.h @@ -17,6 +17,7 @@ #include <linux/types.h> +#include <linux/l2tp.h> /* Structure used to connect() the socket to a particular tunnel UDP * socket over IPv4. 
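The sock.h hunk above extends the receive path with a trim_cap argument while keeping sk_receive_skb() as a static inline wrapper that passes the old default of 1, so existing callers compile unchanged. The same convention in a generic, hypothetical form (deliver()/__deliver() and the length-capping behaviour are made up purely for illustration):

#include <stdio.h>
#include <string.h>

/* New worker: takes the extra trim_cap parameter. */
static int __deliver(const char *buf, size_t len, size_t trim_cap)
{
	(void)buf;
	if (trim_cap && len > trim_cap)	/* hypothetical capping behaviour */
		len = trim_cap;
	return (int)len;
}

/* Old entry point, kept as a thin wrapper passing the old default. */
static inline int deliver(const char *buf, size_t len)
{
	return __deliver(buf, len, 1);
}

int main(void)
{
	const char *msg = "payload";

	printf("deliver()=%d __deliver(cap=4)=%d\n",
	       deliver(msg, strlen(msg)), __deliver(msg, strlen(msg), 4));
	return 0;
}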
@@ -89,14 +90,12 @@ enum { PPPOL2TP_SO_REORDERTO = 5, }; -/* Debug message categories for the DEBUG socket option */ +/* Debug message categories for the DEBUG socket option (deprecated) */ enum { - PPPOL2TP_MSG_DEBUG = (1 << 0), /* verbose debug (if - * compiled in) */ - PPPOL2TP_MSG_CONTROL = (1 << 1), /* userspace - kernel - * interface */ - PPPOL2TP_MSG_SEQ = (1 << 2), /* sequence numbers */ - PPPOL2TP_MSG_DATA = (1 << 3), /* data packets */ + PPPOL2TP_MSG_DEBUG = L2TP_MSG_DEBUG, + PPPOL2TP_MSG_CONTROL = L2TP_MSG_CONTROL, + PPPOL2TP_MSG_SEQ = L2TP_MSG_SEQ, + PPPOL2TP_MSG_DATA = L2TP_MSG_DATA, }; diff --git a/include/uapi/linux/l2tp.h b/include/uapi/linux/l2tp.h index 347ef22a964e..c3a5c99f565b 100644 --- a/include/uapi/linux/l2tp.h +++ b/include/uapi/linux/l2tp.h @@ -9,9 +9,8 @@ #include <linux/types.h> #include <linux/socket.h> -#ifndef __KERNEL__ -#include <netinet/in.h> -#endif +#include <linux/in.h> +#include <linux/in6.h> #define IPPROTO_L2TP 115 @@ -31,7 +30,7 @@ struct sockaddr_l2tpip { __u32 l2tp_conn_id; /* Connection ID of tunnel */ /* Pad to size of `struct sockaddr'. */ - unsigned char __pad[sizeof(struct sockaddr) - + unsigned char __pad[__SOCK_SIZE__ - sizeof(__kernel_sa_family_t) - sizeof(__be16) - sizeof(struct in_addr) - sizeof(__u32)]; @@ -108,7 +107,7 @@ enum { L2TP_ATTR_VLAN_ID, /* u16 */ L2TP_ATTR_COOKIE, /* 0, 4 or 8 bytes */ L2TP_ATTR_PEER_COOKIE, /* 0, 4 or 8 bytes */ - L2TP_ATTR_DEBUG, /* u32 */ + L2TP_ATTR_DEBUG, /* u32, enum l2tp_debug_flags */ L2TP_ATTR_RECV_SEQ, /* u8 */ L2TP_ATTR_SEND_SEQ, /* u8 */ L2TP_ATTR_LNS_MODE, /* u8 */ @@ -173,6 +172,21 @@ enum l2tp_seqmode { L2TP_SEQ_ALL = 2, }; +/** + * enum l2tp_debug_flags - debug message categories for L2TP tunnels/sessions + * + * @L2TP_MSG_DEBUG: verbose debug (if compiled in) + * @L2TP_MSG_CONTROL: userspace - kernel interface + * @L2TP_MSG_SEQ: sequence numbers + * @L2TP_MSG_DATA: data packets + */ +enum l2tp_debug_flags { + L2TP_MSG_DEBUG = (1 << 0), + L2TP_MSG_CONTROL = (1 << 1), + L2TP_MSG_SEQ = (1 << 2), + L2TP_MSG_DATA = (1 << 3), +}; + /* * NETLINK_GENERIC related info */ diff --git a/init/main.c b/init/main.c index 33cd39aebf46..48e8370966ce 100644 --- a/init/main.c +++ b/init/main.c @@ -679,6 +679,8 @@ asmlinkage __visible void __init start_kernel(void) /* Do the rest non-__init'ed, we're now alive */ rest_init(); + + prevent_tail_call_optimization(); } /* Call all constructor functions linked into the kernel. 
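A hedged example of using the L2TP_MSG_* flags introduced above from userspace: compose the categories of interest and hand them to the (deprecated) per-session DEBUG socket option. It assumes an already-connected PPPoX/L2TP session socket fd; SOL_PPPOL2TP and PPPOL2TP_SO_DEBUG are expected to come from the uapi headers, and the helper name is made up.

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/l2tp.h>
#include <linux/if_pppol2tp.h>

#ifndef SOL_PPPOL2TP
#define SOL_PPPOL2TP 273	/* matches the kernel's socket level */
#endif

static int l2tp_set_debug_mask(int fd)
{
	int mask = L2TP_MSG_CONTROL | L2TP_MSG_SEQ;	/* categories to trace */

	if (setsockopt(fd, SOL_PPPOL2TP, PPPOL2TP_SO_DEBUG,
		       &mask, sizeof(mask)) < 0) {
		perror("setsockopt(PPPOL2TP_SO_DEBUG)");
		return -1;
	}
	return 0;
}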
*/ diff --git a/ipc/util.c b/ipc/util.c index 2724f9071ab3..7af476b6dcdd 100644 --- a/ipc/util.c +++ b/ipc/util.c @@ -756,21 +756,21 @@ static struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t pos, total++; } - *new_pos = pos + 1; + ipc = NULL; if (total >= ids->in_use) - return NULL; + goto out; for (; pos < IPCMNI; pos++) { ipc = idr_find(&ids->ipcs_idr, pos); if (ipc != NULL) { rcu_read_lock(); ipc_lock_object(ipc); - return ipc; + break; } } - - /* Out of range - return NULL to terminate iteration */ - return NULL; +out: + *new_pos = pos + 1; + return ipc; } static void *sysvipc_proc_next(struct seq_file *s, void *it, loff_t *pos) diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index fd3fd8d17ef5..01431ef8cf07 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -152,7 +152,7 @@ static int map_create(union bpf_attr *attr) err = bpf_map_charge_memlock(map); if (err) - goto free_map; + goto free_map_nouncharge; err = bpf_map_new_fd(map); if (err < 0) @@ -162,6 +162,8 @@ static int map_create(union bpf_attr *attr) return err; free_map: + bpf_map_uncharge_memlock(map); +free_map_nouncharge: map->ops->map_free(map); return err; } diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c index 37ddb7bda651..ec7c7eda0774 100644 --- a/kernel/irq/migration.c +++ b/kernel/irq/migration.c @@ -7,17 +7,18 @@ void irq_move_masked_irq(struct irq_data *idata) { struct irq_desc *desc = irq_data_to_desc(idata); - struct irq_chip *chip = desc->irq_data.chip; + struct irq_data *data = &desc->irq_data; + struct irq_chip *chip = data->chip; - if (likely(!irqd_is_setaffinity_pending(&desc->irq_data))) + if (likely(!irqd_is_setaffinity_pending(data))) return; - irqd_clr_move_pending(&desc->irq_data); + irqd_clr_move_pending(data); /* * Paranoia: cpu-local interrupts shouldn't be calling in here anyway. */ - if (irqd_is_per_cpu(&desc->irq_data)) { + if (irqd_is_per_cpu(data)) { WARN_ON(1); return; } @@ -42,9 +43,20 @@ void irq_move_masked_irq(struct irq_data *idata) * For correct operation this depends on the caller * masking the irqs. */ - if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids) - irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false); - + if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids) { + int ret; + + ret = irq_do_set_affinity(data, desc->pending_mask, false); + /* + * If the there is a cleanup pending in the underlying + * vector management, reschedule the move for the next + * interrupt. Leave desc->pending_mask intact. 
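A toy model of the retry logic being added in this hunk: when applying the pending affinity fails with -EBUSY because the vector cleanup has not finished, the move-pending flag is re-armed and the pending mask is left alone so the next interrupt retries. The fake_irq structure and the simulated busy countdown are illustrative only.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_irq {
	bool move_pending;
	unsigned int pending_mask;
	unsigned int affinity;
	int busy_countdown;	/* simulates a cleanup that finishes later */
};

static int do_set_affinity(struct fake_irq *irq, unsigned int mask)
{
	if (irq->busy_countdown-- > 0)
		return -EBUSY;
	irq->affinity = mask;
	return 0;
}

static void move_masked_irq(struct fake_irq *irq)
{
	int ret;

	if (!irq->move_pending)
		return;
	irq->move_pending = false;

	ret = do_set_affinity(irq, irq->pending_mask);
	if (ret == -EBUSY) {
		/* Reschedule the move; leave pending_mask intact. */
		irq->move_pending = true;
		return;
	}
	irq->pending_mask = 0;
}

int main(void)
{
	struct fake_irq irq = {
		.move_pending = true, .pending_mask = 0x4, .busy_countdown = 2,
	};
	int i;

	for (i = 0; i < 4; i++) {
		move_masked_irq(&irq);
		printf("irq %d: pending=%d affinity=%#x\n",
		       i, irq.move_pending, irq.affinity);
	}
	return 0;
}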
+ */ + if (ret == -EBUSY) { + irqd_set_move_pending(data); + return; + } + } cpumask_clear(desc->pending_mask); } diff --git a/kernel/padata.c b/kernel/padata.c index ae036af3f012..c50975f43b34 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -65,15 +65,11 @@ static int padata_cpu_hash(struct parallel_data *pd) static void padata_parallel_worker(struct work_struct *parallel_work) { struct padata_parallel_queue *pqueue; - struct parallel_data *pd; - struct padata_instance *pinst; LIST_HEAD(local_list); local_bh_disable(); pqueue = container_of(parallel_work, struct padata_parallel_queue, work); - pd = pqueue->pd; - pinst = pd->pinst; spin_lock(&pqueue->parallel.lock); list_replace_init(&pqueue->parallel.list, &local_list); @@ -136,6 +132,7 @@ int padata_do_parallel(struct padata_instance *pinst, padata->cb_cpu = cb_cpu; target_cpu = padata_cpu_hash(pd); + padata->cpu = target_cpu; queue = per_cpu_ptr(pd->pqueue, target_cpu); spin_lock(&queue->parallel.lock); @@ -159,8 +156,6 @@ EXPORT_SYMBOL(padata_do_parallel); * A pointer to the control struct of the next object that needs * serialization, if present in one of the percpu reorder queues. * - * NULL, if all percpu reorder queues are empty. - * * -EINPROGRESS, if the next object that needs serialization will * be parallel processed by another cpu and is not yet present in * the cpu's reorder queue. @@ -170,25 +165,12 @@ EXPORT_SYMBOL(padata_do_parallel); */ static struct padata_priv *padata_get_next(struct parallel_data *pd) { - int cpu, num_cpus; - unsigned int next_nr, next_index; struct padata_parallel_queue *next_queue; struct padata_priv *padata; struct padata_list *reorder; + int cpu = pd->cpu; - num_cpus = cpumask_weight(pd->cpumask.pcpu); - - /* - * Calculate the percpu reorder queue and the sequence - * number of the next object. - */ - next_nr = pd->processed; - next_index = next_nr % num_cpus; - cpu = padata_index_to_cpu(pd, next_index); next_queue = per_cpu_ptr(pd->pqueue, cpu); - - padata = NULL; - reorder = &next_queue->reorder; spin_lock(&reorder->lock); @@ -199,7 +181,8 @@ static struct padata_priv *padata_get_next(struct parallel_data *pd) list_del_init(&padata->list); atomic_dec(&pd->reorder_objects); - pd->processed++; + pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, + false); spin_unlock(&reorder->lock); goto out; @@ -222,6 +205,7 @@ static void padata_reorder(struct parallel_data *pd) struct padata_priv *padata; struct padata_serial_queue *squeue; struct padata_instance *pinst = pd->pinst; + struct padata_parallel_queue *next_queue; /* * We need to ensure that only one cpu can work on dequeueing of @@ -240,12 +224,11 @@ static void padata_reorder(struct parallel_data *pd) padata = padata_get_next(pd); /* - * All reorder queues are empty, or the next object that needs - * serialization is parallel processed by another cpu and is - * still on it's way to the cpu's reorder queue, nothing to - * do for now. + * If the next object that needs serialization is parallel + * processed by another cpu and is still on it's way to the + * cpu's reorder queue, nothing to do for now. */ - if (!padata || PTR_ERR(padata) == -EINPROGRESS) + if (PTR_ERR(padata) == -EINPROGRESS) break; /* @@ -254,7 +237,6 @@ static void padata_reorder(struct parallel_data *pd) * so exit immediately. 
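A single-threaded toy model of the reorder scheme these padata changes move to: completed objects land on per-CPU queues chosen round-robin by sequence number, and the serializer walks the queues starting at the next expected CPU (the pd->cpu analogue), stopping when that queue is empty and resuming once the missing object completes. Per-worker completions arrive in submission order, as they do for a padata parallel queue. This is a sketch of the idea, not the kernel code.

#include <stdio.h>

#define NWORKERS 3
#define NOBJS    9

struct queue {
	int seq[NOBJS];
	int head, tail;
};

static struct queue q[NWORKERS];
static int next_worker;		/* pd->cpu analogue */

static void complete(int seq)	/* padata_do_serial() analogue */
{
	struct queue *wq = &q[seq % NWORKERS];

	wq->seq[wq->tail++] = seq;
}

static void reorder(void)	/* padata_reorder() analogue */
{
	struct queue *wq;

	for (;;) {
		wq = &q[next_worker];
		if (wq->head == wq->tail)
			return;	/* next object still in flight */
		printf("serialized seq %d\n", wq->seq[wq->head++]);
		next_worker = (next_worker + 1) % NWORKERS;
	}
}

int main(void)
{
	int seq;

	for (seq = 0; seq < NOBJS; seq++)
		if (!(seq % NWORKERS == 1 && seq >= 4))
			complete(seq);	/* worker 1 is slow: 4 and 7 pending */
	reorder();		/* prints 0..3, then stops at the gap */

	complete(4);
	complete(7);		/* the slow worker catches up, in order */
	reorder();		/* resumes and prints 4..8 */
	return 0;
}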
*/ if (PTR_ERR(padata) == -ENODATA) { - del_timer(&pd->timer); spin_unlock_bh(&pd->lock); return; } @@ -273,28 +255,27 @@ static void padata_reorder(struct parallel_data *pd) /* * The next object that needs serialization might have arrived to - * the reorder queues in the meantime, we will be called again - * from the timer function if no one else cares for it. + * the reorder queues in the meantime. * - * Ensure reorder_objects is read after pd->lock is dropped so we see - * an increment from another task in padata_do_serial. Pairs with + * Ensure reorder queue is read after pd->lock is dropped so we see + * new objects from another task in padata_do_serial. Pairs with * smp_mb__after_atomic in padata_do_serial. */ smp_mb(); - if (atomic_read(&pd->reorder_objects) - && !(pinst->flags & PADATA_RESET)) - mod_timer(&pd->timer, jiffies + HZ); - else - del_timer(&pd->timer); - return; + next_queue = per_cpu_ptr(pd->pqueue, pd->cpu); + if (!list_empty(&next_queue->reorder.list)) + queue_work(pinst->wq, &pd->reorder_work); } -static void padata_reorder_timer(unsigned long arg) +static void invoke_padata_reorder(struct work_struct *work) { - struct parallel_data *pd = (struct parallel_data *)arg; + struct parallel_data *pd; + local_bh_disable(); + pd = container_of(work, struct parallel_data, reorder_work); padata_reorder(pd); + local_bh_enable(); } static void padata_serial_worker(struct work_struct *serial_work) @@ -341,29 +322,22 @@ static void padata_serial_worker(struct work_struct *serial_work) */ void padata_do_serial(struct padata_priv *padata) { - int cpu; - struct padata_parallel_queue *pqueue; - struct parallel_data *pd; - - pd = padata->pd; - - cpu = get_cpu(); - pqueue = per_cpu_ptr(pd->pqueue, cpu); + struct parallel_data *pd = padata->pd; + struct padata_parallel_queue *pqueue = per_cpu_ptr(pd->pqueue, + padata->cpu); spin_lock(&pqueue->reorder.lock); - atomic_inc(&pd->reorder_objects); list_add_tail(&padata->list, &pqueue->reorder.list); + atomic_inc(&pd->reorder_objects); spin_unlock(&pqueue->reorder.lock); /* - * Ensure the atomic_inc of reorder_objects above is ordered correctly + * Ensure the addition to the reorder list is ordered correctly * with the trylock of pd->lock in padata_reorder. Pairs with smp_mb * in padata_reorder. 
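A userspace illustration of the barrier pairing described above: each side publishes its own update, issues a full fence, and only then inspects the other side, which rules out the outcome where both sides miss each other's update. C11 seq_cst fences stand in for the kernel's smp_mb(); the flags are illustrative, not the padata structures.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int flag_a, flag_b;
static int seen_by_a, seen_by_b;

static void *side_a(void *unused)
{
	(void)unused;
	atomic_store_explicit(&flag_a, 1, memory_order_relaxed); /* publish */
	atomic_thread_fence(memory_order_seq_cst);		 /* smp_mb() */
	seen_by_a = atomic_load_explicit(&flag_b, memory_order_relaxed);
	return NULL;
}

static void *side_b(void *unused)
{
	(void)unused;
	atomic_store_explicit(&flag_b, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);
	seen_by_b = atomic_load_explicit(&flag_a, memory_order_relaxed);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, side_a, NULL);
	pthread_create(&b, NULL, side_b, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* With both fences in place this can never print 0/0. */
	printf("a saw %d, b saw %d\n", seen_by_a, seen_by_b);
	return 0;
}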
*/ smp_mb__after_atomic(); - put_cpu(); - padata_reorder(pd); } EXPORT_SYMBOL(padata_do_serial); @@ -412,9 +386,14 @@ static void padata_init_pqueues(struct parallel_data *pd) struct padata_parallel_queue *pqueue; cpu_index = 0; - for_each_cpu(cpu, pd->cpumask.pcpu) { + for_each_possible_cpu(cpu) { pqueue = per_cpu_ptr(pd->pqueue, cpu); - pqueue->pd = pd; + + if (!cpumask_test_cpu(cpu, pd->cpumask.pcpu)) { + pqueue->cpu_index = -1; + continue; + } + pqueue->cpu_index = cpu_index; cpu_index++; @@ -448,12 +427,13 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst, padata_init_pqueues(pd); padata_init_squeues(pd); - setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd); atomic_set(&pd->seq_nr, -1); atomic_set(&pd->reorder_objects, 0); atomic_set(&pd->refcnt, 1); pd->pinst = pinst; spin_lock_init(&pd->lock); + pd->cpu = cpumask_first(pd->cpumask.pcpu); + INIT_WORK(&pd->reorder_work, invoke_padata_reorder); return pd; diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c index e24008c098c6..45a0a26023d4 100644 --- a/kernel/time/posix-clock.c +++ b/kernel/time/posix-clock.c @@ -25,8 +25,6 @@ #include <linux/syscalls.h> #include <linux/uaccess.h> -static void delete_clock(struct kref *kref); - /* * Returns NULL if the posix_clock instance attached to 'fp' is old and stale. */ @@ -168,7 +166,7 @@ static int posix_clock_open(struct inode *inode, struct file *fp) err = 0; if (!err) { - kref_get(&clk->kref); + get_device(clk->dev); fp->private_data = clk; } out: @@ -184,7 +182,7 @@ static int posix_clock_release(struct inode *inode, struct file *fp) if (clk->ops.release) err = clk->ops.release(clk); - kref_put(&clk->kref, delete_clock); + put_device(clk->dev); fp->private_data = NULL; @@ -206,38 +204,35 @@ static const struct file_operations posix_clock_file_operations = { #endif }; -int posix_clock_register(struct posix_clock *clk, dev_t devid) +int posix_clock_register(struct posix_clock *clk, struct device *dev) { int err; - kref_init(&clk->kref); init_rwsem(&clk->rwsem); cdev_init(&clk->cdev, &posix_clock_file_operations); + err = cdev_device_add(&clk->cdev, dev); + if (err) { + pr_err("%s unable to add device %d:%d\n", + dev_name(dev), MAJOR(dev->devt), MINOR(dev->devt)); + return err; + } clk->cdev.owner = clk->ops.owner; - err = cdev_add(&clk->cdev, devid, 1); + clk->dev = dev; - return err; + return 0; } EXPORT_SYMBOL_GPL(posix_clock_register); -static void delete_clock(struct kref *kref) -{ - struct posix_clock *clk = container_of(kref, struct posix_clock, kref); - - if (clk->release) - clk->release(clk); -} - void posix_clock_unregister(struct posix_clock *clk) { - cdev_del(&clk->cdev); + cdev_device_del(&clk->cdev, clk->dev); down_write(&clk->rwsem); clk->zombie = true; up_write(&clk->rwsem); - kref_put(&clk->kref, delete_clock); + put_device(clk->dev); } EXPORT_SYMBOL_GPL(posix_clock_unregister); diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index c39fc68c4778..54be8790941e 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -319,11 +319,12 @@ static void put_probe_ref(void) static void blk_trace_cleanup(struct blk_trace *bt) { + synchronize_rcu(); blk_trace_free(bt); put_probe_ref(); } -int blk_trace_remove(struct request_queue *q) +static int __blk_trace_remove(struct request_queue *q) { struct blk_trace *bt; @@ -336,6 +337,17 @@ int blk_trace_remove(struct request_queue *q) return 0; } + +int blk_trace_remove(struct request_queue *q) +{ + int ret; + + mutex_lock(&q->blk_trace_mutex); + ret = 
__blk_trace_remove(q); + mutex_unlock(&q->blk_trace_mutex); + + return ret; +} EXPORT_SYMBOL_GPL(blk_trace_remove); static ssize_t blk_dropped_read(struct file *filp, char __user *buffer, @@ -546,9 +558,8 @@ err: return ret; } -int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, - struct block_device *bdev, - char __user *arg) +static int __blk_trace_setup(struct request_queue *q, char *name, dev_t dev, + struct block_device *bdev, char __user *arg) { struct blk_user_trace_setup buts; int ret; @@ -562,11 +573,24 @@ int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, return ret; if (copy_to_user(arg, &buts, sizeof(buts))) { - blk_trace_remove(q); + __blk_trace_remove(q); return -EFAULT; } return 0; } + +int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, + struct block_device *bdev, + char __user *arg) +{ + int ret; + + mutex_lock(&q->blk_trace_mutex); + ret = __blk_trace_setup(q, name, dev, bdev, arg); + mutex_unlock(&q->blk_trace_mutex); + + return ret; +} EXPORT_SYMBOL_GPL(blk_trace_setup); #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) @@ -595,7 +619,7 @@ static int compat_blk_trace_setup(struct request_queue *q, char *name, return ret; if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) { - blk_trace_remove(q); + __blk_trace_remove(q); return -EFAULT; } @@ -603,11 +627,13 @@ static int compat_blk_trace_setup(struct request_queue *q, char *name, } #endif -int blk_trace_startstop(struct request_queue *q, int start) +static int __blk_trace_startstop(struct request_queue *q, int start) { int ret; - struct blk_trace *bt = q->blk_trace; + struct blk_trace *bt; + bt = rcu_dereference_protected(q->blk_trace, + lockdep_is_held(&q->blk_trace_mutex)); if (bt == NULL) return -EINVAL; @@ -642,8 +668,25 @@ int blk_trace_startstop(struct request_queue *q, int start) return ret; } + +int blk_trace_startstop(struct request_queue *q, int start) +{ + int ret; + + mutex_lock(&q->blk_trace_mutex); + ret = __blk_trace_startstop(q, start); + mutex_unlock(&q->blk_trace_mutex); + + return ret; +} EXPORT_SYMBOL_GPL(blk_trace_startstop); +/* + * When reading or writing the blktrace sysfs files, the references to the + * opened sysfs or device files should prevent the underlying block device + * from being removed. So no further delete protection is really needed. 
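A minimal sketch of the RCU pattern these blktrace hunks adopt, assuming a kernel build environment; the trace_cfg structure and names are illustrative. Readers dereference the pointer only inside an RCU read-side critical section, while the updater swaps the pointer under a mutex and frees the old object only after synchronize_rcu(), mirroring the rcu_dereference()/rcu_dereference_protected()/synchronize_rcu() calls in the diff.

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>

struct trace_cfg {
	u32 act_mask;
};

static struct trace_cfg __rcu *active_cfg;
static DEFINE_MUTEX(cfg_mutex);

static u32 read_act_mask(void)		/* fast path, lock-free */
{
	struct trace_cfg *cfg;
	u32 mask = 0;

	rcu_read_lock();
	cfg = rcu_dereference(active_cfg);
	if (cfg)
		mask = cfg->act_mask;
	rcu_read_unlock();
	return mask;
}

static int replace_cfg(u32 new_mask)	/* slow path, serialized */
{
	struct trace_cfg *old, *new;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOMEM;
	new->act_mask = new_mask;

	mutex_lock(&cfg_mutex);
	old = rcu_dereference_protected(active_cfg,
					lockdep_is_held(&cfg_mutex));
	rcu_assign_pointer(active_cfg, new);
	mutex_unlock(&cfg_mutex);

	synchronize_rcu();	/* wait out readers still using 'old' */
	kfree(old);
	return 0;
}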
+ */ + /** * blk_trace_ioctl: - handle the ioctls associated with tracing * @bdev: the block device @@ -661,12 +704,12 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg) if (!q) return -ENXIO; - mutex_lock(&bdev->bd_mutex); + mutex_lock(&q->blk_trace_mutex); switch (cmd) { case BLKTRACESETUP: bdevname(bdev, b); - ret = blk_trace_setup(q, b, bdev->bd_dev, bdev, arg); + ret = __blk_trace_setup(q, b, bdev->bd_dev, bdev, arg); break; #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) case BLKTRACESETUP32: @@ -677,17 +720,17 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg) case BLKTRACESTART: start = 1; case BLKTRACESTOP: - ret = blk_trace_startstop(q, start); + ret = __blk_trace_startstop(q, start); break; case BLKTRACETEARDOWN: - ret = blk_trace_remove(q); + ret = __blk_trace_remove(q); break; default: ret = -ENOTTY; break; } - mutex_unlock(&bdev->bd_mutex); + mutex_unlock(&q->blk_trace_mutex); return ret; } @@ -698,10 +741,14 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg) **/ void blk_trace_shutdown(struct request_queue *q) { - if (q->blk_trace) { - blk_trace_startstop(q, 0); - blk_trace_remove(q); + mutex_lock(&q->blk_trace_mutex); + if (rcu_dereference_protected(q->blk_trace, + lockdep_is_held(&q->blk_trace_mutex))) { + __blk_trace_startstop(q, 0); + __blk_trace_remove(q); } + + mutex_unlock(&q->blk_trace_mutex); } /* @@ -722,11 +769,15 @@ void blk_trace_shutdown(struct request_queue *q) static void blk_add_trace_rq(struct request_queue *q, struct request *rq, unsigned int nr_bytes, u32 what) { - struct blk_trace *bt = q->blk_trace; + struct blk_trace *bt; struct task_struct *tsk = current; - if (likely(!bt)) + rcu_read_lock(); + bt = rcu_dereference(q->blk_trace); + if (likely(!bt)) { + rcu_read_unlock(); return; + } /* * Use the bio context for all events except ISSUE and @@ -751,6 +802,7 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq, __blk_add_trace(bt, blk_rq_pos(rq), nr_bytes, rq->cmd_flags, what, rq->errors, 0, NULL, tsk); } + rcu_read_unlock(); } static void blk_add_trace_rq_abort(void *ignore, @@ -800,11 +852,15 @@ static void blk_add_trace_rq_complete(void *ignore, static void blk_add_trace_bio(struct request_queue *q, struct bio *bio, u32 what, int error) { - struct blk_trace *bt = q->blk_trace; + struct blk_trace *bt; struct task_struct *tsk = current; - if (likely(!bt)) + rcu_read_lock(); + bt = rcu_dereference(q->blk_trace); + if (likely(!bt)) { + rcu_read_unlock(); return; + } /* * Not all the pages in the bio are dirtied by the same task but @@ -817,6 +873,7 @@ static void blk_add_trace_bio(struct request_queue *q, struct bio *bio, __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, bio->bi_rw, what, error, 0, NULL, tsk); + rcu_read_unlock(); } static void blk_add_trace_bio_bounce(void *ignore, @@ -861,11 +918,14 @@ static void blk_add_trace_getrq(void *ignore, if (bio) blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0); else { - struct blk_trace *bt = q->blk_trace; + struct blk_trace *bt; + rcu_read_lock(); + bt = rcu_dereference(q->blk_trace); if (bt) __blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ, 0, 0, NULL, current); + rcu_read_unlock(); } } @@ -877,28 +937,36 @@ static void blk_add_trace_sleeprq(void *ignore, if (bio) blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0); else { - struct blk_trace *bt = q->blk_trace; + struct blk_trace *bt; + rcu_read_lock(); + bt = rcu_dereference(q->blk_trace); if (bt) __blk_add_trace(bt, 0, 0, rw, 
BLK_TA_SLEEPRQ, 0, 0, NULL, current); + rcu_read_unlock(); } } static void blk_add_trace_plug(void *ignore, struct request_queue *q) { - struct blk_trace *bt = q->blk_trace; + struct blk_trace *bt; + rcu_read_lock(); + bt = rcu_dereference(q->blk_trace); if (bt) __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, current); + rcu_read_unlock(); } static void blk_add_trace_unplug(void *ignore, struct request_queue *q, unsigned int depth, bool explicit) { - struct blk_trace *bt = q->blk_trace; + struct blk_trace *bt; + rcu_read_lock(); + bt = rcu_dereference(q->blk_trace); if (bt) { __be64 rpdu = cpu_to_be64(depth); u32 what; @@ -911,15 +979,18 @@ static void blk_add_trace_unplug(void *ignore, struct request_queue *q, __blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, current); } + rcu_read_unlock(); } static void blk_add_trace_split(void *ignore, struct request_queue *q, struct bio *bio, unsigned int pdu) { - struct blk_trace *bt = q->blk_trace; + struct blk_trace *bt; struct task_struct *tsk = current; + rcu_read_lock(); + bt = rcu_dereference(q->blk_trace); if (bt) { __be64 rpdu = cpu_to_be64(pdu); @@ -932,6 +1003,7 @@ static void blk_add_trace_split(void *ignore, bio->bi_iter.bi_size, bio->bi_rw, BLK_TA_SPLIT, bio->bi_error, sizeof(rpdu), &rpdu, tsk); } + rcu_read_unlock(); } /** @@ -951,12 +1023,16 @@ static void blk_add_trace_bio_remap(void *ignore, struct request_queue *q, struct bio *bio, dev_t dev, sector_t from) { - struct blk_trace *bt = q->blk_trace; + struct blk_trace *bt; struct blk_io_trace_remap r; struct task_struct *tsk = current; - if (likely(!bt)) + rcu_read_lock(); + bt = rcu_dereference(q->blk_trace); + if (likely(!bt)) { + rcu_read_unlock(); return; + } r.device_from = cpu_to_be32(dev); r.device_to = cpu_to_be32(bio->bi_bdev->bd_dev); @@ -970,6 +1046,7 @@ static void blk_add_trace_bio_remap(void *ignore, __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, bio->bi_rw, BLK_TA_REMAP, bio->bi_error, sizeof(r), &r, tsk); + rcu_read_unlock(); } /** @@ -990,12 +1067,16 @@ static void blk_add_trace_rq_remap(void *ignore, struct request *rq, dev_t dev, sector_t from) { - struct blk_trace *bt = q->blk_trace; + struct blk_trace *bt; struct blk_io_trace_remap r; struct task_struct *tsk = current; - if (likely(!bt)) + rcu_read_lock(); + bt = rcu_dereference(q->blk_trace); + if (likely(!bt)) { + rcu_read_unlock(); return; + } r.device_from = cpu_to_be32(dev); r.device_to = cpu_to_be32(disk_devt(rq->rq_disk)); @@ -1009,6 +1090,7 @@ static void blk_add_trace_rq_remap(void *ignore, __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), rq_data_dir(rq), BLK_TA_REMAP, !!rq->errors, sizeof(r), &r, tsk); + rcu_read_unlock(); } /** @@ -1026,11 +1108,15 @@ void blk_add_driver_data(struct request_queue *q, struct request *rq, void *data, size_t len) { - struct blk_trace *bt = q->blk_trace; + struct blk_trace *bt; struct task_struct *tsk = current; - if (likely(!bt)) + rcu_read_lock(); + bt = rcu_dereference(q->blk_trace); + if (likely(!bt)) { + rcu_read_unlock(); return; + } if (bio_has_data(rq->bio) && rq->bio->bi_io_vec && rq->bio->bi_io_vec->bv_page && @@ -1043,6 +1129,7 @@ void blk_add_driver_data(struct request_queue *q, else __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), 0, BLK_TA_DRV_DATA, rq->errors, len, data, tsk); + rcu_read_unlock(); } EXPORT_SYMBOL_GPL(blk_add_driver_data); @@ -1534,6 +1621,7 @@ static int blk_trace_remove_queue(struct request_queue *q) return -EINVAL; put_probe_ref(); + synchronize_rcu(); blk_trace_free(bt); return 0; } @@ -1694,6 
+1782,7 @@ static ssize_t sysfs_blk_trace_attr_show(struct device *dev, struct hd_struct *p = dev_to_part(dev); struct request_queue *q; struct block_device *bdev; + struct blk_trace *bt; ssize_t ret = -ENXIO; bdev = bdget(part_devt(p)); @@ -1704,26 +1793,28 @@ static ssize_t sysfs_blk_trace_attr_show(struct device *dev, if (q == NULL) goto out_bdput; - mutex_lock(&bdev->bd_mutex); + mutex_lock(&q->blk_trace_mutex); + bt = rcu_dereference_protected(q->blk_trace, + lockdep_is_held(&q->blk_trace_mutex)); if (attr == &dev_attr_enable) { - ret = sprintf(buf, "%u\n", !!q->blk_trace); + ret = sprintf(buf, "%u\n", !!bt); goto out_unlock_bdev; } - if (q->blk_trace == NULL) + if (bt == NULL) ret = sprintf(buf, "disabled\n"); else if (attr == &dev_attr_act_mask) - ret = blk_trace_mask2str(buf, q->blk_trace->act_mask); + ret = blk_trace_mask2str(buf, bt->act_mask); else if (attr == &dev_attr_pid) - ret = sprintf(buf, "%u\n", q->blk_trace->pid); + ret = sprintf(buf, "%u\n", bt->pid); else if (attr == &dev_attr_start_lba) - ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba); + ret = sprintf(buf, "%llu\n", bt->start_lba); else if (attr == &dev_attr_end_lba) - ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba); + ret = sprintf(buf, "%llu\n", bt->end_lba); out_unlock_bdev: - mutex_unlock(&bdev->bd_mutex); + mutex_unlock(&q->blk_trace_mutex); out_bdput: bdput(bdev); out: @@ -1737,6 +1828,7 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev, struct block_device *bdev; struct request_queue *q; struct hd_struct *p; + struct blk_trace *bt; u64 value; ssize_t ret = -EINVAL; @@ -1765,10 +1857,12 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev, if (q == NULL) goto out_bdput; - mutex_lock(&bdev->bd_mutex); + mutex_lock(&q->blk_trace_mutex); + bt = rcu_dereference_protected(q->blk_trace, + lockdep_is_held(&q->blk_trace_mutex)); if (attr == &dev_attr_enable) { - if (!!value == !!q->blk_trace) { + if (!!value == !!bt) { ret = 0; goto out_unlock_bdev; } @@ -1780,22 +1874,25 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev, } ret = 0; - if (q->blk_trace == NULL) + if (bt == NULL) { ret = blk_trace_setup_queue(q, bdev); + bt = rcu_dereference_protected(q->blk_trace, + lockdep_is_held(&q->blk_trace_mutex)); + } if (ret == 0) { if (attr == &dev_attr_act_mask) - q->blk_trace->act_mask = value; + bt->act_mask = value; else if (attr == &dev_attr_pid) - q->blk_trace->pid = value; + bt->pid = value; else if (attr == &dev_attr_start_lba) - q->blk_trace->start_lba = value; + bt->start_lba = value; else if (attr == &dev_attr_end_lba) - q->blk_trace->end_lba = value; + bt->end_lba = value; } out_unlock_bdev: - mutex_unlock(&bdev->bd_mutex); + mutex_unlock(&q->blk_trace_mutex); out_bdput: bdput(bdev); out: diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 3dd40c736067..a71bdad638d5 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -206,6 +206,10 @@ static u64 bpf_perf_event_read(u64 r1, u64 index, u64 r3, u64 r4, u64 r5) event->pmu->count) return -EINVAL; + if (unlikely(event->attr.type != PERF_TYPE_HARDWARE && + event->attr.type != PERF_TYPE_RAW)) + return -EINVAL; + /* * we don't know if the function is run successfully by the * return value. 
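/*
 * Illustrative sketch, not part of the patch above: the locking scheme the
 * blktrace conversion relies on. Hot-path readers only take the RCU read
 * lock; anything that installs or tears down q->blk_trace holds
 * q->blk_trace_mutex and waits for a grace period before freeing. Field and
 * helper names follow the surrounding 4.4 code; the two functions below are
 * hypothetical.
 */
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/mutex.h>
#include <linux/printk.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

static void example_blktrace_reader(struct request_queue *q)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (bt)
		pr_debug("blktrace active, act_mask=0x%x\n", bt->act_mask);
	rcu_read_unlock();
}

static void example_blktrace_teardown(struct request_queue *q)
{
	struct blk_trace *bt;

	mutex_lock(&q->blk_trace_mutex);
	bt = rcu_dereference_protected(q->blk_trace,
				       lockdep_is_held(&q->blk_trace_mutex));
	rcu_assign_pointer(q->blk_trace, NULL);
	mutex_unlock(&q->blk_trace_mutex);

	if (bt) {
		synchronize_rcu();	/* let in-flight readers finish */
		kfree(bt);
	}
}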
It can be judged in other places, such as diff --git a/lib/cpumask.c b/lib/cpumask.c index 5a70f6196f57..24f06e7abf92 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c @@ -42,6 +42,38 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu) return i; } +/** + * cpumask_next_wrap - helper to implement for_each_cpu_wrap + * @n: the cpu prior to the place to search + * @mask: the cpumask pointer + * @start: the start point of the iteration + * @wrap: assume @n crossing @start terminates the iteration + * + * Returns >= nr_cpu_ids on completion + * + * Note: the @wrap argument is required for the start condition when + * we cannot assume @start is set in @mask. + */ +int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap) +{ + int next; + +again: + next = cpumask_next(n, mask); + + if (wrap && n < start && next >= start) { + return nr_cpumask_bits; + + } else if (next >= nr_cpumask_bits) { + wrap = true; + n = -1; + goto again; + } + + return next; +} +EXPORT_SYMBOL(cpumask_next_wrap); + /* These are not inline because of header tangles. */ #ifdef CONFIG_CPUMASK_OFFSTACK /** diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h index d2ecf0a09180..f1f31c754b3e 100644 --- a/lib/mpi/longlong.h +++ b/lib/mpi/longlong.h @@ -756,22 +756,22 @@ do { \ do { \ if (__builtin_constant_p(bh) && (bh) == 0) \ __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ + : "=r" (sh), \ + "=&r" (sl) \ : "%r" ((USItype)(ah)), \ "%r" ((USItype)(al)), \ "rI" ((USItype)(bl))); \ else if (__builtin_constant_p(bh) && (bh) == ~(USItype) 0) \ __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ + : "=r" (sh), \ + "=&r" (sl) \ : "%r" ((USItype)(ah)), \ "%r" ((USItype)(al)), \ "rI" ((USItype)(bl))); \ else \ __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ + : "=r" (sh), \ + "=&r" (sl) \ : "%r" ((USItype)(ah)), \ "r" ((USItype)(bh)), \ "%r" ((USItype)(al)), \ @@ -781,36 +781,36 @@ do { \ do { \ if (__builtin_constant_p(ah) && (ah) == 0) \ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ + : "=r" (sh), \ + "=&r" (sl) \ : "r" ((USItype)(bh)), \ "rI" ((USItype)(al)), \ "r" ((USItype)(bl))); \ else if (__builtin_constant_p(ah) && (ah) == ~(USItype) 0) \ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ + : "=r" (sh), \ + "=&r" (sl) \ : "r" ((USItype)(bh)), \ "rI" ((USItype)(al)), \ "r" ((USItype)(bl))); \ else if (__builtin_constant_p(bh) && (bh) == 0) \ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ + : "=r" (sh), \ + "=&r" (sl) \ : "r" ((USItype)(ah)), \ "rI" ((USItype)(al)), \ "r" ((USItype)(bl))); \ else if (__builtin_constant_p(bh) && (bh) == ~(USItype) 0) \ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ + : "=r" (sh), \ + "=&r" (sl) \ : "r" ((USItype)(ah)), \ "rI" ((USItype)(al)), \ "r" ((USItype)(bl))); \ else \ __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ + : "=r" (sh), \ + "=&r" (sl) \ : "r" ((USItype)(ah)), \ "r" ((USItype)(bh)), \ "rI" ((USItype)(al)), \ @@ -821,7 +821,7 @@ do { \ do { \ USItype __m0 = (m0), __m1 = (m1); \ __asm__ ("mulhwu %0,%1,%2" \ - : "=r" ((USItype) ph) \ 
+ : "=r" (ph) \ : "%r" (__m0), \ "r" (__m1)); \ (pl) = __m0 * __m1; \ diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index f95503b39768..aa03a24aee9f 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1403,7 +1403,7 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn, while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i)) i++; - if (i == MAX_ORDER_NR_PAGES) + if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn) continue; /* Check if we got outside of the zone */ if (zone && !zone_spans_pfn(zone, pfn + i)) @@ -1420,7 +1420,7 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn, if (zone) { *valid_start = start; - *valid_end = end; + *valid_end = min(end, end_pfn); return 1; } else { return 0; diff --git a/mm/vmalloc.c b/mm/vmalloc.c index eb1aca94a547..8884934b4bcb 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -1540,7 +1540,7 @@ static void __vunmap(const void *addr, int deallocate_pages) addr)) return; - area = find_vmap_area((unsigned long)addr)->vm; + area = find_vm_area(addr); if (unlikely(!area)) { WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", addr); diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index de55a3f001dc..02be8ee23271 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -639,8 +639,10 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname, break; case SO_BINDTODEVICE: - if (optlen > IFNAMSIZ) - optlen = IFNAMSIZ; + if (optlen > IFNAMSIZ - 1) + optlen = IFNAMSIZ - 1; + + memset(devname, 0, sizeof(devname)); if (copy_from_user(devname, optval, optlen)) { res = -EFAULT; diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c index 2bdbaff3279b..88cea5154113 100644 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c @@ -747,7 +747,7 @@ static u16 batadv_tvlv_container_list_size(struct batadv_priv *bat_priv) static void batadv_tvlv_container_remove(struct batadv_priv *bat_priv, struct batadv_tvlv_container *tvlv) { - lockdep_assert_held(&bat_priv->tvlv.handler_list_lock); + lockdep_assert_held(&bat_priv->tvlv.container_list_lock); if (!tvlv) return; diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c index 86c69208da2b..91de807a8f03 100644 --- a/net/batman-adv/network-coding.c +++ b/net/batman-adv/network-coding.c @@ -991,15 +991,8 @@ static struct batadv_nc_path *batadv_nc_get_path(struct batadv_priv *bat_priv, */ static u8 batadv_nc_random_weight_tq(u8 tq) { - u8 rand_val, rand_tq; - - get_random_bytes(&rand_val, sizeof(rand_val)); - /* randomize the estimated packet loss (max TQ - estimated TQ) */ - rand_tq = rand_val * (BATADV_TQ_MAX_VALUE - tq); - - /* normalize the randomized packet loss */ - rand_tq /= BATADV_TQ_MAX_VALUE; + u8 rand_tq = prandom_u32_max(BATADV_TQ_MAX_VALUE + 1 - tq); /* convert to (randomized) estimated tq again */ return BATADV_TQ_MAX_VALUE - rand_tq; diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 67ee7c83a28d..06f366d234ff 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -614,8 +614,10 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr, /* increase the refcounter of the related vlan */ vlan = batadv_softif_vlan_get(bat_priv, vid); - if (WARN(!vlan, "adding TT local entry %pM to non-existent VLAN %d", - addr, BATADV_PRINT_VID(vid))) { + if (!vlan) { + net_ratelimited_function(batadv_info, soft_iface, + "adding TT local entry %pM to non-existent VLAN %d\n", + addr, BATADV_PRINT_VID(vid)); 
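/*
 * Illustrative sketch, not from the patch: the cpumask_next_wrap() helper
 * added to lib/cpumask.c above is meant to back a for_each_cpu_wrap()-style
 * iteration that starts at @start, walks to the end of the mask, wraps to
 * the beginning and stops before revisiting @start. This tree may not carry
 * the macro itself, so the loop is spelled out; "mask" and "start" come from
 * the caller.
 */
#include <linux/cpumask.h>
#include <linux/printk.h>

static void example_walk_cpus_from(const struct cpumask *mask, int start)
{
	int cpu;

	for (cpu = cpumask_next_wrap(start - 1, mask, start, false);
	     cpu < nr_cpumask_bits;
	     cpu = cpumask_next_wrap(cpu, mask, start, true))
		pr_info("visiting cpu %d\n", cpu);
}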
kfree(tt_local); tt_local = NULL; goto out; diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index 09442e0f7f67..1aa1d3d4979f 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -266,7 +266,7 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr) /* If old entry was unassociated with any port, then delete it. */ f = __br_fdb_get(br, br->dev->dev_addr, 0); - if (f && f->is_local && !f->dst) + if (f && f->is_local && !f->dst && !f->added_by_user) fdb_delete_local(br, NULL, f); fdb_insert(br, NULL, newaddr, 0); @@ -281,7 +281,7 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr) if (!br_vlan_should_use(v)) continue; f = __br_fdb_get(br, br->dev->dev_addr, v->vid); - if (f && f->is_local && !f->dst) + if (f && f->is_local && !f->dst && !f->added_by_user) fdb_delete_local(br, NULL, f); fdb_insert(br, NULL, newaddr, v->vid); } @@ -758,20 +758,25 @@ out: } /* Update (create or replace) forwarding database entry */ -static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr, - __u16 state, __u16 flags, __u16 vid) +static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source, + const __u8 *addr, __u16 state, __u16 flags, __u16 vid) { - struct net_bridge *br = source->br; struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)]; struct net_bridge_fdb_entry *fdb; bool modified = false; /* If the port cannot learn allow only local and static entries */ - if (!(state & NUD_PERMANENT) && !(state & NUD_NOARP) && + if (source && !(state & NUD_PERMANENT) && !(state & NUD_NOARP) && !(source->state == BR_STATE_LEARNING || source->state == BR_STATE_FORWARDING)) return -EPERM; + if (!source && !(state & NUD_PERMANENT)) { + pr_info("bridge: RTM_NEWNEIGH %s without NUD_PERMANENT\n", + br->dev->name); + return -EINVAL; + } + fdb = fdb_find(head, addr, vid); if (fdb == NULL) { if (!(flags & NLM_F_CREATE)) @@ -826,22 +831,28 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr, return 0; } -static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge_port *p, - const unsigned char *addr, u16 nlh_flags, u16 vid) +static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br, + struct net_bridge_port *p, const unsigned char *addr, + u16 nlh_flags, u16 vid) { int err = 0; if (ndm->ndm_flags & NTF_USE) { + if (!p) { + pr_info("bridge: RTM_NEWNEIGH %s with NTF_USE is not supported\n", + br->dev->name); + return -EINVAL; + } local_bh_disable(); rcu_read_lock(); - br_fdb_update(p->br, p, addr, vid, true); + br_fdb_update(br, p, addr, vid, true); rcu_read_unlock(); local_bh_enable(); } else { - spin_lock_bh(&p->br->hash_lock); - err = fdb_add_entry(p, addr, ndm->ndm_state, + spin_lock_bh(&br->hash_lock); + err = fdb_add_entry(br, p, addr, ndm->ndm_state, nlh_flags, vid); - spin_unlock_bh(&p->br->hash_lock); + spin_unlock_bh(&br->hash_lock); } return err; @@ -878,6 +889,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], dev->name); return -EINVAL; } + br = p->br; vg = nbp_vlan_group(p); } @@ -889,15 +901,9 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], } /* VID was specified, so use it. 
*/ - if (dev->priv_flags & IFF_EBRIDGE) - err = br_fdb_insert(br, NULL, addr, vid); - else - err = __br_fdb_add(ndm, p, addr, nlh_flags, vid); + err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid); } else { - if (dev->priv_flags & IFF_EBRIDGE) - err = br_fdb_insert(br, NULL, addr, 0); - else - err = __br_fdb_add(ndm, p, addr, nlh_flags, 0); + err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0); if (err || !vg || !vg->num_vlans) goto out; @@ -908,11 +914,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], list_for_each_entry(v, &vg->vlan_list, vlist) { if (!br_vlan_should_use(v)) continue; - if (dev->priv_flags & IFF_EBRIDGE) - err = br_fdb_insert(br, NULL, addr, v->vid); - else - err = __br_fdb_add(ndm, p, addr, nlh_flags, - v->vid); + err = __br_fdb_add(ndm, br, p, addr, nlh_flags, v->vid); if (err) goto out; } diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index e24754a0e052..920b7c0f1e2d 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c @@ -78,13 +78,10 @@ static void br_do_proxy_arp(struct sk_buff *skb, struct net_bridge *br, BR_INPUT_SKB_CB(skb)->proxyarp_replied = false; - if (dev->flags & IFF_NOARP) + if ((dev->flags & IFF_NOARP) || + !pskb_may_pull(skb, arp_hdr_len(dev))) return; - if (!pskb_may_pull(skb, arp_hdr_len(dev))) { - dev->stats.tx_dropped++; - return; - } parp = arp_hdr(skb); if (parp->ar_pro != htons(ETH_P_IP) || diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c index 6e48aa69fa24..d7af67a3f19c 100644 --- a/net/bridge/netfilter/nft_reject_bridge.c +++ b/net/bridge/netfilter/nft_reject_bridge.c @@ -35,6 +35,12 @@ static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb, ether_addr_copy(eth->h_dest, eth_hdr(oldskb)->h_source); eth->h_proto = eth_hdr(oldskb)->h_proto; skb_pull(nskb, ETH_HLEN); + + if (skb_vlan_tag_present(oldskb)) { + u16 vid = skb_vlan_tag_get(oldskb); + + __vlan_hwaccel_put_tag(nskb, oldskb->vlan_proto, vid); + } } /* We cannot use oldskb->dev, it can be either bridge device (NF_BRIDGE INPUT) diff --git a/net/core/dev.c b/net/core/dev.c index d8b076987ec9..acc4c90c7765 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4328,6 +4328,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff NAPI_GRO_CB(skb)->free = 0; NAPI_GRO_CB(skb)->encap_mark = 0; NAPI_GRO_CB(skb)->recursion_counter = 0; + NAPI_GRO_CB(skb)->is_fou = 0; NAPI_GRO_CB(skb)->gro_remcsum_start = 0; /* Setup for GRO checksum validation */ @@ -6484,11 +6485,13 @@ static void netdev_sync_lower_features(struct net_device *upper, netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n", &feature, lower->name); lower->wanted_features &= ~feature; - netdev_update_features(lower); + __netdev_update_features(lower); if (unlikely(lower->features & feature)) netdev_WARN(upper, "failed to disable %pNF on %s!\n", &feature, lower->name); + else + netdev_features_change(lower); } } } diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index a2270188b864..9bcc6fdade3e 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c @@ -159,6 +159,7 @@ static void sched_send_work(unsigned long _data) static void trace_drop_common(struct sk_buff *skb, void *location) { struct net_dm_alert_msg *msg; + struct net_dm_drop_point *point; struct nlmsghdr *nlh; struct nlattr *nla; int i; @@ -177,11 +178,13 @@ static void trace_drop_common(struct sk_buff *skb, void *location) nlh = (struct nlmsghdr *)dskb->data; nla = genlmsg_data(nlmsg_data(nlh)); msg = nla_data(nla); + point = 
msg->points; for (i = 0; i < msg->entries; i++) { - if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) { - msg->points[i].count++; + if (!memcmp(&location, &point->pc, sizeof(void *))) { + point->count++; goto out; } + point++; } if (msg->entries == dm_hit_limit) goto out; @@ -190,8 +193,8 @@ static void trace_drop_common(struct sk_buff *skb, void *location) */ __nla_reserve_nohdr(dskb, sizeof(struct net_dm_drop_point)); nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point)); - memcpy(msg->points[msg->entries].pc, &location, sizeof(void *)); - msg->points[msg->entries].count = 1; + memcpy(point->pc, &location, sizeof(void *)); + point->count = 1; msg->entries++; if (!timer_pending(&data->send_timer)) { diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index b4f2c30e8313..9c31d7cde80b 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -180,15 +180,16 @@ ip: ip_proto = iph->protocol; - if (!dissector_uses_key(flow_dissector, - FLOW_DISSECTOR_KEY_IPV4_ADDRS)) - break; + if (dissector_uses_key(flow_dissector, + FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { + key_addrs = skb_flow_dissector_target(flow_dissector, + FLOW_DISSECTOR_KEY_IPV4_ADDRS, + target_container); - key_addrs = skb_flow_dissector_target(flow_dissector, - FLOW_DISSECTOR_KEY_IPV4_ADDRS, target_container); - memcpy(&key_addrs->v4addrs, &iph->saddr, - sizeof(key_addrs->v4addrs)); - key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; + memcpy(&key_addrs->v4addrs, &iph->saddr, + sizeof(key_addrs->v4addrs)); + key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; + } if (ip_is_fragment(iph)) { key_control->flags |= FLOW_DIS_IS_FRAGMENT; diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index a9da58204afa..e2a0aed52983 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -253,6 +253,7 @@ int rtnl_unregister(int protocol, int msgtype) rtnl_msg_handlers[protocol][msgindex].doit = NULL; rtnl_msg_handlers[protocol][msgindex].dumpit = NULL; + rtnl_msg_handlers[protocol][msgindex].calcit = NULL; return 0; } @@ -2104,7 +2105,7 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm) } if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) { - __dev_notify_flags(dev, old_flags, 0U); + __dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags)); } else { dev->rtnl_link_state = RTNL_LINK_INITIALIZED; __dev_notify_flags(dev, old_flags, ~0U); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 47520c651632..144f655afe8b 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -4477,9 +4477,8 @@ int skb_vlan_pop(struct sk_buff *skb) if (likely(skb_vlan_tag_present(skb))) { skb->vlan_tci = 0; } else { - if (unlikely((skb->protocol != htons(ETH_P_8021Q) && - skb->protocol != htons(ETH_P_8021AD)) || - skb->len < VLAN_ETH_HLEN)) + if (unlikely(skb->protocol != htons(ETH_P_8021Q) && + skb->protocol != htons(ETH_P_8021AD))) return 0; err = __skb_vlan_pop(skb, &vlan_tci); @@ -4487,9 +4486,8 @@ int skb_vlan_pop(struct sk_buff *skb) return err; } /* move next vlan tag to hw accel tag */ - if (likely((skb->protocol != htons(ETH_P_8021Q) && - skb->protocol != htons(ETH_P_8021AD)) || - skb->len < VLAN_ETH_HLEN)) + if (likely(skb->protocol != htons(ETH_P_8021Q) && + skb->protocol != htons(ETH_P_8021AD))) return 0; vlan_proto = skb->protocol; diff --git a/net/core/sock.c b/net/core/sock.c index c9091a3e2154..0ec9b43c4d34 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -484,11 +484,12 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) } 
EXPORT_SYMBOL(sock_queue_rcv_skb); -int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested) +int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, + const int nested, unsigned int trim_cap) { int rc = NET_RX_SUCCESS; - if (sk_filter(sk, skb)) + if (sk_filter_trim_cap(sk, skb, trim_cap)) goto discard_and_relse; skb->dev = NULL; @@ -524,7 +525,7 @@ discard_and_relse: kfree_skb(skb); goto out; } -EXPORT_SYMBOL(sk_receive_skb); +EXPORT_SYMBOL(__sk_receive_skb); struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie) { diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index ef4c44d46293..3f51280374f0 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -868,7 +868,7 @@ lookup: goto discard_and_relse; nf_reset(skb); - return sk_receive_skb(sk, skb, 1); + return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4); no_dccp_socket: if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index d2caa4d69159..736cc95b5201 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -209,7 +209,7 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final); rcu_read_unlock(); - dst = ip6_dst_lookup_flow(sk, &fl6, final_p); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); if (IS_ERR(dst)) { err = PTR_ERR(dst); dst = NULL; @@ -276,7 +276,7 @@ static void dccp_v6_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb) security_skb_classify_flow(rxskb, flowi6_to_flowi(&fl6)); /* sk = NULL, but it is safe for now. RST socket required. */ - dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL); + dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL); if (!IS_ERR(dst)) { skb_dst_set(skb, dst); ip6_xmit(ctl_sk, skb, &fl6, NULL, 0); @@ -741,7 +741,7 @@ lookup: if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) goto discard_and_relse; - return sk_receive_skb(sk, skb, 1) ? -1 : 0; + return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4) ? 
-1 : 0; no_dccp_socket: if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) @@ -879,7 +879,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk)); final_p = fl6_update_dst(&fl6, opt, &final); - dst = ip6_dst_lookup_flow(sk, &fl6, final_p); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); if (IS_ERR(dst)) { err = PTR_ERR(dst); goto failure; diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 4256ac95a141..061c3939f93b 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -1031,7 +1031,7 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p, p->phy_interface = mode; phy_dn = of_parse_phandle(port_dn, "phy-handle", 0); - if (of_phy_is_fixed_link(port_dn)) { + if (!phy_dn && of_phy_is_fixed_link(port_dn)) { /* In the case of a fixed PHY, the DT node associated * to the fixed PHY is the Port DT node */ @@ -1041,7 +1041,7 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p, return ret; } phy_is_fixed = true; - phy_dn = port_dn; + phy_dn = of_node_get(port_dn); } if (ds->drv->get_phy_flags) @@ -1060,6 +1060,7 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p, ret = dsa_slave_phy_connect(p, slave_dev, phy_id); if (ret) { netdev_err(slave_dev, "failed to connect to phy%d: %d\n", phy_id, ret); + of_node_put(phy_dn); return ret; } } else { @@ -1068,6 +1069,8 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p, phy_flags, p->phy_interface); } + + of_node_put(phy_dn); } if (p->phy && phy_is_fixed) diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index 98ed5e43ab7b..0e83c5b08e0e 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c @@ -1343,7 +1343,8 @@ static int cipso_v4_parsetag_rbm(const struct cipso_v4_doi *doi_def, return ret_val; } - secattr->flags |= NETLBL_SECATTR_MLS_CAT; + if (secattr->attr.mls.cat) + secattr->flags |= NETLBL_SECATTR_MLS_CAT; } return 0; @@ -1524,7 +1525,8 @@ static int cipso_v4_parsetag_rng(const struct cipso_v4_doi *doi_def, return ret_val; } - secattr->flags |= NETLBL_SECATTR_MLS_CAT; + if (secattr->attr.mls.cat) + secattr->flags |= NETLBL_SECATTR_MLS_CAT; } return 0; diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index a4ffb324ca6d..0b14c00b7a34 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -1814,7 +1814,7 @@ void inet_netconf_notify_devconf(struct net *net, int type, int ifindex, struct sk_buff *skb; int err = -ENOBUFS; - skb = nlmsg_new(inet_netconf_msgsize_devconf(type), GFP_ATOMIC); + skb = nlmsg_new(inet_netconf_msgsize_devconf(type), GFP_KERNEL); if (!skb) goto errout; @@ -1826,7 +1826,7 @@ void inet_netconf_notify_devconf(struct net *net, int type, int ifindex, kfree_skb(skb); goto errout; } - rtnl_notify(skb, net, 0, RTNLGRP_IPV4_NETCONF, NULL, GFP_ATOMIC); + rtnl_notify(skb, net, 0, RTNLGRP_IPV4_NETCONF, NULL, GFP_KERNEL); return; errout: if (err < 0) @@ -1883,7 +1883,7 @@ static int inet_netconf_get_devconf(struct sk_buff *in_skb, } err = -ENOBUFS; - skb = nlmsg_new(inet_netconf_msgsize_devconf(-1), GFP_ATOMIC); + skb = nlmsg_new(inet_netconf_msgsize_devconf(-1), GFP_KERNEL); if (!skb) goto errout; @@ -2007,16 +2007,16 @@ static void inet_forward_change(struct net *net) for_each_netdev(net, dev) { struct in_device *in_dev; + if (on) dev_disable_lro(dev); - rcu_read_lock(); - in_dev = __in_dev_get_rcu(dev); + + in_dev = __in_dev_get_rtnl(dev); if (in_dev) { IN_DEV_CONF_SET(in_dev, FORWARDING, on); inet_netconf_notify_devconf(net, NETCONFA_FORWARDING, dev->ifindex, &in_dev->cnf); } - rcu_read_unlock(); 
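/*
 * Illustrative sketch, not from the patch: the net/dsa/slave.c hunk above
 * follows the usual device_node refcounting rules -- of_parse_phandle()
 * returns a node with its refcount raised, a fallback node has to be pinned
 * explicitly with of_node_get(), and every path that is done with the node
 * drops it via of_node_put(). The function below is hypothetical.
 */
#include <linux/of.h>

static void example_use_phy_node(struct device_node *port_dn)
{
	struct device_node *phy_dn;

	phy_dn = of_parse_phandle(port_dn, "phy-handle", 0);
	if (!phy_dn)
		phy_dn = of_node_get(port_dn);	/* fixed-link case: reuse the port node */

	/* ... look up / connect the PHY through phy_dn ... */

	of_node_put(phy_dn);	/* balances of_parse_phandle() or of_node_get() */
}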
} } diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 8dc9073d4a76..632d28592933 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -509,6 +509,7 @@ static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt, if (!dev) return -ENODEV; cfg->fc_oif = dev->ifindex; + cfg->fc_table = l3mdev_fib_table(dev); if (colon) { struct in_ifaddr *ifa; struct in_device *in_dev = __in_dev_get_rtnl(dev); @@ -1035,7 +1036,7 @@ no_promotions: * First of all, we scan fib_info list searching * for stray nexthop entries, then ignite fib_flush. */ - if (fib_sync_down_addr(dev_net(dev), ifa->ifa_local)) + if (fib_sync_down_addr(dev, ifa->ifa_local)) fib_flush(dev_net(dev)); } } diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index 3109b9bb95d2..498d5a929d6f 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -1069,6 +1069,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg) fi->fib_priority = cfg->fc_priority; fi->fib_prefsrc = cfg->fc_prefsrc; fi->fib_type = cfg->fc_type; + fi->fib_tb_id = cfg->fc_table; fi->fib_nhs = nhs; change_nexthops(fi) { @@ -1352,18 +1353,21 @@ nla_put_failure: * referring to it. * - device went down -> we must shutdown all nexthops going via it. */ -int fib_sync_down_addr(struct net *net, __be32 local) +int fib_sync_down_addr(struct net_device *dev, __be32 local) { int ret = 0; unsigned int hash = fib_laddr_hashfn(local); struct hlist_head *head = &fib_info_laddrhash[hash]; + int tb_id = l3mdev_fib_table(dev) ? : RT_TABLE_MAIN; + struct net *net = dev_net(dev); struct fib_info *fi; if (!fib_info_laddrhash || local == 0) return 0; hlist_for_each_entry(fi, head, fib_lhash) { - if (!net_eq(fi->fib_net, net)) + if (!net_eq(fi->fib_net, net) || + fi->fib_tb_id != tb_id) continue; if (fi->fib_prefsrc == local) { fi->fib_flags |= RTNH_F_DEAD; diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index f10f6a257d9a..484577c019ae 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -1714,8 +1714,10 @@ struct fib_table *fib_trie_unmerge(struct fib_table *oldtb) local_l = fib_find_node(lt, &local_tp, l->key); if (fib_insert_alias(lt, local_tp, local_l, new_fa, - NULL, l->key)) + NULL, l->key)) { + kmem_cache_free(fn_alias_kmem, new_fa); goto out; + } } /* stop loop if key wrapped back to 0 */ diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c index b5a137338e50..7ac370505e44 100644 --- a/net/ipv4/fou.c +++ b/net/ipv4/fou.c @@ -205,6 +205,9 @@ static struct sk_buff **fou_gro_receive(struct sk_buff **head, */ NAPI_GRO_CB(skb)->encap_mark = 0; + /* Flag this frame as already having an outer encap header */ + NAPI_GRO_CB(skb)->is_fou = 1; + rcu_read_lock(); offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads; ops = rcu_dereference(offloads[proto]); @@ -372,6 +375,9 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head, */ NAPI_GRO_CB(skb)->encap_mark = 0; + /* Flag this frame as already having an outer encap header */ + NAPI_GRO_CB(skb)->is_fou = 1; + rcu_read_lock(); offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads; ops = rcu_dereference(offloads[guehdr->proto_ctype]); diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c index 79ae0d7becbf..d9268af2ea44 100644 --- a/net/ipv4/gre_offload.c +++ b/net/ipv4/gre_offload.c @@ -151,6 +151,14 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head, if ((greh->flags & ~(GRE_KEY|GRE_CSUM)) != 0) goto out; + /* We can only support GRE_CSUM if we can track the location of + * the GRE header. 
In the case of FOU/GUE we cannot because the + * outer UDP header displaces the GRE header leaving us in a state + * of limbo. + */ + if ((greh->flags & GRE_CSUM) && NAPI_GRO_CB(skb)->is_fou) + goto out; + type = greh->protocol; rcu_read_lock(); diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 1e0850674fdf..41559375e58c 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -480,7 +480,7 @@ static struct rtable *icmp_route_lookup(struct net *net, fl4->flowi4_proto = IPPROTO_ICMP; fl4->fl4_icmp_type = type; fl4->fl4_icmp_code = code; - fl4->flowi4_oif = l3mdev_master_ifindex(skb_in->dev); + fl4->flowi4_oif = l3mdev_master_ifindex(skb_dst(skb_in)->dev); security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4)); rt = __ip_route_output_key_hash(net, fl4, @@ -505,7 +505,7 @@ static struct rtable *icmp_route_lookup(struct net *net, if (err) goto relookup_failed; - if (inet_addr_type_dev_table(net, skb_in->dev, + if (inet_addr_type_dev_table(net, skb_dst(skb_in)->dev, fl4_dec.saddr) == RTN_LOCAL) { rt2 = __ip_route_output_key(net, &fl4_dec); if (IS_ERR(rt2)) diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 3e4184088082..900ee28bda99 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -399,7 +399,10 @@ static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi) iph->saddr, iph->daddr, tpi->key); if (tunnel) { - skb_pop_mac_header(skb); + if (tunnel->dev->type != ARPHRD_NONE) + skb_pop_mac_header(skb); + else + skb_reset_mac_header(skb); if (tunnel->collect_md) { __be16 flags; __be64 tun_id; @@ -520,7 +523,8 @@ static struct rtable *gre_get_rt(struct sk_buff *skb, return ip_route_output_key(net, fl); } -static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev) +static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev, + __be16 proto) { struct ip_tunnel_info *tun_info; const struct ip_tunnel_key *key; @@ -563,7 +567,7 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev) } flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY); - build_header(skb, tunnel_hlen, flags, htons(ETH_P_TEB), + build_header(skb, tunnel_hlen, flags, proto, tunnel_id_to_key(tun_info->key.tun_id), 0); df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; @@ -605,7 +609,7 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb, const struct iphdr *tnl_params; if (tunnel->collect_md) { - gre_fb_xmit(skb, dev); + gre_fb_xmit(skb, dev, skb->protocol); return NETDEV_TX_OK; } @@ -649,7 +653,7 @@ static netdev_tx_t gre_tap_xmit(struct sk_buff *skb, struct ip_tunnel *tunnel = netdev_priv(dev); if (tunnel->collect_md) { - gre_fb_xmit(skb, dev); + gre_fb_xmit(skb, dev, htons(ETH_P_TEB)); return NETDEV_TX_OK; } @@ -851,9 +855,16 @@ static void __gre_tunnel_init(struct net_device *dev) dev->hw_features |= GRE_FEATURES; if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) { - /* TCP offload with GRE SEQ is not supported. */ - dev->features |= NETIF_F_GSO_SOFTWARE; - dev->hw_features |= NETIF_F_GSO_SOFTWARE; + /* TCP offload with GRE SEQ is not supported, nor + * can we support 2 levels of outer headers requiring + * an update. 
+ */ + if (!(tunnel->parms.o_flags & TUNNEL_CSUM) || + (tunnel->encap.type == TUNNEL_ENCAP_NONE)) { + dev->features |= NETIF_F_GSO_SOFTWARE; + dev->hw_features |= NETIF_F_GSO_SOFTWARE; + } + /* Can use a lockless transmit, unless we generate * output sequences */ @@ -875,7 +886,7 @@ static int ipgre_tunnel_init(struct net_device *dev) netif_keep_dst(dev); dev->addr_len = 4; - if (iph->daddr) { + if (iph->daddr && !tunnel->collect_md) { #ifdef CONFIG_NET_IPGRE_BROADCAST if (ipv4_is_multicast(iph->daddr)) { if (!iph->saddr) @@ -884,8 +895,9 @@ static int ipgre_tunnel_init(struct net_device *dev) dev->header_ops = &ipgre_header_ops; } #endif - } else + } else if (!tunnel->collect_md) { dev->header_ops = &ipgre_header_ops; + } return ip_tunnel_init(dev); } @@ -928,6 +940,11 @@ static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[]) if (flags & (GRE_VERSION|GRE_ROUTING)) return -EINVAL; + if (data[IFLA_GRE_COLLECT_METADATA] && + data[IFLA_GRE_ENCAP_TYPE] && + nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE) + return -EINVAL; + return 0; } @@ -1001,6 +1018,8 @@ static void ipgre_netlink_parms(struct net_device *dev, struct ip_tunnel *t = netdev_priv(dev); t->collect_md = true; + if (dev->type == ARPHRD_IPGRE) + dev->type = ARPHRD_NONE; } } @@ -1230,6 +1249,7 @@ struct net_device *gretap_fb_dev_create(struct net *net, const char *name, { struct nlattr *tb[IFLA_MAX + 1]; struct net_device *dev; + LIST_HEAD(list_kill); struct ip_tunnel *t; int err; @@ -1245,8 +1265,10 @@ struct net_device *gretap_fb_dev_create(struct net *net, const char *name, t->collect_md = true; err = ipgre_newlink(net, dev, tb, NULL); - if (err < 0) - goto out; + if (err < 0) { + free_netdev(dev); + return ERR_PTR(err); + } /* openvswitch users expect packet sizes to be unrestricted, * so set the largest MTU we can. 
@@ -1255,9 +1277,14 @@ struct net_device *gretap_fb_dev_create(struct net *net, const char *name, if (err) goto out; + err = rtnl_configure_link(dev, NULL); + if (err < 0) + goto out; + return dev; out: - free_netdev(dev); + ip_tunnel_dellink(dev, &list_kill); + unregister_netdevice_many(&list_kill); return ERR_PTR(err); } EXPORT_SYMBOL_GPL(gretap_fb_dev_create); diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 1ea36bf778e6..9a7b60d6c670 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -279,9 +279,12 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc, ipc->ttl = val; break; case IP_TOS: - if (cmsg->cmsg_len != CMSG_LEN(sizeof(int))) + if (cmsg->cmsg_len == CMSG_LEN(sizeof(int))) + val = *(int *)CMSG_DATA(cmsg); + else if (cmsg->cmsg_len == CMSG_LEN(sizeof(u8))) + val = *(u8 *)CMSG_DATA(cmsg); + else return -EINVAL; - val = *(int *)CMSG_DATA(cmsg); if (val < 0 || val > 255) return -EINVAL; ipc->tos = val; diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index 2fd6fce1851f..abcf431376a0 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c @@ -51,7 +51,7 @@ static int vti_net_id __read_mostly; static int vti_tunnel_init(struct net_device *dev); static int vti_input(struct sk_buff *skb, int nexthdr, __be32 spi, - int encap_type) + int encap_type, bool update_skb_dev) { struct ip_tunnel *tunnel; const struct iphdr *iph = ip_hdr(skb); @@ -66,6 +66,9 @@ static int vti_input(struct sk_buff *skb, int nexthdr, __be32 spi, XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel; + if (update_skb_dev) + skb->dev = tunnel->dev; + return xfrm_input(skb, nexthdr, spi, encap_type); } @@ -75,25 +78,43 @@ drop: return 0; } -static int vti_input_ipip(struct sk_buff *skb, int nexthdr, __be32 spi, - int encap_type) +static int vti_input_proto(struct sk_buff *skb, int nexthdr, __be32 spi, + int encap_type) { - struct ip_tunnel *tunnel; + return vti_input(skb, nexthdr, spi, encap_type, false); +} + +static int vti_rcv(struct sk_buff *skb, __be32 spi, bool update_skb_dev) +{ + XFRM_SPI_SKB_CB(skb)->family = AF_INET; + XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr); + + return vti_input(skb, ip_hdr(skb)->protocol, spi, 0, update_skb_dev); +} + +static int vti_rcv_proto(struct sk_buff *skb) +{ + return vti_rcv(skb, 0, false); +} + +static int vti_rcv_tunnel(struct sk_buff *skb) +{ + struct ip_tunnel_net *itn = net_generic(dev_net(skb->dev), vti_net_id); const struct iphdr *iph = ip_hdr(skb); - struct net *net = dev_net(skb->dev); - struct ip_tunnel_net *itn = net_generic(net, vti_net_id); + struct ip_tunnel *tunnel; tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, iph->saddr, iph->daddr, 0); if (tunnel) { + struct tnl_ptk_info tpi = { + .proto = htons(ETH_P_IP), + }; + if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) goto drop; - - XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel; - - skb->dev = tunnel->dev; - - return xfrm_input(skb, nexthdr, spi, encap_type); + if (iptunnel_pull_header(skb, 0, tpi.proto)) + goto drop; + return ip_tunnel_rcv(tunnel, skb, &tpi, NULL, false); } return -EINVAL; @@ -102,22 +123,6 @@ drop: return 0; } -static int vti_rcv(struct sk_buff *skb) -{ - XFRM_SPI_SKB_CB(skb)->family = AF_INET; - XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr); - - return vti_input(skb, ip_hdr(skb)->protocol, 0, 0); -} - -static int vti_rcv_ipip(struct sk_buff *skb) -{ - XFRM_SPI_SKB_CB(skb)->family = AF_INET; - XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr); - - return vti_input_ipip(skb, 
ip_hdr(skb)->protocol, ip_hdr(skb)->saddr, 0); -} - static int vti_rcv_cb(struct sk_buff *skb, int err) { unsigned short family; @@ -452,31 +457,31 @@ static void __net_init vti_fb_tunnel_init(struct net_device *dev) } static struct xfrm4_protocol vti_esp4_protocol __read_mostly = { - .handler = vti_rcv, - .input_handler = vti_input, + .handler = vti_rcv_proto, + .input_handler = vti_input_proto, .cb_handler = vti_rcv_cb, .err_handler = vti4_err, .priority = 100, }; static struct xfrm4_protocol vti_ah4_protocol __read_mostly = { - .handler = vti_rcv, - .input_handler = vti_input, + .handler = vti_rcv_proto, + .input_handler = vti_input_proto, .cb_handler = vti_rcv_cb, .err_handler = vti4_err, .priority = 100, }; static struct xfrm4_protocol vti_ipcomp4_protocol __read_mostly = { - .handler = vti_rcv, - .input_handler = vti_input, + .handler = vti_rcv_proto, + .input_handler = vti_input_proto, .cb_handler = vti_rcv_cb, .err_handler = vti4_err, .priority = 100, }; static struct xfrm_tunnel ipip_handler __read_mostly = { - .handler = vti_rcv_ipip, + .handler = vti_rcv_tunnel, .err_handler = vti4_err, .priority = 0, }; diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c index b3ca21b2ba9b..ddbf93e70069 100644 --- a/net/ipv4/netfilter/nf_nat_pptp.c +++ b/net/ipv4/netfilter/nf_nat_pptp.c @@ -156,8 +156,7 @@ pptp_outbound_pkt(struct sk_buff *skb, break; default: pr_debug("unknown outbound packet 0x%04x:%s\n", msg, - msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : - pptp_msg_name[0]); + pptp_msg_name(msg)); /* fall through */ case PPTP_SET_LINK_INFO: /* only need to NAT in case PAC is behind NAT box */ @@ -250,9 +249,7 @@ pptp_inbound_pkt(struct sk_buff *skb, pcid_off = offsetof(union pptp_ctrl_union, setlink.peersCallID); break; default: - pr_debug("unknown inbound packet %s\n", - msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : - pptp_msg_name[0]); + pr_debug("unknown inbound packet %s\n", pptp_msg_name(msg)); /* fall through */ case PPTP_START_SESSION_REQUEST: case PPTP_START_SESSION_REPLY: diff --git a/net/ipv4/netfilter/nft_dup_ipv4.c b/net/ipv4/netfilter/nft_dup_ipv4.c index bf855e64fc45..0c01a270bf9f 100644 --- a/net/ipv4/netfilter/nft_dup_ipv4.c +++ b/net/ipv4/netfilter/nft_dup_ipv4.c @@ -28,7 +28,7 @@ static void nft_dup_ipv4_eval(const struct nft_expr *expr, struct in_addr gw = { .s_addr = (__force __be32)regs->data[priv->sreg_addr], }; - int oif = regs->data[priv->sreg_dev]; + int oif = priv->sreg_dev ? regs->data[priv->sreg_dev] : -1; nf_dup_ipv4(pkt->net, pkt->skb, pkt->hook, &gw, oif); } @@ -59,7 +59,9 @@ static int nft_dup_ipv4_dump(struct sk_buff *skb, const struct nft_expr *expr) { struct nft_dup_ipv4 *priv = nft_expr_priv(expr); - if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr) || + if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr)) + goto nla_put_failure; + if (priv->sreg_dev && nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev)) goto nla_put_failure; diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 671e221070a5..2fcb524cd75b 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -482,6 +482,10 @@ u32 ip_idents_reserve(u32 hash, int segs) if (old != now && cmpxchg(p_tstamp, old, now) == old) delta = prandom_u32_max(now - old); + /* If UBSAN reports an error there, please make sure your compiler + * supports -fno-strict-overflow before reporting it that was a bug + * in UBSAN, and it has been fixed in GCC-8. 
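/*
 * Illustrative sketch (userspace, not from the patch): with the
 * net/ipv4/ip_sockglue.c hunk above, IP_TOS ancillary data may be passed
 * either as an int or as a single byte. A sender setting a per-packet TOS
 * could look like this; the socket, destination and payload are
 * placeholders.
 */
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>

static ssize_t send_with_tos(int fd, const struct sockaddr_in *dst,
			     const void *buf, size_t len, int tos)
{
	char cbuf[CMSG_SPACE(sizeof(int))] = { 0 };
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	struct msghdr msg = {
		.msg_name	= (void *)dst,
		.msg_namelen	= sizeof(*dst),
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
		.msg_control	= cbuf,
		.msg_controllen	= sizeof(cbuf),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	cmsg->cmsg_level = IPPROTO_IP;
	cmsg->cmsg_type = IP_TOS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));	/* CMSG_LEN(sizeof(u8)) is also accepted now */
	memcpy(CMSG_DATA(cmsg), &tos, sizeof(int));

	return sendmsg(fd, &msg, 0);
}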
+ */ return atomic_add_return(segs + delta, p_id) - segs; } EXPORT_SYMBOL(ip_idents_reserve); @@ -896,7 +900,7 @@ void ip_rt_send_redirect(struct sk_buff *skb) /* Check for load limit; set rate_last to the latest sent * redirect. */ - if (peer->rate_tokens == 0 || + if (peer->n_redirects == 0 || time_after(jiffies, (peer->rate_last + (ip_rt_redirect_load << peer->n_redirects)))) { @@ -1500,9 +1504,9 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr, #endif } -static struct rtable *rt_dst_alloc(struct net_device *dev, - unsigned int flags, u16 type, - bool nopolicy, bool noxfrm, bool will_cache) +struct rtable *rt_dst_alloc(struct net_device *dev, + unsigned int flags, u16 type, + bool nopolicy, bool noxfrm, bool will_cache) { struct rtable *rt; @@ -1531,6 +1535,7 @@ static struct rtable *rt_dst_alloc(struct net_device *dev, return rt; } +EXPORT_SYMBOL(rt_dst_alloc); /* called in rcu_read_lock() section */ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index b61a35813ae9..4436d60b38b4 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -2927,7 +2927,10 @@ static void tcp_update_rtt_min(struct sock *sk, u32 rtt_us) { const u32 now = tcp_time_stamp, wlen = sysctl_tcp_min_rtt_wlen * HZ; struct rtt_meas *m = tcp_sk(sk)->rtt_min; - struct rtt_meas rttm = { .rtt = (rtt_us ? : 1), .ts = now }; + struct rtt_meas rttm = { + .rtt = likely(rtt_us) ? rtt_us : jiffies_to_usecs(1), + .ts = now, + }; u32 elapsed; /* Check if the new measurement updates the 1st, 2nd, or 3rd choices */ diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index ce90a12b1e6e..241847c611a8 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1691,10 +1691,10 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb, if (use_hash2) { hash2_any = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum) & - udp_table.mask; - hash2 = udp4_portaddr_hash(net, daddr, hnum) & udp_table.mask; + udptable->mask; + hash2 = udp4_portaddr_hash(net, daddr, hnum) & udptable->mask; start_lookup: - hslot = &udp_table.hash2[hash2]; + hslot = &udptable->hash2[hash2]; offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node); } @@ -1760,8 +1760,11 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh, } } - return skb_checksum_init_zero_check(skb, proto, uh->check, - inet_compute_pseudo); + /* Note, we are only interested in != 0 or == 0, thus the + * force to int. 
+ */ + return (__force int)skb_checksum_init_zero_check(skb, proto, uh->check, + inet_compute_pseudo); } /* diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 5183466df92a..441f6519e2e7 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -569,7 +569,7 @@ void inet6_netconf_notify_devconf(struct net *net, int type, int ifindex, struct sk_buff *skb; int err = -ENOBUFS; - skb = nlmsg_new(inet6_netconf_msgsize_devconf(type), GFP_ATOMIC); + skb = nlmsg_new(inet6_netconf_msgsize_devconf(type), GFP_KERNEL); if (!skb) goto errout; @@ -581,7 +581,7 @@ void inet6_netconf_notify_devconf(struct net *net, int type, int ifindex, kfree_skb(skb); goto errout; } - rtnl_notify(skb, net, 0, RTNLGRP_IPV6_NETCONF, NULL, GFP_ATOMIC); + rtnl_notify(skb, net, 0, RTNLGRP_IPV6_NETCONF, NULL, GFP_KERNEL); return; errout: rtnl_set_sk_err(net, RTNLGRP_IPV6_NETCONF, err); @@ -800,7 +800,14 @@ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf) } if (p == &net->ipv6.devconf_all->forwarding) { + int old_dflt = net->ipv6.devconf_dflt->forwarding; + net->ipv6.devconf_dflt->forwarding = newf; + if ((!newf) ^ (!old_dflt)) + inet6_netconf_notify_devconf(net, NETCONFA_FORWARDING, + NETCONFA_IFINDEX_DEFAULT, + net->ipv6.devconf_dflt); + addrconf_forward_change(net, newf); if ((!newf) ^ (!old)) inet6_netconf_notify_devconf(net, NETCONFA_FORWARDING, @@ -3215,6 +3222,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct netdev_notifier_changeupper_info *info; struct inet6_dev *idev = __in6_dev_get(dev); struct net *net = dev_net(dev); int run_pending = 0; @@ -3376,6 +3384,15 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, case NETDEV_POST_TYPE_CHANGE: addrconf_type_change(dev, event); break; + + case NETDEV_CHANGEUPPER: + info = ptr; + + /* flush all routes if dev is linked to or unlinked from + * an L3 master device (e.g., VRF) + */ + if (info->upper_dev && netif_is_l3_master(info->upper_dev)) + addrconf_ifdown(dev, 0); } return NOTIFY_OK; diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c index bfa941fc1165..129324b36fb6 100644 --- a/net/ipv6/addrconf_core.c +++ b/net/ipv6/addrconf_core.c @@ -107,15 +107,16 @@ int inet6addr_notifier_call_chain(unsigned long val, void *v) } EXPORT_SYMBOL(inet6addr_notifier_call_chain); -static int eafnosupport_ipv6_dst_lookup(struct net *net, struct sock *u1, - struct dst_entry **u2, - struct flowi6 *u3) +static struct dst_entry *eafnosupport_ipv6_dst_lookup_flow(struct net *net, + const struct sock *sk, + struct flowi6 *fl6, + const struct in6_addr *final_dst) { - return -EAFNOSUPPORT; + return ERR_PTR(-EAFNOSUPPORT); } const struct ipv6_stub *ipv6_stub __read_mostly = &(struct ipv6_stub) { - .ipv6_dst_lookup = eafnosupport_ipv6_dst_lookup, + .ipv6_dst_lookup_flow = eafnosupport_ipv6_dst_lookup_flow, }; EXPORT_SYMBOL_GPL(ipv6_stub); diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 28e03fce9e89..8e38e058a66c 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -700,7 +700,7 @@ int inet6_sk_rebuild_header(struct sock *sk) &final); rcu_read_unlock(); - dst = ip6_dst_lookup_flow(sk, &fl6, final_p); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); if (IS_ERR(dst)) { sk->sk_route_caps = 0; sk->sk_err_soft = -PTR_ERR(dst); @@ -858,7 +858,7 @@ static struct pernet_operations inet6_net_ops = { static const struct ipv6_stub ipv6_stub_impl = { .ipv6_sock_mc_join = 
ipv6_sock_mc_join, .ipv6_sock_mc_drop = ipv6_sock_mc_drop, - .ipv6_dst_lookup = ip6_dst_lookup, + .ipv6_dst_lookup_flow = ip6_dst_lookup_flow, .udpv6_encap_enable = udpv6_encap_enable, .ndisc_send_na = ndisc_send_na, .nd_tbl = &nd_tbl, @@ -1046,11 +1046,11 @@ netfilter_fail: igmp_fail: ndisc_cleanup(); ndisc_fail: - ip6_mr_cleanup(); + icmpv6_cleanup(); icmp_fail: - unregister_pernet_subsys(&inet6_net_ops); + ip6_mr_cleanup(); ipmr_fail: - icmpv6_cleanup(); + unregister_pernet_subsys(&inet6_net_ops); register_pernet_fail: sock_unregister(PF_INET6); rtnl_unregister_all(PF_INET6); diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 674fc3861e41..c41b717b0ef2 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -40,7 +40,8 @@ static bool ipv6_mapped_addr_any(const struct in6_addr *a) return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0); } -static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) +int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, + int addr_len) { struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; struct inet_sock *inet = inet_sk(sk); @@ -180,7 +181,7 @@ ipv4_connected: final_p = fl6_update_dst(&fl6, opt, &final); rcu_read_unlock(); - dst = ip6_dst_lookup_flow(sk, &fl6, final_p); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); err = 0; if (IS_ERR(dst)) { err = PTR_ERR(dst); @@ -219,6 +220,7 @@ out: fl6_sock_release(flowlabel); return err; } +EXPORT_SYMBOL_GPL(__ip6_datagram_connect); int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) { diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 3ae2fbe07b25..463f533bd289 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -446,6 +446,10 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) if (__ipv6_addr_needs_scope_id(addr_type)) iif = skb->dev->ifindex; + else { + dst = skb_dst(skb); + iif = l3mdev_master_ifindex(dst ? dst->dev : skb->dev); + } /* * Must not send error if the source does not uniquely @@ -501,9 +505,6 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) else if (!fl6.flowi6_oif) fl6.flowi6_oif = np->ucast_oif; - if (!fl6.flowi6_oif) - fl6.flowi6_oif = l3mdev_master_ifindex(skb->dev); - dst = icmpv6_route_lookup(net, skb, sk, &fl6); if (IS_ERR(dst)) goto out; diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c index dc79ebc14189..ab9eda00dde5 100644 --- a/net/ipv6/inet6_connection_sock.c +++ b/net/ipv6/inet6_connection_sock.c @@ -89,7 +89,7 @@ struct dst_entry *inet6_csk_route_req(const struct sock *sk, fl6->flowi6_uid = sk->sk_uid; security_req_classify_flow(req, flowi6_to_flowi(fl6)); - dst = ip6_dst_lookup_flow(sk, fl6, final_p); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p); if (IS_ERR(dst)) return NULL; @@ -144,7 +144,7 @@ static struct dst_entry *inet6_csk_route_socket(struct sock *sk, dst = __inet6_csk_dst_check(sk, np->dst_cookie); if (!dst) { - dst = ip6_dst_lookup_flow(sk, fl6, final_p); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p); if (!IS_ERR(dst)) ip6_dst_store(sk, dst, NULL, NULL); diff --git a/net/ipv6/ip6_checksum.c b/net/ipv6/ip6_checksum.c index 391a8fedb27e..1132624edee9 100644 --- a/net/ipv6/ip6_checksum.c +++ b/net/ipv6/ip6_checksum.c @@ -84,9 +84,12 @@ int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto) * we accept a checksum of zero here. 
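/*
 * Illustrative sketch, not from the patch: after the ipv6_stub change above,
 * code that must cope with IPv6 being modular or disabled resolves an IPv6
 * route through the stub; when IPv6 is unavailable the eafnosupport stub
 * returns ERR_PTR(-EAFNOSUPPORT). The function and its flow setup are
 * hypothetical.
 */
#include <linux/err.h>
#include <net/addrconf.h>
#include <net/dst.h>
#include <net/flow.h>

static struct dst_entry *example_v6_route(struct net *net, struct sock *sk,
					  const struct in6_addr *daddr)
{
	struct flowi6 fl6 = {};
	struct dst_entry *dst;

	fl6.daddr = *daddr;
	dst = ipv6_stub->ipv6_dst_lookup_flow(net, sk, &fl6, NULL);
	if (IS_ERR(dst))
		return NULL;

	return dst;
}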
When we find the socket * for the UDP packet we'll check if that socket allows zero checksum * for IPv6 (set by socket option). + * + * Note, we are only interested in != 0 or == 0, thus the + * force to int. */ - return skb_checksum_init_zero_check(skb, proto, uh->check, - ip6_compute_pseudo); + return (__force int)skb_checksum_init_zero_check(skb, proto, uh->check, + ip6_compute_pseudo); } EXPORT_SYMBOL(udp6_csum_init); diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index e39dc94486b2..1e2b8d33d303 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1057,13 +1057,13 @@ EXPORT_SYMBOL_GPL(ip6_dst_lookup); * It returns a valid dst pointer on success, or a pointer encoded * error code. */ -struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6, +struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, struct flowi6 *fl6, const struct in6_addr *final_dst) { struct dst_entry *dst = NULL; int err; - err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6); + err = ip6_dst_lookup_tail(net, sk, &dst, fl6); if (err) return ERR_PTR(err); if (final_dst) @@ -1071,7 +1071,7 @@ struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6, if (!fl6->flowi6_oif) fl6->flowi6_oif = l3mdev_fib_oif(dst->dev); - return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0); + return xfrm_lookup_route(net, dst, flowi6_to_flowi(fl6), sk, 0); } EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow); @@ -1096,7 +1096,7 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, dst = ip6_sk_dst_check(sk, dst, fl6); if (!dst) - dst = ip6_dst_lookup_flow(sk, fl6, final_dst); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_dst); return dst; } diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index 8d4c5b126117..8151c7b9659b 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -324,11 +324,9 @@ static int vti6_rcv(struct sk_buff *skb) goto discard; } - XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = t; - rcu_read_unlock(); - return xfrm6_rcv(skb); + return xfrm6_rcv_tnl(skb, t); } rcu_read_unlock(); return -EINVAL; diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 91f16e679f63..20812e8b24dd 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -1594,14 +1594,15 @@ static int ip6mr_sk_init(struct mr6_table *mrt, struct sock *sk) if (likely(mrt->mroute6_sk == NULL)) { mrt->mroute6_sk = sk; net->ipv6.devconf_all->mc_forwarding++; - inet6_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING, - NETCONFA_IFINDEX_ALL, - net->ipv6.devconf_all); - } - else + } else { err = -EADDRINUSE; + } write_unlock_bh(&mrt_lock); + if (!err) + inet6_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING, + NETCONFA_IFINDEX_ALL, + net->ipv6.devconf_all); rtnl_unlock(); return err; @@ -1619,11 +1620,11 @@ int ip6mr_sk_done(struct sock *sk) write_lock_bh(&mrt_lock); mrt->mroute6_sk = NULL; net->ipv6.devconf_all->mc_forwarding--; + write_unlock_bh(&mrt_lock); inet6_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING, NETCONFA_IFINDEX_ALL, net->ipv6.devconf_all); - write_unlock_bh(&mrt_lock); mroute_clean_tables(mrt, false); err = 0; diff --git a/net/ipv6/netfilter/nft_dup_ipv6.c b/net/ipv6/netfilter/nft_dup_ipv6.c index 8bfd470cbe72..831f86e1ec08 100644 --- a/net/ipv6/netfilter/nft_dup_ipv6.c +++ b/net/ipv6/netfilter/nft_dup_ipv6.c @@ -26,7 +26,7 @@ static void nft_dup_ipv6_eval(const struct nft_expr *expr, { struct nft_dup_ipv6 *priv = nft_expr_priv(expr); struct in6_addr *gw = (struct in6_addr *)&regs->data[priv->sreg_addr]; - int
oif = regs->data[priv->sreg_dev]; + int oif = priv->sreg_dev ? regs->data[priv->sreg_dev] : -1; nf_dup_ipv6(pkt->net, pkt->skb, pkt->hook, gw, oif); } @@ -57,7 +57,9 @@ static int nft_dup_ipv6_dump(struct sk_buff *skb, const struct nft_expr *expr) { struct nft_dup_ipv6 *priv = nft_expr_priv(expr); - if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr) || + if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr)) + goto nla_put_failure; + if (priv->sreg_dev && nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev)) goto nla_put_failure; diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 1a940af30300..8614321b4c54 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -890,7 +890,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) if (hdrincl) fl6.flowi6_flags |= FLOWI_FLAG_KNOWN_NH; - dst = ip6_dst_lookup_flow(sk, &fl6, final_p); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); if (IS_ERR(dst)) { err = PTR_ERR(dst); goto out; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 0b74b01db2f6..ee43d46452a8 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -336,9 +336,9 @@ static struct rt6_info *__ip6_dst_alloc(struct net *net, return rt; } -static struct rt6_info *ip6_dst_alloc(struct net *net, - struct net_device *dev, - int flags) +struct rt6_info *ip6_dst_alloc(struct net *net, + struct net_device *dev, + int flags) { struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags); @@ -362,6 +362,7 @@ static struct rt6_info *ip6_dst_alloc(struct net *net, return rt; } +EXPORT_SYMBOL(ip6_dst_alloc); static void ip6_dst_destroy(struct dst_entry *dst) { diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index 7f3667635431..50ec2f3b92f7 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c @@ -232,7 +232,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) fl6.flowi6_uid = sk->sk_uid; security_req_classify_flow(req, flowi6_to_flowi(&fl6)); - dst = ip6_dst_lookup_flow(sk, &fl6, final_p); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); if (IS_ERR(dst)) goto out_free; } diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 27d5bc29188a..ae5a5d06c218 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -246,7 +246,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); - dst = ip6_dst_lookup_flow(sk, &fl6, final_p); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); if (IS_ERR(dst)) { err = PTR_ERR(dst); goto failure; @@ -816,8 +816,13 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 fl6.flowi6_proto = IPPROTO_TCP; if (rt6_need_strict(&fl6.daddr) && !oif) fl6.flowi6_oif = tcp_v6_iif(skb); - else + else { + if (!oif && netif_index_is_l3_master(net, skb->skb_iif)) + oif = skb->skb_iif; + fl6.flowi6_oif = oif; + } + fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark); fl6.fl6_dport = t1->dest; fl6.fl6_sport = t1->source; @@ -828,7 +833,7 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 * Underlying function will use this to retrieve the network * namespace */ - dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL); + dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL); if (!IS_ERR(dst)) { skb_dst_set(buff, dst); ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass); diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index e9db0ff606b9..c89cf8e5eff5 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -802,10 +802,10 @@ static int 
__udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb, if (use_hash2) { hash2_any = udp6_portaddr_hash(net, &in6addr_any, hnum) & - udp_table.mask; - hash2 = udp6_portaddr_hash(net, daddr, hnum) & udp_table.mask; + udptable->mask; + hash2 = udp6_portaddr_hash(net, daddr, hnum) & udptable->mask; start_lookup: - hslot = &udp_table.hash2[hash2]; + hslot = &udptable->hash2[hash2]; offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node); } diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c index 0eaab1fa6be5..b5789562aded 100644 --- a/net/ipv6/xfrm6_input.c +++ b/net/ipv6/xfrm6_input.c @@ -21,8 +21,10 @@ int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb) return xfrm6_extract_header(skb); } -int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi) +int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi, + struct ip6_tnl *t) { + XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = t; XFRM_SPI_SKB_CB(skb)->family = AF_INET6; XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr); return xfrm_input(skb, nexthdr, spi, 0); @@ -48,13 +50,18 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async) return -1; } -int xfrm6_rcv(struct sk_buff *skb) +int xfrm6_rcv_tnl(struct sk_buff *skb, struct ip6_tnl *t) { return xfrm6_rcv_spi(skb, skb_network_header(skb)[IP6CB(skb)->nhoff], - 0); + 0, t); } -EXPORT_SYMBOL(xfrm6_rcv); +EXPORT_SYMBOL(xfrm6_rcv_tnl); +int xfrm6_rcv(struct sk_buff *skb) +{ + return xfrm6_rcv_tnl(skb, NULL); +} +EXPORT_SYMBOL(xfrm6_rcv); int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto) { diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c index f9d493c59d6c..07b7b2540579 100644 --- a/net/ipv6/xfrm6_tunnel.c +++ b/net/ipv6/xfrm6_tunnel.c @@ -239,7 +239,7 @@ static int xfrm6_tunnel_rcv(struct sk_buff *skb) __be32 spi; spi = xfrm6_tunnel_spi_lookup(net, (const xfrm_address_t *)&iph->saddr); - return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi); + return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi, NULL); } static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt, diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index 7cc9db38e1b6..0e8f8a3f7b23 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c @@ -839,7 +839,7 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags) struct sock *sk = sock->sk; struct irda_sock *new, *self = irda_sk(sk); struct sock *newsk; - struct sk_buff *skb; + struct sk_buff *skb = NULL; int err; err = irda_create(sock_net(sk), newsock, sk->sk_protocol, 0); @@ -907,7 +907,6 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags) err = -EPERM; /* value does not seem to make sense. 
-arnd */ if (!new->tsap) { pr_debug("%s(), dup failed!\n", __func__); - kfree_skb(skb); goto out; } @@ -926,7 +925,6 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags) /* Clean up the original one to keep it in listen state */ irttp_listen(self->tsap); - kfree_skb(skb); sk->sk_ack_backlog--; newsock->state = SS_CONNECTED; @@ -934,6 +932,7 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags) irda_connect_response(new); err = 0; out: + kfree_skb(skb); release_sock(sk); return err; } diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 2b8b5c57c7f0..0233c496fc51 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -112,53 +112,19 @@ struct l2tp_net { spinlock_t l2tp_session_hlist_lock; }; -static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel); static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk) { return sk->sk_user_data; } -static inline struct l2tp_net *l2tp_pernet(struct net *net) +static inline struct l2tp_net *l2tp_pernet(const struct net *net) { BUG_ON(!net); return net_generic(net, l2tp_net_id); } -/* Tunnel reference counts. Incremented per session that is added to - * the tunnel. - */ -static inline void l2tp_tunnel_inc_refcount_1(struct l2tp_tunnel *tunnel) -{ - atomic_inc(&tunnel->ref_count); -} - -static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel) -{ - if (atomic_dec_and_test(&tunnel->ref_count)) - l2tp_tunnel_free(tunnel); -} -#ifdef L2TP_REFCNT_DEBUG -#define l2tp_tunnel_inc_refcount(_t) \ -do { \ - pr_debug("l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n", \ - __func__, __LINE__, (_t)->name, \ - atomic_read(&_t->ref_count)); \ - l2tp_tunnel_inc_refcount_1(_t); \ -} while (0) -#define l2tp_tunnel_dec_refcount(_t) \ -do { \ - pr_debug("l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", \ - __func__, __LINE__, (_t)->name, \ - atomic_read(&_t->ref_count)); \ - l2tp_tunnel_dec_refcount_1(_t); \ -} while (0) -#else -#define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t) -#define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t) -#endif - /* Session hash global list for L2TPv3. * The session_id SHOULD be random according to RFC3931, but several * L2TP implementations use incrementing session_ids. So we do a real @@ -216,27 +182,6 @@ static void l2tp_tunnel_sock_put(struct sock *sk) sock_put(sk); } -/* Lookup a session by id in the global session list - */ -static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id) -{ - struct l2tp_net *pn = l2tp_pernet(net); - struct hlist_head *session_list = - l2tp_session_id_hash_2(pn, session_id); - struct l2tp_session *session; - - rcu_read_lock_bh(); - hlist_for_each_entry_rcu(session, session_list, global_hlist) { - if (session->session_id == session_id) { - rcu_read_unlock_bh(); - return session; - } - } - rcu_read_unlock_bh(); - - return NULL; -} - /* Session hash list. * The session_id SHOULD be random according to RFC2661, but several * L2TP implementations (Cisco and Microsoft) use incrementing @@ -249,38 +194,31 @@ l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id) return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)]; } -/* Lookup a session by id - */ -struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id) +/* Lookup a tunnel. A new reference is held on the returned tunnel. 
*/ +struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id) { - struct hlist_head *session_list; - struct l2tp_session *session; + const struct l2tp_net *pn = l2tp_pernet(net); + struct l2tp_tunnel *tunnel; - /* In L2TPv3, session_ids are unique over all tunnels and we - * sometimes need to look them up before we know the - * tunnel. - */ - if (tunnel == NULL) - return l2tp_session_find_2(net, session_id); + rcu_read_lock_bh(); + list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) { + if (tunnel->tunnel_id == tunnel_id) { + l2tp_tunnel_inc_refcount(tunnel); + rcu_read_unlock_bh(); - session_list = l2tp_session_id_hash(tunnel, session_id); - read_lock_bh(&tunnel->hlist_lock); - hlist_for_each_entry(session, session_list, hlist) { - if (session->session_id == session_id) { - read_unlock_bh(&tunnel->hlist_lock); - return session; + return tunnel; } } - read_unlock_bh(&tunnel->hlist_lock); + rcu_read_unlock_bh(); return NULL; } -EXPORT_SYMBOL_GPL(l2tp_session_find); +EXPORT_SYMBOL_GPL(l2tp_tunnel_get); -/* Like l2tp_session_find() but takes a reference on the returned session. +/* Lookup a session. A new reference is held on the returned session. * Optionally calls session->ref() too if do_ref is true. */ -struct l2tp_session *l2tp_session_get(struct net *net, +struct l2tp_session *l2tp_session_get(const struct net *net, struct l2tp_tunnel *tunnel, u32 session_id, bool do_ref) { @@ -355,7 +293,9 @@ EXPORT_SYMBOL_GPL(l2tp_session_get_nth); /* Lookup a session by interface name. * This is very inefficient but is only used by management interfaces. */ -struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname) +struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net, + const char *ifname, + bool do_ref) { struct l2tp_net *pn = l2tp_pernet(net); int hash; @@ -365,7 +305,11 @@ struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname) for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) { hlist_for_each_entry_rcu(session, &pn->l2tp_session_hlist[hash], global_hlist) { if (!strcmp(session->ifname, ifname)) { + l2tp_session_inc_refcount(session); + if (do_ref && session->ref) + session->ref(session); rcu_read_unlock_bh(); + return session; } } @@ -375,22 +319,30 @@ struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname) return NULL; } -EXPORT_SYMBOL_GPL(l2tp_session_find_by_ifname); +EXPORT_SYMBOL_GPL(l2tp_session_get_by_ifname); -static int l2tp_session_add_to_tunnel(struct l2tp_tunnel *tunnel, - struct l2tp_session *session) +int l2tp_session_register(struct l2tp_session *session, + struct l2tp_tunnel *tunnel) { struct l2tp_session *session_walk; struct hlist_head *g_head; struct hlist_head *head; struct l2tp_net *pn; + int err; head = l2tp_session_id_hash(tunnel, session->session_id); write_lock_bh(&tunnel->hlist_lock); + if (!tunnel->acpt_newsess) { + err = -ENODEV; + goto err_tlock; + } + hlist_for_each_entry(session_walk, head, hlist) - if (session_walk->session_id == session->session_id) - goto exist; + if (session_walk->session_id == session->session_id) { + err = -EEXIST; + goto err_tlock; + } if (tunnel->version == L2TP_HDR_VER_3) { pn = l2tp_pernet(tunnel->l2tp_net); @@ -398,30 +350,44 @@ static int l2tp_session_add_to_tunnel(struct l2tp_tunnel *tunnel, session->session_id); spin_lock_bh(&pn->l2tp_session_hlist_lock); + hlist_for_each_entry(session_walk, g_head, global_hlist) - if (session_walk->session_id == session->session_id) - goto exist_glob; + if (session_walk->session_id == 
session->session_id) { + err = -EEXIST; + goto err_tlock_pnlock; + } + l2tp_tunnel_inc_refcount(tunnel); + sock_hold(tunnel->sock); hlist_add_head_rcu(&session->global_hlist, g_head); + spin_unlock_bh(&pn->l2tp_session_hlist_lock); + } else { + l2tp_tunnel_inc_refcount(tunnel); + sock_hold(tunnel->sock); } hlist_add_head(&session->hlist, head); write_unlock_bh(&tunnel->hlist_lock); + /* Ignore management session in session count value */ + if (session->session_id != 0) + atomic_inc(&l2tp_session_count); + return 0; -exist_glob: +err_tlock_pnlock: spin_unlock_bh(&pn->l2tp_session_hlist_lock); -exist: +err_tlock: write_unlock_bh(&tunnel->hlist_lock); - return -EEXIST; + return err; } +EXPORT_SYMBOL_GPL(l2tp_session_register); /* Lookup a tunnel by id */ -struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id) +struct l2tp_tunnel *l2tp_tunnel_find(const struct net *net, u32 tunnel_id) { struct l2tp_tunnel *tunnel; struct l2tp_net *pn = l2tp_pernet(net); @@ -439,7 +405,7 @@ struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id) } EXPORT_SYMBOL_GPL(l2tp_tunnel_find); -struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth) +struct l2tp_tunnel *l2tp_tunnel_find_nth(const struct net *net, int nth) { struct l2tp_net *pn = l2tp_pernet(net); struct l2tp_tunnel *tunnel; @@ -1307,7 +1273,6 @@ static void l2tp_tunnel_destruct(struct sock *sk) /* Remove hooks into tunnel socket */ sk->sk_destruct = tunnel->old_sk_destruct; sk->sk_user_data = NULL; - tunnel->sock = NULL; /* Remove the tunnel struct from the tunnel list */ pn = l2tp_pernet(tunnel->l2tp_net); @@ -1317,6 +1282,8 @@ static void l2tp_tunnel_destruct(struct sock *sk) atomic_dec(&l2tp_tunnel_count); l2tp_tunnel_closeall(tunnel); + + tunnel->sock = NULL; l2tp_tunnel_dec_refcount(tunnel); /* Call the original destructor */ @@ -1341,6 +1308,7 @@ void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel) tunnel->name); write_lock_bh(&tunnel->hlist_lock); + tunnel->acpt_newsess = false; for (hash = 0; hash < L2TP_HASH_SIZE; hash++) { again: hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) { @@ -1394,17 +1362,6 @@ static void l2tp_udp_encap_destroy(struct sock *sk) } } -/* Really kill the tunnel. - * Come here only when all sessions have been cleared from the tunnel. 
- */ -static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel) -{ - BUG_ON(atomic_read(&tunnel->ref_count) != 0); - BUG_ON(tunnel->sock != NULL); - l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: free...\n", tunnel->name); - kfree_rcu(tunnel, rcu); -} - /* Workqueue tunnel deletion function */ static void l2tp_tunnel_del_work(struct work_struct *work) { @@ -1655,6 +1612,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 tunnel->magic = L2TP_TUNNEL_MAGIC; sprintf(&tunnel->name[0], "tunl %u", tunnel_id); rwlock_init(&tunnel->hlist_lock); + tunnel->acpt_newsess = true; /* The net we belong to */ tunnel->l2tp_net = net; @@ -1840,7 +1798,6 @@ EXPORT_SYMBOL_GPL(l2tp_session_set_header_len); struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg) { struct l2tp_session *session; - int err; session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL); if (session != NULL) { @@ -1896,25 +1853,7 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn l2tp_session_set_header_len(session, tunnel->version); - err = l2tp_session_add_to_tunnel(tunnel, session); - if (err) { - kfree(session); - - return ERR_PTR(err); - } - - /* Bump the reference count. The session context is deleted - * only when this drops to zero. - */ l2tp_session_inc_refcount(session); - l2tp_tunnel_inc_refcount(tunnel); - - /* Ensure tunnel socket isn't deleted */ - sock_hold(tunnel->sock); - - /* Ignore management session in session count value */ - if (session->session_id != 0) - atomic_inc(&l2tp_session_count); return session; } @@ -1953,6 +1892,9 @@ static __net_exit void l2tp_exit_net(struct net *net) l2tp_tunnel_delete(tunnel); } rcu_read_unlock_bh(); + + flush_workqueue(l2tp_wq); + rcu_barrier(); } static struct pernet_operations l2tp_net_ops = { diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index 06323a12d62c..57da0f1d62dd 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -23,16 +23,6 @@ #define L2TP_HASH_BITS_2 8 #define L2TP_HASH_SIZE_2 (1 << L2TP_HASH_BITS_2) -/* Debug message categories for the DEBUG socket option */ -enum { - L2TP_MSG_DEBUG = (1 << 0), /* verbose debug (if - * compiled in) */ - L2TP_MSG_CONTROL = (1 << 1), /* userspace - kernel - * interface */ - L2TP_MSG_SEQ = (1 << 2), /* sequence numbers */ - L2TP_MSG_DATA = (1 << 3), /* data packets */ -}; - struct sk_buff; struct l2tp_stats { @@ -175,6 +165,10 @@ struct l2tp_tunnel { struct rcu_head rcu; rwlock_t hlist_lock; /* protect session_hlist */ + bool acpt_newsess; /* Indicates whether this + * tunnel accepts new sessions. + * Protected by hlist_lock. 
+ */ struct hlist_head session_hlist[L2TP_HASH_SIZE]; /* hashed list of sessions, * hashed by id */ @@ -210,7 +204,9 @@ struct l2tp_tunnel { }; struct l2tp_nl_cmd_ops { - int (*session_create)(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg); + int (*session_create)(struct net *net, struct l2tp_tunnel *tunnel, + u32 session_id, u32 peer_session_id, + struct l2tp_session_cfg *cfg); int (*session_delete)(struct l2tp_session *session); }; @@ -244,17 +240,18 @@ out: return tunnel; } -struct l2tp_session *l2tp_session_get(struct net *net, +struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id); + +struct l2tp_session *l2tp_session_get(const struct net *net, struct l2tp_tunnel *tunnel, u32 session_id, bool do_ref); -struct l2tp_session *l2tp_session_find(struct net *net, - struct l2tp_tunnel *tunnel, - u32 session_id); struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth, bool do_ref); -struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname); -struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id); -struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth); +struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net, + const char *ifname, + bool do_ref); +struct l2tp_tunnel *l2tp_tunnel_find(const struct net *net, u32 tunnel_id); +struct l2tp_tunnel *l2tp_tunnel_find_nth(const struct net *net, int nth); int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, @@ -265,6 +262,9 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg); +int l2tp_session_register(struct l2tp_session *session, + struct l2tp_tunnel *tunnel); + void __l2tp_session_unhash(struct l2tp_session *session); int l2tp_session_delete(struct l2tp_session *session); void l2tp_session_free(struct l2tp_session *session); @@ -283,6 +283,17 @@ int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type); int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg); +static inline void l2tp_tunnel_inc_refcount(struct l2tp_tunnel *tunnel) +{ + atomic_inc(&tunnel->ref_count); +} + +static inline void l2tp_tunnel_dec_refcount(struct l2tp_tunnel *tunnel) +{ + if (atomic_dec_and_test(&tunnel->ref_count)) + kfree_rcu(tunnel, rcu); +} + /* Session reference counts. Incremented when code obtains a reference * to a session. 
*/ diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c index c94160df71af..e0a65ee1e830 100644 --- a/net/l2tp/l2tp_eth.c +++ b/net/l2tp/l2tp_eth.c @@ -30,6 +30,9 @@ #include <net/xfrm.h> #include <net/net_namespace.h> #include <net/netns/generic.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/udp.h> #include "l2tp_core.h" @@ -41,7 +44,6 @@ struct l2tp_eth { struct net_device *dev; struct sock *tunnel_sock; struct l2tp_session *session; - struct list_head list; atomic_long_t tx_bytes; atomic_long_t tx_packets; atomic_long_t tx_dropped; @@ -52,20 +54,9 @@ struct l2tp_eth { /* via l2tp_session_priv() */ struct l2tp_eth_sess { - struct net_device *dev; + struct net_device __rcu *dev; }; -/* per-net private data for this module */ -static unsigned int l2tp_eth_net_id; -struct l2tp_eth_net { - struct list_head l2tp_eth_dev_list; - spinlock_t l2tp_eth_lock; -}; - -static inline struct l2tp_eth_net *l2tp_eth_pernet(struct net *net) -{ - return net_generic(net, l2tp_eth_net_id); -} static struct lock_class_key l2tp_eth_tx_busylock; static int l2tp_eth_dev_init(struct net_device *dev) @@ -82,12 +73,13 @@ static int l2tp_eth_dev_init(struct net_device *dev) static void l2tp_eth_dev_uninit(struct net_device *dev) { struct l2tp_eth *priv = netdev_priv(dev); - struct l2tp_eth_net *pn = l2tp_eth_pernet(dev_net(dev)); + struct l2tp_eth_sess *spriv; - spin_lock(&pn->l2tp_eth_lock); - list_del_init(&priv->list); - spin_unlock(&pn->l2tp_eth_lock); - dev_put(dev); + spriv = l2tp_session_priv(priv->session); + RCU_INIT_POINTER(spriv->dev, NULL); + /* No need for synchronize_net() here. We're called by + * unregister_netdev*(), which does the synchronisation for us. + */ } static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev) @@ -141,8 +133,8 @@ static void l2tp_eth_dev_setup(struct net_device *dev) static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len) { struct l2tp_eth_sess *spriv = l2tp_session_priv(session); - struct net_device *dev = spriv->dev; - struct l2tp_eth *priv = netdev_priv(dev); + struct net_device *dev; + struct l2tp_eth *priv; if (session->debug & L2TP_MSG_DATA) { unsigned int length; @@ -166,16 +158,25 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, skb_dst_drop(skb); nf_reset(skb); + rcu_read_lock(); + dev = rcu_dereference(spriv->dev); + if (!dev) + goto error_rcu; + + priv = netdev_priv(dev); if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) { atomic_long_inc(&priv->rx_packets); atomic_long_add(data_len, &priv->rx_bytes); } else { atomic_long_inc(&priv->rx_errors); } + rcu_read_unlock(); + return; +error_rcu: + rcu_read_unlock(); error: - atomic_long_inc(&priv->rx_errors); kfree_skb(skb); } @@ -186,11 +187,15 @@ static void l2tp_eth_delete(struct l2tp_session *session) if (session) { spriv = l2tp_session_priv(session); - dev = spriv->dev; + + rtnl_lock(); + dev = rtnl_dereference(spriv->dev); if (dev) { - unregister_netdev(dev); - spriv->dev = NULL; + unregister_netdevice(dev); + rtnl_unlock(); module_put(THIS_MODULE); + } else { + rtnl_unlock(); } } } @@ -200,35 +205,89 @@ static void l2tp_eth_show(struct seq_file *m, void *arg) { struct l2tp_session *session = arg; struct l2tp_eth_sess *spriv = l2tp_session_priv(session); - struct net_device *dev = spriv->dev; + struct net_device *dev; + + rcu_read_lock(); + dev = rcu_dereference(spriv->dev); + if (!dev) { + rcu_read_unlock(); + return; + } + dev_hold(dev); + rcu_read_unlock(); seq_printf(m, " interface %s\n", dev->name); 
+ + dev_put(dev); } #endif -static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg) +static void l2tp_eth_adjust_mtu(struct l2tp_tunnel *tunnel, + struct l2tp_session *session, + struct net_device *dev) +{ + unsigned int overhead = 0; + struct dst_entry *dst; + u32 l3_overhead = 0; + + /* if the encap is UDP, account for UDP header size */ + if (tunnel->encap == L2TP_ENCAPTYPE_UDP) { + overhead += sizeof(struct udphdr); + dev->needed_headroom += sizeof(struct udphdr); + } + if (session->mtu != 0) { + dev->mtu = session->mtu; + dev->needed_headroom += session->hdr_len; + return; + } + lock_sock(tunnel->sock); + l3_overhead = kernel_sock_ip_overhead(tunnel->sock); + release_sock(tunnel->sock); + if (l3_overhead == 0) { + /* L3 Overhead couldn't be identified, this could be + * because tunnel->sock was NULL or the socket's + * address family was not IPv4 or IPv6, + * dev mtu stays at 1500. + */ + return; + } + /* Adjust MTU, factor overhead - underlay L3, overlay L2 hdr + * UDP overhead, if any, was already factored in above. + */ + overhead += session->hdr_len + ETH_HLEN + l3_overhead; + + /* If PMTU discovery was enabled, use discovered MTU on L2TP device */ + dst = sk_dst_get(tunnel->sock); + if (dst) { + /* dst_mtu will use PMTU if found, else fallback to intf MTU */ + u32 pmtu = dst_mtu(dst); + + if (pmtu != 0) + dev->mtu = pmtu; + dst_release(dst); + } + session->mtu = dev->mtu - overhead; + dev->mtu = session->mtu; + dev->needed_headroom += session->hdr_len; +} + +static int l2tp_eth_create(struct net *net, struct l2tp_tunnel *tunnel, + u32 session_id, u32 peer_session_id, + struct l2tp_session_cfg *cfg) { struct net_device *dev; char name[IFNAMSIZ]; - struct l2tp_tunnel *tunnel; struct l2tp_session *session; struct l2tp_eth *priv; struct l2tp_eth_sess *spriv; int rc; - struct l2tp_eth_net *pn; - - tunnel = l2tp_tunnel_find(net, tunnel_id); - if (!tunnel) { - rc = -ENODEV; - goto out; - } if (cfg->ifname) { dev = dev_get_by_name(net, cfg->ifname); if (dev) { dev_put(dev); rc = -EEXIST; - goto out; + goto err; } strlcpy(name, cfg->ifname, IFNAMSIZ); } else @@ -238,26 +297,22 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p peer_session_id, cfg); if (IS_ERR(session)) { rc = PTR_ERR(session); - goto out; + goto err; } dev = alloc_netdev(sizeof(*priv), name, NET_NAME_UNKNOWN, l2tp_eth_dev_setup); if (!dev) { rc = -ENOMEM; - goto out_del_session; + goto err_sess; } dev_net_set(dev, net); - if (session->mtu == 0) - session->mtu = dev->mtu - session->hdr_len; - dev->mtu = session->mtu; - dev->needed_headroom += session->hdr_len; + l2tp_eth_adjust_mtu(tunnel, session, dev); priv = netdev_priv(dev); priv->dev = dev; priv->session = session; - INIT_LIST_HEAD(&priv->list); priv->tunnel_sock = tunnel->sock; session->recv_skb = l2tp_eth_dev_recv; @@ -267,48 +322,50 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p #endif spriv = l2tp_session_priv(session); - spriv->dev = dev; - rc = register_netdev(dev); - if (rc < 0) - goto out_del_dev; + l2tp_session_inc_refcount(session); - __module_get(THIS_MODULE); - /* Must be done after register_netdev() */ - strlcpy(session->ifname, dev->name, IFNAMSIZ); + rtnl_lock(); - dev_hold(dev); - pn = l2tp_eth_pernet(dev_net(dev)); - spin_lock(&pn->l2tp_eth_lock); - list_add(&priv->list, &pn->l2tp_eth_dev_list); - spin_unlock(&pn->l2tp_eth_lock); + /* Register both device and session while holding the rtnl lock. 
This + * ensures that l2tp_eth_delete() will see that there's a device to + * unregister, even if it happened to run before we assign spriv->dev. + */ + rc = l2tp_session_register(session, tunnel); + if (rc < 0) { + rtnl_unlock(); + goto err_sess_dev; + } - return 0; + rc = register_netdevice(dev); + if (rc < 0) { + rtnl_unlock(); + l2tp_session_delete(session); + l2tp_session_dec_refcount(session); + free_netdev(dev); -out_del_dev: - free_netdev(dev); - spriv->dev = NULL; -out_del_session: - l2tp_session_delete(session); -out: - return rc; -} + return rc; + } -static __net_init int l2tp_eth_init_net(struct net *net) -{ - struct l2tp_eth_net *pn = net_generic(net, l2tp_eth_net_id); + strlcpy(session->ifname, dev->name, IFNAMSIZ); + rcu_assign_pointer(spriv->dev, dev); + + rtnl_unlock(); - INIT_LIST_HEAD(&pn->l2tp_eth_dev_list); - spin_lock_init(&pn->l2tp_eth_lock); + l2tp_session_dec_refcount(session); + + __module_get(THIS_MODULE); return 0; -} -static struct pernet_operations l2tp_eth_net_ops = { - .init = l2tp_eth_init_net, - .id = &l2tp_eth_net_id, - .size = sizeof(struct l2tp_eth_net), -}; +err_sess_dev: + l2tp_session_dec_refcount(session); + free_netdev(dev); +err_sess: + kfree(session); +err: + return rc; +} static const struct l2tp_nl_cmd_ops l2tp_eth_nl_cmd_ops = { @@ -323,25 +380,18 @@ static int __init l2tp_eth_init(void) err = l2tp_nl_register_ops(L2TP_PWTYPE_ETH, &l2tp_eth_nl_cmd_ops); if (err) - goto out; - - err = register_pernet_device(&l2tp_eth_net_ops); - if (err) - goto out_unreg; + goto err; pr_info("L2TP ethernet pseudowire support (L2TPv3)\n"); return 0; -out_unreg: - l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH); -out: +err: return err; } static void __exit l2tp_eth_exit(void) { - unregister_pernet_device(&l2tp_eth_net_ops); l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH); } diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 58f87bdd12c7..fd7363f8405a 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -122,6 +122,7 @@ static int l2tp_ip_recv(struct sk_buff *skb) unsigned char *ptr, *optr; struct l2tp_session *session; struct l2tp_tunnel *tunnel = NULL; + struct iphdr *iph; int length; if (!pskb_may_pull(skb, 4)) @@ -180,23 +181,16 @@ pass_up: goto discard; tunnel_id = ntohl(*(__be32 *) &skb->data[4]); - tunnel = l2tp_tunnel_find(net, tunnel_id); - if (tunnel) { - sk = tunnel->sock; - sock_hold(sk); - } else { - struct iphdr *iph = (struct iphdr *) skb_network_header(skb); - - read_lock_bh(&l2tp_ip_lock); - sk = __l2tp_ip_bind_lookup(net, iph->daddr, 0, tunnel_id); - if (!sk) { - read_unlock_bh(&l2tp_ip_lock); - goto discard; - } + iph = (struct iphdr *)skb_network_header(skb); - sock_hold(sk); + read_lock_bh(&l2tp_ip_lock); + sk = __l2tp_ip_bind_lookup(net, iph->daddr, 0, tunnel_id); + if (!sk) { read_unlock_bh(&l2tp_ip_lock); + goto discard; } + sock_hold(sk); + read_unlock_bh(&l2tp_ip_lock); if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) goto discard_put; @@ -269,15 +263,9 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) if (addr->l2tp_family != AF_INET) return -EINVAL; - ret = -EADDRINUSE; - read_lock_bh(&l2tp_ip_lock); - if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr, - sk->sk_bound_dev_if, addr->l2tp_conn_id)) - goto out_in_use; - - read_unlock_bh(&l2tp_ip_lock); - lock_sock(sk); + + ret = -EINVAL; if (!sock_flag(sk, SOCK_ZAPPED)) goto out; @@ -294,14 +282,22 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr; if 
(chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST) inet->inet_saddr = 0; /* Use device */ - sk_dst_reset(sk); + write_lock_bh(&l2tp_ip_lock); + if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr, + sk->sk_bound_dev_if, addr->l2tp_conn_id)) { + write_unlock_bh(&l2tp_ip_lock); + ret = -EADDRINUSE; + goto out; + } + + sk_dst_reset(sk); l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id; - write_lock_bh(&l2tp_ip_lock); sk_add_bind_node(sk, &l2tp_ip_bind_table); sk_del_node_init(sk); write_unlock_bh(&l2tp_ip_lock); + ret = 0; sock_reset_flag(sk, SOCK_ZAPPED); @@ -309,11 +305,6 @@ out: release_sock(sk); return ret; - -out_in_use: - read_unlock_bh(&l2tp_ip_lock); - - return ret; } static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) @@ -321,21 +312,24 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr; int rc; - if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */ - return -EINVAL; - if (addr_len < sizeof(*lsa)) return -EINVAL; if (ipv4_is_multicast(lsa->l2tp_addr.s_addr)) return -EINVAL; - rc = ip4_datagram_connect(sk, uaddr, addr_len); - if (rc < 0) - return rc; - lock_sock(sk); + /* Must bind first - autobinding does not work */ + if (sock_flag(sk, SOCK_ZAPPED)) { + rc = -EINVAL; + goto out_sk; + } + + rc = __ip4_datagram_connect(sk, uaddr, addr_len); + if (rc < 0) + goto out_sk; + l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id; write_lock_bh(&l2tp_ip_lock); @@ -343,7 +337,9 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len sk_add_bind_node(sk, &l2tp_ip_bind_table); write_unlock_bh(&l2tp_ip_lock); +out_sk: release_sock(sk); + return rc; } diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 52ff87c0dfcb..25db99dc684f 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -134,6 +134,7 @@ static int l2tp_ip6_recv(struct sk_buff *skb) unsigned char *ptr, *optr; struct l2tp_session *session; struct l2tp_tunnel *tunnel = NULL; + struct ipv6hdr *iph; int length; if (!pskb_may_pull(skb, 4)) @@ -193,24 +194,16 @@ pass_up: goto discard; tunnel_id = ntohl(*(__be32 *) &skb->data[4]); - tunnel = l2tp_tunnel_find(net, tunnel_id); - if (tunnel) { - sk = tunnel->sock; - sock_hold(sk); - } else { - struct ipv6hdr *iph = ipv6_hdr(skb); - - read_lock_bh(&l2tp_ip6_lock); - sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, - 0, tunnel_id); - if (!sk) { - read_unlock_bh(&l2tp_ip6_lock); - goto discard; - } + iph = ipv6_hdr(skb); - sock_hold(sk); + read_lock_bh(&l2tp_ip6_lock); + sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, 0, tunnel_id); + if (!sk) { read_unlock_bh(&l2tp_ip6_lock); + goto discard; } + sock_hold(sk); + read_unlock_bh(&l2tp_ip6_lock); if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) goto discard_put; @@ -278,6 +271,7 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) struct sockaddr_l2tpip6 *addr = (struct sockaddr_l2tpip6 *) uaddr; struct net *net = sock_net(sk); __be32 v4addr = 0; + int bound_dev_if; int addr_type; int err; @@ -296,13 +290,6 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) if (addr_type & IPV6_ADDR_MULTICAST) return -EADDRNOTAVAIL; - err = -EADDRINUSE; - read_lock_bh(&l2tp_ip6_lock); - if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr, - sk->sk_bound_dev_if, addr->l2tp_conn_id)) - goto out_in_use; - read_unlock_bh(&l2tp_ip6_lock); - lock_sock(sk); err = -EINVAL; @@ -312,28 +299,25 @@ static int 
l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) if (sk->sk_state != TCP_CLOSE) goto out_unlock; + bound_dev_if = sk->sk_bound_dev_if; + /* Check if the address belongs to the host. */ rcu_read_lock(); if (addr_type != IPV6_ADDR_ANY) { struct net_device *dev = NULL; if (addr_type & IPV6_ADDR_LINKLOCAL) { - if (addr_len >= sizeof(struct sockaddr_in6) && - addr->l2tp_scope_id) { - /* Override any existing binding, if another - * one is supplied by user. - */ - sk->sk_bound_dev_if = addr->l2tp_scope_id; - } + if (addr->l2tp_scope_id) + bound_dev_if = addr->l2tp_scope_id; /* Binding to link-local address requires an - interface */ - if (!sk->sk_bound_dev_if) + * interface. + */ + if (!bound_dev_if) goto out_unlock_rcu; err = -ENODEV; - dev = dev_get_by_index_rcu(sock_net(sk), - sk->sk_bound_dev_if); + dev = dev_get_by_index_rcu(sock_net(sk), bound_dev_if); if (!dev) goto out_unlock_rcu; } @@ -348,13 +332,22 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) } rcu_read_unlock(); - inet->inet_rcv_saddr = inet->inet_saddr = v4addr; + write_lock_bh(&l2tp_ip6_lock); + if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr, bound_dev_if, + addr->l2tp_conn_id)) { + write_unlock_bh(&l2tp_ip6_lock); + err = -EADDRINUSE; + goto out_unlock; + } + + inet->inet_saddr = v4addr; + inet->inet_rcv_saddr = v4addr; + sk->sk_bound_dev_if = bound_dev_if; sk->sk_v6_rcv_saddr = addr->l2tp_addr; np->saddr = addr->l2tp_addr; l2tp_ip6_sk(sk)->conn_id = addr->l2tp_conn_id; - write_lock_bh(&l2tp_ip6_lock); sk_add_bind_node(sk, &l2tp_ip6_bind_table); sk_del_node_init(sk); write_unlock_bh(&l2tp_ip6_lock); @@ -367,10 +360,7 @@ out_unlock_rcu: rcu_read_unlock(); out_unlock: release_sock(sk); - return err; -out_in_use: - read_unlock_bh(&l2tp_ip6_lock); return err; } @@ -383,9 +373,6 @@ static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_type; int rc; - if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */ - return -EINVAL; - if (addr_len < sizeof(*lsa)) return -EINVAL; @@ -402,10 +389,18 @@ static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr, return -EINVAL; } - rc = ip6_datagram_connect(sk, uaddr, addr_len); - lock_sock(sk); + /* Must bind first - autobinding does not work */ + if (sock_flag(sk, SOCK_ZAPPED)) { + rc = -EINVAL; + goto out_sk; + } + + rc = __ip6_datagram_connect(sk, uaddr, addr_len); + if (rc < 0) + goto out_sk; + l2tp_ip6_sk(sk)->peer_conn_id = lsa->l2tp_conn_id; write_lock_bh(&l2tp_ip6_lock); @@ -413,6 +408,7 @@ static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr, sk_add_bind_node(sk, &l2tp_ip6_bind_table); write_unlock_bh(&l2tp_ip6_lock); +out_sk: release_sock(sk); return rc; @@ -620,7 +616,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); - dst = ip6_dst_lookup_flow(sk, &fl6, final_p); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); if (IS_ERR(dst)) { err = PTR_ERR(dst); goto out; diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index fb3248ff8b48..d3a84a181348 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c @@ -55,7 +55,8 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, /* Accessed under genl lock */ static const struct l2tp_nl_cmd_ops *l2tp_nl_cmd_ops[__L2TP_PWTYPE_MAX]; -static struct l2tp_session *l2tp_nl_session_find(struct genl_info *info) +static struct l2tp_session *l2tp_nl_session_get(struct genl_info *info, + 
bool do_ref) { u32 tunnel_id; u32 session_id; @@ -66,14 +67,17 @@ static struct l2tp_session *l2tp_nl_session_find(struct genl_info *info) if (info->attrs[L2TP_ATTR_IFNAME]) { ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]); - session = l2tp_session_find_by_ifname(net, ifname); + session = l2tp_session_get_by_ifname(net, ifname, do_ref); } else if ((info->attrs[L2TP_ATTR_SESSION_ID]) && (info->attrs[L2TP_ATTR_CONN_ID])) { tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]); - tunnel = l2tp_tunnel_find(net, tunnel_id); - if (tunnel) - session = l2tp_session_find(net, tunnel, session_id); + tunnel = l2tp_tunnel_get(net, tunnel_id); + if (tunnel) { + session = l2tp_session_get(net, tunnel, session_id, + do_ref); + l2tp_tunnel_dec_refcount(tunnel); + } } return session; @@ -276,8 +280,8 @@ static int l2tp_nl_cmd_tunnel_delete(struct sk_buff *skb, struct genl_info *info } tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); - tunnel = l2tp_tunnel_find(net, tunnel_id); - if (tunnel == NULL) { + tunnel = l2tp_tunnel_get(net, tunnel_id); + if (!tunnel) { ret = -ENODEV; goto out; } @@ -287,6 +291,8 @@ static int l2tp_nl_cmd_tunnel_delete(struct sk_buff *skb, struct genl_info *info l2tp_tunnel_delete(tunnel); + l2tp_tunnel_dec_refcount(tunnel); + out: return ret; } @@ -304,8 +310,8 @@ static int l2tp_nl_cmd_tunnel_modify(struct sk_buff *skb, struct genl_info *info } tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); - tunnel = l2tp_tunnel_find(net, tunnel_id); - if (tunnel == NULL) { + tunnel = l2tp_tunnel_get(net, tunnel_id); + if (!tunnel) { ret = -ENODEV; goto out; } @@ -316,6 +322,8 @@ static int l2tp_nl_cmd_tunnel_modify(struct sk_buff *skb, struct genl_info *info ret = l2tp_tunnel_notify(&l2tp_nl_family, info, tunnel, L2TP_CMD_TUNNEL_MODIFY); + l2tp_tunnel_dec_refcount(tunnel); + out: return ret; } @@ -420,34 +428,37 @@ static int l2tp_nl_cmd_tunnel_get(struct sk_buff *skb, struct genl_info *info) if (!info->attrs[L2TP_ATTR_CONN_ID]) { ret = -EINVAL; - goto out; + goto err; } tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); - tunnel = l2tp_tunnel_find(net, tunnel_id); - if (tunnel == NULL) { - ret = -ENODEV; - goto out; - } - msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { ret = -ENOMEM; - goto out; + goto err; + } + + tunnel = l2tp_tunnel_get(net, tunnel_id); + if (!tunnel) { + ret = -ENODEV; + goto err_nlmsg; } ret = l2tp_nl_tunnel_send(msg, info->snd_portid, info->snd_seq, NLM_F_ACK, tunnel, L2TP_CMD_TUNNEL_GET); if (ret < 0) - goto err_out; + goto err_nlmsg_tunnel; + + l2tp_tunnel_dec_refcount(tunnel); return genlmsg_unicast(net, msg, info->snd_portid); -err_out: +err_nlmsg_tunnel: + l2tp_tunnel_dec_refcount(tunnel); +err_nlmsg: nlmsg_free(msg); - -out: +err: return ret; } @@ -491,8 +502,9 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf ret = -EINVAL; goto out; } + tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); - tunnel = l2tp_tunnel_find(net, tunnel_id); + tunnel = l2tp_tunnel_get(net, tunnel_id); if (!tunnel) { ret = -ENODEV; goto out; @@ -500,29 +512,24 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf if (!info->attrs[L2TP_ATTR_SESSION_ID]) { ret = -EINVAL; - goto out; + goto out_tunnel; } session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]); - session = l2tp_session_find(net, tunnel, session_id); - if (session) { - ret = -EEXIST; - goto out; - } if (!info->attrs[L2TP_ATTR_PEER_SESSION_ID]) { ret = 
-EINVAL; - goto out; + goto out_tunnel; } peer_session_id = nla_get_u32(info->attrs[L2TP_ATTR_PEER_SESSION_ID]); if (!info->attrs[L2TP_ATTR_PW_TYPE]) { ret = -EINVAL; - goto out; + goto out_tunnel; } cfg.pw_type = nla_get_u16(info->attrs[L2TP_ATTR_PW_TYPE]); if (cfg.pw_type >= __L2TP_PWTYPE_MAX) { ret = -EINVAL; - goto out; + goto out_tunnel; } if (tunnel->version > 2) { @@ -544,7 +551,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf u16 len = nla_len(info->attrs[L2TP_ATTR_COOKIE]); if (len > 8) { ret = -EINVAL; - goto out; + goto out_tunnel; } cfg.cookie_len = len; memcpy(&cfg.cookie[0], nla_data(info->attrs[L2TP_ATTR_COOKIE]), len); @@ -553,7 +560,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf u16 len = nla_len(info->attrs[L2TP_ATTR_PEER_COOKIE]); if (len > 8) { ret = -EINVAL; - goto out; + goto out_tunnel; } cfg.peer_cookie_len = len; memcpy(&cfg.peer_cookie[0], nla_data(info->attrs[L2TP_ATTR_PEER_COOKIE]), len); @@ -596,7 +603,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf if ((l2tp_nl_cmd_ops[cfg.pw_type] == NULL) || (l2tp_nl_cmd_ops[cfg.pw_type]->session_create == NULL)) { ret = -EPROTONOSUPPORT; - goto out; + goto out_tunnel; } /* Check that pseudowire-specific params are present */ @@ -606,7 +613,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf case L2TP_PWTYPE_ETH_VLAN: if (!info->attrs[L2TP_ATTR_VLAN_ID]) { ret = -EINVAL; - goto out; + goto out_tunnel; } break; case L2TP_PWTYPE_ETH: @@ -620,18 +627,22 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf break; } - ret = -EPROTONOSUPPORT; - if (l2tp_nl_cmd_ops[cfg.pw_type]->session_create) - ret = (*l2tp_nl_cmd_ops[cfg.pw_type]->session_create)(net, tunnel_id, - session_id, peer_session_id, &cfg); + ret = l2tp_nl_cmd_ops[cfg.pw_type]->session_create(net, tunnel, + session_id, + peer_session_id, + &cfg); if (ret >= 0) { - session = l2tp_session_find(net, tunnel, session_id); - if (session) + session = l2tp_session_get(net, tunnel, session_id, false); + if (session) { ret = l2tp_session_notify(&l2tp_nl_family, info, session, L2TP_CMD_SESSION_CREATE); + l2tp_session_dec_refcount(session); + } } +out_tunnel: + l2tp_tunnel_dec_refcount(tunnel); out: return ret; } @@ -642,7 +653,7 @@ static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *inf struct l2tp_session *session; u16 pw_type; - session = l2tp_nl_session_find(info); + session = l2tp_nl_session_get(info, true); if (session == NULL) { ret = -ENODEV; goto out; @@ -656,6 +667,10 @@ static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *inf if (l2tp_nl_cmd_ops[pw_type] && l2tp_nl_cmd_ops[pw_type]->session_delete) ret = (*l2tp_nl_cmd_ops[pw_type]->session_delete)(session); + if (session->deref) + session->deref(session); + l2tp_session_dec_refcount(session); + out: return ret; } @@ -665,7 +680,7 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf int ret = 0; struct l2tp_session *session; - session = l2tp_nl_session_find(info); + session = l2tp_nl_session_get(info, false); if (session == NULL) { ret = -ENODEV; goto out; @@ -700,6 +715,8 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf ret = l2tp_session_notify(&l2tp_nl_family, info, session, L2TP_CMD_SESSION_MODIFY); + l2tp_session_dec_refcount(session); + out: return ret; } @@ -786,29 +803,34 @@ static int l2tp_nl_cmd_session_get(struct sk_buff 
*skb, struct genl_info *info) struct sk_buff *msg; int ret; - session = l2tp_nl_session_find(info); + session = l2tp_nl_session_get(info, false); if (session == NULL) { ret = -ENODEV; - goto out; + goto err; } msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { ret = -ENOMEM; - goto out; + goto err_ref; } ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq, 0, session, L2TP_CMD_SESSION_GET); if (ret < 0) - goto err_out; + goto err_ref_msg; - return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid); + ret = genlmsg_unicast(genl_info_net(info), msg, info->snd_portid); -err_out: - nlmsg_free(msg); + l2tp_session_dec_refcount(session); -out: + return ret; + +err_ref_msg: + nlmsg_free(msg); +err_ref: + l2tp_session_dec_refcount(session); +err: return ret; } diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index bc5d6b8f8ede..8ff5352bb0e3 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -122,8 +122,11 @@ struct pppol2tp_session { int owner; /* pid that opened the socket */ - struct sock *sock; /* Pointer to the session + struct mutex sk_lock; /* Protects .sk */ + struct sock __rcu *sk; /* Pointer to the session * PPPoX socket */ + struct sock *__sk; /* Copy of .sk, for cleanup */ + struct rcu_head rcu; /* For asynchronous release */ struct sock *tunnel_sock; /* Pointer to the tunnel UDP * socket */ int flags; /* accessed by PPPIOCGFLAGS. @@ -138,6 +141,24 @@ static const struct ppp_channel_ops pppol2tp_chan_ops = { static const struct proto_ops pppol2tp_ops; +/* Retrieves the pppol2tp socket associated to a session. + * A reference is held on the returned socket, so this function must be paired + * with sock_put(). + */ +static struct sock *pppol2tp_session_get_sock(struct l2tp_session *session) +{ + struct pppol2tp_session *ps = l2tp_session_priv(session); + struct sock *sk; + + rcu_read_lock(); + sk = rcu_dereference(ps->sk); + if (sk) + sock_hold(sk); + rcu_read_unlock(); + + return sk; +} + /* Helpers to obtain tunnel/session contexts from sockets. */ static inline struct l2tp_session *pppol2tp_sock_to_session(struct sock *sk) @@ -224,13 +245,14 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int /* If the socket is bound, send it in to PPP's input queue. Otherwise * queue it on the session socket. 
*/ - sk = ps->sock; + rcu_read_lock(); + sk = rcu_dereference(ps->sk); if (sk == NULL) goto no_sock; if (sk->sk_state & PPPOX_BOUND) { struct pppox_sock *po; - l2tp_dbg(session, PPPOL2TP_MSG_DATA, + l2tp_dbg(session, L2TP_MSG_DATA, "%s: recv %d byte data frame, passing to ppp\n", session->name, data_len); @@ -253,7 +275,7 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int po = pppox_sk(sk); ppp_input(&po->chan, skb); } else { - l2tp_dbg(session, PPPOL2TP_MSG_DATA, + l2tp_dbg(session, L2TP_MSG_DATA, "%s: recv %d byte data frame, passing to L2TP socket\n", session->name, data_len); @@ -262,30 +284,16 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int kfree_skb(skb); } } + rcu_read_unlock(); return; no_sock: - l2tp_info(session, PPPOL2TP_MSG_DATA, "%s: no socket\n", session->name); + rcu_read_unlock(); + l2tp_info(session, L2TP_MSG_DATA, "%s: no socket\n", session->name); kfree_skb(skb); } -static void pppol2tp_session_sock_hold(struct l2tp_session *session) -{ - struct pppol2tp_session *ps = l2tp_session_priv(session); - - if (ps->sock) - sock_hold(ps->sock); -} - -static void pppol2tp_session_sock_put(struct l2tp_session *session) -{ - struct pppol2tp_session *ps = l2tp_session_priv(session); - - if (ps->sock) - sock_put(ps->sock); -} - /************************************************************************ * Transmit handling ***********************************************************************/ @@ -446,17 +454,16 @@ abort: */ static void pppol2tp_session_close(struct l2tp_session *session) { - struct pppol2tp_session *ps = l2tp_session_priv(session); - struct sock *sk = ps->sock; - struct socket *sock = sk->sk_socket; + struct sock *sk; BUG_ON(session->magic != L2TP_SESSION_MAGIC); - if (sock) - inet_shutdown(sock, SEND_SHUTDOWN); - - /* Don't let the session go away before our socket does */ - l2tp_session_inc_refcount(session); + sk = pppol2tp_session_get_sock(session); + if (sk) { + if (sk->sk_socket) + inet_shutdown(sk->sk_socket, SEND_SHUTDOWN); + sock_put(sk); + } } /* Really kill the session socket. (Called from sock_put() if @@ -476,6 +483,14 @@ static void pppol2tp_session_destruct(struct sock *sk) } } +static void pppol2tp_put_sk(struct rcu_head *head) +{ + struct pppol2tp_session *ps; + + ps = container_of(head, typeof(*ps), rcu); + sock_put(ps->__sk); +} + /* Called when the PPPoX socket (session) is closed. */ static int pppol2tp_release(struct socket *sock) @@ -501,11 +516,23 @@ static int pppol2tp_release(struct socket *sock) session = pppol2tp_sock_to_session(sk); - /* Purge any queued data */ if (session != NULL) { - __l2tp_session_unhash(session); - l2tp_session_queue_purge(session); - sock_put(sk); + struct pppol2tp_session *ps; + + l2tp_session_delete(session); + + ps = l2tp_session_priv(session); + mutex_lock(&ps->sk_lock); + ps->__sk = rcu_dereference_protected(ps->sk, + lockdep_is_held(&ps->sk_lock)); + RCU_INIT_POINTER(ps->sk, NULL); + mutex_unlock(&ps->sk_lock); + call_rcu(&ps->rcu, pppol2tp_put_sk); + + /* Rely on the sock_put() call at the end of the function for + * dropping the reference held by pppol2tp_sock_to_session(). + * The last reference will be dropped by pppol2tp_put_sk(). 
+ */ } release_sock(sk); @@ -572,16 +599,47 @@ out: static void pppol2tp_show(struct seq_file *m, void *arg) { struct l2tp_session *session = arg; - struct pppol2tp_session *ps = l2tp_session_priv(session); + struct sock *sk; + + sk = pppol2tp_session_get_sock(session); + if (sk) { + struct pppox_sock *po = pppox_sk(sk); - if (ps) { - struct pppox_sock *po = pppox_sk(ps->sock); - if (po) - seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan)); + seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan)); + sock_put(sk); } } #endif +static void pppol2tp_session_init(struct l2tp_session *session) +{ + struct pppol2tp_session *ps; + struct dst_entry *dst; + + session->recv_skb = pppol2tp_recv; + session->session_close = pppol2tp_session_close; +#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE) + session->show = pppol2tp_show; +#endif + + ps = l2tp_session_priv(session); + mutex_init(&ps->sk_lock); + ps->tunnel_sock = session->tunnel->sock; + ps->owner = current->pid; + + /* If PMTU discovery was enabled, use the MTU that was discovered */ + dst = sk_dst_get(session->tunnel->sock); + if (dst) { + u32 pmtu = dst_mtu(dst); + + if (pmtu) { + session->mtu = pmtu - PPPOL2TP_HEADER_OVERHEAD; + session->mru = pmtu - PPPOL2TP_HEADER_OVERHEAD; + } + dst_release(dst); + } +} + /* connect() handler. Attach a PPPoX socket to a tunnel UDP socket */ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, @@ -593,7 +651,6 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, struct l2tp_session *session = NULL; struct l2tp_tunnel *tunnel; struct pppol2tp_session *ps; - struct dst_entry *dst; struct l2tp_session_cfg cfg = { 0, }; int error = 0; u32 tunnel_id, peer_tunnel_id; @@ -715,13 +772,17 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, /* Using a pre-existing session is fine as long as it hasn't * been connected yet. */ - if (ps->sock) { + mutex_lock(&ps->sk_lock); + if (rcu_dereference_protected(ps->sk, + lockdep_is_held(&ps->sk_lock))) { + mutex_unlock(&ps->sk_lock); error = -EEXIST; goto end; } /* consistency checks */ if (ps->tunnel_sock != tunnel->sock) { + mutex_unlock(&ps->sk_lock); error = -EEXIST; goto end; } @@ -737,35 +798,19 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, error = PTR_ERR(session); goto end; } - } - - /* Associate session with its PPPoL2TP socket */ - ps = l2tp_session_priv(session); - ps->owner = current->pid; - ps->sock = sk; - ps->tunnel_sock = tunnel->sock; - session->recv_skb = pppol2tp_recv; - session->session_close = pppol2tp_session_close; -#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE) - session->show = pppol2tp_show; -#endif - - /* We need to know each time a skb is dropped from the reorder - * queue. 
- */ - session->ref = pppol2tp_session_sock_hold; - session->deref = pppol2tp_session_sock_put; - - /* If PMTU discovery was enabled, use the MTU that was discovered */ - dst = sk_dst_get(tunnel->sock); - if (dst != NULL) { - u32 pmtu = dst_mtu(dst); + pppol2tp_session_init(session); + ps = l2tp_session_priv(session); + l2tp_session_inc_refcount(session); - if (pmtu != 0) - session->mtu = session->mru = pmtu - - PPPOL2TP_HEADER_OVERHEAD; - dst_release(dst); + mutex_lock(&ps->sk_lock); + error = l2tp_session_register(session, tunnel); + if (error < 0) { + mutex_unlock(&ps->sk_lock); + kfree(session); + goto end; + } + drop_refcnt = true; } /* Special case: if source & dest session_id == 0x0000, this @@ -790,14 +835,25 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, po->chan.mtu = session->mtu; error = ppp_register_net_channel(sock_net(sk), &po->chan); - if (error) + if (error) { + mutex_unlock(&ps->sk_lock); goto end; + } out_no_ppp: /* This is how we get the session context from the socket. */ sk->sk_user_data = session; + rcu_assign_pointer(ps->sk, sk); + mutex_unlock(&ps->sk_lock); + + /* Keep the reference we've grabbed on the session: sk doesn't expect + * the session to disappear. pppol2tp_session_destruct() is responsible + * for dropping it. + */ + drop_refcnt = false; + sk->sk_state = PPPOX_CONNECTED; - l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: created\n", + l2tp_info(session, L2TP_MSG_CONTROL, "%s: created\n", session->name); end: @@ -810,25 +866,19 @@ end: #ifdef CONFIG_L2TP_V3 -/* Called when creating sessions via the netlink interface. - */ -static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg) +/* Called when creating sessions via the netlink interface. */ +static int pppol2tp_session_create(struct net *net, struct l2tp_tunnel *tunnel, + u32 session_id, u32 peer_session_id, + struct l2tp_session_cfg *cfg) { int error; - struct l2tp_tunnel *tunnel; struct l2tp_session *session; - struct pppol2tp_session *ps; - - tunnel = l2tp_tunnel_find(net, tunnel_id); - - /* Error if we can't find the tunnel */ - error = -ENOENT; - if (tunnel == NULL) - goto out; /* Error if tunnel socket is not prepped */ - if (tunnel->sock == NULL) - goto out; + if (!tunnel->sock) { + error = -ENOENT; + goto err; + } /* Default MTU values. 
*/ if (cfg->mtu == 0) @@ -842,18 +892,20 @@ static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_i peer_session_id, cfg); if (IS_ERR(session)) { error = PTR_ERR(session); - goto out; + goto err; } - ps = l2tp_session_priv(session); - ps->tunnel_sock = tunnel->sock; + pppol2tp_session_init(session); - l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: created\n", - session->name); + error = l2tp_session_register(session, tunnel); + if (error < 0) + goto err_sess; - error = 0; + return 0; -out: +err_sess: + kfree(session); +err: return error; } @@ -1010,16 +1062,14 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session, struct l2tp_tunnel *tunnel = session->tunnel; struct pppol2tp_ioc_stats stats; - l2tp_dbg(session, PPPOL2TP_MSG_CONTROL, + l2tp_dbg(session, L2TP_MSG_CONTROL, "%s: pppol2tp_session_ioctl(cmd=%#x, arg=%#lx)\n", session->name, cmd, arg); - sk = ps->sock; + sk = pppol2tp_session_get_sock(session); if (!sk) return -EBADR; - sock_hold(sk); - switch (cmd) { case SIOCGIFMTU: err = -ENXIO; @@ -1033,7 +1083,7 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session, if (copy_to_user((void __user *) arg, &ifr, sizeof(struct ifreq))) break; - l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get mtu=%d\n", + l2tp_info(session, L2TP_MSG_CONTROL, "%s: get mtu=%d\n", session->name, session->mtu); err = 0; break; @@ -1049,7 +1099,7 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session, session->mtu = ifr.ifr_mtu; - l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set mtu=%d\n", + l2tp_info(session, L2TP_MSG_CONTROL, "%s: set mtu=%d\n", session->name, session->mtu); err = 0; break; @@ -1063,7 +1113,7 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session, if (put_user(session->mru, (int __user *) arg)) break; - l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get mru=%d\n", + l2tp_info(session, L2TP_MSG_CONTROL, "%s: get mru=%d\n", session->name, session->mru); err = 0; break; @@ -1078,7 +1128,7 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session, break; session->mru = val; - l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set mru=%d\n", + l2tp_info(session, L2TP_MSG_CONTROL, "%s: set mru=%d\n", session->name, session->mru); err = 0; break; @@ -1088,7 +1138,7 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session, if (put_user(ps->flags, (int __user *) arg)) break; - l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get flags=%d\n", + l2tp_info(session, L2TP_MSG_CONTROL, "%s: get flags=%d\n", session->name, ps->flags); err = 0; break; @@ -1098,7 +1148,7 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session, if (get_user(val, (int __user *) arg)) break; ps->flags = val; - l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set flags=%d\n", + l2tp_info(session, L2TP_MSG_CONTROL, "%s: set flags=%d\n", session->name, ps->flags); err = 0; break; @@ -1115,7 +1165,7 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session, if (copy_to_user((void __user *) arg, &stats, sizeof(stats))) break; - l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get L2TP stats\n", + l2tp_info(session, L2TP_MSG_CONTROL, "%s: get L2TP stats\n", session->name); err = 0; break; @@ -1143,7 +1193,7 @@ static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel, struct sock *sk; struct pppol2tp_ioc_stats stats; - l2tp_dbg(tunnel, PPPOL2TP_MSG_CONTROL, + l2tp_dbg(tunnel, L2TP_MSG_CONTROL, "%s: pppol2tp_tunnel_ioctl(cmd=%#x, arg=%#lx)\n", tunnel->name, cmd, arg); @@ -1186,7 +1236,7 @@ static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel, 
err = -EFAULT; break; } - l2tp_info(tunnel, PPPOL2TP_MSG_CONTROL, "%s: get L2TP stats\n", + l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: get L2TP stats\n", tunnel->name); err = 0; break; @@ -1276,7 +1326,7 @@ static int pppol2tp_tunnel_setsockopt(struct sock *sk, switch (optname) { case PPPOL2TP_SO_DEBUG: tunnel->debug = val; - l2tp_info(tunnel, PPPOL2TP_MSG_CONTROL, "%s: set debug=%x\n", + l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: set debug=%x\n", tunnel->name, tunnel->debug); break; @@ -1295,7 +1345,6 @@ static int pppol2tp_session_setsockopt(struct sock *sk, int optname, int val) { int err = 0; - struct pppol2tp_session *ps = l2tp_session_priv(session); switch (optname) { case PPPOL2TP_SO_RECVSEQ: @@ -1304,7 +1353,7 @@ static int pppol2tp_session_setsockopt(struct sock *sk, break; } session->recv_seq = val ? -1 : 0; - l2tp_info(session, PPPOL2TP_MSG_CONTROL, + l2tp_info(session, L2TP_MSG_CONTROL, "%s: set recv_seq=%d\n", session->name, session->recv_seq); break; @@ -1316,13 +1365,13 @@ static int pppol2tp_session_setsockopt(struct sock *sk, } session->send_seq = val ? -1 : 0; { - struct sock *ssk = ps->sock; - struct pppox_sock *po = pppox_sk(ssk); + struct pppox_sock *po = pppox_sk(sk); + po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ : PPPOL2TP_L2TP_HDR_SIZE_NOSEQ; } l2tp_session_set_header_len(session, session->tunnel->version); - l2tp_info(session, PPPOL2TP_MSG_CONTROL, + l2tp_info(session, L2TP_MSG_CONTROL, "%s: set send_seq=%d\n", session->name, session->send_seq); break; @@ -1333,20 +1382,20 @@ static int pppol2tp_session_setsockopt(struct sock *sk, break; } session->lns_mode = val ? -1 : 0; - l2tp_info(session, PPPOL2TP_MSG_CONTROL, + l2tp_info(session, L2TP_MSG_CONTROL, "%s: set lns_mode=%d\n", session->name, session->lns_mode); break; case PPPOL2TP_SO_DEBUG: session->debug = val; - l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set debug=%x\n", + l2tp_info(session, L2TP_MSG_CONTROL, "%s: set debug=%x\n", session->name, session->debug); break; case PPPOL2TP_SO_REORDERTO: session->reorder_timeout = msecs_to_jiffies(val); - l2tp_info(session, PPPOL2TP_MSG_CONTROL, + l2tp_info(session, L2TP_MSG_CONTROL, "%s: set reorder_timeout=%d\n", session->name, session->reorder_timeout); break; @@ -1427,7 +1476,7 @@ static int pppol2tp_tunnel_getsockopt(struct sock *sk, switch (optname) { case PPPOL2TP_SO_DEBUG: *val = tunnel->debug; - l2tp_info(tunnel, PPPOL2TP_MSG_CONTROL, "%s: get debug=%x\n", + l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: get debug=%x\n", tunnel->name, tunnel->debug); break; @@ -1450,31 +1499,31 @@ static int pppol2tp_session_getsockopt(struct sock *sk, switch (optname) { case PPPOL2TP_SO_RECVSEQ: *val = session->recv_seq; - l2tp_info(session, PPPOL2TP_MSG_CONTROL, + l2tp_info(session, L2TP_MSG_CONTROL, "%s: get recv_seq=%d\n", session->name, *val); break; case PPPOL2TP_SO_SENDSEQ: *val = session->send_seq; - l2tp_info(session, PPPOL2TP_MSG_CONTROL, + l2tp_info(session, L2TP_MSG_CONTROL, "%s: get send_seq=%d\n", session->name, *val); break; case PPPOL2TP_SO_LNSMODE: *val = session->lns_mode; - l2tp_info(session, PPPOL2TP_MSG_CONTROL, + l2tp_info(session, L2TP_MSG_CONTROL, "%s: get lns_mode=%d\n", session->name, *val); break; case PPPOL2TP_SO_DEBUG: *val = session->debug; - l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get debug=%d\n", + l2tp_info(session, L2TP_MSG_CONTROL, "%s: get debug=%d\n", session->name, *val); break; case PPPOL2TP_SO_REORDERTO: *val = (int) jiffies_to_msecs(session->reorder_timeout); - l2tp_info(session, PPPOL2TP_MSG_CONTROL, + l2tp_info(session, 
L2TP_MSG_CONTROL, "%s: get reorder_timeout=%d\n", session->name, *val); break; @@ -1653,8 +1702,9 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v) { struct l2tp_session *session = v; struct l2tp_tunnel *tunnel = session->tunnel; - struct pppol2tp_session *ps = l2tp_session_priv(session); - struct pppox_sock *po = pppox_sk(ps->sock); + unsigned char state; + char user_data_ok; + struct sock *sk; u32 ip = 0; u16 port = 0; @@ -1664,6 +1714,15 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v) port = ntohs(inet->inet_sport); } + sk = pppol2tp_session_get_sock(session); + if (sk) { + state = sk->sk_state; + user_data_ok = (session == sk->sk_user_data) ? 'Y' : 'N'; + } else { + state = 0; + user_data_ok = 'N'; + } + seq_printf(m, " SESSION '%s' %08X/%d %04X/%04X -> " "%04X/%04X %d %c\n", session->name, ip, port, @@ -1671,9 +1730,7 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v) session->session_id, tunnel->peer_tunnel_id, session->peer_session_id, - ps->sock->sk_state, - (session == ps->sock->sk_user_data) ? - 'Y' : 'N'); + state, user_data_ok); seq_printf(m, " %d/%d/%c/%c/%s %08x %u\n", session->mtu, session->mru, session->recv_seq ? 'R' : '-', @@ -1690,8 +1747,12 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v) atomic_long_read(&session->stats.rx_bytes), atomic_long_read(&session->stats.rx_errors)); - if (po) + if (sk) { + struct pppox_sock *po = pppox_sk(sk); + seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan)); + sock_put(sk); + } } static int pppol2tp_seq_show(struct seq_file *m, void *v) diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index e8742ae11e9d..b10fba4a9613 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1707,6 +1707,10 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata, enum ieee80211_sta_rx_bandwidth ieee80211_sta_cap_rx_bw(struct sta_info *sta); enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta); void ieee80211_sta_set_rx_nss(struct sta_info *sta); +enum ieee80211_sta_rx_bandwidth +ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width); +enum nl80211_chan_width ieee80211_sta_cap_chan_bw(struct sta_info *sta); +void ieee80211_sta_set_rx_nss(struct sta_info *sta); u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, u8 opmode, enum nl80211_band band); diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index e6fa55e84a7b..174486a6912d 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -2283,7 +2283,7 @@ void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata, if (!ieee80211_is_data(hdr->frame_control)) return; - if (ieee80211_is_nullfunc(hdr->frame_control) && + if (ieee80211_is_any_nullfunc(hdr->frame_control) && sdata->u.mgd.probe_send_count > 0) { if (ack) ieee80211_sta_reset_conn_monitor(sdata); diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c index b6be51940ead..af489405d5b3 100644 --- a/net/mac80211/offchannel.c +++ b/net/mac80211/offchannel.c @@ -308,11 +308,10 @@ void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc, bool free) /* was never transmitted */ if (roc->frame) { - cfg80211_mgmt_tx_status(&roc->sdata->wdev, - (unsigned long)roc->frame, + cfg80211_mgmt_tx_status(&roc->sdata->wdev, roc->mgmt_tx_cookie, roc->frame->data, roc->frame->len, false, GFP_KERNEL); - kfree_skb(roc->frame); + ieee80211_free_txskb(&roc->sdata->local->hw, roc->frame); } if (!roc->mgmt_tx_cookie) diff --git 
a/net/mac80211/rx.c b/net/mac80211/rx.c index d6896b5b3eef..f44be677e1ad 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -1111,8 +1111,7 @@ ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx) return RX_CONTINUE; if (ieee80211_is_ctl(hdr->frame_control) || - ieee80211_is_nullfunc(hdr->frame_control) || - ieee80211_is_qos_nullfunc(hdr->frame_control) || + ieee80211_is_any_nullfunc(hdr->frame_control) || is_multicast_ether_addr(hdr->addr1)) return RX_CONTINUE; @@ -1488,8 +1487,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) * Drop (qos-)data::nullfunc frames silently, since they * are used only to control station power saving mode. */ - if (ieee80211_is_nullfunc(hdr->frame_control) || - ieee80211_is_qos_nullfunc(hdr->frame_control)) { + if (ieee80211_is_any_nullfunc(hdr->frame_control)) { I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc); /* @@ -1978,7 +1976,7 @@ static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc) /* Drop unencrypted frames if key is set. */ if (unlikely(!ieee80211_has_protected(fc) && - !ieee80211_is_nullfunc(fc) && + !ieee80211_is_any_nullfunc(fc) && ieee80211_is_data(fc) && rx->key)) return -EACCES; diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 2582d7a3437d..eeaa865f3080 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -556,6 +556,7 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU) __cleanup_single_sta(sta); out_err: mutex_unlock(&local->sta_mtx); + kfree(sinfo); rcu_read_lock(); return err; } diff --git a/net/mac80211/status.c b/net/mac80211/status.c index d221300e59e5..618479e0d648 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -474,8 +474,7 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local, rcu_read_lock(); sdata = ieee80211_sdata_from_skb(local, skb); if (sdata) { - if (ieee80211_is_nullfunc(hdr->frame_control) || - ieee80211_is_qos_nullfunc(hdr->frame_control)) + if (ieee80211_is_any_nullfunc(hdr->frame_control)) cfg80211_probe_status(sdata->dev, hdr->addr1, cookie, acked, GFP_ATOMIC); @@ -905,7 +904,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) I802_DEBUG_INC(local->dot11FailedCount); } - if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc) && + if (ieee80211_is_any_nullfunc(fc) && ieee80211_has_pm(fc) && ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS) && !(info->flags & IEEE80211_TX_CTL_INJECTED) && local->ps_sdata && !(local->scanning)) { diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c index 543c64034d26..9eeacf4db494 100644 --- a/net/mac80211/tdls.c +++ b/net/mac80211/tdls.c @@ -4,7 +4,7 @@ * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net> * Copyright 2014, Intel Corporation * Copyright 2014 Intel Mobile Communications GmbH - * Copyright 2015 Intel Deutschland GmbH + * Copyright 2015 - 2016 Intel Deutschland GmbH * * This file is GPLv2 as found in COPYING. 
*/ @@ -15,6 +15,7 @@ #include <linux/rtnetlink.h> #include "ieee80211_i.h" #include "driver-ops.h" +#include "rate.h" /* give usermode some time for retries in setting up the TDLS session */ #define TDLS_PEER_SETUP_TIMEOUT (15 * HZ) @@ -302,7 +303,7 @@ ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata, /* IEEE802.11ac-2013 Table E-4 */ u16 centers_80mhz[] = { 5210, 5290, 5530, 5610, 5690, 5775 }; struct cfg80211_chan_def uc = sta->tdls_chandef; - enum nl80211_chan_width max_width = ieee80211_get_sta_bw(&sta->sta); + enum nl80211_chan_width max_width = ieee80211_sta_cap_chan_bw(sta); int i; /* only support upgrading non-narrow channels up to 80Mhz */ @@ -313,7 +314,7 @@ ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata, if (max_width > NL80211_CHAN_WIDTH_80) max_width = NL80211_CHAN_WIDTH_80; - if (uc.width == max_width) + if (uc.width >= max_width) return; /* * Channel usage constrains in the IEEE802.11ac-2013 specification only @@ -324,6 +325,7 @@ ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata, for (i = 0; i < ARRAY_SIZE(centers_80mhz); i++) if (abs(uc.chan->center_freq - centers_80mhz[i]) <= 30) { uc.center_freq1 = centers_80mhz[i]; + uc.center_freq2 = 0; uc.width = NL80211_CHAN_WIDTH_80; break; } @@ -332,7 +334,7 @@ ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata, return; /* proceed to downgrade the chandef until usable or the same */ - while (uc.width > max_width && + while (uc.width > max_width || !cfg80211_reg_can_beacon_relax(sdata->local->hw.wiphy, &uc, sdata->wdev.iftype)) ieee80211_chandef_downgrade(&uc); @@ -1242,18 +1244,44 @@ int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, return ret; } -static void iee80211_tdls_recalc_chanctx(struct ieee80211_sub_if_data *sdata) +static void iee80211_tdls_recalc_chanctx(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta) { struct ieee80211_local *local = sdata->local; struct ieee80211_chanctx_conf *conf; struct ieee80211_chanctx *ctx; + enum nl80211_chan_width width; + struct ieee80211_supported_band *sband; mutex_lock(&local->chanctx_mtx); conf = rcu_dereference_protected(sdata->vif.chanctx_conf, lockdep_is_held(&local->chanctx_mtx)); if (conf) { + width = conf->def.width; + sband = local->hw.wiphy->bands[conf->def.chan->band]; ctx = container_of(conf, struct ieee80211_chanctx, conf); ieee80211_recalc_chanctx_chantype(local, ctx); + + /* if width changed and a peer is given, update its BW */ + if (width != conf->def.width && sta && + test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW)) { + enum ieee80211_sta_rx_bandwidth bw; + + bw = ieee80211_chan_width_to_rx_bw(conf->def.width); + bw = min(bw, ieee80211_sta_cap_rx_bw(sta)); + if (bw != sta->sta.bandwidth) { + sta->sta.bandwidth = bw; + rate_control_rate_update(local, sband, sta, + IEEE80211_RC_BW_CHANGED); + /* + * if a TDLS peer BW was updated, we need to + * recalc the chandef width again, to get the + * correct chanctx min_def + */ + ieee80211_recalc_chanctx_chantype(local, ctx); + } + } + } mutex_unlock(&local->chanctx_mtx); } @@ -1350,8 +1378,6 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev, break; } - iee80211_tdls_recalc_chanctx(sdata); - mutex_lock(&local->sta_mtx); sta = sta_info_get(sdata, peer); if (!sta) { @@ -1360,6 +1386,7 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev, break; } + iee80211_tdls_recalc_chanctx(sdata, sta); iee80211_tdls_recalc_ht_protection(sdata, sta); set_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH); @@ 
-1390,7 +1417,7 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev, iee80211_tdls_recalc_ht_protection(sdata, NULL); mutex_unlock(&local->sta_mtx); - iee80211_tdls_recalc_chanctx(sdata); + iee80211_tdls_recalc_chanctx(sdata, NULL); break; default: ret = -ENOTSUPP; diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index fc214e7f8880..7f0ac9fd0bd1 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -291,7 +291,7 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx) if (unlikely(test_bit(SCAN_SW_SCANNING, &tx->local->scanning)) && test_bit(SDATA_STATE_OFFCHANNEL, &tx->sdata->state) && !ieee80211_is_probe_req(hdr->frame_control) && - !ieee80211_is_nullfunc(hdr->frame_control)) + !ieee80211_is_any_nullfunc(hdr->frame_control)) /* * When software scanning only nullfunc frames (to notify * the sleep state to the AP) and probe requests (for the diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c index c6243ecdbbca..47f708cd3e5b 100644 --- a/net/mac80211/vht.c +++ b/net/mac80211/vht.c @@ -299,7 +299,30 @@ enum ieee80211_sta_rx_bandwidth ieee80211_sta_cap_rx_bw(struct sta_info *sta) return IEEE80211_STA_RX_BW_80; } -static enum ieee80211_sta_rx_bandwidth +enum nl80211_chan_width ieee80211_sta_cap_chan_bw(struct sta_info *sta) +{ + struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.vht_cap; + u32 cap_width; + + if (!vht_cap->vht_supported) { + if (!sta->sta.ht_cap.ht_supported) + return NL80211_CHAN_WIDTH_20_NOHT; + + return sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ? + NL80211_CHAN_WIDTH_40 : NL80211_CHAN_WIDTH_20; + } + + cap_width = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK; + + if (cap_width == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ) + return NL80211_CHAN_WIDTH_160; + else if (cap_width == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ) + return NL80211_CHAN_WIDTH_80P80; + + return NL80211_CHAN_WIDTH_80; +} + +enum ieee80211_sta_rx_bandwidth ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width) { switch (width) { @@ -327,10 +350,7 @@ enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta) bw = ieee80211_sta_cap_rx_bw(sta); bw = min(bw, sta->cur_max_bandwidth); - - /* do not cap the BW of TDLS WIDER_BW peers by the bss */ - if (!test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW)) - bw = min(bw, ieee80211_chan_width_to_rx_bw(bss_width)); + bw = min(bw, ieee80211_chan_width_to_rx_bw(bss_width)); return bw; } diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index c2ce7dec5198..50d9138b2a1c 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c @@ -470,16 +470,15 @@ static struct net_device *inet6_fib_lookup_dev(struct net *net, struct net_device *dev; struct dst_entry *dst; struct flowi6 fl6; - int err; if (!ipv6_stub) return ERR_PTR(-EAFNOSUPPORT); memset(&fl6, 0, sizeof(fl6)); memcpy(&fl6.daddr, addr, sizeof(struct in6_addr)); - err = ipv6_stub->ipv6_dst_lookup(net, NULL, &dst, &fl6); - if (err) - return ERR_PTR(err); + dst = ipv6_stub->ipv6_dst_lookup_flow(net, NULL, &fl6, NULL); + if (IS_ERR(dst)) + return ERR_CAST(dst); dev = dst->dev; dev_hold(dev); diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c index bbede95c9f68..085711b35a99 100644 --- a/net/netfilter/ipset/ip_set_list_set.c +++ b/net/netfilter/ipset/ip_set_list_set.c @@ -60,7 +60,7 @@ list_set_ktest(struct ip_set *set, const struct sk_buff *skb, /* Don't lookup sub-counters at all */ opt->cmdflags &= ~IPSET_FLAG_MATCH_COUNTERS; if (opt->cmdflags & IPSET_FLAG_SKIP_SUBCOUNTER_UPDATE) - opt->cmdflags &= 
~IPSET_FLAG_SKIP_COUNTER_UPDATE; + opt->cmdflags |= IPSET_FLAG_SKIP_COUNTER_UPDATE; list_for_each_entry_rcu(e, &map->members, list) { if (SET_WITH_TIMEOUT(set) && ip_set_timeout_expired(ext_timeout(e, set))) diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index d637d151d0c7..2ed50ba6c1af 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -898,9 +898,9 @@ __nf_conntrack_alloc(struct net *net, /* Don't set timer yet: wait for confirmation */ setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct); write_pnet(&ct->ct_net, net); - memset(&ct->__nfct_init_offset[0], 0, + memset(&ct->__nfct_init_offset, 0, offsetof(struct nf_conn, proto) - - offsetof(struct nf_conn, __nfct_init_offset[0])); + offsetof(struct nf_conn, __nfct_init_offset)); if (zone && nf_ct_zone_add(ct, GFP_ATOMIC, zone) < 0) goto out_free; diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c index 5588c7ae1ac2..9f9f92d637ad 100644 --- a/net/netfilter/nf_conntrack_pptp.c +++ b/net/netfilter/nf_conntrack_pptp.c @@ -71,24 +71,32 @@ EXPORT_SYMBOL_GPL(nf_nat_pptp_hook_expectfn); #if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) /* PptpControlMessageType names */ -const char *const pptp_msg_name[] = { - "UNKNOWN_MESSAGE", - "START_SESSION_REQUEST", - "START_SESSION_REPLY", - "STOP_SESSION_REQUEST", - "STOP_SESSION_REPLY", - "ECHO_REQUEST", - "ECHO_REPLY", - "OUT_CALL_REQUEST", - "OUT_CALL_REPLY", - "IN_CALL_REQUEST", - "IN_CALL_REPLY", - "IN_CALL_CONNECT", - "CALL_CLEAR_REQUEST", - "CALL_DISCONNECT_NOTIFY", - "WAN_ERROR_NOTIFY", - "SET_LINK_INFO" +static const char *const pptp_msg_name_array[PPTP_MSG_MAX + 1] = { + [0] = "UNKNOWN_MESSAGE", + [PPTP_START_SESSION_REQUEST] = "START_SESSION_REQUEST", + [PPTP_START_SESSION_REPLY] = "START_SESSION_REPLY", + [PPTP_STOP_SESSION_REQUEST] = "STOP_SESSION_REQUEST", + [PPTP_STOP_SESSION_REPLY] = "STOP_SESSION_REPLY", + [PPTP_ECHO_REQUEST] = "ECHO_REQUEST", + [PPTP_ECHO_REPLY] = "ECHO_REPLY", + [PPTP_OUT_CALL_REQUEST] = "OUT_CALL_REQUEST", + [PPTP_OUT_CALL_REPLY] = "OUT_CALL_REPLY", + [PPTP_IN_CALL_REQUEST] = "IN_CALL_REQUEST", + [PPTP_IN_CALL_REPLY] = "IN_CALL_REPLY", + [PPTP_IN_CALL_CONNECT] = "IN_CALL_CONNECT", + [PPTP_CALL_CLEAR_REQUEST] = "CALL_CLEAR_REQUEST", + [PPTP_CALL_DISCONNECT_NOTIFY] = "CALL_DISCONNECT_NOTIFY", + [PPTP_WAN_ERROR_NOTIFY] = "WAN_ERROR_NOTIFY", + [PPTP_SET_LINK_INFO] = "SET_LINK_INFO" }; + +const char *pptp_msg_name(u_int16_t msg) +{ + if (msg > PPTP_MSG_MAX) + return pptp_msg_name_array[0]; + + return pptp_msg_name_array[msg]; +} EXPORT_SYMBOL(pptp_msg_name); #endif @@ -278,7 +286,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff, typeof(nf_nat_pptp_hook_inbound) nf_nat_pptp_inbound; msg = ntohs(ctlh->messageType); - pr_debug("inbound control message %s\n", pptp_msg_name[msg]); + pr_debug("inbound control message %s\n", pptp_msg_name(msg)); switch (msg) { case PPTP_START_SESSION_REPLY: @@ -313,7 +321,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff, pcid = pptpReq->ocack.peersCallID; if (info->pns_call_id != pcid) goto invalid; - pr_debug("%s, CID=%X, PCID=%X\n", pptp_msg_name[msg], + pr_debug("%s, CID=%X, PCID=%X\n", pptp_msg_name(msg), ntohs(cid), ntohs(pcid)); if (pptpReq->ocack.resultCode == PPTP_OUTCALL_CONNECT) { @@ -330,7 +338,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff, goto invalid; cid = pptpReq->icreq.callID; - pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid)); + pr_debug("%s, CID=%X\n", 
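The nf_conntrack_core hunk above zeroes the region of struct nf_conn between the __nfct_init_offset marker and the proto member using offsetof() arithmetic. A small standalone sketch of the same marker-delimited reinit technique (struct and field names below are invented for illustration):

#include <stdio.h>
#include <stddef.h>
#include <string.h>

struct conn {
    unsigned long refcnt;    /* preserved across reuse */
    /* everything from 'state' up to 'proto' is re-initialised */
    int state;
    int timeout;
    int proto;               /* preserved: set once at allocation */
};

static void conn_reinit(struct conn *c)
{
    /* Zero from 'state' up to (but not including) 'proto'. The kernel
     * marks the start of the region with a zero-size array member;
     * here we simply name the first field of the region. */
    memset((char *)c + offsetof(struct conn, state), 0,
           offsetof(struct conn, proto) - offsetof(struct conn, state));
}

int main(void)
{
    struct conn c = { .refcnt = 1, .state = 7, .timeout = 30, .proto = 6 };

    conn_reinit(&c);
    printf("refcnt=%lu state=%d timeout=%d proto=%d\n",
           c.refcnt, c.state, c.timeout, c.proto);
    return 0;
}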
pptp_msg_name(msg), ntohs(cid)); info->cstate = PPTP_CALL_IN_REQ; info->pac_call_id = cid; break; @@ -349,7 +357,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff, if (info->pns_call_id != pcid) goto invalid; - pr_debug("%s, PCID=%X\n", pptp_msg_name[msg], ntohs(pcid)); + pr_debug("%s, PCID=%X\n", pptp_msg_name(msg), ntohs(pcid)); info->cstate = PPTP_CALL_IN_CONF; /* we expect a GRE connection from PAC to PNS */ @@ -359,7 +367,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff, case PPTP_CALL_DISCONNECT_NOTIFY: /* server confirms disconnect */ cid = pptpReq->disc.callID; - pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid)); + pr_debug("%s, CID=%X\n", pptp_msg_name(msg), ntohs(cid)); info->cstate = PPTP_CALL_NONE; /* untrack this call id, unexpect GRE packets */ @@ -386,7 +394,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff, invalid: pr_debug("invalid %s: type=%d cid=%u pcid=%u " "cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n", - msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : pptp_msg_name[0], + pptp_msg_name(msg), msg, ntohs(cid), ntohs(pcid), info->cstate, info->sstate, ntohs(info->pns_call_id), ntohs(info->pac_call_id)); return NF_ACCEPT; @@ -406,7 +414,7 @@ pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff, typeof(nf_nat_pptp_hook_outbound) nf_nat_pptp_outbound; msg = ntohs(ctlh->messageType); - pr_debug("outbound control message %s\n", pptp_msg_name[msg]); + pr_debug("outbound control message %s\n", pptp_msg_name(msg)); switch (msg) { case PPTP_START_SESSION_REQUEST: @@ -428,7 +436,7 @@ pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff, info->cstate = PPTP_CALL_OUT_REQ; /* track PNS call id */ cid = pptpReq->ocreq.callID; - pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid)); + pr_debug("%s, CID=%X\n", pptp_msg_name(msg), ntohs(cid)); info->pns_call_id = cid; break; @@ -442,7 +450,7 @@ pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff, pcid = pptpReq->icack.peersCallID; if (info->pac_call_id != pcid) goto invalid; - pr_debug("%s, CID=%X PCID=%X\n", pptp_msg_name[msg], + pr_debug("%s, CID=%X PCID=%X\n", pptp_msg_name(msg), ntohs(cid), ntohs(pcid)); if (pptpReq->icack.resultCode == PPTP_INCALL_ACCEPT) { @@ -482,7 +490,7 @@ pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff, invalid: pr_debug("invalid %s: type=%d cid=%u pcid=%u " "cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n", - msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : pptp_msg_name[0], + pptp_msg_name(msg), msg, ntohs(cid), ntohs(pcid), info->cstate, info->sstate, ntohs(info->pns_call_id), ntohs(info->pac_call_id)); return NF_ACCEPT; diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index a7967af0da82..6203995003a5 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -2849,12 +2849,14 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk, err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set); if (err < 0) - goto err2; + goto err3; list_add_tail_rcu(&set->list, &table->sets); table->use++; return 0; +err3: + ops->destroy(set); err2: kfree(set); err1: diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c index 99bc2f87a974..204be9374657 100644 --- a/net/netfilter/nf_tables_core.c +++ b/net/netfilter/nf_tables_core.c @@ -130,7 +130,7 @@ next_rule: list_for_each_entry_continue_rcu(rule, &chain->rules, list) { /* This rule is not active, skip. 
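The nf_conntrack_pptp change above turns direct indexing of pptp_msg_name[] into a pptp_msg_name() helper that clamps out-of-range message types to the "UNKNOWN_MESSAGE" slot. The same pattern in self-contained form (the message values here are arbitrary, not the PPTP ones):

#include <stdio.h>
#include <stdint.h>

#define MSG_MAX 3

/* Designated initializers keep the table aligned with the message
 * values even if entries are added out of order; slot 0 doubles as
 * the fallback for anything unknown. */
static const char *const msg_name_array[MSG_MAX + 1] = {
    [0] = "UNKNOWN_MESSAGE",
    [1] = "START_REQUEST",
    [2] = "START_REPLY",
    [3] = "STOP_REQUEST",
};

static const char *msg_name(uint16_t msg)
{
    if (msg > MSG_MAX)           /* out-of-range values map to slot 0 */
        return msg_name_array[0];
    return msg_name_array[msg];
}

int main(void)
{
    printf("%s\n", msg_name(2));      /* START_REPLY */
    printf("%s\n", msg_name(4711));   /* UNKNOWN_MESSAGE */
    return 0;
}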
*/ - if (unlikely(rule->genmask & (1 << gencursor))) + if (unlikely(rule->genmask & gencursor)) continue; rulenum++; diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index 044559c10e98..f01764b94b34 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ -309,14 +309,14 @@ replay: #endif { nfnl_unlock(subsys_id); - netlink_ack(skb, nlh, -EOPNOTSUPP); + netlink_ack(oskb, nlh, -EOPNOTSUPP); return kfree_skb(skb); } } if (!ss->commit || !ss->abort) { nfnl_unlock(subsys_id); - netlink_ack(skb, nlh, -EOPNOTSUPP); + netlink_ack(oskb, nlh, -EOPNOTSUPP); return kfree_skb(skb); } @@ -406,7 +406,7 @@ ack: * pointing to the batch header. */ nfnl_err_reset(&err_list); - netlink_ack(skb, nlmsg_hdr(oskb), -ENOMEM); + netlink_ack(oskb, nlmsg_hdr(oskb), -ENOMEM); status |= NFNL_BATCH_FAILURE; goto done; } diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c index 0a5df0cbaa28..a6c29c5bbfbd 100644 --- a/net/netfilter/nft_dynset.c +++ b/net/netfilter/nft_dynset.c @@ -121,6 +121,9 @@ static int nft_dynset_init(const struct nft_ctx *ctx, return PTR_ERR(set); } + if (set->ops->update == NULL) + return -EOPNOTSUPP; + if (set->flags & NFT_SET_CONSTANT) return -EBUSY; diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c index bfa2b6d5b5cf..25ab12e25e05 100644 --- a/net/netlabel/netlabel_kapi.c +++ b/net/netlabel/netlabel_kapi.c @@ -605,6 +605,12 @@ int netlbl_catmap_getlong(struct netlbl_lsm_catmap *catmap, if ((off & (BITS_PER_LONG - 1)) != 0) return -EINVAL; + /* a null catmap is equivalent to an empty one */ + if (!catmap) { + *offset = (u32)-1; + return 0; + } + if (off < catmap->startbit) { off = catmap->startbit; *offset = off; diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c index 67583ad7f610..6ac1a8d19b88 100644 --- a/net/nfc/nci/core.c +++ b/net/nfc/nci/core.c @@ -610,14 +610,14 @@ int nci_core_conn_create(struct nci_dev *ndev, u8 destination_type, struct nci_core_conn_create_cmd *cmd; struct core_conn_create_data data; + if (!number_destination_params) + return -EINVAL; + data.length = params_len + sizeof(struct nci_core_conn_create_cmd); cmd = kzalloc(data.length, GFP_KERNEL); if (!cmd) return -ENOMEM; - if (!number_destination_params) - return -EINVAL; - cmd->destination_type = destination_type; cmd->number_destination_params = number_destination_params; memcpy(cmd->params, params, params_len); diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index 7cb8184ac165..828fdced4ecd 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -137,11 +137,22 @@ static bool is_flow_key_valid(const struct sw_flow_key *key) return !!key->eth.type; } +static void update_ethertype(struct sk_buff *skb, struct ethhdr *hdr, + __be16 ethertype) +{ + if (skb->ip_summed == CHECKSUM_COMPLETE) { + __be16 diff[] = { ~(hdr->h_proto), ethertype }; + + skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); + } + + hdr->h_proto = ethertype; +} + static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key, const struct ovs_action_push_mpls *mpls) { __be32 *new_mpls_lse; - struct ethhdr *hdr; /* Networking stack do not allow simultaneous Tunnel and MPLS GSO. 
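The openvswitch update_ethertype() helper introduced above (and the matching set_mpls() change that follows) keeps the running skb->csum consistent when a 16-bit field is rewritten, by summing in {~old, new} instead of recomputing the whole packet. A standalone illustration of that incremental one's-complement update on toy packet contents (not OVS code):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* 16-bit one's-complement sum over 'n' words, starting from 'start'. */
static uint16_t ocsum16(const uint16_t *words, size_t n, uint32_t start)
{
    uint32_t sum = start;
    size_t i;

    for (i = 0; i < n; i++)
        sum += words[i];
    while (sum >> 16)                       /* fold end-around carry */
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)sum;
}

int main(void)
{
    uint16_t pkt[] = { 0x1234, 0x5678, 0x9abc };
    uint16_t sum = ocsum16(pkt, 3, 0);      /* running sum over the packet */
    uint16_t old = pkt[1], new_val = 0x0800;
    uint16_t patch[2];

    /* Rewrite one 16-bit field and patch the running sum by folding in
     * {~old, new} rather than re-summing everything. */
    pkt[1] = new_val;
    patch[0] = (uint16_t)~old;
    patch[1] = new_val;

    printf("recomputed  = 0x%04x\n", ocsum16(pkt, 3, 0));
    printf("incremental = 0x%04x\n", ocsum16(patch, 2, sum));
    return 0;
}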
*/ if (skb->encapsulation) @@ -160,9 +171,7 @@ static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key, skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN); - hdr = eth_hdr(skb); - hdr->h_proto = mpls->mpls_ethertype; - + update_ethertype(skb, eth_hdr(skb), mpls->mpls_ethertype); if (!skb->inner_protocol) skb_set_inner_protocol(skb, skb->protocol); skb->protocol = mpls->mpls_ethertype; @@ -193,7 +202,7 @@ static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key, * field correctly in the presence of VLAN tags. */ hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN); - hdr->h_proto = ethertype; + update_ethertype(skb, hdr, ethertype); if (eth_p_mpls(skb->protocol)) skb->protocol = ethertype; @@ -217,8 +226,7 @@ static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key, if (skb->ip_summed == CHECKSUM_COMPLETE) { __be32 diff[] = { ~(*stack), lse }; - skb->csum = ~csum_partial((char *)diff, sizeof(diff), - ~skb->csum); + skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); } *stack = lse; diff --git a/net/rds/tcp.c b/net/rds/tcp.c index c10622a9321c..465756fe7958 100644 --- a/net/rds/tcp.c +++ b/net/rds/tcp.c @@ -110,7 +110,7 @@ void rds_tcp_restore_callbacks(struct socket *sock, /* * This is the only path that sets tc->t_sock. Send and receive trust that - * it is set. The RDS_CONN_CONNECTED bit protects those paths from being + * it is set. The RDS_CONN_UP bit protects those paths from being * called while it isn't set. */ void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn) diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c index e353e3255206..5213cd781c24 100644 --- a/net/rds/tcp_listen.c +++ b/net/rds/tcp_listen.c @@ -115,24 +115,32 @@ int rds_tcp_accept_one(struct socket *sock) * rds_tcp_state_change() will do that cleanup */ rs_tcp = (struct rds_tcp_connection *)conn->c_transport_data; - if (rs_tcp->t_sock && - ntohl(inet->inet_saddr) < ntohl(inet->inet_daddr)) { - struct sock *nsk = new_sock->sk; - - nsk->sk_user_data = NULL; - nsk->sk_prot->disconnect(nsk, 0); - tcp_done(nsk); - new_sock = NULL; - ret = 0; - goto out; - } else if (rs_tcp->t_sock) { - rds_tcp_restore_callbacks(rs_tcp->t_sock, rs_tcp); - conn->c_outgoing = 0; - } - rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING); + if (rs_tcp->t_sock) { + /* Need to resolve a duelling SYN between peers. + * We have an outstanding SYN to this peer, which may + * potentially have transitioned to the RDS_CONN_UP state, + * so we must quiesce any send threads before resetting + * c_transport_data. 
+ */ + wait_event(conn->c_waitq, + !test_bit(RDS_IN_XMIT, &conn->c_flags)); + if (ntohl(inet->inet_saddr) < ntohl(inet->inet_daddr)) { + struct sock *nsk = new_sock->sk; + + nsk->sk_user_data = NULL; + nsk->sk_prot->disconnect(nsk, 0); + tcp_done(nsk); + new_sock = NULL; + ret = 0; + goto out; + } else if (rs_tcp->t_sock) { + rds_tcp_restore_callbacks(rs_tcp->t_sock, rs_tcp); + conn->c_outgoing = 0; + } + } rds_tcp_set_callbacks(new_sock, conn); - rds_connect_complete(conn); + rds_connect_complete(conn); /* marks RDS_CONN_UP */ new_sock = NULL; ret = 0; diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index 3eef0215e53f..cdfb8d33bcba 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c @@ -107,8 +107,9 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp, } if (prog->exts_integrated) { - res->class = prog->res.class; - res->classid = qdisc_skb_cb(skb)->tc_classid; + res->class = 0; + res->classid = TC_H_MAJ(prog->res.classid) | + qdisc_skb_cb(skb)->tc_classid; ret = cls_bpf_exec_opcode(filter_res); if (ret == TC_ACT_UNSPEC) @@ -118,10 +119,12 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp, if (filter_res == 0) continue; - - *res = prog->res; - if (filter_res != -1) + if (filter_res != -1) { + res->class = 0; res->classid = filter_res; + } else { + *res = prog->res; + } ret = tcf_exts_exec(skb, &prog->exts, res); if (ret < 0) diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 5ab8205f988b..a97096a7f801 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -351,12 +351,10 @@ static int fl_init_hashtable(struct cls_fl_head *head, #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member) #define FL_KEY_MEMBER_SIZE(member) (sizeof(((struct fl_flow_key *) 0)->member)) -#define FL_KEY_MEMBER_END_OFFSET(member) \ - (FL_KEY_MEMBER_OFFSET(member) + FL_KEY_MEMBER_SIZE(member)) -#define FL_KEY_IN_RANGE(mask, member) \ - (FL_KEY_MEMBER_OFFSET(member) <= (mask)->range.end && \ - FL_KEY_MEMBER_END_OFFSET(member) >= (mask)->range.start) +#define FL_KEY_IS_MASKED(mask, member) \ + memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member), \ + 0, FL_KEY_MEMBER_SIZE(member)) \ #define FL_KEY_SET(keys, cnt, id, member) \ do { \ @@ -365,9 +363,9 @@ static int fl_init_hashtable(struct cls_fl_head *head, cnt++; \ } while(0); -#define FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt, id, member) \ +#define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member) \ do { \ - if (FL_KEY_IN_RANGE(mask, member)) \ + if (FL_KEY_IS_MASKED(mask, member)) \ FL_KEY_SET(keys, cnt, id, member); \ } while(0); @@ -379,14 +377,14 @@ static void fl_init_dissector(struct cls_fl_head *head, FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control); FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic); - FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt, - FLOW_DISSECTOR_KEY_ETH_ADDRS, eth); - FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt, - FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4); - FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt, - FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6); - FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt, - FLOW_DISSECTOR_KEY_PORTS, tp); + FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, + FLOW_DISSECTOR_KEY_ETH_ADDRS, eth); + FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, + FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4); + FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, + FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6); + FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, + FLOW_DISSECTOR_KEY_PORTS, tp); skb_flow_dissector_init(&head->dissector, keys, cnt); } diff --git a/net/sched/sch_choke.c 
b/net/sched/sch_choke.c index e8dcf94a23c8..2812de74c9a7 100644 --- a/net/sched/sch_choke.c +++ b/net/sched/sch_choke.c @@ -396,7 +396,8 @@ static void choke_reset(struct Qdisc *sch) qdisc_drop(skb, sch); } - memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *)); + if (q->tab) + memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *)); q->head = q->tail = 0; red_restart(&q->vars); } diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c index d6e3ad43cecb..06e42727590a 100644 --- a/net/sched/sch_drr.c +++ b/net/sched/sch_drr.c @@ -375,6 +375,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch) cl->deficit = cl->quantum; } + qdisc_qstats_backlog_inc(sch, skb); sch->q.qlen++; return err; } @@ -405,6 +406,7 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch) bstats_update(&cl->bstats, skb); qdisc_bstats_update(sch, skb); + qdisc_qstats_backlog_dec(sch, skb); sch->q.qlen--; return skb; } @@ -426,6 +428,7 @@ static unsigned int drr_drop(struct Qdisc *sch) if (cl->qdisc->ops->drop) { len = cl->qdisc->ops->drop(cl->qdisc); if (len > 0) { + sch->qstats.backlog -= len; sch->q.qlen--; if (cl->qdisc->q.qlen == 0) list_del(&cl->alist); @@ -461,6 +464,7 @@ static void drr_reset_qdisc(struct Qdisc *sch) qdisc_reset(cl->qdisc); } } + sch->qstats.backlog = 0; sch->q.qlen = 0; } diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c index eb814ffc0902..f4aa2ab4713a 100644 --- a/net/sched/sch_fq.c +++ b/net/sched/sch_fq.c @@ -830,20 +830,24 @@ nla_put_failure: static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d) { struct fq_sched_data *q = qdisc_priv(sch); - u64 now = ktime_get_ns(); - struct tc_fq_qd_stats st = { - .gc_flows = q->stat_gc_flows, - .highprio_packets = q->stat_internal_packets, - .tcp_retrans = q->stat_tcp_retrans, - .throttled = q->stat_throttled, - .flows_plimit = q->stat_flows_plimit, - .pkts_too_long = q->stat_pkts_too_long, - .allocation_errors = q->stat_allocation_errors, - .flows = q->flows, - .inactive_flows = q->inactive_flows, - .throttled_flows = q->throttled_flows, - .time_next_delayed_flow = q->time_next_delayed_flow - now, - }; + struct tc_fq_qd_stats st; + + sch_tree_lock(sch); + + st.gc_flows = q->stat_gc_flows; + st.highprio_packets = q->stat_internal_packets; + st.tcp_retrans = q->stat_tcp_retrans; + st.throttled = q->stat_throttled; + st.flows_plimit = q->stat_flows_plimit; + st.pkts_too_long = q->stat_pkts_too_long; + st.allocation_errors = q->stat_allocation_errors; + st.time_next_delayed_flow = q->time_next_delayed_flow - ktime_get_ns(); + st.flows = q->flows; + st.inactive_flows = q->inactive_flows; + st.throttled_flows = q->throttled_flows; + st.pad = 0; + + sch_tree_unlock(sch); return gnet_stats_copy_app(d, &st, sizeof(st)); } diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index 1800f7977595..70e0dfd21f04 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -588,7 +588,7 @@ static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl, qs.backlog = q->backlogs[idx]; qs.drops = flow->dropped; } - if (gnet_stats_copy_queue(d, NULL, &qs, 0) < 0) + if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0) return -1; if (idx < q->flows_cnt) return gnet_stats_copy_app(d, &xstats, sizeof(xstats)); diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index e614b303ebd1..cad0b26cc1b8 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -49,6 +49,7 @@ static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q) { q->gso_skb = skb; 
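The sch_drr hunk above, and the matching sch_generic/hfsc/prio/qfq/sfb/tbf hunks that follow, all add the same byte-backlog bookkeeping next to the existing qlen updates: increment on enqueue, decrement on dequeue and drop, zero on reset. A toy userspace FIFO showing the invariant being maintained (illustrative names, not the qdisc API):

#include <stdio.h>
#include <stdlib.h>

struct pkt {
    struct pkt *next;
    unsigned int len;
};

struct queue {
    struct pkt *head, *tail;
    unsigned int qlen;       /* packets queued */
    unsigned int backlog;    /* bytes queued   */
};

static void enqueue(struct queue *q, unsigned int len)
{
    struct pkt *p = malloc(sizeof(*p));

    if (!p)
        return;
    p->next = NULL;
    p->len = len;
    if (q->tail)
        q->tail->next = p;
    else
        q->head = p;
    q->tail = p;
    q->qlen++;
    q->backlog += len;       /* analogous to qdisc_qstats_backlog_inc() */
}

static unsigned int dequeue(struct queue *q)
{
    struct pkt *p = q->head;
    unsigned int len;

    if (!p)
        return 0;
    q->head = p->next;
    if (!q->head)
        q->tail = NULL;
    len = p->len;
    free(p);
    q->qlen--;
    q->backlog -= len;       /* analogous to qdisc_qstats_backlog_dec() */
    return len;
}

static void reset(struct queue *q)
{
    while (q->head)
        dequeue(q);
    q->qlen = 0;             /* mirrors the reset paths zeroing both counters */
    q->backlog = 0;
}

int main(void)
{
    struct queue q = { 0 };

    enqueue(&q, 1500);
    enqueue(&q, 60);
    dequeue(&q);
    printf("qlen=%u backlog=%u\n", q.qlen, q.backlog); /* qlen=1 backlog=60 */
    reset(&q);
    printf("qlen=%u backlog=%u\n", q.qlen, q.backlog); /* qlen=0 backlog=0 */
    return 0;
}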
q->qstats.requeues++; + qdisc_qstats_backlog_inc(q, skb); q->q.qlen++; /* it's still part of the queue */ __netif_schedule(q); @@ -92,6 +93,7 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate, txq = skb_get_tx_queue(txq->dev, skb); if (!netif_xmit_frozen_or_stopped(txq)) { q->gso_skb = NULL; + qdisc_qstats_backlog_dec(q, skb); q->q.qlen--; } else skb = NULL; @@ -629,18 +631,19 @@ struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue, struct Qdisc *sch; if (!try_module_get(ops->owner)) - goto errout; + return NULL; sch = qdisc_alloc(dev_queue, ops); - if (IS_ERR(sch)) - goto errout; + if (IS_ERR(sch)) { + module_put(ops->owner); + return NULL; + } sch->parent = parentid; if (!ops->init || ops->init(sch, NULL) == 0) return sch; qdisc_destroy(sch); -errout: return NULL; } EXPORT_SYMBOL(qdisc_create_dflt); diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index d783d7cc3348..1ac9f9f03fe3 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -1529,6 +1529,7 @@ hfsc_reset_qdisc(struct Qdisc *sch) q->eligible = RB_ROOT; INIT_LIST_HEAD(&q->droplist); qdisc_watchdog_cancel(&q->watchdog); + sch->qstats.backlog = 0; sch->q.qlen = 0; } @@ -1559,14 +1560,6 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb) struct hfsc_sched *q = qdisc_priv(sch); unsigned char *b = skb_tail_pointer(skb); struct tc_hfsc_qopt qopt; - struct hfsc_class *cl; - unsigned int i; - - sch->qstats.backlog = 0; - for (i = 0; i < q->clhash.hashsize; i++) { - hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode) - sch->qstats.backlog += cl->qdisc->qstats.backlog; - } qopt.defcls = q->defcls; if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt)) @@ -1604,6 +1597,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch) if (cl->qdisc->q.qlen == 1) set_active(cl, qdisc_pkt_len(skb)); + qdisc_qstats_backlog_inc(sch, skb); sch->q.qlen++; return NET_XMIT_SUCCESS; @@ -1672,6 +1666,7 @@ hfsc_dequeue(struct Qdisc *sch) qdisc_unthrottled(sch); qdisc_bstats_update(sch, skb); + qdisc_qstats_backlog_dec(sch, skb); sch->q.qlen--; return skb; @@ -1695,6 +1690,7 @@ hfsc_drop(struct Qdisc *sch) } cl->qstats.drops++; qdisc_qstats_drop(sch); + sch->qstats.backlog -= len; sch->q.qlen--; return len; } diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index 7530067849e6..81ea3a87bc04 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c @@ -87,6 +87,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch) ret = qdisc_enqueue(skb, qdisc); if (ret == NET_XMIT_SUCCESS) { + qdisc_qstats_backlog_inc(sch, skb); sch->q.qlen++; return NET_XMIT_SUCCESS; } @@ -125,6 +126,7 @@ static struct sk_buff *prio_dequeue(struct Qdisc *sch) struct sk_buff *skb = qdisc_dequeue_peeked(qdisc); if (skb) { qdisc_bstats_update(sch, skb); + qdisc_qstats_backlog_dec(sch, skb); sch->q.qlen--; return skb; } @@ -143,6 +145,7 @@ static unsigned int prio_drop(struct Qdisc *sch) for (prio = q->bands-1; prio >= 0; prio--) { qdisc = q->queues[prio]; if (qdisc->ops->drop && (len = qdisc->ops->drop(qdisc)) != 0) { + sch->qstats.backlog -= len; sch->q.qlen--; return len; } @@ -159,6 +162,7 @@ prio_reset(struct Qdisc *sch) for (prio = 0; prio < q->bands; prio++) qdisc_reset(q->queues[prio]); + sch->qstats.backlog = 0; sch->q.qlen = 0; q->enable_flow = 1; } diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index 8d2d8d953432..8dabd8257b49 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c @@ -1150,6 +1150,7 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch) if (!skb) return NULL; + 
qdisc_qstats_backlog_dec(sch, skb); sch->q.qlen--; qdisc_bstats_update(sch, skb); @@ -1250,6 +1251,7 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) } bstats_update(&cl->bstats, skb); + qdisc_qstats_backlog_inc(sch, skb); ++sch->q.qlen; agg = cl->agg; @@ -1516,6 +1518,7 @@ static void qfq_reset_qdisc(struct Qdisc *sch) qdisc_reset(cl->qdisc); } } + sch->qstats.backlog = 0; sch->q.qlen = 0; } diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c index 10c0b184cdbe..624b5e6fa52f 100644 --- a/net/sched/sch_sfb.c +++ b/net/sched/sch_sfb.c @@ -400,6 +400,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch) enqueue: ret = qdisc_enqueue(skb, child); if (likely(ret == NET_XMIT_SUCCESS)) { + qdisc_qstats_backlog_inc(sch, skb); sch->q.qlen++; increment_qlen(skb, q); } else if (net_xmit_drop_count(ret)) { @@ -428,6 +429,7 @@ static struct sk_buff *sfb_dequeue(struct Qdisc *sch) if (skb) { qdisc_bstats_update(sch, skb); + qdisc_qstats_backlog_dec(sch, skb); sch->q.qlen--; decrement_qlen(skb, q); } @@ -450,6 +452,7 @@ static void sfb_reset(struct Qdisc *sch) struct sfb_sched_data *q = qdisc_priv(sch); qdisc_reset(q->qdisc); + sch->qstats.backlog = 0; sch->q.qlen = 0; q->slot = 0; q->double_buffering = false; diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index e2e4ebc0c4c3..7929c1a11e12 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -635,6 +635,15 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt) if (ctl->divisor && (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536)) return -EINVAL; + + /* slot->allot is a short, make sure quantum is not too big. */ + if (ctl->quantum) { + unsigned int scaled = SFQ_ALLOT_SIZE(ctl->quantum); + + if (scaled <= 0 || scaled > SHRT_MAX) + return -EINVAL; + } + if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max, ctl_v1->Wlog)) return -EINVAL; diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index 05c7a66f64da..87dee4deb66e 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c @@ -197,6 +197,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch) return ret; } + qdisc_qstats_backlog_inc(sch, skb); sch->q.qlen++; return NET_XMIT_SUCCESS; } @@ -207,6 +208,7 @@ static unsigned int tbf_drop(struct Qdisc *sch) unsigned int len = 0; if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) { + sch->qstats.backlog -= len; sch->q.qlen--; qdisc_qstats_drop(sch); } @@ -253,6 +255,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc *sch) q->t_c = now; q->tokens = toks; q->ptokens = ptoks; + qdisc_qstats_backlog_dec(sch, skb); sch->q.qlen--; qdisc_unthrottled(sch); qdisc_bstats_update(sch, skb); @@ -284,6 +287,7 @@ static void tbf_reset(struct Qdisc *sch) struct tbf_sched_data *q = qdisc_priv(sch); qdisc_reset(q->qdisc); + sch->qstats.backlog = 0; sch->q.qlen = 0; q->t_c = ktime_get_ns(); q->tokens = q->buffer; diff --git a/net/sctp/associola.c b/net/sctp/associola.c index f085b01b6603..f24d31f12cb4 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -1290,7 +1290,7 @@ static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr, if (score_curr > score_best) return curr; else if (score_curr == score_best) - return sctp_trans_elect_tie(curr, best); + return sctp_trans_elect_tie(best, curr); else return best; } diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index dd097e065f39..1a6849add0e3 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -268,7 +268,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, 
final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final); rcu_read_unlock(); - dst = ip6_dst_lookup_flow(sk, fl6, final_p); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p); if (!asoc || saddr) { t->dst = dst; memcpy(fl, &_fl, sizeof(_fl)); @@ -326,7 +326,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, fl6->saddr = laddr->a.v6.sin6_addr; fl6->fl6_sport = laddr->a.v6.sin6_port; final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final); - bdst = ip6_dst_lookup_flow(sk, fl6, final_p); + bdst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p); if (IS_ERR(bdst)) continue; diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 509e9426a056..e3e44237de1c 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -857,7 +857,11 @@ struct sctp_chunk *sctp_make_shutdown(const struct sctp_association *asoc, sctp_shutdownhdr_t shut; __u32 ctsn; - ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map); + if (chunk && chunk->asoc) + ctsn = sctp_tsnmap_get_ctsn(&chunk->asoc->peer.tsn_map); + else + ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map); + shut.cum_tsn_ack = htonl(ctsn); retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN, 0, diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index adaaaaad527d..a9a72f7e0cd7 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -1793,12 +1793,13 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(struct net *net, /* Update the content of current association. */ sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc)); sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); - if (sctp_state(asoc, SHUTDOWN_PENDING) && + if ((sctp_state(asoc, SHUTDOWN_PENDING) || + sctp_state(asoc, SHUTDOWN_SENT)) && (sctp_sstate(asoc->base.sk, CLOSING) || sock_flag(asoc->base.sk, SOCK_DEAD))) { - /* if were currently in SHUTDOWN_PENDING, but the socket - * has been closed by user, don't transition to ESTABLISHED. - * Instead trigger SHUTDOWN bundled with COOKIE_ACK. + /* If the socket has been closed by user, don't + * transition to ESTABLISHED. Instead trigger SHUTDOWN + * bundled with COOKIE_ACK. */ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); return sctp_sf_do_9_2_start_shutdown(net, ep, asoc, diff --git a/net/sctp/transport.c b/net/sctp/transport.c index aab9e3f29755..fbbe268e34e7 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -72,7 +72,7 @@ static struct sctp_transport *sctp_transport_init(struct net *net, */ peer->rto = msecs_to_jiffies(net->sctp.rto_initial); - peer->last_time_heard = ktime_get(); + peer->last_time_heard = ktime_set(0, 0); peer->last_time_ecne_reduced = jiffies; peer->param_flags = SPP_HB_DISABLE | diff --git a/net/socket.c b/net/socket.c index c7dae7e2dffc..2a320f49cfe6 100644 --- a/net/socket.c +++ b/net/socket.c @@ -3389,3 +3389,49 @@ int sockev_unregister_notify(struct notifier_block *nb) return blocking_notifier_chain_unregister(&sockev_notifier_list, nb); } EXPORT_SYMBOL(sockev_unregister_notify); + +/* This routine returns the IP overhead imposed by a socket i.e. + * the length of the underlying IP header, depending on whether + * this is an IPv4 or IPv6 socket and the length from IP options turned + * on at the socket. Assumes that the caller has a lock on the socket. 
+ */ +u32 kernel_sock_ip_overhead(struct sock *sk) +{ + struct inet_sock *inet; + struct ip_options_rcu *opt; + u32 overhead = 0; + bool owned_by_user; +#if IS_ENABLED(CONFIG_IPV6) + struct ipv6_pinfo *np; + struct ipv6_txoptions *optv6 = NULL; +#endif /* IS_ENABLED(CONFIG_IPV6) */ + + if (!sk) + return overhead; + + owned_by_user = sock_owned_by_user(sk); + switch (sk->sk_family) { + case AF_INET: + inet = inet_sk(sk); + overhead += sizeof(struct iphdr); + opt = rcu_dereference_protected(inet->inet_opt, + owned_by_user); + if (opt) + overhead += opt->opt.optlen; + return overhead; +#if IS_ENABLED(CONFIG_IPV6) + case AF_INET6: + np = inet6_sk(sk); + overhead += sizeof(struct ipv6hdr); + if (np) + optv6 = rcu_dereference_protected(np->opt, + owned_by_user); + if (optv6) + overhead += (optv6->opt_flen + optv6->opt_nflen); + return overhead; +#endif /* IS_ENABLED(CONFIG_IPV6) */ + default: /* Returns 0 overhead if the socket is not ipv4 or ipv6 */ + return overhead; + } +} +EXPORT_SYMBOL(kernel_sock_ip_overhead); diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c index 2dcb44f69e53..ddd70aec4d88 100644 --- a/net/sunrpc/xprtrdma/backchannel.c +++ b/net/sunrpc/xprtrdma/backchannel.c @@ -42,8 +42,8 @@ static int rpcrdma_bc_setup_rqst(struct rpcrdma_xprt *r_xprt, size_t size; req = rpcrdma_create_req(r_xprt); - if (!req) - return -ENOMEM; + if (IS_ERR(req)) + return PTR_ERR(req); req->rl_backchannel = true; size = RPCRDMA_INLINE_WRITE_THRESHOLD(rqst); @@ -84,25 +84,13 @@ out_fail: static int rpcrdma_bc_setup_reps(struct rpcrdma_xprt *r_xprt, unsigned int count) { - struct rpcrdma_buffer *buffers = &r_xprt->rx_buf; - struct rpcrdma_rep *rep; - unsigned long flags; int rc = 0; while (count--) { - rep = rpcrdma_create_rep(r_xprt); - if (IS_ERR(rep)) { - pr_err("RPC: %s: reply buffer alloc failed\n", - __func__); - rc = PTR_ERR(rep); + rc = rpcrdma_create_rep(r_xprt); + if (rc) break; - } - - spin_lock_irqsave(&buffers->rb_lock, flags); - list_add(&rep->rr_list, &buffers->rb_recv_bufs); - spin_unlock_irqrestore(&buffers->rb_lock, flags); } - return rc; } @@ -341,6 +329,8 @@ void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt, rqst->rq_reply_bytes_recvd = 0; rqst->rq_bytes_sent = 0; rqst->rq_xid = headerp->rm_xid; + + rqst->rq_private_buf.len = size; set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state); buf = &rqst->rq_rcv_buf; diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index 8c545f7d7525..740bddcf3488 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c @@ -576,6 +576,9 @@ xprt_rdma_free(void *buffer) rb = container_of(buffer, struct rpcrdma_regbuf, rg_base[0]); req = rb->rg_owner; + if (req->rl_backchannel) + return; + r_xprt = container_of(req->rl_buffer, struct rpcrdma_xprt, rx_buf); dprintk("RPC: %s: called on 0x%p\n", __func__, req->rl_reply); diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index eadd1655145a..b6879a1986a7 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c @@ -911,10 +911,17 @@ rpcrdma_create_req(struct rpcrdma_xprt *r_xprt) return req; } -struct rpcrdma_rep * -rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt) +/** + * rpcrdma_create_rep - Allocate an rpcrdma_rep object + * @r_xprt: controlling transport + * + * Returns 0 on success or a negative errno on failure. 
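The kernel_sock_ip_overhead() helper added above reports the base IP header size plus any per-socket options. A rough userspace sketch of the same calculation, assuming fixed 20/40-byte base headers and an options length supplied by the caller rather than read from a socket:

#include <stdio.h>
#include <sys/socket.h>   /* AF_INET, AF_INET6 */

static unsigned int ip_overhead(int family, unsigned int opt_len)
{
    switch (family) {
    case AF_INET:
        return 20 + opt_len;     /* IPv4 base header + IP options */
    case AF_INET6:
        return 40 + opt_len;     /* IPv6 base header + extension headers */
    default:
        return 0;                /* unknown family: no overhead reported */
    }
}

int main(void)
{
    printf("v4, no options : %u bytes\n", ip_overhead(AF_INET, 0));
    printf("v6, 8B hop-opts: %u bytes\n", ip_overhead(AF_INET6, 8));
    return 0;
}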
+ */
+int
+rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt)
 {
 	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
+	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
 	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
 	struct rpcrdma_rep *rep;
 	int rc;
@@ -934,12 +941,18 @@ rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt)
 	rep->rr_device = ia->ri_device;
 	rep->rr_rxprt = r_xprt;
 	INIT_WORK(&rep->rr_work, rpcrdma_receive_worker);
-	return rep;
+
+	spin_lock(&buf->rb_lock);
+	list_add(&rep->rr_list, &buf->rb_recv_bufs);
+	spin_unlock(&buf->rb_lock);
+	return 0;
 
 out_free:
 	kfree(rep);
 out:
-	return ERR_PTR(rc);
+	dprintk("RPC: %s: reply buffer %d alloc failed\n",
+		__func__, rc);
+	return rc;
 }
 
 int
@@ -975,17 +988,10 @@ rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
 	}
 
 	INIT_LIST_HEAD(&buf->rb_recv_bufs);
-	for (i = 0; i < buf->rb_max_requests + 2; i++) {
-		struct rpcrdma_rep *rep;
-
-		rep = rpcrdma_create_rep(r_xprt);
-		if (IS_ERR(rep)) {
-			dprintk("RPC: %s: reply buffer %d alloc failed\n",
-				__func__, i);
-			rc = PTR_ERR(rep);
+	for (i = 0; i <= buf->rb_max_requests; i++) {
+		rc = rpcrdma_create_rep(r_xprt);
+		if (rc)
 			goto out;
-		}
-		list_add(&rep->rr_list, &buf->rb_recv_bufs);
 	}
 
 	return 0;
@@ -1337,15 +1343,14 @@ rpcrdma_ep_post_extra_recv(struct rpcrdma_xprt *r_xprt, unsigned int count)
 	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
 	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
 	struct rpcrdma_rep *rep;
-	unsigned long flags;
 	int rc;
 
 	while (count--) {
-		spin_lock_irqsave(&buffers->rb_lock, flags);
+		spin_lock(&buffers->rb_lock);
 		if (list_empty(&buffers->rb_recv_bufs))
 			goto out_reqbuf;
 		rep = rpcrdma_buffer_get_rep_locked(buffers);
-		spin_unlock_irqrestore(&buffers->rb_lock, flags);
+		spin_unlock(&buffers->rb_lock);
 
 		rc = rpcrdma_ep_post_recv(ia, ep, rep);
 		if (rc)
@@ -1355,7 +1360,7 @@ rpcrdma_ep_post_extra_recv(struct rpcrdma_xprt *r_xprt, unsigned int count)
 	return 0;
 
 out_reqbuf:
-	spin_unlock_irqrestore(&buffers->rb_lock, flags);
+	spin_unlock(&buffers->rb_lock);
 	pr_warn("%s: no extra receive buffers\n", __func__);
 	return -ENOMEM;
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index ac7f8d4f632a..36ec6a602665 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -431,8 +431,8 @@ int rpcrdma_ep_post_recv(struct rpcrdma_ia *, struct rpcrdma_ep *,
  * Buffer calls - xprtrdma/verbs.c
  */
 struct rpcrdma_req *rpcrdma_create_req(struct rpcrdma_xprt *);
-struct rpcrdma_rep *rpcrdma_create_rep(struct rpcrdma_xprt *);
 void rpcrdma_destroy_req(struct rpcrdma_ia *, struct rpcrdma_req *);
+int rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt);
 int rpcrdma_buffer_create(struct rpcrdma_xprt *);
 void rpcrdma_buffer_destroy(struct rpcrdma_buffer *);
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index 78d6b78de29d..ac2079439242 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -200,10 +200,13 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
 			.saddr = src->ipv6,
 			.flowi6_proto = IPPROTO_UDP
 		};
-		err = ipv6_stub->ipv6_dst_lookup(net, ub->ubsock->sk, &ndst,
-						 &fl6);
-		if (err)
+		ndst = ipv6_stub->ipv6_dst_lookup_flow(net,
+						       ub->ubsock->sk,
+						       &fl6, NULL);
+		if (IS_ERR(ndst)) {
+			err = PTR_ERR(ndst);
 			goto tx_error;
+		}
 		ttl = ip6_dst_hoplimit(ndst);
 		err = udp_tunnel6_xmit_skb(ndst, ub->ubsock->sk, skb,
 					   ndst->dev, &src->ipv6,
@@ -405,10 +408,13 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
 	tuncfg.encap_destroy = NULL;
 	setup_udp_tunnel_sock(net, ub->ubsock, &tuncfg);
 
-	if (enable_mcast(ub, remote))
+	err = enable_mcast(ub, remote);
+	if (err)
 		goto err;
 	return 0;
 err:
+	if (ub->ubsock)
+		udp_tunnel_sock_release(ub->ubsock);
 	kfree(ub);
 	return err;
 }
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 1c4ad477ce93..1e87639f2c27 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -207,15 +207,15 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
 	family = XFRM_SPI_SKB_CB(skb)->family;
 
 	/* if tunnel is present override skb->mark value with tunnel i_key */
-	if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4) {
-		switch (family) {
-		case AF_INET:
+	switch (family) {
+	case AF_INET:
+		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4)
 			mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4->parms.i_key);
-			break;
-		case AF_INET6:
+		break;
+	case AF_INET6:
+		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6)
 			mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6->parms.i_key);
-			break;
-		}
+		break;
 	}
 
 	/* Allocate new secpath or COW existing one. */
@@ -302,7 +302,7 @@ resume:
 		dev_put(skb->dev);
 
 		spin_lock(&x->lock);
-		if (nexthdr <= 0) {
+		if (nexthdr < 0) {
 			if (nexthdr == -EBADMSG) {
 				xfrm_audit_state_icvfail(x, skb,
 							 x->type->proto);
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 16e828f2540f..6986da7e4ce8 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -240,7 +240,8 @@ void xfrm_local_error(struct sk_buff *skb, int mtu)
 
 	if (skb->protocol == htons(ETH_P_IP))
 		proto = AF_INET;
-	else if (skb->protocol == htons(ETH_P_IPV6))
+	else if (skb->protocol == htons(ETH_P_IPV6) &&
+		 skb->sk->sk_family == AF_INET6)
 		proto = AF_INET6;
 	else
 		return;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 7537d9da9110..2c34636218c0 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -740,12 +740,7 @@ static void xfrm_policy_requeue(struct xfrm_policy *old,
 static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
 				   struct xfrm_policy *pol)
 {
-	u32 mark = policy->mark.v & policy->mark.m;
-
-	if (policy->mark.v == pol->mark.v && policy->mark.m == pol->mark.m)
-		return true;
-
-	if ((mark & pol->mark.m) == pol->mark.v &&
+	if (policy->mark.v == pol->mark.v &&
 	    policy->priority == pol->priority)
 		return true;
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 7d50a371574c..73e772c91bfd 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -332,6 +332,7 @@ static void xfrm_state_gc_destroy(struct xfrm_state *x)
 {
 	tasklet_hrtimer_cancel(&x->mtimer);
 	del_timer_sync(&x->rtimer);
+	kfree(x->aead);
 	kfree(x->aalg);
 	kfree(x->ealg);
 	kfree(x->calg);
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 021d306d7e15..3e4b69fa3af6 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -612,9 +612,12 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
 	if (err)
 		goto error;
 
-	if (attrs[XFRMA_SEC_CTX] &&
-	    security_xfrm_state_alloc(x, nla_data(attrs[XFRMA_SEC_CTX])))
-		goto error;
+	if (attrs[XFRMA_SEC_CTX]) {
+		err = security_xfrm_state_alloc(x,
+						nla_data(attrs[XFRMA_SEC_CTX]));
+		if (err)
+			goto error;
+	}
 
 	if ((err = xfrm_alloc_replay_state_esn(&x->replay_esn, &x->preplay_esn,
 					       attrs[XFRMA_REPLAY_ESN_VAL])))
@@ -931,7 +934,8 @@ static int xfrm_dump_sa_done(struct netlink_callback *cb)
 	struct sock *sk = cb->skb->sk;
 	struct net *net = sock_net(sk);
 
-	xfrm_state_walk_done(walk, net);
+	if (cb->args[0])
+		xfrm_state_walk_done(walk, net);
 	return 0;
 }
 
@@ -956,8 +960,6 @@ static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
 		u8 proto = 0;
 		int err;
 
-		cb->args[0] = 1;
-
 		err = nlmsg_parse(cb->nlh, 0, attrs, XFRMA_MAX,
 				  xfrma_policy);
 		if (err < 0)
@@ -974,6 +976,7 @@ static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
 			proto = nla_get_u8(attrs[XFRMA_PROTO]);
 
 		xfrm_state_walk_init(walk, proto, filter);
+		cb->args[0] = 1;
 	}
 
 	(void) xfrm_state_walk(net, walk, dump_one_state, &info);
diff --git a/scripts/config b/scripts/config
index 026aeb4f32ee..73de17d39698 100755
--- a/scripts/config
+++ b/scripts/config
@@ -6,6 +6,9 @@ myname=${0##*/}
 # If no prefix forced, use the default CONFIG_
 CONFIG_="${CONFIG_-CONFIG_}"
 
+# We use an uncommon delimiter for sed substitutions
+SED_DELIM=$(echo -en "\001")
+
 usage() {
 	cat >&2 <<EOL
 Manipulate options in a .config file from the command line.
@@ -82,7 +85,7 @@ txt_subst() {
 	local infile="$3"
 	local tmpfile="$infile.swp"
 
-	sed -e "s:$before:$after:" "$infile" >"$tmpfile"
+	sed -e "s$SED_DELIM$before$SED_DELIM$after$SED_DELIM" "$infile" >"$tmpfile"
 	# replace original file with the edited one
 	mv "$tmpfile" "$infile"
 }
diff --git a/scripts/decodecode b/scripts/decodecode
index d8824f37acce..aae7a035242b 100755
--- a/scripts/decodecode
+++ b/scripts/decodecode
@@ -98,7 +98,7 @@ faultlinenum=$(( $(wc -l $T.oo | cut -d" " -f1) - \
 faultline=`cat $T.dis | head -1 | cut -d":" -f2-`
 faultline=`echo "$faultline" | sed -e 's/\[/\\\[/g; s/\]/\\\]/g'`
 
-cat $T.oo | sed -e "${faultlinenum}s/^\(.*:\)\(.*\)/\1\*\2\t\t<-- trapping instruction/"
+cat $T.oo | sed -e "${faultlinenum}s/^\([^:]*:\)\(.*\)/\1\*\2\t\t<-- trapping instruction/"
 echo
 cat $T.aa
 cleanup
diff --git a/security/commoncap.c b/security/commoncap.c
index 7fa251aea32f..368631db0933 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -505,6 +505,7 @@ int cap_bprm_set_creds(struct linux_binprm *bprm)
 	int ret;
 	kuid_t root_uid;
 
+	new->cap_ambient = old->cap_ambient;
 	if (WARN_ON(!cap_ambient_invariant_ok(old)))
 		return -EPERM;
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
index 461f8d891579..44352b0b7510 100644
--- a/security/integrity/evm/evm_crypto.c
+++ b/security/integrity/evm/evm_crypto.c
@@ -47,7 +47,7 @@ static struct shash_desc *init_desc(char type)
 		algo = evm_hash;
 	}
 
-	if (*tfm == NULL) {
+	if (IS_ERR_OR_NULL(*tfm)) {
 		mutex_lock(&mutex);
 		if (*tfm)
 			goto out;
diff --git a/sound/core/hwdep.c b/sound/core/hwdep.c
index 36d2416f90d9..96b737adf4d2 100644
--- a/sound/core/hwdep.c
+++ b/sound/core/hwdep.c
@@ -228,14 +228,14 @@ static int snd_hwdep_dsp_load(struct snd_hwdep *hw,
 	if (copy_from_user(&info, _info, sizeof(info)))
 		return -EFAULT;
 	/* check whether the dsp was already loaded */
-	if (hw->dsp_loaded & (1 << info.index))
+	if (hw->dsp_loaded & (1u << info.index))
 		return -EBUSY;
 	if (!access_ok(VERIFY_READ, info.image, info.length))
 		return -EFAULT;
 	err = hw->ops.dsp_load(hw, &info);
 	if (err < 0)
 		return err;
-	hw->dsp_loaded |= (1 << info.index);
+	hw->dsp_loaded |= (1u << info.index);
 	return 0;
 }
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index e445483090b1..ef5743af33de 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -460,6 +460,7 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
 
  no_delta_check:
 	if (runtime->status->hw_ptr == new_hw_ptr) {
+		runtime->hw_ptr_jiffies = curr_jiffies;
 		update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
 		return 0;
 	}
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index 89ae6ea6ea28..11fdc1d9797e 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -108,6 +108,17 @@ static void snd_rawmidi_input_event_work(struct work_struct *work)
 		runtime->event(runtime->substream);
 }
 
+/* buffer refcount management: call with runtime->lock held */
+static inline void snd_rawmidi_buffer_ref(struct snd_rawmidi_runtime *runtime)
+{
+	runtime->buffer_ref++;
+}
+
+static inline void snd_rawmidi_buffer_unref(struct snd_rawmidi_runtime *runtime)
+{
+	runtime->buffer_ref--;
+}
+
 static int snd_rawmidi_runtime_create(struct snd_rawmidi_substream *substream)
 {
 	struct snd_rawmidi_runtime *runtime;
@@ -126,7 +137,7 @@ static int snd_rawmidi_runtime_create(struct snd_rawmidi_substream *substream)
 		runtime->avail = 0;
 	else
 		runtime->avail = runtime->buffer_size;
-	if ((runtime->buffer = kmalloc(runtime->buffer_size, GFP_KERNEL)) == NULL) {
+	if ((runtime->buffer = kzalloc(runtime->buffer_size, GFP_KERNEL)) == NULL) {
 		kfree(runtime);
 		return -ENOMEM;
 	}
@@ -977,10 +988,12 @@ static long snd_rawmidi_kernel_read1(struct snd_rawmidi_substream *substream,
 	long result = 0, count1;
 	struct snd_rawmidi_runtime *runtime = substream->runtime;
 	unsigned long appl_ptr;
+	int err = 0;
 
 	if (userbuf)
 		mutex_lock(&runtime->realloc_mutex);
 	spin_lock_irqsave(&runtime->lock, flags);
+	snd_rawmidi_buffer_ref(runtime);
 	while (count > 0 && runtime->avail) {
 		count1 = runtime->buffer_size - runtime->appl_ptr;
 		if (count1 > count)
@@ -999,19 +1012,21 @@ static long snd_rawmidi_kernel_read1(struct snd_rawmidi_substream *substream,
 		if (userbuf) {
 			spin_unlock_irqrestore(&runtime->lock, flags);
 			if (copy_to_user(userbuf + result,
-					 runtime->buffer + appl_ptr, count1)) {
-				mutex_unlock(&runtime->realloc_mutex);
-				return result > 0 ? result : -EFAULT;
-			}
+					 runtime->buffer + appl_ptr, count1))
+				err = -EFAULT;
 			spin_lock_irqsave(&runtime->lock, flags);
+			if (err)
+				goto out;
 		}
 		result += count1;
 		count -= count1;
 	}
+ out:
+	snd_rawmidi_buffer_unref(runtime);
 	spin_unlock_irqrestore(&runtime->lock, flags);
 	if (userbuf)
 		mutex_unlock(&runtime->realloc_mutex);
-	return result;
+	return result > 0 ? result : err;
 }
 
 long snd_rawmidi_kernel_read(struct snd_rawmidi_substream *substream,
@@ -1286,6 +1301,7 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
 			return -EAGAIN;
 		}
 	}
+	snd_rawmidi_buffer_ref(runtime);
 	while (count > 0 && runtime->avail > 0) {
 		count1 = runtime->buffer_size - runtime->appl_ptr;
 		if (count1 > count)
@@ -1317,6 +1333,7 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
 	}
       __end:
 	count1 = runtime->avail < runtime->buffer_size;
+	snd_rawmidi_buffer_unref(runtime);
 	spin_unlock_irqrestore(&runtime->lock, flags);
 	if (userbuf)
 		mutex_unlock(&runtime->realloc_mutex);
diff --git a/sound/pci/fm801.c b/sound/pci/fm801.c
index d6e89a6d0bb9..71a00b55d5ea 100644
--- a/sound/pci/fm801.c
+++ b/sound/pci/fm801.c
@@ -1088,26 +1088,20 @@ static int wait_for_codec(struct fm801 *chip, unsigned int codec_id,
 	return -EIO;
 }
 
-static int snd_fm801_chip_init(struct fm801 *chip, int resume)
+static int reset_codec(struct fm801 *chip)
 {
-	unsigned short cmdw;
-
-	if (chip->tea575x_tuner & TUNER_ONLY)
-		goto __ac97_ok;
-
 	/* codec cold reset + AC'97 warm reset */
 	fm801_writew(chip, CODEC_CTRL, (1 << 5) | (1 << 6));
 	fm801_readw(chip, CODEC_CTRL); /* flush posting data */
 	udelay(100);
 	fm801_writew(chip, CODEC_CTRL, 0);
 
-	if (wait_for_codec(chip, 0, AC97_RESET, msecs_to_jiffies(750)) < 0)
-		if (!resume) {
-			dev_info(chip->card->dev,
-				 "Primary AC'97 codec not found, assume SF64-PCR (tuner-only)\n");
-			chip->tea575x_tuner = 3 | TUNER_ONLY;
-			goto __ac97_ok;
-		}
+	return wait_for_codec(chip, 0, AC97_RESET, msecs_to_jiffies(750));
+}
+
+static void snd_fm801_chip_multichannel_init(struct fm801 *chip)
+{
+	unsigned short cmdw;
 
 	if (chip->multichannel) {
 		if (chip->secondary_addr) {
@@ -1134,8 +1128,11 @@ static int snd_fm801_chip_init(struct fm801 *chip, int resume)
 			/* cause timeout problems */
 			wait_for_codec(chip, 0, AC97_VENDOR_ID1, msecs_to_jiffies(750));
 		}
+}
 
- __ac97_ok:
+static void snd_fm801_chip_init(struct fm801 *chip)
+{
+	unsigned short cmdw;
 
 	/* init volume */
 	fm801_writew(chip, PCM_VOL, 0x0808);
@@ -1156,11 +1153,8 @@ static int snd_fm801_chip_init(struct fm801 *chip, int resume)
 	/* interrupt clear */
 	fm801_writew(chip, IRQ_STATUS,
 		     FM801_IRQ_PLAYBACK | FM801_IRQ_CAPTURE | FM801_IRQ_MPU);
-
-	return 0;
 }
 
-
 static int snd_fm801_free(struct fm801 *chip)
 {
 	unsigned short cmdw;
@@ -1173,6 +1167,8 @@ static int snd_fm801_free(struct fm801 *chip)
 	cmdw |= 0x00c3;
 	fm801_writew(chip, IRQ_MASK, cmdw);
 
+	devm_free_irq(&chip->pci->dev, chip->irq, chip);
+
       __end_hw:
 #ifdef CONFIG_SND_FM801_TEA575X_BOOL
 	if (!(chip->tea575x_tuner & TUNER_DISABLED)) {
@@ -1215,7 +1211,21 @@ static int snd_fm801_create(struct snd_card *card,
 	if ((err = pci_request_regions(pci, "FM801")) < 0)
 		return err;
 	chip->port = pci_resource_start(pci, 0);
-	if ((tea575x_tuner & TUNER_ONLY) == 0) {
+
+	if (pci->revision >= 0xb1)	/* FM801-AU */
+		chip->multichannel = 1;
+
+	if (!(chip->tea575x_tuner & TUNER_ONLY)) {
+		if (reset_codec(chip) < 0) {
+			dev_info(chip->card->dev,
+				 "Primary AC'97 codec not found, assume SF64-PCR (tuner-only)\n");
+			chip->tea575x_tuner = 3 | TUNER_ONLY;
+		} else {
+			snd_fm801_chip_multichannel_init(chip);
+		}
+	}
+
+	if ((chip->tea575x_tuner & TUNER_ONLY) == 0) {
 		if (devm_request_irq(&pci->dev, pci->irq, snd_fm801_interrupt, IRQF_SHARED,
 				     KBUILD_MODNAME, chip)) {
 			dev_err(card->dev, "unable to grab IRQ %d\n", pci->irq);
@@ -1226,12 +1236,7 @@ static int snd_fm801_create(struct snd_card *card,
 		pci_set_master(pci);
 	}
 
-	if (pci->revision >= 0xb1)	/* FM801-AU */
-		chip->multichannel = 1;
-
-	snd_fm801_chip_init(chip, 0);
-	/* init might set tuner access method */
-	tea575x_tuner = chip->tea575x_tuner;
+	snd_fm801_chip_init(chip);
 
 	if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
 		snd_fm801_free(chip);
@@ -1249,14 +1254,16 @@ static int snd_fm801_create(struct snd_card *card,
 	chip->tea.private_data = chip;
 	chip->tea.ops = &snd_fm801_tea_ops;
 	sprintf(chip->tea.bus_info, "PCI:%s", pci_name(pci));
-	if ((tea575x_tuner & TUNER_TYPE_MASK) > 0 &&
-	    (tea575x_tuner & TUNER_TYPE_MASK) < 4) {
+	if ((chip->tea575x_tuner & TUNER_TYPE_MASK) > 0 &&
+	    (chip->tea575x_tuner & TUNER_TYPE_MASK) < 4) {
 		if (snd_tea575x_init(&chip->tea, THIS_MODULE)) {
 			dev_err(card->dev, "TEA575x radio not found\n");
 			snd_fm801_free(chip);
 			return -ENODEV;
 		}
-	} else if ((tea575x_tuner & TUNER_TYPE_MASK) == 0) {
+	} else if ((chip->tea575x_tuner & TUNER_TYPE_MASK) == 0) {
+		unsigned int tuner_only = chip->tea575x_tuner & TUNER_ONLY;
+
 		/* autodetect tuner connection */
 		for (tea575x_tuner = 1; tea575x_tuner <= 3; tea575x_tuner++) {
 			chip->tea575x_tuner = tea575x_tuner;
@@ -1271,6 +1278,8 @@ static int snd_fm801_create(struct snd_card *card,
 			dev_err(card->dev, "TEA575x radio not found\n");
 			chip->tea575x_tuner = TUNER_DISABLED;
 		}
+
+		chip->tea575x_tuner |= tuner_only;
 	}
 	if (!(chip->tea575x_tuner & TUNER_DISABLED)) {
 		strlcpy(chip->tea.card, get_tea575x_gpio(chip)->name,
@@ -1389,7 +1398,13 @@ static int snd_fm801_resume(struct device *dev)
 	struct fm801 *chip = card->private_data;
 	int i;
 
-	snd_fm801_chip_init(chip, 1);
+	if (chip->tea575x_tuner & TUNER_ONLY) {
+		snd_fm801_chip_init(chip);
+	} else {
+		reset_codec(chip);
+		snd_fm801_chip_multichannel_init(chip);
+		snd_fm801_chip_init(chip);
+	}
 	snd_ac97_resume(chip->ac97);
 	snd_ac97_resume(chip->ac97_sec);
 	for (i = 0; i < ARRAY_SIZE(saved_regs); i++)
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index da9f6749b3be..8dd6cf0b8939 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -1977,9 +1977,10 @@ static const struct hdac_io_ops pci_hda_io_ops = {
  * some HD-audio PCI entries are exposed without any codecs, and such devices
  * should be ignored from the beginning.
  */
-static const struct snd_pci_quirk driver_blacklist[] = {
-	SND_PCI_QUIRK(0x1462, 0xcb59, "MSI TRX40 Creator", 0),
-	SND_PCI_QUIRK(0x1462, 0xcb60, "MSI TRX40", 0),
+static const struct pci_device_id driver_blacklist[] = {
+	{ PCI_DEVICE_SUB(0x1022, 0x1487, 0x1043, 0x874f) }, /* ASUS ROG Zenith II / Strix */
+	{ PCI_DEVICE_SUB(0x1022, 0x1487, 0x1462, 0xcb59) }, /* MSI TRX40 Creator */
+	{ PCI_DEVICE_SUB(0x1022, 0x1487, 0x1462, 0xcb60) }, /* MSI TRX40 */
 	{}
 };
@@ -2002,7 +2003,7 @@ static int azx_probe(struct pci_dev *pci,
 	bool schedule_probe;
 	int err;
 
-	if (snd_pci_quirk_lookup(pci, driver_blacklist)) {
+	if (pci_match_id(driver_blacklist, pci)) {
 		dev_info(&pci->dev, "Skipping the blacklisted device\n");
 		return -ENODEV;
 	}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index b236e94b5808..7c5bbc6b91b9 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4840,6 +4840,7 @@ enum {
 	ALC269_FIXUP_HP_LINE1_MIC1_LED,
 	ALC269_FIXUP_INV_DMIC,
 	ALC269_FIXUP_LENOVO_DOCK,
+	ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST,
 	ALC269_FIXUP_NO_SHUTUP,
 	ALC286_FIXUP_SONY_MIC_NO_PRESENCE,
 	ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT,
@@ -5106,6 +5107,12 @@ static const struct hda_fixup alc269_fixups[] = {
 		.chained = true,
 		.chain_id = ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT
 	},
+	[ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST] = {
+		.type = HDA_FIXUP_FUNC,
+		.v.func = alc269_fixup_limit_int_mic_boost,
+		.chained = true,
+		.chain_id = ALC269_FIXUP_LENOVO_DOCK,
+	},
 	[ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT] = {
 		.type = HDA_FIXUP_FUNC,
 		.v.func = alc269_fixup_pincfg_no_hp_to_lineout,
@@ -5760,7 +5767,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
 	SND_PCI_QUIRK(0x17aa, 0x21ca, "Thinkpad L412", ALC269_FIXUP_SKU_IGNORE),
 	SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE),
-	SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK),
+	SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST),
 	SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK),
 	SND_PCI_QUIRK(0x17aa, 0x21f3, "Thinkpad T430", ALC269_FIXUP_LENOVO_DOCK),
 	SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK),
@@ -5870,6 +5877,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
 	{.id = ALC269_FIXUP_HEADSET_MODE, .name = "headset-mode"},
 	{.id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC, .name = "headset-mode-no-hp-mic"},
 	{.id = ALC269_FIXUP_LENOVO_DOCK, .name = "lenovo-dock"},
+	{.id = ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST, .name = "lenovo-dock-limit-boost"},
 	{.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"},
 	{.id = ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED, .name = "hp-dock-gpio-mic1-led"},
 	{.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"},
@@ -6333,8 +6341,6 @@ static int patch_alc269(struct hda_codec *codec)
 		alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/
 		break;
 	case 0x10ec0225:
-		codec->power_save_node = 1;
-		/* fall through */
 	case 0x10ec0295:
 	case 0x10ec0299:
 		spec->codec_variant = ALC269_TYPE_ALC225;
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index d46e9ad600b4..06736bea422e 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -146,6 +146,7 @@ static bool fsl_ssi_volatile_reg(struct device *dev, unsigned int reg)
 	case CCSR_SSI_SRX1:
 	case CCSR_SSI_SISR:
 	case CCSR_SSI_SFCSR:
+	case CCSR_SSI_SACNT:
 	case CCSR_SSI_SACADD:
 	case CCSR_SSI_SACDAT:
 	case CCSR_SSI_SATAG:
@@ -239,8 +240,9 @@ struct fsl_ssi_private {
 	unsigned int baudclk_streams;
 	unsigned int bitclk_freq;
 
-	/*regcache for SFCSR*/
+	/* regcache for volatile regs */
 	u32 regcache_sfcsr;
+	u32 regcache_sacnt;
 
 	/* DMA params */
 	struct snd_dmaengine_dai_dma_data dma_params_tx;
@@ -1597,6 +1599,8 @@ static int fsl_ssi_suspend(struct device *dev)
 
 	regmap_read(regs, CCSR_SSI_SFCSR,
 		    &ssi_private->regcache_sfcsr);
+	regmap_read(regs, CCSR_SSI_SACNT,
+		    &ssi_private->regcache_sacnt);
 
 	regcache_cache_only(regs, true);
 	regcache_mark_dirty(regs);
@@ -1615,6 +1619,8 @@ static int fsl_ssi_resume(struct device *dev)
 			   CCSR_SSI_SFCSR_RFWM1_MASK | CCSR_SSI_SFCSR_TFWM1_MASK |
 			   CCSR_SSI_SFCSR_RFWM0_MASK | CCSR_SSI_SFCSR_TFWM0_MASK,
 			   ssi_private->regcache_sfcsr);
+	regmap_write(regs, CCSR_SSI_SACNT,
+		     ssi_private->regcache_sacnt);
 
 	return regcache_sync(regs);
 }
diff --git a/sound/soc/intel/atom/sst/sst_stream.c b/sound/soc/intel/atom/sst/sst_stream.c
index e83da42a8c03..c798f8d4ae43 100644
--- a/sound/soc/intel/atom/sst/sst_stream.c
+++ b/sound/soc/intel/atom/sst/sst_stream.c
@@ -108,7 +108,7 @@ int sst_alloc_stream_mrfld(struct intel_sst_drv *sst_drv_ctx, void *params)
 		str_id, pipe_id);
 	ret = sst_prepare_and_post_msg(sst_drv_ctx, task_id, IPC_CMD,
 			IPC_IA_ALLOC_STREAM_MRFLD, pipe_id, sizeof(alloc_param),
-			&alloc_param, data, true, true, false, true);
+			&alloc_param, &data, true, true, false, true);
 
 	if (ret < 0) {
 		dev_err(sst_drv_ctx->dev, "FW alloc failed ret %d\n", ret);
diff --git a/sound/soc/tegra/tegra_alc5632.c b/sound/soc/tegra/tegra_alc5632.c
index ba272e21a6fa..deb597f7c302 100644
--- a/sound/soc/tegra/tegra_alc5632.c
+++ b/sound/soc/tegra/tegra_alc5632.c
@@ -101,12 +101,16 @@ static const struct snd_kcontrol_new tegra_alc5632_controls[] = {
 
 static int tegra_alc5632_asoc_init(struct snd_soc_pcm_runtime *rtd)
 {
+	int ret;
 	struct tegra_alc5632 *machine = snd_soc_card_get_drvdata(rtd->card);
 
-	snd_soc_card_jack_new(rtd->card, "Headset Jack", SND_JACK_HEADSET,
-			      &tegra_alc5632_hs_jack,
-			      tegra_alc5632_hs_jack_pins,
-			      ARRAY_SIZE(tegra_alc5632_hs_jack_pins));
+	ret = snd_soc_card_jack_new(rtd->card, "Headset Jack",
+				    SND_JACK_HEADSET,
+				    &tegra_alc5632_hs_jack,
+				    tegra_alc5632_hs_jack_pins,
+				    ARRAY_SIZE(tegra_alc5632_hs_jack_pins));
+	if (ret)
+		return ret;
 
 	if (gpio_is_valid(machine->gpio_hp_det)) {
 		tegra_alc5632_hp_jack_gpio.gpio = machine->gpio_hp_det;
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 9ba072aeb76b..281a748d4200 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -1049,7 +1049,14 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
 			cval->res = 384;
 		}
 		break;
-
+	case USB_ID(0x0495, 0x3042): /* ESS Technology Asus USB DAC */
+		if ((strstr(kctl->id.name, "Playback Volume") != NULL) ||
+			strstr(kctl->id.name, "Capture Volume") != NULL) {
+			cval->min >>= 8;
+			cval->max = 0;
+			cval->res = 1;
+		}
+		break;
 	case USB_ID(0x1130, 0x1620): /* Logitech Speakers S150 */
 	/* This audio device has 2 channels and it explicitly requires the
 	 * host to send SET_CUR command on the volume control of both the
@@ -1059,7 +1066,6 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
 		snd_usb_mixer_set_ctl_value(cval, UAC_SET_CUR,
 					    (cval->control << 8) | 2, 7936);
 		break;
-
 	}
 }
diff --git a/tools/perf/util/perf_regs.c b/tools/perf/util/perf_regs.c
index 6b8eb13e14e4..c4023f22f287 100644
--- a/tools/perf/util/perf_regs.c
+++ b/tools/perf/util/perf_regs.c
@@ -12,18 +12,18 @@ int perf_reg_value(u64 *valp, struct regs_dump *regs, int id)
 	int i, idx = 0;
 	u64 mask = regs->mask;
 
-	if (regs->cache_mask & (1 << id))
+	if (regs->cache_mask & (1ULL << id))
 		goto out;
 
-	if (!(mask & (1 << id)))
+	if (!(mask & (1ULL << id)))
 		return -EINVAL;
 
 	for (i = 0; i < id; i++) {
-		if (mask & (1 << i))
+		if (mask & (1ULL << i))
 			idx++;
 	}
 
-	regs->cache_mask |= (1 << id);
+	regs->cache_mask |= (1ULL << id);
 	regs->cache_regs[id] = regs->regs[idx];
 
 out:
diff --git a/tools/testing/selftests/ipc/msgque.c b/tools/testing/selftests/ipc/msgque.c
index 1b2ce334bb3f..47c074d73e61 100644
--- a/tools/testing/selftests/ipc/msgque.c
+++ b/tools/testing/selftests/ipc/msgque.c
@@ -135,7 +135,7 @@ int dump_queue(struct msgque_data *msgque)
 	for (kern_id = 0; kern_id < 256; kern_id++) {
 		ret = msgctl(kern_id, MSG_STAT, &ds);
 		if (ret < 0) {
-			if (errno == EINVAL)
+			if (errno == EINVAL)
 				continue;
 			printf("Failed to get stats for IPC queue with id %d\n",
 					kern_id);
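
A minimal user-space sketch of the errno convention the last hunk above relies on (illustrative only, not part of the tree; it assumes a hosted Linux/glibc build and passes a deliberately invalid queue id just to provoke EINVAL):

/*
 * Illustrative sketch only -- not from the kernel tree above.
 * System V IPC calls such as msgctl() return -1 on failure and set
 * errno to a positive code (EINVAL), never a negated one (-EINVAL),
 * which is why the selftest's "errno == -EINVAL" check could never match.
 */
#include <errno.h>
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/msg.h>

int main(void)
{
	struct msqid_ds ds;

	if (msgctl(-1, IPC_STAT, &ds) < 0) {
		if (errno == EINVAL)		/* correct: errno is positive */
			printf("invalid queue id -> EINVAL, as expected\n");
		if (errno == -EINVAL)		/* never true */
			printf("unreachable\n");
	}
	return 0;
}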